author    Wookey <wookey@wookware.org>    2017-03-30 13:53:36 +0100
committer Wookey <wookey@wookware.org>    2017-03-30 13:53:36 +0100
commit    276eafa36c0098280da50ce2a2b54b5ad764ca8b (patch)
tree      ae8a4f592e409ec656fc288d3a28bc6b16d95ada
Initial repo of dkms mali driver
-rw-r--r--  Makefile  28
-rwxr-xr-x  common.postinst  292
-rwxr-xr-x  debian/README.Debian  5
-rwxr-xr-x  debian/changelog  6
-rwxr-xr-x  debian/compat  1
-rwxr-xr-x  debian/control  11
-rwxr-xr-x  debian/copyright  2
-rwxr-xr-x  debian/dirs  1
-rwxr-xr-x  debian/postinst  49
-rwxr-xr-x  debian/prerm  28
-rwxr-xr-x  debian/rules  54
-rwxr-xr-x  midgard-0.r2p0/Kbuild  233
-rwxr-xr-x  midgard-0.r2p0/Kconfig  225
-rwxr-xr-x  midgard-0.r2p0/Makefile  42
-rwxr-xr-x  midgard-0.r2p0/Makefile.kbase  17
-rwxr-xr-x  midgard-0.r2p0/backend/gpu/Kbuild  62
-rwxr-xr-x  midgard-0.r2p0/backend/gpu/mali_kbase_backend_config.h  29
-rwxr-xr-x  midgard-0.r2p0/backend/gpu/mali_kbase_cache_policy_backend.c  29
-rwxr-xr-x  midgard-0.r2p0/backend/gpu/mali_kbase_cache_policy_backend.h  34
-rwxr-xr-x  midgard-0.r2p0/backend/gpu/mali_kbase_debug_job_fault_backend.c  157
-rwxr-xr-x  midgard-0.r2p0/backend/gpu/mali_kbase_devfreq.c  299
-rwxr-xr-x  midgard-0.r2p0/backend/gpu/mali_kbase_devfreq.h  24
-rwxr-xr-x  midgard-0.r2p0/backend/gpu/mali_kbase_device_hw.c  255
-rwxr-xr-x  midgard-0.r2p0/backend/gpu/mali_kbase_device_internal.h  67
-rwxr-xr-x  midgard-0.r2p0/backend/gpu/mali_kbase_gpu.c  123
-rwxr-xr-x  midgard-0.r2p0/backend/gpu/mali_kbase_gpuprops_backend.c  105
-rwxr-xr-x  midgard-0.r2p0/backend/gpu/mali_kbase_instr_backend.c  492
-rwxr-xr-x  midgard-0.r2p0/backend/gpu/mali_kbase_instr_defs.h  58
-rwxr-xr-x  midgard-0.r2p0/backend/gpu/mali_kbase_instr_internal.h  45
-rwxr-xr-x  midgard-0.r2p0/backend/gpu/mali_kbase_irq_internal.h  39
-rwxr-xr-x  midgard-0.r2p0/backend/gpu/mali_kbase_irq_linux.c  469
-rwxr-xr-x  midgard-0.r2p0/backend/gpu/mali_kbase_jm_as.c  378
-rwxr-xr-x  midgard-0.r2p0/backend/gpu/mali_kbase_jm_defs.h  123
-rwxr-xr-x  midgard-0.r2p0/backend/gpu/mali_kbase_jm_hw.c  1560
-rwxr-xr-x  midgard-0.r2p0/backend/gpu/mali_kbase_jm_internal.h  155
-rwxr-xr-x  midgard-0.r2p0/backend/gpu/mali_kbase_jm_rb.c  1818
-rwxr-xr-x  midgard-0.r2p0/backend/gpu/mali_kbase_jm_rb.h  76
-rwxr-xr-x  midgard-0.r2p0/backend/gpu/mali_kbase_js_affinity.c  303
-rwxr-xr-x  midgard-0.r2p0/backend/gpu/mali_kbase_js_affinity.h  129
-rwxr-xr-x  midgard-0.r2p0/backend/gpu/mali_kbase_js_backend.c  357
-rwxr-xr-x  midgard-0.r2p0/backend/gpu/mali_kbase_js_internal.h  69
-rwxr-xr-x  midgard-0.r2p0/backend/gpu/mali_kbase_mmu_hw_direct.c  403
-rwxr-xr-x  midgard-0.r2p0/backend/gpu/mali_kbase_mmu_hw_direct.h  42
-rwxr-xr-x  midgard-0.r2p0/backend/gpu/mali_kbase_pm_always_on.c  63
-rwxr-xr-x  midgard-0.r2p0/backend/gpu/mali_kbase_pm_always_on.h  77
-rwxr-xr-x  midgard-0.r2p0/backend/gpu/mali_kbase_pm_backend.c  460
-rwxr-xr-x  midgard-0.r2p0/backend/gpu/mali_kbase_pm_ca.c  179
-rwxr-xr-x  midgard-0.r2p0/backend/gpu/mali_kbase_pm_ca.h  92
-rwxr-xr-x  midgard-0.r2p0/backend/gpu/mali_kbase_pm_ca_fixed.c  65
-rwxr-xr-x  midgard-0.r2p0/backend/gpu/mali_kbase_pm_ca_fixed.h  40
-rwxr-xr-x  midgard-0.r2p0/backend/gpu/mali_kbase_pm_coarse_demand.c  70
-rwxr-xr-x  midgard-0.r2p0/backend/gpu/mali_kbase_pm_coarse_demand.h  64
-rwxr-xr-x  midgard-0.r2p0/backend/gpu/mali_kbase_pm_defs.h  504
-rwxr-xr-x  midgard-0.r2p0/backend/gpu/mali_kbase_pm_demand.c  73
-rwxr-xr-x  midgard-0.r2p0/backend/gpu/mali_kbase_pm_demand.h  64
-rwxr-xr-x  midgard-0.r2p0/backend/gpu/mali_kbase_pm_driver.c  1548
-rwxr-xr-x  midgard-0.r2p0/backend/gpu/mali_kbase_pm_internal.h  550
-rwxr-xr-x  midgard-0.r2p0/backend/gpu/mali_kbase_pm_metrics.c  401
-rwxr-xr-x  midgard-0.r2p0/backend/gpu/mali_kbase_pm_policy.c  969
-rwxr-xr-x  midgard-0.r2p0/backend/gpu/mali_kbase_pm_policy.h  227
-rwxr-xr-x  midgard-0.r2p0/backend/gpu/mali_kbase_power_model_simple.c  169
-rwxr-xr-x  midgard-0.r2p0/backend/gpu/mali_kbase_power_model_simple.h  47
-rwxr-xr-x  midgard-0.r2p0/backend/gpu/mali_kbase_time.c  103
-rwxr-xr-x  midgard-0.r2p0/backend/gpu/mali_kbase_time.h  52
-rw-r--r--  midgard-0.r2p0/dkms.conf  6
-rwxr-xr-x  midgard-0.r2p0/docs/Doxyfile  126
-rwxr-xr-x  midgard-0.r2p0/docs/policy_operation_diagram.dot  112
-rwxr-xr-x  midgard-0.r2p0/docs/policy_overview.dot  63
-rwxr-xr-x  midgard-0.r2p0/mali_base_hwconfig_features.h  223
-rwxr-xr-x  midgard-0.r2p0/mali_base_hwconfig_issues.h  1014
-rwxr-xr-x  midgard-0.r2p0/mali_base_kernel.h  1819
-rwxr-xr-x  midgard-0.r2p0/mali_base_kernel_sync.h  47
-rwxr-xr-x  midgard-0.r2p0/mali_base_mem_priv.h  52
-rwxr-xr-x  midgard-0.r2p0/mali_base_vendor_specific_func.h  24
-rwxr-xr-x  midgard-0.r2p0/mali_kbase.h  607
-rwxr-xr-x  midgard-0.r2p0/mali_kbase_10969_workaround.c  209
-rwxr-xr-x  midgard-0.r2p0/mali_kbase_10969_workaround.h  23
-rwxr-xr-x  midgard-0.r2p0/mali_kbase_as_fault_debugfs.c  102
-rwxr-xr-x  midgard-0.r2p0/mali_kbase_as_fault_debugfs.h  45
-rwxr-xr-x  midgard-0.r2p0/mali_kbase_cache_policy.c  64
-rwxr-xr-x  midgard-0.r2p0/mali_kbase_cache_policy.h  45
-rwxr-xr-x  midgard-0.r2p0/mali_kbase_config.c  51
-rwxr-xr-x  midgard-0.r2p0/mali_kbase_config.h  345
-rwxr-xr-x  midgard-0.r2p0/mali_kbase_config_defaults.h  260
-rwxr-xr-x  midgard-0.r2p0/mali_kbase_context.c  321
-rwxr-xr-x  midgard-0.r2p0/mali_kbase_context.h  90
-rwxr-xr-x  midgard-0.r2p0/mali_kbase_core_linux.c  4062
-rwxr-xr-x  midgard-0.r2p0/mali_kbase_debug.c  39
-rwxr-xr-x  midgard-0.r2p0/mali_kbase_debug.h  164
-rwxr-xr-x  midgard-0.r2p0/mali_kbase_debug_job_fault.c  502
-rwxr-xr-x  midgard-0.r2p0/mali_kbase_debug_job_fault.h  96
-rwxr-xr-x  midgard-0.r2p0/mali_kbase_debug_mem_view.c  279
-rwxr-xr-x  midgard-0.r2p0/mali_kbase_debug_mem_view.h  25
-rwxr-xr-x  midgard-0.r2p0/mali_kbase_defs.h  1539
-rwxr-xr-x  midgard-0.r2p0/mali_kbase_device.c  697
-rwxr-xr-x  midgard-0.r2p0/mali_kbase_disjoint_events.c  76
-rwxr-xr-x  midgard-0.r2p0/mali_kbase_dma_fence.c  606
-rwxr-xr-x  midgard-0.r2p0/mali_kbase_dma_fence.h  150
-rwxr-xr-x  midgard-0.r2p0/mali_kbase_event.c  259
-rwxr-xr-x  midgard-0.r2p0/mali_kbase_gator.h  45
-rwxr-xr-x  midgard-0.r2p0/mali_kbase_gator_api.c  330
-rwxr-xr-x  midgard-0.r2p0/mali_kbase_gator_api.h  219
-rwxr-xr-x  midgard-0.r2p0/mali_kbase_gator_hwcnt_names.h  2164
-rwxr-xr-x  midgard-0.r2p0/mali_kbase_gator_hwcnt_names_thex.h  291
-rwxr-xr-x  midgard-0.r2p0/mali_kbase_gator_hwcnt_names_tmix.h  291
-rwxr-xr-x  midgard-0.r2p0/mali_kbase_gpu_id.h  113
-rwxr-xr-x  midgard-0.r2p0/mali_kbase_gpu_memory_debugfs.c  97
-rwxr-xr-x  midgard-0.r2p0/mali_kbase_gpu_memory_debugfs.h  37
-rwxr-xr-x  midgard-0.r2p0/mali_kbase_gpuprops.c  314
-rwxr-xr-x  midgard-0.r2p0/mali_kbase_gpuprops.h  64
-rwxr-xr-x  midgard-0.r2p0/mali_kbase_gpuprops_types.h  92
-rwxr-xr-x  midgard-0.r2p0/mali_kbase_hw.c  272
-rwxr-xr-x  midgard-0.r2p0/mali_kbase_hw.h  52
-rwxr-xr-x  midgard-0.r2p0/mali_kbase_hwaccess_backend.h  54
-rwxr-xr-x  midgard-0.r2p0/mali_kbase_hwaccess_defs.h  36
-rwxr-xr-x  midgard-0.r2p0/mali_kbase_hwaccess_gpuprops.h  47
-rwxr-xr-x  midgard-0.r2p0/mali_kbase_hwaccess_instr.h  116
-rwxr-xr-x  midgard-0.r2p0/mali_kbase_hwaccess_jm.h  377
-rwxr-xr-x  midgard-0.r2p0/mali_kbase_hwaccess_pm.h  209
-rwxr-xr-x  midgard-0.r2p0/mali_kbase_hwaccess_time.h  53
-rwxr-xr-x  midgard-0.r2p0/mali_kbase_hwcnt_reader.h  66
-rwxr-xr-x  midgard-0.r2p0/mali_kbase_ipa.c  431
-rwxr-xr-x  midgard-0.r2p0/mali_kbase_ipa.h  41
-rwxr-xr-x  midgard-0.r2p0/mali_kbase_ipa_tables.h  104
-rwxr-xr-x  midgard-0.r2p0/mali_kbase_jd.c  1880
-rwxr-xr-x  midgard-0.r2p0/mali_kbase_jd_debugfs.c  123
-rwxr-xr-x  midgard-0.r2p0/mali_kbase_jd_debugfs.h  39
-rwxr-xr-x  midgard-0.r2p0/mali_kbase_jm.c  131
-rwxr-xr-x  midgard-0.r2p0/mali_kbase_jm.h  110
-rwxr-xr-x  midgard-0.r2p0/mali_kbase_js.c  2947
-rwxr-xr-x  midgard-0.r2p0/mali_kbase_js.h  1058
-rwxr-xr-x  midgard-0.r2p0/mali_kbase_js_ctx_attr.c  303
-rwxr-xr-x  midgard-0.r2p0/mali_kbase_js_ctx_attr.h  158
-rwxr-xr-x  midgard-0.r2p0/mali_kbase_js_defs.h  466
-rwxr-xr-x  midgard-0.r2p0/mali_kbase_js_policy.h  763
-rwxr-xr-x  midgard-0.r2p0/mali_kbase_js_policy_cfs.c  292
-rwxr-xr-x  midgard-0.r2p0/mali_kbase_js_policy_cfs.h  81
-rwxr-xr-x  midgard-0.r2p0/mali_kbase_linux.h  43
-rwxr-xr-x  midgard-0.r2p0/mali_kbase_mem.c  2490
-rwxr-xr-x  midgard-0.r2p0/mali_kbase_mem.h  1078
-rwxr-xr-x  midgard-0.r2p0/mali_kbase_mem_linux.c  2819
-rwxr-xr-x  midgard-0.r2p0/mali_kbase_mem_linux.h  216
-rwxr-xr-x  midgard-0.r2p0/mali_kbase_mem_lowlevel.h  45
-rwxr-xr-x  midgard-0.r2p0/mali_kbase_mem_pool.c  594
-rwxr-xr-x  midgard-0.r2p0/mali_kbase_mem_pool_debugfs.c  81
-rwxr-xr-x  midgard-0.r2p0/mali_kbase_mem_pool_debugfs.h  36
-rwxr-xr-x  midgard-0.r2p0/mali_kbase_mem_profile_debugfs.c  119
-rwxr-xr-x  midgard-0.r2p0/mali_kbase_mem_profile_debugfs.h  59
-rwxr-xr-x  midgard-0.r2p0/mali_kbase_mem_profile_debugfs_buf_size.h  33
-rwxr-xr-x  midgard-0.r2p0/mali_kbase_mmu.c  2059
-rwxr-xr-x  midgard-0.r2p0/mali_kbase_mmu_hw.h  123
-rwxr-xr-x  midgard-0.r2p0/mali_kbase_mmu_mode.h  47
-rwxr-xr-x  midgard-0.r2p0/mali_kbase_mmu_mode_aarch64.c  200
-rwxr-xr-x  midgard-0.r2p0/mali_kbase_mmu_mode_lpae.c  206
-rwxr-xr-x  midgard-0.r2p0/mali_kbase_platform_fake.c  124
-rwxr-xr-x  midgard-0.r2p0/mali_kbase_pm.c  205
-rwxr-xr-x  midgard-0.r2p0/mali_kbase_pm.h  171
-rwxr-xr-x  midgard-0.r2p0/mali_kbase_profiling_gator_api.h  40
-rwxr-xr-x  midgard-0.r2p0/mali_kbase_regs_history_debugfs.c  130
-rwxr-xr-x  midgard-0.r2p0/mali_kbase_regs_history_debugfs.h  50
-rwxr-xr-x  midgard-0.r2p0/mali_kbase_replay.c  1166
-rwxr-xr-x  midgard-0.r2p0/mali_kbase_smc.c  74
-rwxr-xr-x  midgard-0.r2p0/mali_kbase_smc.h  67
-rwxr-xr-x  midgard-0.r2p0/mali_kbase_softjobs.c  1504
-rwxr-xr-x  midgard-0.r2p0/mali_kbase_strings.c  23
-rwxr-xr-x  midgard-0.r2p0/mali_kbase_strings.h  19
-rwxr-xr-x  midgard-0.r2p0/mali_kbase_sync.c  182
-rwxr-xr-x  midgard-0.r2p0/mali_kbase_sync.h  100
-rwxr-xr-x  midgard-0.r2p0/mali_kbase_sync_user.c  156
-rwxr-xr-x  midgard-0.r2p0/mali_kbase_tlstream.c  2342
-rwxr-xr-x  midgard-0.r2p0/mali_kbase_tlstream.h  558
-rwxr-xr-x  midgard-0.r2p0/mali_kbase_trace_defs.h  264
-rwxr-xr-x  midgard-0.r2p0/mali_kbase_trace_timeline.c  236
-rwxr-xr-x  midgard-0.r2p0/mali_kbase_trace_timeline.h  363
-rwxr-xr-x  midgard-0.r2p0/mali_kbase_trace_timeline_defs.h  140
-rwxr-xr-x  midgard-0.r2p0/mali_kbase_uku.h  551
-rwxr-xr-x  midgard-0.r2p0/mali_kbase_utility.c  33
-rwxr-xr-x  midgard-0.r2p0/mali_kbase_utility.h  37
-rwxr-xr-x  midgard-0.r2p0/mali_kbase_vinstr.c  2040
-rwxr-xr-x  midgard-0.r2p0/mali_kbase_vinstr.h  155
-rwxr-xr-x  midgard-0.r2p0/mali_linux_kbase_trace.h  201
-rwxr-xr-x  midgard-0.r2p0/mali_linux_trace.h  189
-rwxr-xr-x  midgard-0.r2p0/mali_malisw.h  131
-rwxr-xr-x  midgard-0.r2p0/mali_midg_coherency.h  26
-rwxr-xr-x  midgard-0.r2p0/mali_midg_regmap.h  580
-rwxr-xr-x  midgard-0.r2p0/mali_timeline.h  396
-rwxr-xr-x  midgard-0.r2p0/mali_uk.h  141
-rw-r--r--  midgard-0.r2p0/patches/arm64-standalone-headers.patch  532
-rwxr-xr-x  midgard-0.r2p0/platform/Kbuild  21
-rwxr-xr-x  midgard-0.r2p0/platform/Kconfig  24
-rwxr-xr-x  midgard-0.r2p0/platform/devicetree/Kbuild  22
-rwxr-xr-x  midgard-0.r2p0/platform/devicetree/mali_kbase_config_devicetree.c  31
-rwxr-xr-x  midgard-0.r2p0/platform/devicetree/mali_kbase_config_platform.h  80
-rwxr-xr-x  midgard-0.r2p0/platform/devicetree/mali_kbase_runtime_pm.c  100
-rwxr-xr-x  midgard-0.r2p0/platform/juno_soc/Kbuild  19
-rwxr-xr-x  midgard-0.r2p0/platform/juno_soc/juno_mali_opp.c  84
-rwxr-xr-x  midgard-0.r2p0/platform/juno_soc/mali_kbase_config_juno_soc.c  156
-rwxr-xr-x  midgard-0.r2p0/platform/juno_soc/mali_kbase_config_platform.h  84
-rwxr-xr-x  midgard-0.r2p0/platform/mali_kbase_platform_common.h  26
-rwxr-xr-x  midgard-0.r2p0/platform/mali_kbase_platform_fake.h  38
-rwxr-xr-x  midgard-0.r2p0/platform/vexpress/Kbuild  18
-rwxr-xr-x  midgard-0.r2p0/platform/vexpress/mali_kbase_config_platform.h  82
-rwxr-xr-x  midgard-0.r2p0/platform/vexpress/mali_kbase_config_vexpress.c  85
-rwxr-xr-x  midgard-0.r2p0/platform/vexpress/mali_kbase_cpu_vexpress.c  279
-rwxr-xr-x  midgard-0.r2p0/platform/vexpress/mali_kbase_cpu_vexpress.h  38
-rwxr-xr-x  midgard-0.r2p0/platform/vexpress_1xv7_a57/Kbuild  16
-rwxr-xr-x  midgard-0.r2p0/platform/vexpress_1xv7_a57/mali_kbase_config_platform.h  80
-rwxr-xr-x  midgard-0.r2p0/platform/vexpress_1xv7_a57/mali_kbase_config_vexpress.c  79
-rwxr-xr-x  midgard-0.r2p0/platform/vexpress_6xvirtex7_10mhz/Kbuild  18
-rwxr-xr-x  midgard-0.r2p0/platform/vexpress_6xvirtex7_10mhz/mali_kbase_config_platform.h  82
-rwxr-xr-x  midgard-0.r2p0/platform/vexpress_6xvirtex7_10mhz/mali_kbase_config_vexpress.c  83
-rwxr-xr-x  midgard-0.r2p0/platform/vexpress_6xvirtex7_10mhz/mali_kbase_cpu_vexpress.c  71
-rwxr-xr-x  midgard-0.r2p0/platform/vexpress_6xvirtex7_10mhz/mali_kbase_cpu_vexpress.h  28
-rwxr-xr-x  midgard-0.r2p0/platform_dummy/mali_ukk_os.h  53
-rwxr-xr-x  midgard-0.r2p0/sconscript  85
215 files changed, 67875 insertions, 0 deletions
diff --git a/Makefile b/Makefile
new file mode 100644
index 0000000..57e13d8
--- /dev/null
+++ b/Makefile
@@ -0,0 +1,28 @@
+#!/usr/bin/make -f
+SRC = $(DESTDIR)/usr/src
+SHARE = $(DESTDIR)/usr/share/$(NAME)-dkms
+
+all:
+
+clean:
+
+install:
+
+#source tree
+ifeq ("$(wildcard $(NAME)-$(VERSION))", "$(NAME)-$(VERSION)")
+ install -d "$(SRC)"
+ cp -a $(NAME)-$(VERSION) $(SRC)
+ chmod 644 -R "$(SRC)/$(NAME)-$(VERSION)"
+endif
+
+#tarball, possibly with binaries
+ifeq ("$(wildcard $(NAME)-$(VERSION).dkms.tar.gz)", "$(NAME)-$(VERSION).dkms.tar.gz")
+ install -d "$(SHARE)"
+ install -m 644 $(NAME)-$(VERSION).dkms.tar.gz "$(SHARE)"
+endif
+
+#postinst, only if we are supporting legacy mode
+ifeq ("$(wildcard common.postinst)", "common.postinst")
+ install -d "$(SHARE)"
+ install -m 755 $(PREFIX)/usr/lib/dkms/common.postinst $(SHARE)/postinst
+endif
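The install target above is normally driven by debian/rules (further down in this commit), which supplies NAME, VERSION and DESTDIR. As a rough manual sketch only (the values shown are assumptions based on this package, not part of the commit):

    make install NAME=midgard VERSION=0.r2p0 DESTDIR=$(pwd)/debian/midgard-dkms
    # copies midgard-0.r2p0/ into $DESTDIR/usr/src/ for DKMS to pick up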
diff --git a/common.postinst b/common.postinst
new file mode 100755
index 0000000..d2ac712
--- /dev/null
+++ b/common.postinst
@@ -0,0 +1,292 @@
+#!/bin/sh
+# Copyright (C) 2002-2005 Flavio Stanchina
+# Copyright (C) 2005-2006 Aric Cyr
+# Copyright (C) 2007 Mario Limonciello
+# Copyright (C) 2009 Alberto Milone
+
+set -e
+
+uname_s=$(uname -s)
+
+_get_kernel_dir() {
+ KVER=$1
+ case ${uname_s} in
+ Linux) DIR="/lib/modules/$KVER/build" ;;
+ GNU/kFreeBSD) DIR="/usr/src/kfreebsd-headers-$KVER/sys" ;;
+ esac
+ echo $DIR
+}
+
+_check_kernel_dir() {
+ DIR=$(_get_kernel_dir $1)
+ case ${uname_s} in
+ Linux) test -e $DIR/include ;;
+ GNU/kFreeBSD) test -e $DIR/kern && test -e $DIR/conf/kmod.mk ;;
+ *) return 1 ;;
+ esac
+ return $?
+}
+
+# Check the existence of a kernel named as $1
+_is_kernel_name_correct() {
+ CORRECT="no"
+ KERNEL_NAME=$1
+
+ for kernel in /boot/config-*; do
+ KERNEL=${kernel#*-}
+ if [ "${KERNEL}" = "${KERNEL_NAME}" ]; then
+ CORRECT="yes"
+ break
+ fi
+ done
+
+ echo $CORRECT
+}
+
+
+# Get the most recent kernel on Debian based systems. This keeps
+# into account both the version and the ABI. If the current kernel
+# is the most recent kernel then the function will print a null string.
+_get_newest_kernel_debian() {
+ NEWEST_KERNEL=
+ NEWEST_VERSION=
+ NEWEST_ABI=
+
+ for kernel in /boot/config-*; do
+ [ -f "$kernel" ] || continue
+ KERNEL=${kernel#*-}
+ KERNEL_VERSION=${KERNEL%%-*}
+ ABI=${KERNEL#*-}
+ ABI=${ABI%%-*}
+
+ if [ -z "$NEWEST_KERNEL" ]; then
+ # The 1st time get a version which is bigger than $1
+ COMPARE_TO=$1
+ else
+ # Get the biggest version
+ COMPARE_TO="$NEWEST_VERSION-$NEWEST_ABI"
+ fi
+
+ # if $kernel is greater than $COMPARE_TO
+ if [ `dpkg --compare-versions "$KERNEL_VERSION-$ABI" gt "$COMPARE_TO" && echo "yes" || \
+ echo "no"` = "yes" ]; then
+ NEWEST_KERNEL=$KERNEL
+ NEWEST_VERSION=$KERNEL_VERSION
+ NEWEST_ABI=$ABI
+ fi
+ done
+
+ echo "$NEWEST_KERNEL"
+}
+
+# Get the most recent kernel in Rhel based systems. If the current kernel
+# is the most recent kernel then the function will print a null string.
+_get_newest_kernel_rhel() {
+ NEWEST_KERNEL=
+
+ LAST_INSTALLED_KERNEL=$(rpm -q --whatprovides kernel --last | grep kernel -m1 | cut -f1 -d' ')
+
+ LIK_FORMATTED_NAME=$(rpm -q $LAST_INSTALLED_KERNEL --queryformat="%{VERSION}-%{RELEASE}.%{ARCH}\n")
+
+ if [ `echo $LIK_FORMATTED_NAME | grep 2.6 >/dev/null` ]; then
+ # Fedora and Suse
+ NEWEST_KERNEL=$LIK_FORMATTED_NAME
+ else
+ # Hack for Mandriva where $LIK_FORMATTED_NAME is broken
+ LIK_NAME=$(rpm -q $LAST_INSTALLED_KERNEL --queryformat="%{NAME}\n")
+ LIK_TYPE=${LIK_NAME#kernel-}
+ LIK_TYPE=${LIK_TYPE%%-*}
+ LIK_STRIPPED=${LIK_NAME#kernel-}
+ LIK_STRIPPED=${LIK_STRIPPED#$LIK_TYPE-}
+ LIK_STRIPPED_BASE=${LIK_STRIPPED%%-*}
+ LIK_STRIPPED_END=${LIK_STRIPPED#$LIK_STRIPPED_BASE-}
+ LIK_FINAL=$LIK_STRIPPED_BASE-$LIK_TYPE-$LIK_STRIPPED_END
+
+ NEWEST_KERNEL=$LIK_FINAL
+ fi
+
+ echo $NEWEST_KERNEL
+}
+
+# Get the newest kernel on Debian and Rhel based systems.
+get_newest_kernel() {
+ NEWEST_KERNEL=
+ # Try Debian first as rpm can be installed in Debian based distros
+ if [ -e /usr/bin/dpkg ]; then
+ # If DEB based
+ CURRENT_KERNEL=$1
+ CURRENT_VERSION=${CURRENT_KERNEL%%-*}
+ CURRENT_ABI=${CURRENT_KERNEL#*-}
+ CURRENT_FLAVOUR=${CURRENT_ABI#*-}
+ CURRENT_ABI=${CURRENT_ABI%%-*}
+ NEWEST_KERNEL=$(_get_newest_kernel_debian "$CURRENT_VERSION-$CURRENT_ABI")
+
+ elif [ `which rpm >/dev/null` ]; then
+ # If RPM based
+ NEWEST_KERNEL=$(_get_newest_kernel_rhel)
+ fi
+
+ # Make sure that kernel name that we extracted corresponds to an installed
+ # kernel
+ if [ -n "$NEWEST_KERNEL" ] && [ `_is_kernel_name_correct $NEWEST_KERNEL` = "no" ]; then
+ NEWEST_KERNEL=
+ fi
+
+ echo $NEWEST_KERNEL
+}
+
+NAME=$1
+VERSION=$2
+TARBALL_ROOT=$3
+ARCH=$4
+UPGRADE=$5
+
+if [ -z "$NAME" ] || [ -z "$VERSION" ]; then
+	echo "Need NAME and VERSION defined"
+ echo "ARCH is optional"
+ exit 1
+fi
+
+# read framework configuration options
+if [ -r /etc/dkms/framework.conf ]; then
+ . /etc/dkms/framework.conf
+fi
+
+KERNELS=$(ls /lib/modules/ 2>/dev/null || true)
+CURRENT_KERNEL=$(uname -r)
+
+#We never want to keep an older version side by side to prevent conflicts
+if [ -e "/var/lib/dkms/$NAME/$VERSION" ]; then
+ echo "Removing old $NAME-$VERSION DKMS files..."
+ dkms remove -m $NAME -v $VERSION --all
+fi
+
+#Load new files, by source package and by tarball
+if [ -f "$TARBALL_ROOT/$NAME-$VERSION.dkms.tar.gz" ]; then
+ if ! dkms ldtarball --archive "$TARBALL_ROOT/$NAME-$VERSION.dkms.tar.gz"; then
+ echo ""
+ echo ""
+ echo "Unable to load DKMS tarball $TARBALL_ROOT/$NAME-$VERSION.dkms.tar.gz."
+ echo "Common causes include: "
+		echo " - You must be using DKMS 2.1.0.0 or later to support binaries-only"
+		echo "   distribution-specific archives."
+ echo " - Corrupt distribution specific archive"
+ echo ""
+ echo ""
+ exit 2
+ fi
+elif [ -d "/usr/src/$NAME-$VERSION" ]; then
+ echo "Loading new $NAME-$VERSION DKMS files..."
+ dkms add -m $NAME -v $VERSION > /dev/null
+fi
+
+# On 1st installation, let us look for a directory
+# in /lib/modules which matches `uname -r`. If none
+# is found it is possible that buildd is being used
+# and that uname -r is giving us the name of the
+# kernel used by the buildd machine.
+#
+# If this is the case we try to build the kernel
+# module for each kernel which has a directory in
+# /lib/modules. Furthermore we will have to tell
+# DKMS which architecture it should build the module
+# for (e.g. if the buildd machine is using a
+# 2.6.24-23-xen 64bit kernel).
+#
+# NOTE: if the headers are not installed then the
+# module won't be built, as usual
+
+# Here we look for the most recent kernel so that we can
+# build the module for it (in addition to doing it for the
+# current kernel).
+NEWEST_KERNEL=$(get_newest_kernel "$KERNELS")
+
+if [ -z "$autoinstall_all_kernels" ]; then
+ # If the current kernel is installed on the system or chroot
+ if [ `_is_kernel_name_correct $CURRENT_KERNEL` = "yes" ]; then
+ if [ -n "$NEWEST_KERNEL" ] && [ ${CURRENT_KERNEL} != ${NEWEST_KERNEL} ]; then
+ KERNELS="$CURRENT_KERNEL $NEWEST_KERNEL"
+ else
+ KERNELS=$CURRENT_KERNEL
+ fi
+ # The current kernel is not useful as it's not installed
+ else
+ echo "It is likely that $CURRENT_KERNEL belongs to a chroot's host"
+
+ # Let's use only the newest kernel if this is not a first installation
+ # otherwise build for all kernels
+ if [ -n "$NEWEST_KERNEL" -a -n "$UPGRADE" ]; then
+ KERNELS="$NEWEST_KERNEL"
+ fi
+ fi
+fi
+
+# Take care of displaying newline separated list
+echo "Building for $KERNELS" | tr '\n' ',' \
+ | sed -e 's/,/, /g; s/, $/\n/; s/, \([^,]\+\)$/ and \1/'
+
+if [ -n "$ARCH" ]; then
+ if which lsb_release >/dev/null && [ $(lsb_release -s -i) = "Ubuntu" ]; then
+ case $ARCH in
+ amd64)
+ ARCH="x86_64"
+ ;;
+ lpia|i?86)
+ ARCH="i686"
+ ;;
+ esac
+ fi
+ echo "Building for architecture $ARCH"
+ ARCH="-a $ARCH"
+fi
+
+for KERNEL in $KERNELS; do
+ dkms_status=`dkms status -m $NAME -v $VERSION -k $KERNEL $ARCH`
+ if [ `echo $KERNEL | grep -c "BOOT"` -gt 0 ]; then
+ echo ""
+ echo "Module build and install for $KERNEL was skipped as "
+ echo "it is a BOOT variant"
+ continue
+ fi
+
+
+ #if the module isn't yet built, try to build it
+ if [ `echo $dkms_status | grep -c ": built"` -eq 0 ]; then
+ if [ ! -L /var/lib/dkms/$NAME/$VERSION/source ]; then
+ echo "This package appears to be a binaries-only package"
+ echo " you will not be able to build against kernel $KERNEL"
+ echo " since the package source was not provided"
+ continue
+ fi
+ if _check_kernel_dir $KERNEL; then
+ echo "Building initial module for $KERNEL"
+ set +e
+ dkms build -m $NAME -v $VERSION -k $KERNEL $ARCH > /dev/null
+ case $? in
+ 9)
+ set -e
+ echo "Skipped."
+ continue
+ ;;
+ 0)
+ set -e
+ echo "Done."
+ ;;
+ *)
+ exit $?
+ ;;
+ esac
+ dkms_status=`dkms status -m $NAME -v $VERSION -k $KERNEL $ARCH`
+ else
+ echo "Module build for kernel $KERNEL was skipped since the"
+		echo "kernel headers for this kernel do not seem to be installed."
+ fi
+ fi
+
+ #if the module is built (either pre-built or just now), install it
+ if [ `echo $dkms_status | grep -c ": built"` -eq 1 ] &&
+ [ `echo $dkms_status | grep -c ": installed"` -eq 0 ]; then
+ dkms install -m $NAME -v $VERSION -k $KERNEL $ARCH
+ fi
+done
+
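common.postinst is the stock DKMS helper; it expects NAME, VERSION, TARBALL_ROOT, ARCH and optionally UPGRADE as positional arguments and then adds, builds and installs the module for the selected kernels. A hedged example call (the architecture and paths are illustrative; debian/postinst below constructs the real ones):

    /usr/share/midgard-dkms/postinst midgard 0.r2p0 /usr/share/midgard-dkms arm64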
diff --git a/debian/README.Debian b/debian/README.Debian
new file mode 100755
index 0000000..4ad0735
--- /dev/null
+++ b/debian/README.Debian
@@ -0,0 +1,5 @@
+midgard DKMS module for Debian
+
+This package was automatically generated by the DKMS system,
+for distribution on Debian based operating systems.
+
diff --git a/debian/changelog b/debian/changelog
new file mode 100755
index 0000000..6a8eb65
--- /dev/null
+++ b/debian/changelog
@@ -0,0 +1,6 @@
+midgard-dkms (0.r2p0) stable; urgency=low
+
+ * Automatically packaged by DKMS.
+
+ -- Dynamic Kernel Modules Support Team <pkg-dkms-maint@lists.alioth.debian.org> Thu, 05 Jan 2017 18:52:00 +0000
+
diff --git a/debian/compat b/debian/compat
new file mode 100755
index 0000000..7f8f011
--- /dev/null
+++ b/debian/compat
@@ -0,0 +1 @@
+7
diff --git a/debian/control b/debian/control
new file mode 100755
index 0000000..40e5eec
--- /dev/null
+++ b/debian/control
@@ -0,0 +1,11 @@
+Source: midgard-dkms
+Section: misc
+Priority: optional
+Maintainer: Dynamic Kernel Modules Support Team <pkg-dkms-maint@lists.alioth.debian.org>
+Build-Depends: debhelper (>= 7), dkms
+Standards-Version: 3.8.1
+
+Package: midgard-dkms
+Architecture: all
+Depends: dkms (>= 1.95), ${misc:Depends}
+Description: midgard driver in DKMS format.
diff --git a/debian/copyright b/debian/copyright
new file mode 100755
index 0000000..ad983f3
--- /dev/null
+++ b/debian/copyright
@@ -0,0 +1,2 @@
+
+This copyright has not been completed by the author of this package.
diff --git a/debian/dirs b/debian/dirs
new file mode 100755
index 0000000..b601f22
--- /dev/null
+++ b/debian/dirs
@@ -0,0 +1 @@
+usr/src
diff --git a/debian/postinst b/debian/postinst
new file mode 100755
index 0000000..b7c0ba4
--- /dev/null
+++ b/debian/postinst
@@ -0,0 +1,49 @@
+#!/bin/sh
+# Copyright (C) 2002-2005 Flavio Stanchina
+# Copyright (C) 2005-2006 Aric Cyr
+# Copyright (C) 2007 Mario Limonciello
+# Copyright (C) 2009 Alberto Milone
+
+set -e
+
+NAME=midgard
+PACKAGE_NAME=$NAME-dkms
+DEB_NAME=$(echo $PACKAGE_NAME | sed 's,_,-,')
+CVERSION=`dpkg-query -W -f='${Version}' $DEB_NAME | awk -F "-" '{print $1}' | cut -d\: -f2`
+ARCH=`dpkg --print-architecture`
+
+dkms_configure () {
+ for POSTINST in /usr/lib/dkms/common.postinst "/usr/share/$PACKAGE_NAME/postinst"; do
+ if [ -f "$POSTINST" ]; then
+ "$POSTINST" "$NAME" "$CVERSION" "/usr/share/$PACKAGE_NAME" "$ARCH" "$2"
+ return $?
+ fi
+ echo "WARNING: $POSTINST does not exist." >&2
+ done
+ echo "ERROR: DKMS version is too old and $PACKAGE_NAME was not" >&2
+ echo "built with legacy DKMS support." >&2
+ echo "You must either rebuild $PACKAGE_NAME with legacy postinst" >&2
+ echo "support or upgrade DKMS to a more current version." >&2
+ return 1
+}
+
+case "$1" in
+ configure)
+ dkms_configure
+ ;;
+
+ abort-upgrade|abort-remove|abort-deconfigure)
+ ;;
+
+ *)
+ echo "postinst called with unknown argument \`$1'" >&2
+ exit 1
+ ;;
+esac
+
+# dh_installdeb will replace this with shell code automatically
+# generated by other debhelper scripts.
+
+#DEBHELPER#
+
+exit 0
diff --git a/debian/prerm b/debian/prerm
new file mode 100755
index 0000000..a0674d5
--- /dev/null
+++ b/debian/prerm
@@ -0,0 +1,28 @@
+#!/bin/sh
+
+NAME=midgard
+VERSION=0.r2p0
+
+set -e
+
+case "$1" in
+ remove|upgrade|deconfigure)
+ if [ "`dkms status -m $NAME`" ]; then
+ dkms remove -m $NAME -v $VERSION --all
+ fi
+ ;;
+
+ failed-upgrade)
+ ;;
+
+ *)
+ echo "prerm called with unknown argument \`$1'" >&2
+ exit 1
+ ;;
+esac
+
+#DEBHELPER#
+
+exit 0
+
+
diff --git a/debian/rules b/debian/rules
new file mode 100755
index 0000000..73327e9
--- /dev/null
+++ b/debian/rules
@@ -0,0 +1,54 @@
+#!/usr/bin/make -f
+# -*- makefile -*-
+
+# Uncomment this to turn on verbose mode.
+#export DH_VERBOSE=1
+
+DEB_NAME=midgard
+NAME=midgard
+VERSION=0.r2p0
+
+configure: configure-stamp
+configure-stamp:
+ dh_testdir
+ touch configure-stamp
+
+
+build: build-stamp
+
+build-stamp: configure-stamp
+ dh_testdir
+ $(MAKE)
+ touch $@
+
+clean:
+ dh_testdir
+ dh_testroot
+ rm -f build-stamp configure-stamp
+ -$(MAKE) clean
+ dh_clean
+
+install: build
+ dh_testdir
+ dh_testroot
+ dh_prep
+ dh_installdirs
+ $(MAKE) DESTDIR=$(CURDIR)/debian/$(DEB_NAME)-dkms NAME=$(NAME) VERSION=$(VERSION) install
+
+binary-arch: build install
+
+binary-indep: build install
+ dh_testdir
+ dh_testroot
+ dh_link
+ dh_strip
+ dh_compress
+ dh_fixperms
+ dh_installdeb
+ dh_shlibdeps
+ dh_gencontrol
+ dh_md5sums
+ dh_builddeb
+
+binary: binary-indep binary-arch
+.PHONY: build clean binary-indep binary-arch binary install configure
diff --git a/midgard-0.r2p0/Kbuild b/midgard-0.r2p0/Kbuild
new file mode 100755
index 0000000..0568f4b
--- /dev/null
+++ b/midgard-0.r2p0/Kbuild
@@ -0,0 +1,233 @@
+#
+# (C) COPYRIGHT 2012,2014 ARM Limited. All rights reserved.
+#
+# This program is free software and is provided to you under the terms of the
+# GNU General Public License version 2 as published by the Free Software
+# Foundation, and any use by you of this program is subject to the terms
+# of such GNU licence.
+#
+# A copy of the licence is included with the program, and can also be obtained
+# from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
+# Boston, MA 02110-1301, USA.
+#
+#
+
+
+
+# Driver version string which is returned to userspace via an ioctl
+MALI_RELEASE_NAME ?= "r2p0-03rel0"
+
+# Paths required for build
+KBASE_PATH = $(src)
+KBASE_PLATFORM_PATH = $(KBASE_PATH)/platform_dummy
+UMP_PATH = $(src)/../../../base
+
+ifeq ($(CONFIG_MALI_ERROR_INJECTION),y)
+MALI_ERROR_INJECT_ON = 1
+endif
+
+# Set up defaults if not defined by build system
+MALI_CUSTOMER_RELEASE ?= 1
+MALI_UNIT_TEST ?= 0
+MALI_KERNEL_TEST_API ?= 0
+MALI_ERROR_INJECT_ON ?= 0
+MALI_MOCK_TEST ?= 0
+MALI_COVERAGE ?= 0
+MALI_INSTRUMENTATION_LEVEL ?= 0
+# This workaround is for what seems to be a compiler bug we observed in
+# GCC 4.7 on AOSP 4.3. The bug caused an intermittent failure compiling
+# the "_Pragma" syntax, where an error message is returned:
+#
+# "internal compiler error: unspellable token PRAGMA"
+#
+# This regression has thus far only been seen on the GCC 4.7 compiler bundled
+# with AOSP 4.3.0. So this makefile, intended for in-tree kernel builds
+# which are not known to be used with AOSP, is hardcoded to disable the
+# workaround, i.e. set the define to 0.
+MALI_GCC_WORKAROUND_MIDCOM_4598 ?= 0
+
+# Set up our defines, which will be passed to gcc
+DEFINES = \
+ -DMALI_CUSTOMER_RELEASE=$(MALI_CUSTOMER_RELEASE) \
+ -DMALI_KERNEL_TEST_API=$(MALI_KERNEL_TEST_API) \
+ -DMALI_UNIT_TEST=$(MALI_UNIT_TEST) \
+ -DMALI_ERROR_INJECT_ON=$(MALI_ERROR_INJECT_ON) \
+ -DMALI_MOCK_TEST=$(MALI_MOCK_TEST) \
+ -DMALI_COVERAGE=$(MALI_COVERAGE) \
+ -DMALI_INSTRUMENTATION_LEVEL=$(MALI_INSTRUMENTATION_LEVEL) \
+ -DMALI_RELEASE_NAME=\"$(MALI_RELEASE_NAME)\" \
+ -DMALI_GCC_WORKAROUND_MIDCOM_4598=$(MALI_GCC_WORKAROUND_MIDCOM_4598)
+
+ifeq ($(KBUILD_EXTMOD),)
+# in-tree
+DEFINES +=-DMALI_KBASE_THIRDPARTY_PATH=../../$(src)/platform/$(CONFIG_MALI_PLATFORM_THIRDPARTY_NAME)
+else
+# out-of-tree
+DEFINES +=-DMALI_KBASE_THIRDPARTY_PATH=$(src)/platform/$(CONFIG_MALI_PLATFORM_THIRDPARTY_NAME)
+endif
+
+DEFINES += -I$(srctree)/drivers/staging/android
+
+# Use our defines when compiling
+ccflags-y += $(DEFINES) -I$(KBASE_PATH) -I$(KBASE_PLATFORM_PATH) -I$(UMP_PATH) -I$(srctree)/include/linux
+subdir-ccflags-y += $(DEFINES) -I$(KBASE_PATH) -I$(KBASE_PLATFORM_PATH) -I$(OSK_PATH) -I$(UMP_PATH) -I$(srctree)/include/linux
+
+SRC := \
+ mali_kbase_device.c \
+ mali_kbase_cache_policy.c \
+ mali_kbase_mem.c \
+ mali_kbase_mmu.c \
+ mali_kbase_ipa.c \
+ mali_kbase_jd.c \
+ mali_kbase_jd_debugfs.c \
+ mali_kbase_jm.c \
+ mali_kbase_gpuprops.c \
+ mali_kbase_js.c \
+ mali_kbase_js_ctx_attr.c \
+ mali_kbase_event.c \
+ mali_kbase_context.c \
+ mali_kbase_pm.c \
+ mali_kbase_config.c \
+ mali_kbase_vinstr.c \
+ mali_kbase_softjobs.c \
+ mali_kbase_10969_workaround.c \
+ mali_kbase_hw.c \
+ mali_kbase_utility.c \
+ mali_kbase_debug.c \
+ mali_kbase_trace_timeline.c \
+ mali_kbase_gpu_memory_debugfs.c \
+ mali_kbase_mem_linux.c \
+ mali_kbase_core_linux.c \
+ mali_kbase_sync.c \
+ mali_kbase_sync_user.c \
+ mali_kbase_replay.c \
+ mali_kbase_mem_profile_debugfs.c \
+ mali_kbase_mmu_mode_lpae.c \
+ mali_kbase_mmu_mode_aarch64.c \
+ mali_kbase_disjoint_events.c \
+ mali_kbase_gator_api.c \
+ mali_kbase_debug_mem_view.c \
+ mali_kbase_debug_job_fault.c \
+ mali_kbase_smc.c \
+ mali_kbase_mem_pool.c \
+ mali_kbase_mem_pool_debugfs.c \
+ mali_kbase_tlstream.c \
+ mali_kbase_strings.c \
+ mali_kbase_as_fault_debugfs.c \
+ mali_kbase_regs_history_debugfs.c
+
+ifeq ($(MALI_UNIT_TEST),1)
+ SRC += mali_kbase_tlstream_test.c
+endif
+
+ifeq ($(MALI_CUSTOMER_RELEASE),0)
+ SRC += mali_kbase_regs_dump_debugfs.c
+endif
+
+
+# Job Scheduler Policy: Completely Fair Scheduler
+SRC += mali_kbase_js_policy_cfs.c
+
+ccflags-y += -I$(KBASE_PATH)
+
+ifeq ($(CONFIG_MALI_PLATFORM_FAKE),y)
+ SRC += mali_kbase_platform_fake.c
+
+ ifeq ($(CONFIG_MALI_PLATFORM_VEXPRESS),y)
+ SRC += platform/vexpress/mali_kbase_config_vexpress.c \
+ platform/vexpress/mali_kbase_cpu_vexpress.c
+ ccflags-y += -I$(src)/platform/vexpress
+ endif
+
+ ifeq ($(CONFIG_MALI_PLATFORM_RTSM_VE),y)
+ SRC += platform/rtsm_ve/mali_kbase_config_vexpress.c
+ ccflags-y += -I$(src)/platform/rtsm_ve
+ endif
+
+ ifeq ($(CONFIG_MALI_PLATFORM_JUNO),y)
+ SRC += platform/juno/mali_kbase_config_vexpress.c
+ ccflags-y += -I$(src)/platform/juno
+ endif
+
+ ifeq ($(CONFIG_MALI_PLATFORM_JUNO_SOC),y)
+ SRC += platform/juno_soc/mali_kbase_config_juno_soc.c
+ ccflags-y += -I$(src)/platform/juno_soc
+ endif
+
+ ifeq ($(CONFIG_MALI_PLATFORM_VEXPRESS_1XV7_A57),y)
+ SRC += platform/vexpress_1xv7_a57/mali_kbase_config_vexpress.c
+ ccflags-y += -I$(src)/platform/vexpress_1xv7_a57
+ endif
+
+ ifeq ($(CONFIG_MALI_PLATFORM_VEXPRESS_6XVIRTEX7_10MHZ),y)
+ SRC += platform/vexpress_6xvirtex7_10mhz/mali_kbase_config_vexpress.c \
+ platform/vexpress_6xvirtex7_10mhz/mali_kbase_cpu_vexpress.c
+ ccflags-y += -I$(src)/platform/vexpress_6xvirtex7_10mhz
+ endif
+
+ ifeq ($(CONFIG_MALI_PLATFORM_A7_KIPLING),y)
+ SRC += platform/a7_kipling/mali_kbase_config_a7_kipling.c \
+ platform/a7_kipling/mali_kbase_cpu_a7_kipling.c
+ ccflags-y += -I$(src)/platform/a7_kipling
+ endif
+
+ ifeq ($(CONFIG_MALI_PLATFORM_THIRDPARTY),y)
+ # remove begin and end quotes from the Kconfig string type
+ platform_name := $(shell echo $(CONFIG_MALI_PLATFORM_THIRDPARTY_NAME))
+ MALI_PLATFORM_THIRDPARTY_DIR := platform/$(platform_name)
+ ccflags-y += -I$(src)/$(MALI_PLATFORM_THIRDPARTY_DIR)
+ ifeq ($(CONFIG_MALI_MIDGARD),m)
+ include $(src)/platform/$(platform_name)/Kbuild
+ else ifeq ($(CONFIG_MALI_MIDGARD),y)
+ obj-$(CONFIG_MALI_MIDGARD) += platform/
+ endif
+ endif
+endif # CONFIG_MALI_PLATFORM_FAKE=y
+
+ifeq ($(CONFIG_MALI_PLATFORM_THIRDPARTY),y)
+# remove begin and end quotes from the Kconfig string type
+platform_name := $(shell echo $(CONFIG_MALI_PLATFORM_THIRDPARTY_NAME))
+MALI_PLATFORM_THIRDPARTY_DIR := platform/$(platform_name)
+ccflags-y += -I$(src)/$(MALI_PLATFORM_THIRDPARTY_DIR)
+ifeq ($(CONFIG_MALI_MIDGARD),m)
+include $(src)/platform/$(platform_name)/Kbuild
+else ifeq ($(CONFIG_MALI_MIDGARD),y)
+obj-$(CONFIG_MALI_MIDGARD) += platform/
+endif
+endif
+
+# Tell the Linux build system from which .o file to create the kernel module
+obj-$(CONFIG_MALI_MIDGARD) += mali_kbase.o
+
+# Tell the Linux build system to enable building of our .c files
+mali_kbase-y := $(SRC:.c=.o)
+
+mali_kbase-$(CONFIG_MALI_DMA_FENCE) += mali_kbase_dma_fence.o
+
+MALI_BACKEND_PATH ?= backend
+CONFIG_MALI_BACKEND ?= gpu
+CONFIG_MALI_BACKEND_REAL ?= $(CONFIG_MALI_BACKEND)
+
+ifeq ($(MALI_MOCK_TEST),1)
+ifeq ($(CONFIG_MALI_BACKEND_REAL),gpu)
+# Test functionality
+mali_kbase-y += tests/internal/src/mock/mali_kbase_pm_driver_mock.o
+endif
+endif
+
+include $(src)/$(MALI_BACKEND_PATH)/$(CONFIG_MALI_BACKEND_REAL)/Kbuild
+mali_kbase-y += $(BACKEND:.c=.o)
+
+ccflags-y += -I$(src)/$(MALI_BACKEND_PATH)/$(CONFIG_MALI_BACKEND_REAL)
+subdir-ccflags-y += -I$(src)/$(MALI_BACKEND_PATH)/$(CONFIG_MALI_BACKEND_REAL)
+
+# Default to devicetree platform if neither a fake platform nor a thirdparty
+# platform is configured.
+ifeq ($(CONFIG_MALI_PLATFORM_THIRDPARTY)$(CONFIG_MALI_PLATFORM_FAKE),)
+CONFIG_MALI_PLATFORM_DEVICETREE := y
+endif
+
+mali_kbase-$(CONFIG_MALI_PLATFORM_DEVICETREE) += \
+ platform/devicetree/mali_kbase_runtime_pm.o \
+ platform/devicetree/mali_kbase_config_devicetree.o
+ccflags-$(CONFIG_MALI_PLATFORM_DEVICETREE) += -I$(src)/platform/devicetree
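For reference, the third-party platform plumbing above resolves roughly as follows; the platform name used here is hypothetical and only for illustration:

    # with CONFIG_MALI_PLATFORM_THIRDPARTY=y and
    # CONFIG_MALI_PLATFORM_THIRDPARTY_NAME="myboard",
    # the Kbuild adds -I$(src)/platform/myboard and, when
    # CONFIG_MALI_MIDGARD=m, includes platform/myboard/Kbuild.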
diff --git a/midgard-0.r2p0/Kconfig b/midgard-0.r2p0/Kconfig
new file mode 100755
index 0000000..201832b
--- /dev/null
+++ b/midgard-0.r2p0/Kconfig
@@ -0,0 +1,225 @@
+#
+# (C) COPYRIGHT 2012-2015 ARM Limited. All rights reserved.
+#
+# This program is free software and is provided to you under the terms of the
+# GNU General Public License version 2 as published by the Free Software
+# Foundation, and any use by you of this program is subject to the terms
+# of such GNU licence.
+#
+# A copy of the licence is included with the program, and can also be obtained
+# from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
+# Boston, MA 02110-1301, USA.
+#
+#
+
+
+
+menuconfig MALI_MIDGARD
+ tristate "Mali Midgard series support"
+ select GPU_TRACEPOINTS if ANDROID
+ default n
+ help
+	  Enable this option to build support for an ARM Mali Midgard GPU.
+
+ To compile this driver as a module, choose M here:
+ this will generate a single module, called mali_kbase.
+
+config MALI_GATOR_SUPPORT
+ bool "Streamline support via Gator"
+ depends on MALI_MIDGARD
+ default n
+ help
+ Adds diagnostic support for use with the ARM Streamline Performance Analyzer.
+ You will need the Gator device driver already loaded before loading this driver when enabling
+ Streamline debug support.
+ This is a legacy interface required by older versions of Streamline.
+
+config MALI_MIDGARD_DVFS
+ bool "Enable legacy DVFS"
+ depends on MALI_MIDGARD && !MALI_DEVFREQ && !MALI_PLATFORM_DEVICETREE
+ default n
+ help
+ Choose this option to enable legacy DVFS in the Mali Midgard DDK.
+
+config MALI_MIDGARD_ENABLE_TRACE
+ bool "Enable kbase tracing"
+ depends on MALI_MIDGARD
+ default n
+ help
+	  Enables tracing in kbase. The trace log is available through
+	  the "mali_trace" debugfs file when CONFIG_DEBUG_FS is enabled.
+
+config MALI_DEVFREQ
+ bool "devfreq support for Mali"
+ depends on MALI_MIDGARD && PM_DEVFREQ
+ help
+ Support devfreq for Mali.
+
+ Using the devfreq framework and, by default, the simpleondemand
+ governor, the frequency of Mali will be dynamically selected from the
+ available OPPs.
+
+config MALI_DMA_FENCE
+ bool "DMA_BUF fence support for Mali"
+ depends on MALI_MIDGARD && !KDS
+ default n
+ help
+ Support DMA_BUF fences for Mali.
+
+ This option should only be enabled if KDS is not present and
+ the Linux Kernel has built in support for DMA_BUF fences.
+
+# MALI_EXPERT configuration options
+
+menuconfig MALI_EXPERT
+ depends on MALI_MIDGARD
+ bool "Enable Expert Settings"
+ default n
+ help
+ Enabling this option and modifying the default settings may produce a driver with performance or
+ other limitations.
+
+config MALI_PRFCNT_SET_SECONDARY
+ bool "Use secondary set of performance counters"
+ depends on MALI_MIDGARD && MALI_EXPERT
+ default n
+ help
+	  Select this option to use the secondary set of performance counters. Kernel
+ features that depend on an access to the primary set of counters may
+ become unavailable. Enabling this option will prevent power management
+ from working optimally and may cause instrumentation tools to return
+ bogus results.
+
+ If unsure, say N.
+
+config MALI_PLATFORM_FAKE
+ bool "Enable fake platform device support"
+ depends on MALI_MIDGARD && MALI_EXPERT
+ default n
+ help
+ When you start to work with the Mali Midgard series device driver the platform-specific code of
+ the Linux kernel for your platform may not be complete. In this situation the kernel device driver
+ supports creating the platform device outside of the Linux platform-specific code.
+	  Enable this option if you would like to use a platform device configuration from within the device driver.
+
+choice
+ prompt "Platform configuration"
+ depends on MALI_MIDGARD && MALI_EXPERT
+ default MALI_PLATFORM_DEVICETREE
+ help
+ Select the SOC platform that contains a Mali Midgard GPU
+
+config MALI_PLATFORM_DEVICETREE
+ bool "Device Tree platform"
+ depends on OF
+ help
+ Select this option to use Device Tree with the Mali driver.
+
+ When using this option the Mali driver will get the details of the
+ GPU hardware from the Device Tree. This means that the same driver
+ binary can run on multiple platforms as long as all the GPU hardware
+ details are described in the device tree.
+
+ Device Tree is the recommended method for the Mali driver platform
+ integration.
+
+config MALI_PLATFORM_VEXPRESS
+ depends on ARCH_VEXPRESS && (ARCH_VEXPRESS_CA9X4 || ARCH_VEXPRESS_CA15X4)
+ bool "Versatile Express"
+config MALI_PLATFORM_VEXPRESS_VIRTEX7_40MHZ
+ depends on ARCH_VEXPRESS && (ARCH_VEXPRESS_CA9X4 || ARCH_VEXPRESS_CA15X4)
+	bool "Versatile Express w/Virtex7 @ 40MHz"
+config MALI_PLATFORM_GOLDFISH
+ depends on ARCH_GOLDFISH
+ bool "Android Goldfish virtual CPU"
+config MALI_PLATFORM_PBX
+ depends on ARCH_REALVIEW && REALVIEW_EB_A9MP && MACH_REALVIEW_PBX
+ bool "Realview PBX-A9"
+config MALI_PLATFORM_THIRDPARTY
+ bool "Third Party Platform"
+endchoice
+
+config MALI_PLATFORM_THIRDPARTY_NAME
+ depends on MALI_MIDGARD && MALI_PLATFORM_THIRDPARTY && MALI_EXPERT
+ string "Third party platform name"
+ help
+	  Enter the name of a third party platform that is supported. The third-party configuration
+ file must be in midgard/config/tpip/mali_kbase_config_xxx.c where xxx is the name
+ specified here.
+
+config MALI_DEBUG
+ bool "Debug build"
+ depends on MALI_MIDGARD && MALI_EXPERT
+ default n
+ help
+ Select this option for increased checking and reporting of errors.
+
+config MALI_FENCE_DEBUG
+ bool "Debug sync fence usage"
+ depends on MALI_MIDGARD && MALI_EXPERT && SYNC
+ default y if MALI_DEBUG
+ help
+ Select this option to enable additional checking and reporting on the
+ use of sync fences in the Mali driver.
+
+ This will add a 3s timeout to all sync fence waits in the Mali
+ driver, so that when work for Mali has been waiting on a sync fence
+ for a long time a debug message will be printed, detailing what fence
+ is causing the block, and which dependent Mali atoms are blocked as a
+ result of this.
+
+ The timeout can be changed at runtime through the js_soft_timeout
+ device attribute, where the timeout is specified in milliseconds.
+
+config MALI_NO_MALI
+ bool "No Mali"
+ depends on MALI_MIDGARD && MALI_EXPERT
+ default n
+ help
+ This can be used to test the driver in a simulated environment
+ whereby the hardware is not physically present. If the hardware is physically
+ present it will not be used. This can be used to test the majority of the
+ driver without needing actual hardware or for software benchmarking.
+ All calls to the simulated hardware will complete immediately as if the hardware
+ completed the task.
+
+config MALI_ERROR_INJECT
+ bool "Error injection"
+ depends on MALI_MIDGARD && MALI_EXPERT && MALI_NO_MALI
+ default n
+ help
+ Enables insertion of errors to test module failure and recovery mechanisms.
+
+config MALI_TRACE_TIMELINE
+ bool "Timeline tracing"
+ depends on MALI_MIDGARD && MALI_EXPERT
+ default n
+ help
+ Enables timeline tracing through the kernel tracepoint system.
+
+config MALI_SYSTEM_TRACE
+ bool "Enable system event tracing support"
+ depends on MALI_MIDGARD && MALI_EXPERT
+ default n
+ help
+ Choose this option to enable system trace events for each
+ kbase event. This is typically used for debugging but has
+ minimal overhead when not in use. Enable only if you know what
+ you are doing.
+
+config MALI_GPU_MMU_AARCH64
+ bool "Use AArch64 page tables"
+ depends on MALI_MIDGARD && MALI_EXPERT
+ default n
+ help
+ Use AArch64 format page tables for the GPU instead of LPAE-style.
+ The two formats have the same functionality and performance but a
+ future GPU may deprecate or remove the legacy LPAE-style format.
+
+ The LPAE-style format is supported on all Midgard and current Bifrost
+ GPUs. Enabling AArch64 format restricts the driver to only supporting
+ Bifrost GPUs.
+
+ If in doubt, say N.
+
+source "drivers/gpu/arm/midgard/platform/Kconfig"
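As a usage sketch, an in-tree build selects these options through the kernel configuration; one plausible .config fragment for a device-tree based board (assuming OF and PM_DEVFREQ are already enabled — values are illustrative, not mandated by this commit):

    CONFIG_MALI_MIDGARD=m
    CONFIG_MALI_EXPERT=y
    CONFIG_MALI_PLATFORM_DEVICETREE=y
    CONFIG_MALI_DEVFREQ=y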
diff --git a/midgard-0.r2p0/Makefile b/midgard-0.r2p0/Makefile
new file mode 100755
index 0000000..e1625e6
--- /dev/null
+++ b/midgard-0.r2p0/Makefile
@@ -0,0 +1,42 @@
+#
+# (C) COPYRIGHT 2010-2016 ARM Limited. All rights reserved.
+#
+# This program is free software and is provided to you under the terms of the
+# GNU General Public License version 2 as published by the Free Software
+# Foundation, and any use by you of this program is subject to the terms
+# of such GNU licence.
+#
+# A copy of the licence is included with the program, and can also be obtained
+# from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
+# Boston, MA 02110-1301, USA.
+#
+#
+
+
+
+KDIR ?= /lib/modules/$(shell uname -r)/build
+
+BUSLOG_PATH_RELATIVE = $(CURDIR)/../../../..
+UMP_PATH_RELATIVE = $(CURDIR)/../../../base/ump
+KBASE_PATH_RELATIVE = $(CURDIR)
+KDS_PATH_RELATIVE = $(CURDIR)/../../../..
+EXTRA_SYMBOLS = $(UMP_PATH_RELATIVE)/src/Module.symvers
+
+ifeq ($(MALI_UNIT_TEST), 1)
+ EXTRA_SYMBOLS += $(KBASE_PATH_RELATIVE)/tests/internal/src/kernel_assert_module/linux/Module.symvers
+endif
+
+ifeq ($(MALI_BUS_LOG), 1)
+#Add bus logger symbols
+EXTRA_SYMBOLS += $(BUSLOG_PATH_RELATIVE)/drivers/base/bus_logger/Module.symvers
+endif
+
+# GPL driver supports KDS
+EXTRA_SYMBOLS += $(KDS_PATH_RELATIVE)/drivers/base/kds/Module.symvers
+
+# we get the symbols from modules using KBUILD_EXTRA_SYMBOLS to prevent warnings about unknown functions
+all:
+ $(MAKE) -C $(KDIR) M=$(CURDIR) EXTRA_CFLAGS="-I$(CURDIR)/../../../../include -I$(CURDIR)/../../../../tests/include $(SCONS_CFLAGS)" $(SCONS_CONFIGS) KBUILD_EXTRA_SYMBOLS="$(EXTRA_SYMBOLS)" modules
+
+clean:
+ $(MAKE) -C $(KDIR) M=$(CURDIR) clean
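A minimal standalone usage sketch of this wrapper (KDIR already defaults to the running kernel's build tree; note that the EXTRA_SYMBOLS paths assume the surrounding ARM DDK tree layout, which this repository does not ship):

    make -C midgard-0.r2p0 KDIR=/lib/modules/$(uname -r)/build
    make -C midgard-0.r2p0 clean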
diff --git a/midgard-0.r2p0/Makefile.kbase b/midgard-0.r2p0/Makefile.kbase
new file mode 100755
index 0000000..2bef9c2
--- /dev/null
+++ b/midgard-0.r2p0/Makefile.kbase
@@ -0,0 +1,17 @@
+#
+# (C) COPYRIGHT 2010 ARM Limited. All rights reserved.
+#
+# This program is free software and is provided to you under the terms of the
+# GNU General Public License version 2 as published by the Free Software
+# Foundation, and any use by you of this program is subject to the terms
+# of such GNU licence.
+#
+# A copy of the licence is included with the program, and can also be obtained
+# from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
+# Boston, MA 02110-1301, USA.
+#
+#
+
+
+EXTRA_CFLAGS += -I$(ROOT) -I$(KBASE_PATH) -I$(OSK_PATH)/src/linux/include -I$(KBASE_PATH)/platform_$(PLATFORM)
+
diff --git a/midgard-0.r2p0/backend/gpu/Kbuild b/midgard-0.r2p0/backend/gpu/Kbuild
new file mode 100755
index 0000000..a39df41
--- /dev/null
+++ b/midgard-0.r2p0/backend/gpu/Kbuild
@@ -0,0 +1,62 @@
+#
+# (C) COPYRIGHT 2014 ARM Limited. All rights reserved.
+#
+# This program is free software and is provided to you under the terms of the
+# GNU General Public License version 2 as published by the Free Software
+# Foundation, and any use by you of this program is subject to the terms
+# of such GNU licence.
+#
+# A copy of the licence is included with the program, and can also be obtained
+# from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
+# Boston, MA 02110-1301, USA.
+#
+#
+
+
+BACKEND += \
+ backend/gpu/mali_kbase_cache_policy_backend.c \
+ backend/gpu/mali_kbase_device_hw.c \
+ backend/gpu/mali_kbase_gpu.c \
+ backend/gpu/mali_kbase_gpuprops_backend.c \
+ backend/gpu/mali_kbase_debug_job_fault_backend.c \
+ backend/gpu/mali_kbase_irq_linux.c \
+ backend/gpu/mali_kbase_instr_backend.c \
+ backend/gpu/mali_kbase_jm_as.c \
+ backend/gpu/mali_kbase_jm_hw.c \
+ backend/gpu/mali_kbase_jm_rb.c \
+ backend/gpu/mali_kbase_js_affinity.c \
+ backend/gpu/mali_kbase_js_backend.c \
+ backend/gpu/mali_kbase_mmu_hw_direct.c \
+ backend/gpu/mali_kbase_pm_backend.c \
+ backend/gpu/mali_kbase_pm_driver.c \
+ backend/gpu/mali_kbase_pm_metrics.c \
+ backend/gpu/mali_kbase_pm_ca.c \
+ backend/gpu/mali_kbase_pm_ca_fixed.c \
+ backend/gpu/mali_kbase_pm_always_on.c \
+ backend/gpu/mali_kbase_pm_coarse_demand.c \
+ backend/gpu/mali_kbase_pm_demand.c \
+ backend/gpu/mali_kbase_pm_policy.c \
+ backend/gpu/mali_kbase_time.c
+
+ifeq ($(MALI_CUSTOMER_RELEASE),0)
+BACKEND += \
+ backend/gpu/mali_kbase_pm_ca_random.c \
+ backend/gpu/mali_kbase_pm_demand_always_powered.c \
+ backend/gpu/mali_kbase_pm_fast_start.c
+endif
+
+ifeq ($(CONFIG_MALI_DEVFREQ),y)
+BACKEND += backend/gpu/mali_kbase_devfreq.c
+endif
+
+ifeq ($(CONFIG_MALI_NO_MALI),y)
+ # Dummy model
+ BACKEND += backend/gpu/mali_kbase_model_dummy.c
+ BACKEND += backend/gpu/mali_kbase_model_linux.c
+ # HW error simulation
+ BACKEND += backend/gpu/mali_kbase_model_error_generator.c
+endif
+
+ifeq ($(CONFIG_DEVFREQ_THERMAL),y)
+ BACKEND += backend/gpu/mali_kbase_power_model_simple.c
+endif
diff --git a/midgard-0.r2p0/backend/gpu/mali_kbase_backend_config.h b/midgard-0.r2p0/backend/gpu/mali_kbase_backend_config.h
new file mode 100755
index 0000000..c8ae87e
--- /dev/null
+++ b/midgard-0.r2p0/backend/gpu/mali_kbase_backend_config.h
@@ -0,0 +1,29 @@
+/*
+ *
+ * (C) COPYRIGHT 2014-2015 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained
+ * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
+ * Boston, MA 02110-1301, USA.
+ *
+ */
+
+
+
+/*
+ * Backend specific configuration
+ */
+
+#ifndef _KBASE_BACKEND_CONFIG_H_
+#define _KBASE_BACKEND_CONFIG_H_
+
+/* Enable GPU reset API */
+#define KBASE_GPU_RESET_EN 1
+
+#endif /* _KBASE_BACKEND_CONFIG_H_ */
+
diff --git a/midgard-0.r2p0/backend/gpu/mali_kbase_cache_policy_backend.c b/midgard-0.r2p0/backend/gpu/mali_kbase_cache_policy_backend.c
new file mode 100755
index 0000000..fef9a2c
--- /dev/null
+++ b/midgard-0.r2p0/backend/gpu/mali_kbase_cache_policy_backend.c
@@ -0,0 +1,29 @@
+/*
+ *
+ * (C) COPYRIGHT 2015-2016 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained
+ * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
+ * Boston, MA 02110-1301, USA.
+ *
+ */
+
+
+
+#include "backend/gpu/mali_kbase_cache_policy_backend.h"
+#include <backend/gpu/mali_kbase_device_internal.h>
+
+void kbase_cache_set_coherency_mode(struct kbase_device *kbdev,
+ u32 mode)
+{
+ kbdev->current_gpu_coherency_mode = mode;
+
+ if (kbase_hw_has_feature(kbdev, BASE_HW_FEATURE_COHERENCY_REG))
+ kbase_reg_write(kbdev, COHERENCY_ENABLE, mode, NULL);
+}
+
diff --git a/midgard-0.r2p0/backend/gpu/mali_kbase_cache_policy_backend.h b/midgard-0.r2p0/backend/gpu/mali_kbase_cache_policy_backend.h
new file mode 100755
index 0000000..fe98691
--- /dev/null
+++ b/midgard-0.r2p0/backend/gpu/mali_kbase_cache_policy_backend.h
@@ -0,0 +1,34 @@
+/*
+ *
+ * (C) COPYRIGHT 2015-2016 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained
+ * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
+ * Boston, MA 02110-1301, USA.
+ *
+ */
+
+
+
+
+#ifndef _KBASE_CACHE_POLICY_BACKEND_H_
+#define _KBASE_CACHE_POLICY_BACKEND_H_
+
+#include "mali_kbase.h"
+#include "mali_base_kernel.h"
+
+/**
+ * kbase_cache_set_coherency_mode() - Sets the system coherency mode
+ * in the GPU.
+ * @kbdev: Device pointer
+ * @mode: Coherency mode. COHERENCY_ACE/ACE_LITE
+ */
+void kbase_cache_set_coherency_mode(struct kbase_device *kbdev,
+ u32 mode);
+
+#endif				/* _KBASE_CACHE_POLICY_BACKEND_H_ */
diff --git a/midgard-0.r2p0/backend/gpu/mali_kbase_debug_job_fault_backend.c b/midgard-0.r2p0/backend/gpu/mali_kbase_debug_job_fault_backend.c
new file mode 100755
index 0000000..7851ea6
--- /dev/null
+++ b/midgard-0.r2p0/backend/gpu/mali_kbase_debug_job_fault_backend.c
@@ -0,0 +1,157 @@
+/*
+ *
+ * (C) COPYRIGHT 2012-2015 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained
+ * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
+ * Boston, MA 02110-1301, USA.
+ *
+ */
+
+
+
+#include <mali_kbase.h>
+#include <backend/gpu/mali_kbase_device_internal.h>
+#include "mali_kbase_debug_job_fault.h"
+
+#ifdef CONFIG_DEBUG_FS
+
+/*GPU_CONTROL_REG(r)*/
+static int gpu_control_reg_snapshot[] = {
+ GPU_ID,
+ SHADER_READY_LO,
+ SHADER_READY_HI,
+ TILER_READY_LO,
+ TILER_READY_HI,
+ L2_READY_LO,
+ L2_READY_HI
+};
+
+/* JOB_CONTROL_REG(r) */
+static int job_control_reg_snapshot[] = {
+ JOB_IRQ_MASK,
+ JOB_IRQ_STATUS
+};
+
+/* JOB_SLOT_REG(n,r) */
+static int job_slot_reg_snapshot[] = {
+ JS_HEAD_LO,
+ JS_HEAD_HI,
+ JS_TAIL_LO,
+ JS_TAIL_HI,
+ JS_AFFINITY_LO,
+ JS_AFFINITY_HI,
+ JS_CONFIG,
+ JS_STATUS,
+ JS_HEAD_NEXT_LO,
+ JS_HEAD_NEXT_HI,
+ JS_AFFINITY_NEXT_LO,
+ JS_AFFINITY_NEXT_HI,
+ JS_CONFIG_NEXT
+};
+
+/*MMU_REG(r)*/
+static int mmu_reg_snapshot[] = {
+ MMU_IRQ_MASK,
+ MMU_IRQ_STATUS
+};
+
+/* MMU_AS_REG(n,r) */
+static int as_reg_snapshot[] = {
+ AS_TRANSTAB_LO,
+ AS_TRANSTAB_HI,
+ AS_MEMATTR_LO,
+ AS_MEMATTR_HI,
+ AS_FAULTSTATUS,
+ AS_FAULTADDRESS_LO,
+ AS_FAULTADDRESS_HI,
+ AS_STATUS
+};
+
+bool kbase_debug_job_fault_reg_snapshot_init(struct kbase_context *kctx,
+ int reg_range)
+{
+ int i, j;
+ int offset = 0;
+ int slot_number;
+ int as_number;
+
+ if (kctx->reg_dump == NULL)
+ return false;
+
+ slot_number = kctx->kbdev->gpu_props.num_job_slots;
+ as_number = kctx->kbdev->gpu_props.num_address_spaces;
+
+ /* get the GPU control registers*/
+ for (i = 0; i < sizeof(gpu_control_reg_snapshot)/4; i++) {
+ kctx->reg_dump[offset] =
+ GPU_CONTROL_REG(gpu_control_reg_snapshot[i]);
+ offset += 2;
+ }
+
+ /* get the Job control registers*/
+ for (i = 0; i < sizeof(job_control_reg_snapshot)/4; i++) {
+ kctx->reg_dump[offset] =
+ JOB_CONTROL_REG(job_control_reg_snapshot[i]);
+ offset += 2;
+ }
+
+ /* get the Job Slot registers*/
+ for (j = 0; j < slot_number; j++) {
+ for (i = 0; i < sizeof(job_slot_reg_snapshot)/4; i++) {
+ kctx->reg_dump[offset] =
+ JOB_SLOT_REG(j, job_slot_reg_snapshot[i]);
+ offset += 2;
+ }
+ }
+
+ /* get the MMU registers*/
+ for (i = 0; i < sizeof(mmu_reg_snapshot)/4; i++) {
+ kctx->reg_dump[offset] = MMU_REG(mmu_reg_snapshot[i]);
+ offset += 2;
+ }
+
+ /* get the Address space registers*/
+ for (j = 0; j < as_number; j++) {
+ for (i = 0; i < sizeof(as_reg_snapshot)/4; i++) {
+ kctx->reg_dump[offset] =
+ MMU_AS_REG(j, as_reg_snapshot[i]);
+ offset += 2;
+ }
+ }
+
+ WARN_ON(offset >= (reg_range*2/4));
+
+ /* set the termination flag*/
+ kctx->reg_dump[offset] = REGISTER_DUMP_TERMINATION_FLAG;
+ kctx->reg_dump[offset + 1] = REGISTER_DUMP_TERMINATION_FLAG;
+
+ dev_dbg(kctx->kbdev->dev, "kbase_job_fault_reg_snapshot_init:%d\n",
+ offset);
+
+ return true;
+}
+
+bool kbase_job_fault_get_reg_snapshot(struct kbase_context *kctx)
+{
+ int offset = 0;
+
+ if (kctx->reg_dump == NULL)
+ return false;
+
+ while (kctx->reg_dump[offset] != REGISTER_DUMP_TERMINATION_FLAG) {
+ kctx->reg_dump[offset+1] =
+ kbase_reg_read(kctx->kbdev,
+ kctx->reg_dump[offset], NULL);
+ offset += 2;
+ }
+ return true;
+}
+
+
+#endif
diff --git a/midgard-0.r2p0/backend/gpu/mali_kbase_devfreq.c b/midgard-0.r2p0/backend/gpu/mali_kbase_devfreq.c
new file mode 100755
index 0000000..e0752ab
--- /dev/null
+++ b/midgard-0.r2p0/backend/gpu/mali_kbase_devfreq.c
@@ -0,0 +1,299 @@
+/*
+ *
+ * (C) COPYRIGHT 2014-2016 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained
+ * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
+ * Boston, MA 02110-1301, USA.
+ *
+ */
+
+
+
+#include <mali_kbase.h>
+#include <mali_kbase_tlstream.h>
+#include <mali_kbase_config_defaults.h>
+#include <backend/gpu/mali_kbase_pm_internal.h>
+#ifdef CONFIG_DEVFREQ_THERMAL
+#include <backend/gpu/mali_kbase_power_model_simple.h>
+#endif
+
+#include <linux/clk.h>
+#include <linux/devfreq.h>
+#ifdef CONFIG_DEVFREQ_THERMAL
+#include <linux/devfreq_cooling.h>
+#endif
+
+#include <linux/version.h>
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 13, 0)
+#include <linux/pm_opp.h>
+#else /* Linux >= 3.13 */
+/* In 3.13 the OPP include header file, types, and functions were all
+ * renamed. Use the old filename for the include, and define the new names to
+ * the old, when an old kernel is detected.
+ */
+#include <linux/opp.h>
+#define dev_pm_opp opp
+#define dev_pm_opp_get_voltage opp_get_voltage
+#define dev_pm_opp_get_opp_count opp_get_opp_count
+#define dev_pm_opp_find_freq_ceil opp_find_freq_ceil
+#endif /* Linux >= 3.13 */
+
+
+static int
+kbase_devfreq_target(struct device *dev, unsigned long *target_freq, u32 flags)
+{
+ struct kbase_device *kbdev = dev_get_drvdata(dev);
+ struct dev_pm_opp *opp;
+ unsigned long freq = 0;
+ unsigned long voltage;
+ int err;
+
+ freq = *target_freq;
+
+	rcu_read_lock();
+	opp = devfreq_recommended_opp(dev, &freq, flags);
+	if (IS_ERR_OR_NULL(opp)) {
+		rcu_read_unlock();
+		dev_err(dev, "Failed to get opp (%ld)\n", PTR_ERR(opp));
+		return PTR_ERR(opp);
+	}
+	voltage = dev_pm_opp_get_voltage(opp);
+	rcu_read_unlock();
+
+ /*
+ * Only update if there is a change of frequency
+ */
+ if (kbdev->current_freq == freq) {
+ *target_freq = freq;
+ return 0;
+ }
+
+#ifdef CONFIG_REGULATOR
+ if (kbdev->regulator && kbdev->current_voltage != voltage
+ && kbdev->current_freq < freq) {
+ err = regulator_set_voltage(kbdev->regulator, voltage, voltage);
+ if (err) {
+ dev_err(dev, "Failed to increase voltage (%d)\n", err);
+ return err;
+ }
+ }
+#endif
+
+ err = clk_set_rate(kbdev->clock, freq);
+ if (err) {
+ dev_err(dev, "Failed to set clock %lu (target %lu)\n",
+ freq, *target_freq);
+ return err;
+ }
+
+#ifdef CONFIG_REGULATOR
+ if (kbdev->regulator && kbdev->current_voltage != voltage
+ && kbdev->current_freq > freq) {
+ err = regulator_set_voltage(kbdev->regulator, voltage, voltage);
+ if (err) {
+ dev_err(dev, "Failed to decrease voltage (%d)\n", err);
+ return err;
+ }
+ }
+#endif
+
+ *target_freq = freq;
+ kbdev->current_voltage = voltage;
+ kbdev->current_freq = freq;
+
+ kbase_tlstream_aux_devfreq_target((u64)freq);
+
+ kbase_pm_reset_dvfs_utilisation(kbdev);
+
+ return err;
+}
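The target callback above applies the usual DVFS ordering: raise the regulator voltage before increasing the clock, and lower it only after the clock has been reduced, so the GPU is never clocked faster than its current supply voltage supports. Below is a minimal sketch of that ordering against generic clk/regulator handles; the helper name is invented and the error handling is trimmed, so treat it as an illustration of the pattern rather than driver code.

	#include <linux/clk.h>
	#include <linux/regulator/consumer.h>

	/* Hedged sketch: scale a device, raising volts before Hz and
	 * dropping them only after the clock has come down. */
	static int dvfs_set_rate(struct clk *clk, struct regulator *reg,
				 unsigned long cur_hz, unsigned long new_hz,
				 int new_uv)
	{
		int err;

		if (new_hz > cur_hz) {
			/* Speeding up: the supply must be raised first. */
			err = regulator_set_voltage(reg, new_uv, new_uv);
			if (err)
				return err;
		}

		err = clk_set_rate(clk, new_hz);
		if (err)
			return err;

		if (new_hz < cur_hz) {
			/* Slowing down: the supply may only drop afterwards. */
			err = regulator_set_voltage(reg, new_uv, new_uv);
		}

		return err;
	}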
+
+static int
+kbase_devfreq_cur_freq(struct device *dev, unsigned long *freq)
+{
+ struct kbase_device *kbdev = dev_get_drvdata(dev);
+
+ *freq = kbdev->current_freq;
+
+ return 0;
+}
+
+static int
+kbase_devfreq_status(struct device *dev, struct devfreq_dev_status *stat)
+{
+ struct kbase_device *kbdev = dev_get_drvdata(dev);
+
+ stat->current_frequency = kbdev->current_freq;
+
+ kbase_pm_get_dvfs_utilisation(kbdev,
+ &stat->total_time, &stat->busy_time);
+
+ stat->private_data = NULL;
+
+#ifdef CONFIG_DEVFREQ_THERMAL
+#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 3, 0)
+ if (kbdev->devfreq_cooling)
+ memcpy(&kbdev->devfreq_cooling->last_status, stat,
+ sizeof(*stat));
+#endif
+#endif
+
+ return 0;
+}
+
+static int kbase_devfreq_init_freq_table(struct kbase_device *kbdev,
+ struct devfreq_dev_profile *dp)
+{
+ int count;
+ int i = 0;
+ unsigned long freq = 0;
+ struct dev_pm_opp *opp;
+
+ rcu_read_lock();
+ count = dev_pm_opp_get_opp_count(kbdev->dev);
+ if (count < 0) {
+ rcu_read_unlock();
+ return count;
+ }
+ rcu_read_unlock();
+
+ dp->freq_table = kmalloc_array(count, sizeof(dp->freq_table[0]),
+ GFP_KERNEL);
+ if (!dp->freq_table)
+ return -ENOMEM;
+
+ rcu_read_lock();
+ for (i = 0; i < count; i++, freq++) {
+ opp = dev_pm_opp_find_freq_ceil(kbdev->dev, &freq);
+ if (IS_ERR(opp))
+ break;
+
+ dp->freq_table[i] = freq;
+ }
+ rcu_read_unlock();
+
+ if (count != i)
+		dev_warn(kbdev->dev, "Unable to enumerate all OPPs (%d!=%d)\n",
+ count, i);
+
+ dp->max_state = i;
+
+ return 0;
+}
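kbase_devfreq_init_freq_table() fills the devfreq frequency table by walking the OPP list with dev_pm_opp_find_freq_ceil(): each successful lookup writes the matched frequency back into freq, and the loop then bumps freq by one so the next ceiling search lands on the following OPP. A compressed sketch of the same walk, assuming a device that already has OPPs registered and the pre-4.11, RCU-based OPP API that this driver targets:

	#include <linux/device.h>
	#include <linux/err.h>
	#include <linux/pm_opp.h>
	#include <linux/rcupdate.h>

	/* Hedged sketch: print every OPP frequency of 'dev' in ascending order. */
	static void print_opps(struct device *dev)
	{
		unsigned long freq = 0;
		struct dev_pm_opp *opp;
		int n = 0;

		rcu_read_lock();
		/* Each call returns the lowest OPP at or above 'freq'. */
		while (!IS_ERR(opp = dev_pm_opp_find_freq_ceil(dev, &freq))) {
			pr_info("OPP %d: %lu Hz\n", n++, freq);
			freq++;	/* step past this OPP for the next search */
		}
		rcu_read_unlock();
	}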
+
+static void kbase_devfreq_term_freq_table(struct kbase_device *kbdev)
+{
+ struct devfreq_dev_profile *dp = kbdev->devfreq->profile;
+
+ kfree(dp->freq_table);
+}
+
+static void kbase_devfreq_exit(struct device *dev)
+{
+ struct kbase_device *kbdev = dev_get_drvdata(dev);
+
+ kbase_devfreq_term_freq_table(kbdev);
+}
+
+int kbase_devfreq_init(struct kbase_device *kbdev)
+{
+ struct devfreq_dev_profile *dp;
+ int err;
+
+ if (!kbdev->clock)
+ return -ENODEV;
+
+ kbdev->current_freq = clk_get_rate(kbdev->clock);
+
+ dp = &kbdev->devfreq_profile;
+
+ dp->initial_freq = kbdev->current_freq;
+ dp->polling_ms = 100;
+ dp->target = kbase_devfreq_target;
+ dp->get_dev_status = kbase_devfreq_status;
+ dp->get_cur_freq = kbase_devfreq_cur_freq;
+ dp->exit = kbase_devfreq_exit;
+
+ if (kbase_devfreq_init_freq_table(kbdev, dp))
+ return -EFAULT;
+
+ kbdev->devfreq = devfreq_add_device(kbdev->dev, dp,
+ "simple_ondemand", NULL);
+ if (IS_ERR(kbdev->devfreq)) {
+ kbase_devfreq_term_freq_table(kbdev);
+ return PTR_ERR(kbdev->devfreq);
+ }
+
+ err = devfreq_register_opp_notifier(kbdev->dev, kbdev->devfreq);
+ if (err) {
+ dev_err(kbdev->dev,
+ "Failed to register OPP notifier (%d)\n", err);
+ goto opp_notifier_failed;
+ }
+
+#ifdef CONFIG_DEVFREQ_THERMAL
+ err = kbase_power_model_simple_init(kbdev);
+ if (err && err != -ENODEV && err != -EPROBE_DEFER) {
+ dev_err(kbdev->dev,
+ "Failed to initialize simple power model (%d)\n",
+ err);
+ goto cooling_failed;
+ }
+ if (err == -EPROBE_DEFER)
+ goto cooling_failed;
+ if (err != -ENODEV) {
+ kbdev->devfreq_cooling = of_devfreq_cooling_register_power(
+ kbdev->dev->of_node,
+ kbdev->devfreq,
+ &power_model_simple_ops);
+ if (IS_ERR_OR_NULL(kbdev->devfreq_cooling)) {
+ err = PTR_ERR(kbdev->devfreq_cooling);
+ dev_err(kbdev->dev,
+ "Failed to register cooling device (%d)\n",
+ err);
+ goto cooling_failed;
+ }
+ } else {
+ err = 0;
+ }
+#endif
+
+ return 0;
+
+#ifdef CONFIG_DEVFREQ_THERMAL
+cooling_failed:
+ devfreq_unregister_opp_notifier(kbdev->dev, kbdev->devfreq);
+#endif /* CONFIG_DEVFREQ_THERMAL */
+opp_notifier_failed:
+ if (devfreq_remove_device(kbdev->devfreq))
+ dev_err(kbdev->dev, "Failed to terminate devfreq (%d)\n", err);
+ else
+ kbdev->devfreq = NULL;
+
+ return err;
+}
+
+void kbase_devfreq_term(struct kbase_device *kbdev)
+{
+ int err;
+
+ dev_dbg(kbdev->dev, "Term Mali devfreq\n");
+
+#ifdef CONFIG_DEVFREQ_THERMAL
+ if (kbdev->devfreq_cooling)
+ devfreq_cooling_unregister(kbdev->devfreq_cooling);
+#endif
+
+ devfreq_unregister_opp_notifier(kbdev->dev, kbdev->devfreq);
+
+ err = devfreq_remove_device(kbdev->devfreq);
+ if (err)
+ dev_err(kbdev->dev, "Failed to terminate devfreq (%d)\n", err);
+ else
+ kbdev->devfreq = NULL;
+}
diff --git a/midgard-0.r2p0/backend/gpu/mali_kbase_devfreq.h b/midgard-0.r2p0/backend/gpu/mali_kbase_devfreq.h
new file mode 100755
index 0000000..c0bf8b1
--- /dev/null
+++ b/midgard-0.r2p0/backend/gpu/mali_kbase_devfreq.h
@@ -0,0 +1,24 @@
+/*
+ *
+ * (C) COPYRIGHT 2014 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained
+ * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
+ * Boston, MA 02110-1301, USA.
+ *
+ */
+
+
+
+#ifndef _BASE_DEVFREQ_H_
+#define _BASE_DEVFREQ_H_
+
+int kbase_devfreq_init(struct kbase_device *kbdev);
+void kbase_devfreq_term(struct kbase_device *kbdev);
+
+#endif /* _BASE_DEVFREQ_H_ */
diff --git a/midgard-0.r2p0/backend/gpu/mali_kbase_device_hw.c b/midgard-0.r2p0/backend/gpu/mali_kbase_device_hw.c
new file mode 100755
index 0000000..dcdf15c
--- /dev/null
+++ b/midgard-0.r2p0/backend/gpu/mali_kbase_device_hw.c
@@ -0,0 +1,255 @@
+/*
+ *
+ * (C) COPYRIGHT 2014-2016 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained
+ * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
+ * Boston, MA 02110-1301, USA.
+ *
+ */
+
+
+
+
+/*
+ *
+ */
+#include <mali_kbase.h>
+#include <backend/gpu/mali_kbase_instr_internal.h>
+#include <backend/gpu/mali_kbase_pm_internal.h>
+
+#include <backend/gpu/mali_kbase_device_internal.h>
+
+#if !defined(CONFIG_MALI_NO_MALI)
+
+
+#ifdef CONFIG_DEBUG_FS
+
+
+int kbase_io_history_resize(struct kbase_io_history *h, u16 new_size)
+{
+ struct kbase_io_access *old_buf;
+ struct kbase_io_access *new_buf;
+ unsigned long flags;
+
+ if (!new_size)
+ goto out_err; /* The new size must not be 0 */
+
+ new_buf = vmalloc(new_size * sizeof(*h->buf));
+ if (!new_buf)
+ goto out_err;
+
+ spin_lock_irqsave(&h->lock, flags);
+
+ old_buf = h->buf;
+
+ /* Note: we won't bother with copying the old data over. The dumping
+ * logic wouldn't work properly as it relies on 'count' both as a
+ * counter and as an index to the buffer which would have changed with
+ * the new array. This is a corner case that we don't need to support.
+ */
+ h->count = 0;
+ h->size = new_size;
+ h->buf = new_buf;
+
+ spin_unlock_irqrestore(&h->lock, flags);
+
+ vfree(old_buf);
+
+ return 0;
+
+out_err:
+ return -1;
+}
+
+
+int kbase_io_history_init(struct kbase_io_history *h, u16 n)
+{
+ h->enabled = false;
+ spin_lock_init(&h->lock);
+ h->count = 0;
+ h->size = 0;
+ h->buf = NULL;
+ if (kbase_io_history_resize(h, n))
+ return -1;
+
+ return 0;
+}
+
+
+void kbase_io_history_term(struct kbase_io_history *h)
+{
+ vfree(h->buf);
+ h->buf = NULL;
+}
+
+
+/* kbase_io_history_add - add new entry to the register access history
+ *
+ * @h: Pointer to the history data structure
+ * @addr: Register address
+ * @value: The value that is either read from or written to the register
+ * @write: 1 if it's a register write, 0 if it's a read
+ */
+static void kbase_io_history_add(struct kbase_io_history *h,
+ void __iomem const *addr, u32 value, u8 write)
+{
+ struct kbase_io_access *io;
+ unsigned long flags;
+
+ spin_lock_irqsave(&h->lock, flags);
+
+ io = &h->buf[h->count % h->size];
+ io->addr = (uintptr_t)addr | write;
+ io->value = value;
+ ++h->count;
+ /* If count overflows, move the index by the buffer size so the entire
+ * buffer will still be dumped later */
+ if (unlikely(!h->count))
+ h->count = h->size;
+
+ spin_unlock_irqrestore(&h->lock, flags);
+}
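kbase_io_history_add() keeps a fixed-size ring of register accesses: the entry index is count % size, the low bit of the stored address encodes write versus read (the register block is word aligned, so that bit is otherwise always zero), and count keeps growing so the dump code can tell how many of the last 'size' slots are valid. A standalone userspace sketch of the same scheme, with the locking and overflow handling omitted:

	#include <stdint.h>
	#include <stdio.h>

	struct io_rec { uintptr_t addr; uint32_t value; };

	#define HIST_SIZE 4
	static struct io_rec hist[HIST_SIZE];
	static unsigned long hist_count;

	static void hist_add(const void *addr, uint32_t value, int write)
	{
		struct io_rec *r = &hist[hist_count++ % HIST_SIZE];

		/* Bit 0 of the stored address records the access direction. */
		r->addr = (uintptr_t)addr | (write ? 1 : 0);
		r->value = value;
	}

	static void hist_dump(void)
	{
		unsigned long iters = hist_count < HIST_SIZE ? hist_count : HIST_SIZE;
		unsigned long i;

		for (i = 0; i < iters; i++) {
			struct io_rec *r = &hist[(hist_count - iters + i) % HIST_SIZE];

			printf("%c: reg %p val %08x\n", (r->addr & 1) ? 'w' : 'r',
			       (void *)(r->addr & ~(uintptr_t)1),
			       (unsigned int)r->value);
		}
	}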
+
+
+void kbase_io_history_dump(struct kbase_device *kbdev)
+{
+ struct kbase_io_history *const h = &kbdev->io_history;
+ u16 i;
+ size_t iters;
+ unsigned long flags;
+
+ if (!unlikely(h->enabled))
+ return;
+
+ spin_lock_irqsave(&h->lock, flags);
+
+ dev_err(kbdev->dev, "Register IO History:");
+ iters = (h->size > h->count) ? h->count : h->size;
+ dev_err(kbdev->dev, "Last %zu register accesses of %zu total:\n", iters,
+ h->count);
+ for (i = 0; i < iters; ++i) {
+ struct kbase_io_access *io =
+ &h->buf[(h->count - iters + i) % h->size];
+ char const access = (io->addr & 1) ? 'w' : 'r';
+
+ dev_err(kbdev->dev, "%6i: %c: reg 0x%p val %08x\n", i, access,
+ (void *)(io->addr & ~0x1), io->value);
+ }
+
+ spin_unlock_irqrestore(&h->lock, flags);
+}
+
+
+#endif /* CONFIG_DEBUG_FS */
+
+
+void kbase_reg_write(struct kbase_device *kbdev, u16 offset, u32 value,
+ struct kbase_context *kctx)
+{
+ KBASE_DEBUG_ASSERT(kbdev->pm.backend.gpu_powered);
+ KBASE_DEBUG_ASSERT(kctx == NULL || kctx->as_nr != KBASEP_AS_NR_INVALID);
+ KBASE_DEBUG_ASSERT(kbdev->dev != NULL);
+
+ writel(value, kbdev->reg + offset);
+
+#ifdef CONFIG_DEBUG_FS
+ if (unlikely(kbdev->io_history.enabled))
+ kbase_io_history_add(&kbdev->io_history, kbdev->reg + offset,
+ value, 1);
+#endif /* CONFIG_DEBUG_FS */
+ dev_dbg(kbdev->dev, "w: reg %04x val %08x", offset, value);
+
+ if (kctx && kctx->jctx.tb)
+ kbase_device_trace_register_access(kctx, REG_WRITE, offset,
+ value);
+}
+
+KBASE_EXPORT_TEST_API(kbase_reg_write);
+
+u32 kbase_reg_read(struct kbase_device *kbdev, u16 offset,
+ struct kbase_context *kctx)
+{
+ u32 val;
+ KBASE_DEBUG_ASSERT(kbdev->pm.backend.gpu_powered);
+ KBASE_DEBUG_ASSERT(kctx == NULL || kctx->as_nr != KBASEP_AS_NR_INVALID);
+ KBASE_DEBUG_ASSERT(kbdev->dev != NULL);
+
+ val = readl(kbdev->reg + offset);
+
+#ifdef CONFIG_DEBUG_FS
+ if (unlikely(kbdev->io_history.enabled))
+ kbase_io_history_add(&kbdev->io_history, kbdev->reg + offset,
+ val, 0);
+#endif /* CONFIG_DEBUG_FS */
+ dev_dbg(kbdev->dev, "r: reg %04x val %08x", offset, val);
+
+ if (kctx && kctx->jctx.tb)
+ kbase_device_trace_register_access(kctx, REG_READ, offset, val);
+ return val;
+}
+
+KBASE_EXPORT_TEST_API(kbase_reg_read);
+#endif /* !defined(CONFIG_MALI_NO_MALI) */
+
+/**
+ * kbase_report_gpu_fault - Report a GPU fault.
+ * @kbdev: Kbase device pointer
+ * @multiple: Zero if only GPU_FAULT was raised, non-zero if MULTIPLE_GPU_FAULTS
+ * was also set
+ *
+ * This function is called from the interrupt handler when a GPU fault occurs.
+ * It reports the details of the fault using dev_warn().
+ */
+static void kbase_report_gpu_fault(struct kbase_device *kbdev, int multiple)
+{
+ u32 status;
+ u64 address;
+
+ status = kbase_reg_read(kbdev, GPU_CONTROL_REG(GPU_FAULTSTATUS), NULL);
+ address = (u64) kbase_reg_read(kbdev,
+ GPU_CONTROL_REG(GPU_FAULTADDRESS_HI), NULL) << 32;
+ address |= kbase_reg_read(kbdev,
+ GPU_CONTROL_REG(GPU_FAULTADDRESS_LO), NULL);
+
+ dev_warn(kbdev->dev, "GPU Fault 0x%08x (%s) at 0x%016llx",
+ status & 0xFF,
+ kbase_exception_name(kbdev, status),
+ address);
+ if (multiple)
+ dev_warn(kbdev->dev, "There were multiple GPU faults - some have not been reported\n");
+}
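The fault address is exposed as a HI/LO pair of 32-bit registers, so the handler assembles the 64-bit value by shifting the HI read left by 32 and OR-ing in the LO read. The same split-register pattern in isolation, with a hypothetical read32() accessor standing in for kbase_reg_read():

	#include <stdint.h>

	/* Hedged sketch: combine a HI/LO 32-bit register pair into one 64-bit value. */
	static inline uint64_t read_split64(uint32_t (*read32)(unsigned int reg),
					    unsigned int hi_reg, unsigned int lo_reg)
	{
		uint64_t v;

		v  = (uint64_t)read32(hi_reg) << 32;	/* upper 32 bits */
		v |= read32(lo_reg);			/* lower 32 bits */
		return v;
	}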
+
+void kbase_gpu_interrupt(struct kbase_device *kbdev, u32 val)
+{
+ KBASE_TRACE_ADD(kbdev, CORE_GPU_IRQ, NULL, NULL, 0u, val);
+ if (val & GPU_FAULT)
+ kbase_report_gpu_fault(kbdev, val & MULTIPLE_GPU_FAULTS);
+
+ if (val & RESET_COMPLETED)
+ kbase_pm_reset_done(kbdev);
+
+ if (val & PRFCNT_SAMPLE_COMPLETED)
+ kbase_instr_hwcnt_sample_done(kbdev);
+
+ if (val & CLEAN_CACHES_COMPLETED)
+ kbase_clean_caches_done(kbdev);
+
+ KBASE_TRACE_ADD(kbdev, CORE_GPU_IRQ_CLEAR, NULL, NULL, 0u, val);
+ kbase_reg_write(kbdev, GPU_CONTROL_REG(GPU_IRQ_CLEAR), val, NULL);
+
+ /* kbase_pm_check_transitions must be called after the IRQ has been
+ * cleared. This is because it might trigger further power transitions
+ * and we don't want to miss the interrupt raised to notify us that
+ * these further transitions have finished.
+ */
+ if (val & POWER_CHANGED_ALL)
+ kbase_pm_power_changed(kbdev);
+
+ KBASE_TRACE_ADD(kbdev, CORE_GPU_IRQ_DONE, NULL, NULL, 0u, val);
+}
diff --git a/midgard-0.r2p0/backend/gpu/mali_kbase_device_internal.h b/midgard-0.r2p0/backend/gpu/mali_kbase_device_internal.h
new file mode 100755
index 0000000..5b20445
--- /dev/null
+++ b/midgard-0.r2p0/backend/gpu/mali_kbase_device_internal.h
@@ -0,0 +1,67 @@
+/*
+ *
+ * (C) COPYRIGHT 2014 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained
+ * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
+ * Boston, MA 02110-1301, USA.
+ *
+ */
+
+
+
+
+
+/*
+ * Backend-specific HW access device APIs
+ */
+
+#ifndef _KBASE_DEVICE_INTERNAL_H_
+#define _KBASE_DEVICE_INTERNAL_H_
+
+/**
+ * kbase_reg_write - write to GPU register
+ * @kbdev: Kbase device pointer
+ * @offset: Offset of register
+ * @value: Value to write
+ * @kctx: Kbase context pointer. May be NULL
+ *
+ * Caller must ensure the GPU is powered (@kbdev->pm.gpu_powered != false). If
+ * @kctx is not NULL then the caller must ensure it is scheduled (@kctx->as_nr
+ * != KBASEP_AS_NR_INVALID).
+ */
+void kbase_reg_write(struct kbase_device *kbdev, u16 offset, u32 value,
+ struct kbase_context *kctx);
+
+/**
+ * kbase_reg_read - read from GPU register
+ * @kbdev: Kbase device pointer
+ * @offset: Offset of register
+ * @kctx: Kbase context pointer. May be NULL
+ *
+ * Caller must ensure the GPU is powered (@kbdev->pm.gpu_powered != false). If
+ * @kctx is not NULL then the caller must ensure it is scheduled (@kctx->as_nr
+ * != KBASEP_AS_NR_INVALID).
+ *
+ * Return: Value in desired register
+ */
+u32 kbase_reg_read(struct kbase_device *kbdev, u16 offset,
+ struct kbase_context *kctx);
+
+
+/**
+ * kbase_gpu_interrupt - GPU interrupt handler
+ * @kbdev: Kbase device pointer
+ * @val: The value of the GPU IRQ status register which triggered the call
+ *
+ * This function is called from the interrupt handler when a GPU irq is to be
+ * handled.
+ */
+void kbase_gpu_interrupt(struct kbase_device *kbdev, u32 val);
+
+#endif /* _KBASE_DEVICE_INTERNAL_H_ */
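The kernel-doc above states the accessor preconditions: the GPU must be powered, and a non-NULL kctx must currently hold an address space. A hedged call-site sketch under those assumptions, reusing the GPU_IRQ_MASK register accessed elsewhere in this patch; the helper name is invented for illustration and is not part of the driver:

	/* Caller guarantees GPU power via an active PM reference; no kctx is
	 * passed, so the address-space precondition does not apply. */
	static u32 mask_all_gpu_irqs(struct kbase_device *kbdev)
	{
		u32 old_mask;

		old_mask = kbase_reg_read(kbdev, GPU_CONTROL_REG(GPU_IRQ_MASK), NULL);
		kbase_reg_write(kbdev, GPU_CONTROL_REG(GPU_IRQ_MASK), 0, NULL);

		return old_mask;	/* so the caller can restore it later */
	}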
diff --git a/midgard-0.r2p0/backend/gpu/mali_kbase_gpu.c b/midgard-0.r2p0/backend/gpu/mali_kbase_gpu.c
new file mode 100755
index 0000000..d578fd7
--- /dev/null
+++ b/midgard-0.r2p0/backend/gpu/mali_kbase_gpu.c
@@ -0,0 +1,123 @@
+/*
+ *
+ * (C) COPYRIGHT 2014-2016 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained
+ * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
+ * Boston, MA 02110-1301, USA.
+ *
+ */
+
+
+
+
+/*
+ * Register-based HW access backend APIs
+ */
+#include <mali_kbase.h>
+#include <mali_kbase_hwaccess_backend.h>
+#include <backend/gpu/mali_kbase_irq_internal.h>
+#include <backend/gpu/mali_kbase_jm_internal.h>
+#include <backend/gpu/mali_kbase_js_internal.h>
+#include <backend/gpu/mali_kbase_pm_internal.h>
+
+int kbase_backend_early_init(struct kbase_device *kbdev)
+{
+ int err;
+
+ err = kbasep_platform_device_init(kbdev);
+ if (err)
+ return err;
+
+ /* Ensure we can access the GPU registers */
+ kbase_pm_register_access_enable(kbdev);
+
+ /* Find out GPU properties based on the GPU feature registers */
+ kbase_gpuprops_set(kbdev);
+
+ /* We're done accessing the GPU registers for now. */
+ kbase_pm_register_access_disable(kbdev);
+
+ err = kbase_hwaccess_pm_init(kbdev);
+ if (err)
+ goto fail_pm;
+
+ err = kbase_install_interrupts(kbdev);
+ if (err)
+ goto fail_interrupts;
+
+ return 0;
+
+fail_interrupts:
+ kbase_hwaccess_pm_term(kbdev);
+fail_pm:
+ kbasep_platform_device_term(kbdev);
+
+ return err;
+}
+
+void kbase_backend_early_term(struct kbase_device *kbdev)
+{
+ kbase_release_interrupts(kbdev);
+ kbase_hwaccess_pm_term(kbdev);
+ kbasep_platform_device_term(kbdev);
+}
+
+int kbase_backend_late_init(struct kbase_device *kbdev)
+{
+ int err;
+
+ err = kbase_hwaccess_pm_powerup(kbdev, PM_HW_ISSUES_DETECT);
+ if (err)
+ return err;
+
+ err = kbase_backend_timer_init(kbdev);
+ if (err)
+ goto fail_timer;
+
+#ifdef CONFIG_MALI_DEBUG
+#ifndef CONFIG_MALI_NO_MALI
+ if (kbasep_common_test_interrupt_handlers(kbdev) != 0) {
+		dev_err(kbdev->dev, "Interrupt assignment check failed.\n");
+ err = -EINVAL;
+ goto fail_interrupt_test;
+ }
+#endif /* !CONFIG_MALI_NO_MALI */
+#endif /* CONFIG_MALI_DEBUG */
+
+ err = kbase_job_slot_init(kbdev);
+ if (err)
+ goto fail_job_slot;
+
+ init_waitqueue_head(&kbdev->hwaccess.backend.reset_wait);
+
+ return 0;
+
+fail_job_slot:
+
+#ifdef CONFIG_MALI_DEBUG
+#ifndef CONFIG_MALI_NO_MALI
+fail_interrupt_test:
+#endif /* !CONFIG_MALI_NO_MALI */
+#endif /* CONFIG_MALI_DEBUG */
+
+ kbase_backend_timer_term(kbdev);
+fail_timer:
+ kbase_hwaccess_pm_halt(kbdev);
+
+ return err;
+}
+
+void kbase_backend_late_term(struct kbase_device *kbdev)
+{
+ kbase_job_slot_halt(kbdev);
+ kbase_job_slot_term(kbdev);
+ kbase_backend_timer_term(kbdev);
+ kbase_hwaccess_pm_halt(kbdev);
+}
+
diff --git a/midgard-0.r2p0/backend/gpu/mali_kbase_gpuprops_backend.c b/midgard-0.r2p0/backend/gpu/mali_kbase_gpuprops_backend.c
new file mode 100755
index 0000000..d410cd2
--- /dev/null
+++ b/midgard-0.r2p0/backend/gpu/mali_kbase_gpuprops_backend.c
@@ -0,0 +1,105 @@
+/*
+ *
+ * (C) COPYRIGHT 2014-2016 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained
+ * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
+ * Boston, MA 02110-1301, USA.
+ *
+ */
+
+
+
+
+
+/*
+ * Base kernel property query backend APIs
+ */
+
+#include <mali_kbase.h>
+#include <backend/gpu/mali_kbase_device_internal.h>
+#include <backend/gpu/mali_kbase_pm_internal.h>
+#include <mali_kbase_hwaccess_gpuprops.h>
+
+void kbase_backend_gpuprops_get(struct kbase_device *kbdev,
+ struct kbase_gpuprops_regdump *regdump)
+{
+ int i;
+
+ /* Fill regdump with the content of the relevant registers */
+ regdump->gpu_id = kbase_reg_read(kbdev, GPU_CONTROL_REG(GPU_ID), NULL);
+
+ regdump->l2_features = kbase_reg_read(kbdev,
+ GPU_CONTROL_REG(L2_FEATURES), NULL);
+ regdump->suspend_size = kbase_reg_read(kbdev,
+ GPU_CONTROL_REG(SUSPEND_SIZE), NULL);
+ regdump->tiler_features = kbase_reg_read(kbdev,
+ GPU_CONTROL_REG(TILER_FEATURES), NULL);
+ regdump->mem_features = kbase_reg_read(kbdev,
+ GPU_CONTROL_REG(MEM_FEATURES), NULL);
+ regdump->mmu_features = kbase_reg_read(kbdev,
+ GPU_CONTROL_REG(MMU_FEATURES), NULL);
+ regdump->as_present = kbase_reg_read(kbdev,
+ GPU_CONTROL_REG(AS_PRESENT), NULL);
+ regdump->js_present = kbase_reg_read(kbdev,
+ GPU_CONTROL_REG(JS_PRESENT), NULL);
+
+ for (i = 0; i < GPU_MAX_JOB_SLOTS; i++)
+ regdump->js_features[i] = kbase_reg_read(kbdev,
+ GPU_CONTROL_REG(JS_FEATURES_REG(i)), NULL);
+
+ for (i = 0; i < BASE_GPU_NUM_TEXTURE_FEATURES_REGISTERS; i++)
+ regdump->texture_features[i] = kbase_reg_read(kbdev,
+ GPU_CONTROL_REG(TEXTURE_FEATURES_REG(i)), NULL);
+
+ regdump->thread_max_threads = kbase_reg_read(kbdev,
+ GPU_CONTROL_REG(THREAD_MAX_THREADS), NULL);
+ regdump->thread_max_workgroup_size = kbase_reg_read(kbdev,
+ GPU_CONTROL_REG(THREAD_MAX_WORKGROUP_SIZE),
+ NULL);
+ regdump->thread_max_barrier_size = kbase_reg_read(kbdev,
+ GPU_CONTROL_REG(THREAD_MAX_BARRIER_SIZE), NULL);
+ regdump->thread_features = kbase_reg_read(kbdev,
+ GPU_CONTROL_REG(THREAD_FEATURES), NULL);
+
+ regdump->shader_present_lo = kbase_reg_read(kbdev,
+ GPU_CONTROL_REG(SHADER_PRESENT_LO), NULL);
+ regdump->shader_present_hi = kbase_reg_read(kbdev,
+ GPU_CONTROL_REG(SHADER_PRESENT_HI), NULL);
+
+ regdump->tiler_present_lo = kbase_reg_read(kbdev,
+ GPU_CONTROL_REG(TILER_PRESENT_LO), NULL);
+ regdump->tiler_present_hi = kbase_reg_read(kbdev,
+ GPU_CONTROL_REG(TILER_PRESENT_HI), NULL);
+
+ regdump->l2_present_lo = kbase_reg_read(kbdev,
+ GPU_CONTROL_REG(L2_PRESENT_LO), NULL);
+ regdump->l2_present_hi = kbase_reg_read(kbdev,
+ GPU_CONTROL_REG(L2_PRESENT_HI), NULL);
+}
+
+void kbase_backend_gpuprops_get_features(struct kbase_device *kbdev,
+ struct kbase_gpuprops_regdump *regdump)
+{
+ if (kbase_hw_has_feature(kbdev, BASE_HW_FEATURE_COHERENCY_REG)) {
+ /* Ensure we can access the GPU registers */
+ kbase_pm_register_access_enable(kbdev);
+
+ regdump->coherency_features = kbase_reg_read(kbdev,
+ GPU_CONTROL_REG(COHERENCY_FEATURES), NULL);
+
+ /* We're done accessing the GPU registers for now. */
+ kbase_pm_register_access_disable(kbdev);
+ } else {
+ /* Pre COHERENCY_FEATURES we only supported ACE_LITE */
+ regdump->coherency_features =
+ COHERENCY_FEATURE_BIT(COHERENCY_NONE) |
+ COHERENCY_FEATURE_BIT(COHERENCY_ACE_LITE);
+ }
+}
+
diff --git a/midgard-0.r2p0/backend/gpu/mali_kbase_instr_backend.c b/midgard-0.r2p0/backend/gpu/mali_kbase_instr_backend.c
new file mode 100755
index 0000000..7ad309e
--- /dev/null
+++ b/midgard-0.r2p0/backend/gpu/mali_kbase_instr_backend.c
@@ -0,0 +1,492 @@
+/*
+ *
+ * (C) COPYRIGHT 2014-2016 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained
+ * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
+ * Boston, MA 02110-1301, USA.
+ *
+ */
+
+
+
+
+
+/*
+ * GPU backend instrumentation APIs.
+ */
+
+#include <mali_kbase.h>
+#include <mali_midg_regmap.h>
+#include <mali_kbase_hwaccess_instr.h>
+#include <backend/gpu/mali_kbase_device_internal.h>
+#include <backend/gpu/mali_kbase_pm_internal.h>
+#include <backend/gpu/mali_kbase_instr_internal.h>
+
+/**
+ * kbasep_instr_hwcnt_cacheclean - Issue Cache Clean & Invalidate command to
+ * hardware
+ *
+ * @kbdev: Kbase device
+ */
+static void kbasep_instr_hwcnt_cacheclean(struct kbase_device *kbdev)
+{
+ unsigned long flags;
+ unsigned long pm_flags;
+ u32 irq_mask;
+
+ spin_lock_irqsave(&kbdev->hwcnt.lock, flags);
+ KBASE_DEBUG_ASSERT(kbdev->hwcnt.backend.state ==
+ KBASE_INSTR_STATE_REQUEST_CLEAN);
+
+ /* Enable interrupt */
+ spin_lock_irqsave(&kbdev->hwaccess_lock, pm_flags);
+ irq_mask = kbase_reg_read(kbdev, GPU_CONTROL_REG(GPU_IRQ_MASK), NULL);
+ kbase_reg_write(kbdev, GPU_CONTROL_REG(GPU_IRQ_MASK),
+ irq_mask | CLEAN_CACHES_COMPLETED, NULL);
+ spin_unlock_irqrestore(&kbdev->hwaccess_lock, pm_flags);
+
+	/* clean & invalidate the caches so we're sure the mmu tables for the
+	 * dump buffer are valid */
+ KBASE_TRACE_ADD(kbdev, CORE_GPU_CLEAN_INV_CACHES, NULL, NULL, 0u, 0);
+ kbase_reg_write(kbdev, GPU_CONTROL_REG(GPU_COMMAND),
+ GPU_COMMAND_CLEAN_INV_CACHES, NULL);
+ kbdev->hwcnt.backend.state = KBASE_INSTR_STATE_CLEANING;
+
+ spin_unlock_irqrestore(&kbdev->hwcnt.lock, flags);
+}
+
+int kbase_instr_hwcnt_enable_internal(struct kbase_device *kbdev,
+ struct kbase_context *kctx,
+ struct kbase_uk_hwcnt_setup *setup)
+{
+ unsigned long flags, pm_flags;
+ int err = -EINVAL;
+ u32 irq_mask;
+ int ret;
+ u64 shader_cores_needed;
+ u32 prfcnt_config;
+
+ shader_cores_needed = kbase_pm_get_present_cores(kbdev,
+ KBASE_PM_CORE_SHADER);
+
+ /* alignment failure */
+ if ((setup->dump_buffer == 0ULL) || (setup->dump_buffer & (2048 - 1)))
+ goto out_err;
+
+ /* Override core availability policy to ensure all cores are available
+ */
+ kbase_pm_ca_instr_enable(kbdev);
+
+ /* Request the cores early on synchronously - we'll release them on any
+ * errors (e.g. instrumentation already active) */
+ kbase_pm_request_cores_sync(kbdev, true, shader_cores_needed);
+
+ spin_lock_irqsave(&kbdev->hwcnt.lock, flags);
+
+ if (kbdev->hwcnt.backend.state != KBASE_INSTR_STATE_DISABLED) {
+ /* Instrumentation is already enabled */
+ spin_unlock_irqrestore(&kbdev->hwcnt.lock, flags);
+ goto out_unrequest_cores;
+ }
+
+ /* Enable interrupt */
+ spin_lock_irqsave(&kbdev->hwaccess_lock, pm_flags);
+ irq_mask = kbase_reg_read(kbdev, GPU_CONTROL_REG(GPU_IRQ_MASK), NULL);
+ kbase_reg_write(kbdev, GPU_CONTROL_REG(GPU_IRQ_MASK), irq_mask |
+ PRFCNT_SAMPLE_COMPLETED, NULL);
+ spin_unlock_irqrestore(&kbdev->hwaccess_lock, pm_flags);
+
+ /* In use, this context is the owner */
+ kbdev->hwcnt.kctx = kctx;
+ /* Remember the dump address so we can reprogram it later */
+ kbdev->hwcnt.addr = setup->dump_buffer;
+
+ /* Request the clean */
+ kbdev->hwcnt.backend.state = KBASE_INSTR_STATE_REQUEST_CLEAN;
+ kbdev->hwcnt.backend.triggered = 0;
+	/* Clean & invalidate the caches so we're sure the mmu tables for the
+	 * dump buffer are valid */
+ ret = queue_work(kbdev->hwcnt.backend.cache_clean_wq,
+ &kbdev->hwcnt.backend.cache_clean_work);
+ KBASE_DEBUG_ASSERT(ret);
+
+ spin_unlock_irqrestore(&kbdev->hwcnt.lock, flags);
+
+ /* Wait for cacheclean to complete */
+ wait_event(kbdev->hwcnt.backend.wait,
+ kbdev->hwcnt.backend.triggered != 0);
+
+ KBASE_DEBUG_ASSERT(kbdev->hwcnt.backend.state ==
+ KBASE_INSTR_STATE_IDLE);
+
+ kbase_pm_request_l2_caches(kbdev);
+
+ /* Configure */
+ prfcnt_config = kctx->as_nr << PRFCNT_CONFIG_AS_SHIFT;
+#ifdef CONFIG_MALI_PRFCNT_SET_SECONDARY
+ {
+ u32 gpu_id = kbdev->gpu_props.props.raw_props.gpu_id;
+ u32 product_id = (gpu_id & GPU_ID_VERSION_PRODUCT_ID)
+ >> GPU_ID_VERSION_PRODUCT_ID_SHIFT;
+ int arch_v6 = GPU_ID_IS_NEW_FORMAT(product_id);
+
+ if (arch_v6)
+ prfcnt_config |= 1 << PRFCNT_CONFIG_SETSELECT_SHIFT;
+ }
+#endif
+
+ kbase_reg_write(kbdev, GPU_CONTROL_REG(PRFCNT_CONFIG),
+ prfcnt_config | PRFCNT_CONFIG_MODE_OFF, kctx);
+
+ kbase_reg_write(kbdev, GPU_CONTROL_REG(PRFCNT_BASE_LO),
+ setup->dump_buffer & 0xFFFFFFFF, kctx);
+ kbase_reg_write(kbdev, GPU_CONTROL_REG(PRFCNT_BASE_HI),
+ setup->dump_buffer >> 32, kctx);
+ kbase_reg_write(kbdev, GPU_CONTROL_REG(PRFCNT_JM_EN),
+ setup->jm_bm, kctx);
+ kbase_reg_write(kbdev, GPU_CONTROL_REG(PRFCNT_SHADER_EN),
+ setup->shader_bm, kctx);
+ kbase_reg_write(kbdev, GPU_CONTROL_REG(PRFCNT_MMU_L2_EN),
+ setup->mmu_l2_bm, kctx);
+ /* Due to PRLAM-8186 we need to disable the Tiler before we enable the
+ * HW counter dump. */
+ if (kbase_hw_has_issue(kbdev, BASE_HW_ISSUE_8186))
+ kbase_reg_write(kbdev, GPU_CONTROL_REG(PRFCNT_TILER_EN), 0,
+ kctx);
+ else
+ kbase_reg_write(kbdev, GPU_CONTROL_REG(PRFCNT_TILER_EN),
+ setup->tiler_bm, kctx);
+
+ kbase_reg_write(kbdev, GPU_CONTROL_REG(PRFCNT_CONFIG),
+ prfcnt_config | PRFCNT_CONFIG_MODE_MANUAL, kctx);
+
+ /* If HW has PRLAM-8186 we can now re-enable the tiler HW counters dump
+ */
+ if (kbase_hw_has_issue(kbdev, BASE_HW_ISSUE_8186))
+ kbase_reg_write(kbdev, GPU_CONTROL_REG(PRFCNT_TILER_EN),
+ setup->tiler_bm, kctx);
+
+ spin_lock_irqsave(&kbdev->hwcnt.lock, flags);
+
+ kbdev->hwcnt.backend.state = KBASE_INSTR_STATE_IDLE;
+ kbdev->hwcnt.backend.triggered = 1;
+ wake_up(&kbdev->hwcnt.backend.wait);
+
+ spin_unlock_irqrestore(&kbdev->hwcnt.lock, flags);
+
+ err = 0;
+
+ dev_dbg(kbdev->dev, "HW counters dumping set-up for context %p", kctx);
+ return err;
+ out_unrequest_cores:
+ spin_lock_irqsave(&kbdev->hwaccess_lock, flags);
+ kbase_pm_unrequest_cores(kbdev, true, shader_cores_needed);
+ spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);
+ out_err:
+ return err;
+}
+
+int kbase_instr_hwcnt_disable_internal(struct kbase_context *kctx)
+{
+ unsigned long flags, pm_flags;
+ int err = -EINVAL;
+ u32 irq_mask;
+ struct kbase_device *kbdev = kctx->kbdev;
+
+ while (1) {
+ spin_lock_irqsave(&kbdev->hwcnt.lock, flags);
+
+ if (kbdev->hwcnt.backend.state == KBASE_INSTR_STATE_DISABLED) {
+ /* Instrumentation is not enabled */
+ spin_unlock_irqrestore(&kbdev->hwcnt.lock, flags);
+ goto out;
+ }
+
+ if (kbdev->hwcnt.kctx != kctx) {
+ /* Instrumentation has been setup for another context */
+ spin_unlock_irqrestore(&kbdev->hwcnt.lock, flags);
+ goto out;
+ }
+
+ if (kbdev->hwcnt.backend.state == KBASE_INSTR_STATE_IDLE)
+ break;
+
+ spin_unlock_irqrestore(&kbdev->hwcnt.lock, flags);
+
+ /* Ongoing dump/setup - wait for its completion */
+ wait_event(kbdev->hwcnt.backend.wait,
+ kbdev->hwcnt.backend.triggered != 0);
+ }
+
+ kbdev->hwcnt.backend.state = KBASE_INSTR_STATE_DISABLED;
+ kbdev->hwcnt.backend.triggered = 0;
+
+ /* Disable interrupt */
+ spin_lock_irqsave(&kbdev->hwaccess_lock, pm_flags);
+ irq_mask = kbase_reg_read(kbdev, GPU_CONTROL_REG(GPU_IRQ_MASK), NULL);
+ kbase_reg_write(kbdev, GPU_CONTROL_REG(GPU_IRQ_MASK),
+ irq_mask & ~PRFCNT_SAMPLE_COMPLETED, NULL);
+
+ /* Disable the counters */
+ kbase_reg_write(kbdev, GPU_CONTROL_REG(PRFCNT_CONFIG), 0, kctx);
+
+ kbdev->hwcnt.kctx = NULL;
+ kbdev->hwcnt.addr = 0ULL;
+
+ kbase_pm_ca_instr_disable(kbdev);
+
+ kbase_pm_unrequest_cores(kbdev, true,
+ kbase_pm_get_present_cores(kbdev, KBASE_PM_CORE_SHADER));
+
+ kbase_pm_release_l2_caches(kbdev);
+
+ spin_unlock_irqrestore(&kbdev->hwaccess_lock, pm_flags);
+ spin_unlock_irqrestore(&kbdev->hwcnt.lock, flags);
+
+ dev_dbg(kbdev->dev, "HW counters dumping disabled for context %p",
+ kctx);
+
+ err = 0;
+
+ out:
+ return err;
+}
+
+int kbase_instr_hwcnt_request_dump(struct kbase_context *kctx)
+{
+ unsigned long flags;
+ int err = -EINVAL;
+ struct kbase_device *kbdev = kctx->kbdev;
+
+ spin_lock_irqsave(&kbdev->hwcnt.lock, flags);
+
+ if (kbdev->hwcnt.kctx != kctx) {
+ /* The instrumentation has been setup for another context */
+ goto unlock;
+ }
+
+ if (kbdev->hwcnt.backend.state != KBASE_INSTR_STATE_IDLE) {
+ /* HW counters are disabled or another dump is ongoing, or we're
+ * resetting */
+ goto unlock;
+ }
+
+ kbdev->hwcnt.backend.triggered = 0;
+
+ /* Mark that we're dumping - the PF handler can signal that we faulted
+ */
+ kbdev->hwcnt.backend.state = KBASE_INSTR_STATE_DUMPING;
+
+ /* Reconfigure the dump address */
+ kbase_reg_write(kbdev, GPU_CONTROL_REG(PRFCNT_BASE_LO),
+ kbdev->hwcnt.addr & 0xFFFFFFFF, NULL);
+ kbase_reg_write(kbdev, GPU_CONTROL_REG(PRFCNT_BASE_HI),
+ kbdev->hwcnt.addr >> 32, NULL);
+
+ /* Start dumping */
+ KBASE_TRACE_ADD(kbdev, CORE_GPU_PRFCNT_SAMPLE, NULL, NULL,
+ kbdev->hwcnt.addr, 0);
+ kbase_reg_write(kbdev, GPU_CONTROL_REG(GPU_COMMAND),
+ GPU_COMMAND_PRFCNT_SAMPLE, kctx);
+
+	dev_dbg(kbdev->dev, "HW counters dump requested for context %p", kctx);
+
+ err = 0;
+
+ unlock:
+ spin_unlock_irqrestore(&kbdev->hwcnt.lock, flags);
+ return err;
+}
+KBASE_EXPORT_SYMBOL(kbase_instr_hwcnt_request_dump);
+
+bool kbase_instr_hwcnt_dump_complete(struct kbase_context *kctx,
+ bool * const success)
+{
+ unsigned long flags;
+ bool complete = false;
+ struct kbase_device *kbdev = kctx->kbdev;
+
+ spin_lock_irqsave(&kbdev->hwcnt.lock, flags);
+
+ if (kbdev->hwcnt.backend.state == KBASE_INSTR_STATE_IDLE) {
+ *success = true;
+ complete = true;
+ } else if (kbdev->hwcnt.backend.state == KBASE_INSTR_STATE_FAULT) {
+ *success = false;
+ complete = true;
+ kbdev->hwcnt.backend.state = KBASE_INSTR_STATE_IDLE;
+ }
+
+ spin_unlock_irqrestore(&kbdev->hwcnt.lock, flags);
+
+ return complete;
+}
+KBASE_EXPORT_SYMBOL(kbase_instr_hwcnt_dump_complete);
+
+void kbasep_cache_clean_worker(struct work_struct *data)
+{
+ struct kbase_device *kbdev;
+ unsigned long flags;
+
+ kbdev = container_of(data, struct kbase_device,
+ hwcnt.backend.cache_clean_work);
+
+ mutex_lock(&kbdev->cacheclean_lock);
+ kbasep_instr_hwcnt_cacheclean(kbdev);
+
+ spin_lock_irqsave(&kbdev->hwcnt.lock, flags);
+ /* Wait for our condition, and any reset to complete */
+ while (kbdev->hwcnt.backend.state == KBASE_INSTR_STATE_CLEANING) {
+ spin_unlock_irqrestore(&kbdev->hwcnt.lock, flags);
+ wait_event(kbdev->hwcnt.backend.cache_clean_wait,
+ kbdev->hwcnt.backend.state !=
+ KBASE_INSTR_STATE_CLEANING);
+ spin_lock_irqsave(&kbdev->hwcnt.lock, flags);
+ }
+ KBASE_DEBUG_ASSERT(kbdev->hwcnt.backend.state ==
+ KBASE_INSTR_STATE_CLEANED);
+
+ /* All finished and idle */
+ kbdev->hwcnt.backend.state = KBASE_INSTR_STATE_IDLE;
+ kbdev->hwcnt.backend.triggered = 1;
+ wake_up(&kbdev->hwcnt.backend.wait);
+
+ spin_unlock_irqrestore(&kbdev->hwcnt.lock, flags);
+ mutex_unlock(&kbdev->cacheclean_lock);
+}
+
+void kbase_instr_hwcnt_sample_done(struct kbase_device *kbdev)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&kbdev->hwcnt.lock, flags);
+
+ if (kbdev->hwcnt.backend.state == KBASE_INSTR_STATE_FAULT) {
+ kbdev->hwcnt.backend.triggered = 1;
+ wake_up(&kbdev->hwcnt.backend.wait);
+ } else if (kbdev->hwcnt.backend.state == KBASE_INSTR_STATE_DUMPING) {
+ int ret;
+ /* Always clean and invalidate the cache after a successful dump
+ */
+ kbdev->hwcnt.backend.state = KBASE_INSTR_STATE_REQUEST_CLEAN;
+ ret = queue_work(kbdev->hwcnt.backend.cache_clean_wq,
+ &kbdev->hwcnt.backend.cache_clean_work);
+ KBASE_DEBUG_ASSERT(ret);
+ }
+
+ spin_unlock_irqrestore(&kbdev->hwcnt.lock, flags);
+}
+
+void kbase_clean_caches_done(struct kbase_device *kbdev)
+{
+ u32 irq_mask;
+
+ if (kbdev->hwcnt.backend.state != KBASE_INSTR_STATE_DISABLED) {
+ unsigned long flags;
+ unsigned long pm_flags;
+
+ spin_lock_irqsave(&kbdev->hwcnt.lock, flags);
+ /* Disable interrupt */
+ spin_lock_irqsave(&kbdev->hwaccess_lock, pm_flags);
+ irq_mask = kbase_reg_read(kbdev, GPU_CONTROL_REG(GPU_IRQ_MASK),
+ NULL);
+ kbase_reg_write(kbdev, GPU_CONTROL_REG(GPU_IRQ_MASK),
+ irq_mask & ~CLEAN_CACHES_COMPLETED, NULL);
+ spin_unlock_irqrestore(&kbdev->hwaccess_lock, pm_flags);
+
+ /* Wakeup... */
+ if (kbdev->hwcnt.backend.state == KBASE_INSTR_STATE_CLEANING) {
+ /* Only wake if we weren't resetting */
+ kbdev->hwcnt.backend.state = KBASE_INSTR_STATE_CLEANED;
+ wake_up(&kbdev->hwcnt.backend.cache_clean_wait);
+ }
+
+ spin_unlock_irqrestore(&kbdev->hwcnt.lock, flags);
+ }
+}
+
+int kbase_instr_hwcnt_wait_for_dump(struct kbase_context *kctx)
+{
+ struct kbase_device *kbdev = kctx->kbdev;
+ unsigned long flags;
+ int err;
+
+ /* Wait for dump & cacheclean to complete */
+ wait_event(kbdev->hwcnt.backend.wait,
+ kbdev->hwcnt.backend.triggered != 0);
+
+ spin_lock_irqsave(&kbdev->hwcnt.lock, flags);
+
+ if (kbdev->hwcnt.backend.state == KBASE_INSTR_STATE_FAULT) {
+ err = -EINVAL;
+ kbdev->hwcnt.backend.state = KBASE_INSTR_STATE_IDLE;
+ } else {
+ /* Dump done */
+ KBASE_DEBUG_ASSERT(kbdev->hwcnt.backend.state ==
+ KBASE_INSTR_STATE_IDLE);
+ err = 0;
+ }
+
+ spin_unlock_irqrestore(&kbdev->hwcnt.lock, flags);
+
+ return err;
+}
+
+int kbase_instr_hwcnt_clear(struct kbase_context *kctx)
+{
+ unsigned long flags;
+ int err = -EINVAL;
+ struct kbase_device *kbdev = kctx->kbdev;
+
+ spin_lock_irqsave(&kbdev->hwcnt.lock, flags);
+
+ /* Check it's the context previously set up and we're not already
+ * dumping */
+ if (kbdev->hwcnt.kctx != kctx || kbdev->hwcnt.backend.state !=
+ KBASE_INSTR_STATE_IDLE)
+ goto out;
+
+ /* Clear the counters */
+ KBASE_TRACE_ADD(kbdev, CORE_GPU_PRFCNT_CLEAR, NULL, NULL, 0u, 0);
+ kbase_reg_write(kbdev, GPU_CONTROL_REG(GPU_COMMAND),
+ GPU_COMMAND_PRFCNT_CLEAR, kctx);
+
+ err = 0;
+
+out:
+ spin_unlock_irqrestore(&kbdev->hwcnt.lock, flags);
+ return err;
+}
+KBASE_EXPORT_SYMBOL(kbase_instr_hwcnt_clear);
+
+int kbase_instr_backend_init(struct kbase_device *kbdev)
+{
+ int ret = 0;
+
+ kbdev->hwcnt.backend.state = KBASE_INSTR_STATE_DISABLED;
+
+ init_waitqueue_head(&kbdev->hwcnt.backend.wait);
+ init_waitqueue_head(&kbdev->hwcnt.backend.cache_clean_wait);
+ INIT_WORK(&kbdev->hwcnt.backend.cache_clean_work,
+ kbasep_cache_clean_worker);
+ kbdev->hwcnt.backend.triggered = 0;
+
+ kbdev->hwcnt.backend.cache_clean_wq =
+ alloc_workqueue("Mali cache cleaning workqueue", 0, 1);
+ if (NULL == kbdev->hwcnt.backend.cache_clean_wq)
+ ret = -EINVAL;
+
+ return ret;
+}
+
+void kbase_instr_backend_term(struct kbase_device *kbdev)
+{
+ destroy_workqueue(kbdev->hwcnt.backend.cache_clean_wq);
+}
+
diff --git a/midgard-0.r2p0/backend/gpu/mali_kbase_instr_defs.h b/midgard-0.r2p0/backend/gpu/mali_kbase_instr_defs.h
new file mode 100755
index 0000000..4794672
--- /dev/null
+++ b/midgard-0.r2p0/backend/gpu/mali_kbase_instr_defs.h
@@ -0,0 +1,58 @@
+/*
+ *
+ * (C) COPYRIGHT 2014, 2016 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained
+ * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
+ * Boston, MA 02110-1301, USA.
+ *
+ */
+
+
+
+/*
+ * Backend-specific instrumentation definitions
+ */
+
+#ifndef _KBASE_INSTR_DEFS_H_
+#define _KBASE_INSTR_DEFS_H_
+
+/*
+ * Instrumentation State Machine States
+ */
+enum kbase_instr_state {
+ /* State where instrumentation is not active */
+ KBASE_INSTR_STATE_DISABLED = 0,
+ /* State machine is active and ready for a command. */
+ KBASE_INSTR_STATE_IDLE,
+ /* Hardware is currently dumping a frame. */
+ KBASE_INSTR_STATE_DUMPING,
+ /* We've requested a clean to occur on a workqueue */
+ KBASE_INSTR_STATE_REQUEST_CLEAN,
+ /* Hardware is currently cleaning and invalidating caches. */
+ KBASE_INSTR_STATE_CLEANING,
+ /* Cache clean completed, and either a) a dump is complete, or
+	 * b) instrumentation can now be set up. */
+ KBASE_INSTR_STATE_CLEANED,
+	/* An error has occurred during DUMPING (page fault). */
+ KBASE_INSTR_STATE_FAULT
+};
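These states are driven by mali_kbase_instr_backend.c later in this patch: enabling instrumentation takes DISABLED through REQUEST_CLEAN, CLEANING and CLEANED to IDLE; each dump goes IDLE to DUMPING and then back through the clean sequence to IDLE; a page fault while DUMPING drops the machine into FAULT. A hedged summary of the success-path transitions as a table (illustrative only, not part of the driver):

	static const enum kbase_instr_state instr_next_ok[] = {
		[KBASE_INSTR_STATE_DISABLED]      = KBASE_INSTR_STATE_REQUEST_CLEAN, /* enable */
		[KBASE_INSTR_STATE_REQUEST_CLEAN] = KBASE_INSTR_STATE_CLEANING,  /* worker issues clean */
		[KBASE_INSTR_STATE_CLEANING]      = KBASE_INSTR_STATE_CLEANED,   /* CLEAN_CACHES_COMPLETED */
		[KBASE_INSTR_STATE_CLEANED]       = KBASE_INSTR_STATE_IDLE,      /* worker wakes waiter */
		[KBASE_INSTR_STATE_IDLE]          = KBASE_INSTR_STATE_DUMPING,   /* dump requested */
		[KBASE_INSTR_STATE_DUMPING]       = KBASE_INSTR_STATE_REQUEST_CLEAN, /* PRFCNT_SAMPLE_COMPLETED */
	};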
+
+/* Structure used for instrumentation and HW counters dumping */
+struct kbase_instr_backend {
+ wait_queue_head_t wait;
+ int triggered;
+
+ enum kbase_instr_state state;
+ wait_queue_head_t cache_clean_wait;
+ struct workqueue_struct *cache_clean_wq;
+ struct work_struct cache_clean_work;
+};
+
+#endif /* _KBASE_INSTR_DEFS_H_ */
+
diff --git a/midgard-0.r2p0/backend/gpu/mali_kbase_instr_internal.h b/midgard-0.r2p0/backend/gpu/mali_kbase_instr_internal.h
new file mode 100755
index 0000000..e96aeae
--- /dev/null
+++ b/midgard-0.r2p0/backend/gpu/mali_kbase_instr_internal.h
@@ -0,0 +1,45 @@
+/*
+ *
+ * (C) COPYRIGHT 2014 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained
+ * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
+ * Boston, MA 02110-1301, USA.
+ *
+ */
+
+
+
+
+
+/*
+ * Backend-specific HW access instrumentation APIs
+ */
+
+#ifndef _KBASE_INSTR_INTERNAL_H_
+#define _KBASE_INSTR_INTERNAL_H_
+
+/**
+ * kbasep_cache_clean_worker() - Workqueue for handling cache cleaning
+ * @data: a &struct work_struct
+ */
+void kbasep_cache_clean_worker(struct work_struct *data);
+
+/**
+ * kbase_clean_caches_done() - Cache clean interrupt received
+ * @kbdev: Kbase device
+ */
+void kbase_clean_caches_done(struct kbase_device *kbdev);
+
+/**
+ * kbase_instr_hwcnt_sample_done() - Dump complete interrupt received
+ * @kbdev: Kbase device
+ */
+void kbase_instr_hwcnt_sample_done(struct kbase_device *kbdev);
+
+#endif /* _KBASE_INSTR_INTERNAL_H_ */
diff --git a/midgard-0.r2p0/backend/gpu/mali_kbase_irq_internal.h b/midgard-0.r2p0/backend/gpu/mali_kbase_irq_internal.h
new file mode 100755
index 0000000..8781561
--- /dev/null
+++ b/midgard-0.r2p0/backend/gpu/mali_kbase_irq_internal.h
@@ -0,0 +1,39 @@
+/*
+ *
+ * (C) COPYRIGHT 2014-2015 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained
+ * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
+ * Boston, MA 02110-1301, USA.
+ *
+ */
+
+
+
+/*
+ * Backend specific IRQ APIs
+ */
+
+#ifndef _KBASE_IRQ_INTERNAL_H_
+#define _KBASE_IRQ_INTERNAL_H_
+
+int kbase_install_interrupts(struct kbase_device *kbdev);
+
+void kbase_release_interrupts(struct kbase_device *kbdev);
+
+/**
+ * kbase_synchronize_irqs - Ensure that all IRQ handlers have completed
+ * execution
+ * @kbdev: The kbase device
+ */
+void kbase_synchronize_irqs(struct kbase_device *kbdev);
+
+int kbasep_common_test_interrupt_handlers(
+ struct kbase_device * const kbdev);
+
+#endif /* _KBASE_IRQ_INTERNAL_H_ */
diff --git a/midgard-0.r2p0/backend/gpu/mali_kbase_irq_linux.c b/midgard-0.r2p0/backend/gpu/mali_kbase_irq_linux.c
new file mode 100755
index 0000000..8416b80
--- /dev/null
+++ b/midgard-0.r2p0/backend/gpu/mali_kbase_irq_linux.c
@@ -0,0 +1,469 @@
+/*
+ *
+ * (C) COPYRIGHT 2014-2016 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained
+ * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
+ * Boston, MA 02110-1301, USA.
+ *
+ */
+
+
+
+#include <mali_kbase.h>
+#include <backend/gpu/mali_kbase_device_internal.h>
+#include <backend/gpu/mali_kbase_irq_internal.h>
+
+#include <linux/interrupt.h>
+
+#if !defined(CONFIG_MALI_NO_MALI)
+
+/* GPU IRQ Tags */
+#define JOB_IRQ_TAG 0
+#define MMU_IRQ_TAG 1
+#define GPU_IRQ_TAG 2
+
+static void *kbase_tag(void *ptr, u32 tag)
+{
+ return (void *)(((uintptr_t) ptr) | tag);
+}
+
+static void *kbase_untag(void *ptr)
+{
+ return (void *)(((uintptr_t) ptr) & ~3);
+}
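request_irq() is given the same kbdev pointer for all three interrupt lines, distinguished by a 2-bit tag packed into the low bits of the pointer (the device structure is always at least 4-byte aligned, so those bits are free); kbase_untag() masks the tag back off before the pointer is used. A self-contained sketch of the trick:

	#include <assert.h>
	#include <stdint.h>

	/* Pack a small tag into the two alignment bits of a pointer. */
	static void *tag_ptr(void *p, unsigned int tag)
	{
		assert(((uintptr_t)p & 3) == 0 && tag < 4);
		return (void *)((uintptr_t)p | tag);
	}

	/* Recover the original pointer and, optionally, the tag. */
	static void *untag_ptr(void *p, unsigned int *tag)
	{
		if (tag)
			*tag = (uintptr_t)p & 3;
		return (void *)((uintptr_t)p & ~(uintptr_t)3);
	}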
+
+static irqreturn_t kbase_job_irq_handler(int irq, void *data)
+{
+ unsigned long flags;
+ struct kbase_device *kbdev = kbase_untag(data);
+ u32 val;
+
+ spin_lock_irqsave(&kbdev->pm.backend.gpu_powered_lock, flags);
+
+ if (!kbdev->pm.backend.gpu_powered) {
+ /* GPU is turned off - IRQ is not for us */
+ spin_unlock_irqrestore(&kbdev->pm.backend.gpu_powered_lock,
+ flags);
+ return IRQ_NONE;
+ }
+
+ val = kbase_reg_read(kbdev, JOB_CONTROL_REG(JOB_IRQ_STATUS), NULL);
+
+#ifdef CONFIG_MALI_DEBUG
+ if (!kbdev->pm.backend.driver_ready_for_irqs)
+ dev_warn(kbdev->dev, "%s: irq %d irqstatus 0x%x before driver is ready\n",
+ __func__, irq, val);
+#endif /* CONFIG_MALI_DEBUG */
+ spin_unlock_irqrestore(&kbdev->pm.backend.gpu_powered_lock, flags);
+
+ if (!val)
+ return IRQ_NONE;
+
+ dev_dbg(kbdev->dev, "%s: irq %d irqstatus 0x%x\n", __func__, irq, val);
+
+ kbase_job_done(kbdev, val);
+
+ return IRQ_HANDLED;
+}
+
+KBASE_EXPORT_TEST_API(kbase_job_irq_handler);
+
+static irqreturn_t kbase_mmu_irq_handler(int irq, void *data)
+{
+ unsigned long flags;
+ struct kbase_device *kbdev = kbase_untag(data);
+ u32 val;
+
+ spin_lock_irqsave(&kbdev->pm.backend.gpu_powered_lock, flags);
+
+ if (!kbdev->pm.backend.gpu_powered) {
+ /* GPU is turned off - IRQ is not for us */
+ spin_unlock_irqrestore(&kbdev->pm.backend.gpu_powered_lock,
+ flags);
+ return IRQ_NONE;
+ }
+
+ atomic_inc(&kbdev->faults_pending);
+
+ val = kbase_reg_read(kbdev, MMU_REG(MMU_IRQ_STATUS), NULL);
+
+#ifdef CONFIG_MALI_DEBUG
+ if (!kbdev->pm.backend.driver_ready_for_irqs)
+ dev_warn(kbdev->dev, "%s: irq %d irqstatus 0x%x before driver is ready\n",
+ __func__, irq, val);
+#endif /* CONFIG_MALI_DEBUG */
+ spin_unlock_irqrestore(&kbdev->pm.backend.gpu_powered_lock, flags);
+
+ if (!val) {
+ atomic_dec(&kbdev->faults_pending);
+ return IRQ_NONE;
+ }
+
+ dev_dbg(kbdev->dev, "%s: irq %d irqstatus 0x%x\n", __func__, irq, val);
+
+ kbase_mmu_interrupt(kbdev, val);
+
+ atomic_dec(&kbdev->faults_pending);
+
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t kbase_gpu_irq_handler(int irq, void *data)
+{
+ unsigned long flags;
+ struct kbase_device *kbdev = kbase_untag(data);
+ u32 val;
+
+ spin_lock_irqsave(&kbdev->pm.backend.gpu_powered_lock, flags);
+
+ if (!kbdev->pm.backend.gpu_powered) {
+ /* GPU is turned off - IRQ is not for us */
+ spin_unlock_irqrestore(&kbdev->pm.backend.gpu_powered_lock,
+ flags);
+ return IRQ_NONE;
+ }
+
+ val = kbase_reg_read(kbdev, GPU_CONTROL_REG(GPU_IRQ_STATUS), NULL);
+
+#ifdef CONFIG_MALI_DEBUG
+ if (!kbdev->pm.backend.driver_ready_for_irqs)
+ dev_dbg(kbdev->dev, "%s: irq %d irqstatus 0x%x before driver is ready\n",
+ __func__, irq, val);
+#endif /* CONFIG_MALI_DEBUG */
+ spin_unlock_irqrestore(&kbdev->pm.backend.gpu_powered_lock, flags);
+
+ if (!val)
+ return IRQ_NONE;
+
+ dev_dbg(kbdev->dev, "%s: irq %d irqstatus 0x%x\n", __func__, irq, val);
+
+ kbase_gpu_interrupt(kbdev, val);
+
+ return IRQ_HANDLED;
+}
+
+KBASE_EXPORT_TEST_API(kbase_gpu_irq_handler);
+
+static irq_handler_t kbase_handler_table[] = {
+ [JOB_IRQ_TAG] = kbase_job_irq_handler,
+ [MMU_IRQ_TAG] = kbase_mmu_irq_handler,
+ [GPU_IRQ_TAG] = kbase_gpu_irq_handler,
+};
+
+#ifdef CONFIG_MALI_DEBUG
+#define JOB_IRQ_HANDLER JOB_IRQ_TAG
+#define MMU_IRQ_HANDLER MMU_IRQ_TAG
+#define GPU_IRQ_HANDLER GPU_IRQ_TAG
+
+/**
+ * kbase_set_custom_irq_handler - Set a custom IRQ handler
+ * @kbdev: Device for which the handler is to be registered
+ * @custom_handler: Handler to be registered
+ * @irq_type: Interrupt type
+ *
+ * Registers the given interrupt handler for the requested interrupt type.
+ * If no custom handler is specified, the default handler is registered.
+ *
+ * Return: 0 on success, error code otherwise
+ */
+int kbase_set_custom_irq_handler(struct kbase_device *kbdev,
+ irq_handler_t custom_handler,
+ int irq_type)
+{
+ int result = 0;
+ irq_handler_t requested_irq_handler = NULL;
+
+ KBASE_DEBUG_ASSERT((JOB_IRQ_HANDLER <= irq_type) &&
+ (GPU_IRQ_HANDLER >= irq_type));
+
+ /* Release previous handler */
+ if (kbdev->irqs[irq_type].irq)
+ free_irq(kbdev->irqs[irq_type].irq, kbase_tag(kbdev, irq_type));
+
+ requested_irq_handler = (NULL != custom_handler) ? custom_handler :
+ kbase_handler_table[irq_type];
+
+ if (0 != request_irq(kbdev->irqs[irq_type].irq,
+ requested_irq_handler,
+ kbdev->irqs[irq_type].flags | IRQF_SHARED,
+ dev_name(kbdev->dev), kbase_tag(kbdev, irq_type))) {
+ result = -EINVAL;
+ dev_err(kbdev->dev, "Can't request interrupt %d (index %d)\n",
+ kbdev->irqs[irq_type].irq, irq_type);
+#ifdef CONFIG_SPARSE_IRQ
+ dev_err(kbdev->dev, "You have CONFIG_SPARSE_IRQ support enabled - is the interrupt number correct for this configuration?\n");
+#endif /* CONFIG_SPARSE_IRQ */
+ }
+
+ return result;
+}
+
+KBASE_EXPORT_TEST_API(kbase_set_custom_irq_handler);
+
+/* test correct interrupt assignment and reception by the CPU */
+struct kbasep_irq_test {
+ struct hrtimer timer;
+ wait_queue_head_t wait;
+ int triggered;
+ u32 timeout;
+};
+
+static struct kbasep_irq_test kbasep_irq_test_data;
+
+#define IRQ_TEST_TIMEOUT 500
+
+static irqreturn_t kbase_job_irq_test_handler(int irq, void *data)
+{
+ unsigned long flags;
+ struct kbase_device *kbdev = kbase_untag(data);
+ u32 val;
+
+ spin_lock_irqsave(&kbdev->pm.backend.gpu_powered_lock, flags);
+
+ if (!kbdev->pm.backend.gpu_powered) {
+ /* GPU is turned off - IRQ is not for us */
+ spin_unlock_irqrestore(&kbdev->pm.backend.gpu_powered_lock,
+ flags);
+ return IRQ_NONE;
+ }
+
+ val = kbase_reg_read(kbdev, JOB_CONTROL_REG(JOB_IRQ_STATUS), NULL);
+
+ spin_unlock_irqrestore(&kbdev->pm.backend.gpu_powered_lock, flags);
+
+ if (!val)
+ return IRQ_NONE;
+
+ dev_dbg(kbdev->dev, "%s: irq %d irqstatus 0x%x\n", __func__, irq, val);
+
+ kbasep_irq_test_data.triggered = 1;
+ wake_up(&kbasep_irq_test_data.wait);
+
+ kbase_reg_write(kbdev, JOB_CONTROL_REG(JOB_IRQ_CLEAR), val, NULL);
+
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t kbase_mmu_irq_test_handler(int irq, void *data)
+{
+ unsigned long flags;
+ struct kbase_device *kbdev = kbase_untag(data);
+ u32 val;
+
+ spin_lock_irqsave(&kbdev->pm.backend.gpu_powered_lock, flags);
+
+ if (!kbdev->pm.backend.gpu_powered) {
+ /* GPU is turned off - IRQ is not for us */
+ spin_unlock_irqrestore(&kbdev->pm.backend.gpu_powered_lock,
+ flags);
+ return IRQ_NONE;
+ }
+
+ val = kbase_reg_read(kbdev, MMU_REG(MMU_IRQ_STATUS), NULL);
+
+ spin_unlock_irqrestore(&kbdev->pm.backend.gpu_powered_lock, flags);
+
+ if (!val)
+ return IRQ_NONE;
+
+ dev_dbg(kbdev->dev, "%s: irq %d irqstatus 0x%x\n", __func__, irq, val);
+
+ kbasep_irq_test_data.triggered = 1;
+ wake_up(&kbasep_irq_test_data.wait);
+
+ kbase_reg_write(kbdev, MMU_REG(MMU_IRQ_CLEAR), val, NULL);
+
+ return IRQ_HANDLED;
+}
+
+static enum hrtimer_restart kbasep_test_interrupt_timeout(struct hrtimer *timer)
+{
+ struct kbasep_irq_test *test_data = container_of(timer,
+ struct kbasep_irq_test, timer);
+
+ test_data->timeout = 1;
+ test_data->triggered = 1;
+ wake_up(&test_data->wait);
+ return HRTIMER_NORESTART;
+}
+
+static int kbasep_common_test_interrupt(
+ struct kbase_device * const kbdev, u32 tag)
+{
+ int err = 0;
+ irq_handler_t test_handler;
+
+ u32 old_mask_val;
+ u16 mask_offset;
+ u16 rawstat_offset;
+
+ switch (tag) {
+ case JOB_IRQ_TAG:
+ test_handler = kbase_job_irq_test_handler;
+ rawstat_offset = JOB_CONTROL_REG(JOB_IRQ_RAWSTAT);
+ mask_offset = JOB_CONTROL_REG(JOB_IRQ_MASK);
+ break;
+ case MMU_IRQ_TAG:
+ test_handler = kbase_mmu_irq_test_handler;
+ rawstat_offset = MMU_REG(MMU_IRQ_RAWSTAT);
+ mask_offset = MMU_REG(MMU_IRQ_MASK);
+ break;
+ case GPU_IRQ_TAG:
+ /* already tested by pm_driver - bail out */
+ default:
+ return 0;
+ }
+
+ /* store old mask */
+ old_mask_val = kbase_reg_read(kbdev, mask_offset, NULL);
+ /* mask interrupts */
+ kbase_reg_write(kbdev, mask_offset, 0x0, NULL);
+
+ if (kbdev->irqs[tag].irq) {
+ /* release original handler and install test handler */
+ if (kbase_set_custom_irq_handler(kbdev, test_handler, tag) != 0) {
+ err = -EINVAL;
+ } else {
+ kbasep_irq_test_data.timeout = 0;
+ hrtimer_init(&kbasep_irq_test_data.timer,
+ CLOCK_MONOTONIC, HRTIMER_MODE_REL);
+ kbasep_irq_test_data.timer.function =
+ kbasep_test_interrupt_timeout;
+
+ /* trigger interrupt */
+ kbase_reg_write(kbdev, mask_offset, 0x1, NULL);
+ kbase_reg_write(kbdev, rawstat_offset, 0x1, NULL);
+
+ hrtimer_start(&kbasep_irq_test_data.timer,
+ HR_TIMER_DELAY_MSEC(IRQ_TEST_TIMEOUT),
+ HRTIMER_MODE_REL);
+
+ wait_event(kbasep_irq_test_data.wait,
+ kbasep_irq_test_data.triggered != 0);
+
+ if (kbasep_irq_test_data.timeout != 0) {
+ dev_err(kbdev->dev, "Interrupt %d (index %d) didn't reach CPU.\n",
+ kbdev->irqs[tag].irq, tag);
+ err = -EINVAL;
+ } else {
+ dev_dbg(kbdev->dev, "Interrupt %d (index %d) reached CPU.\n",
+ kbdev->irqs[tag].irq, tag);
+ }
+
+ hrtimer_cancel(&kbasep_irq_test_data.timer);
+ kbasep_irq_test_data.triggered = 0;
+
+ /* mask interrupts */
+ kbase_reg_write(kbdev, mask_offset, 0x0, NULL);
+
+ /* release test handler */
+ free_irq(kbdev->irqs[tag].irq, kbase_tag(kbdev, tag));
+ }
+
+ /* restore original interrupt */
+ if (request_irq(kbdev->irqs[tag].irq, kbase_handler_table[tag],
+ kbdev->irqs[tag].flags | IRQF_SHARED,
+ dev_name(kbdev->dev), kbase_tag(kbdev, tag))) {
+ dev_err(kbdev->dev, "Can't restore original interrupt %d (index %d)\n",
+ kbdev->irqs[tag].irq, tag);
+ err = -EINVAL;
+ }
+ }
+ /* restore old mask */
+ kbase_reg_write(kbdev, mask_offset, old_mask_val, NULL);
+
+ return err;
+}
+
+int kbasep_common_test_interrupt_handlers(
+ struct kbase_device * const kbdev)
+{
+ int err;
+
+ init_waitqueue_head(&kbasep_irq_test_data.wait);
+ kbasep_irq_test_data.triggered = 0;
+
+ /* A suspend won't happen during startup/insmod */
+ kbase_pm_context_active(kbdev);
+
+ err = kbasep_common_test_interrupt(kbdev, JOB_IRQ_TAG);
+ if (err) {
+ dev_err(kbdev->dev, "Interrupt JOB_IRQ didn't reach CPU. Check interrupt assignments.\n");
+ goto out;
+ }
+
+ err = kbasep_common_test_interrupt(kbdev, MMU_IRQ_TAG);
+ if (err) {
+ dev_err(kbdev->dev, "Interrupt MMU_IRQ didn't reach CPU. Check interrupt assignments.\n");
+ goto out;
+ }
+
+ dev_dbg(kbdev->dev, "Interrupts are correctly assigned.\n");
+
+ out:
+ kbase_pm_context_idle(kbdev);
+
+ return err;
+}
+#endif /* CONFIG_MALI_DEBUG */
+
+int kbase_install_interrupts(struct kbase_device *kbdev)
+{
+ u32 nr = ARRAY_SIZE(kbase_handler_table);
+ int err;
+ u32 i;
+
+ for (i = 0; i < nr; i++) {
+ err = request_irq(kbdev->irqs[i].irq, kbase_handler_table[i],
+ kbdev->irqs[i].flags | IRQF_SHARED,
+ dev_name(kbdev->dev),
+ kbase_tag(kbdev, i));
+ if (err) {
+ dev_err(kbdev->dev, "Can't request interrupt %d (index %d)\n",
+ kbdev->irqs[i].irq, i);
+#ifdef CONFIG_SPARSE_IRQ
+ dev_err(kbdev->dev, "You have CONFIG_SPARSE_IRQ support enabled - is the interrupt number correct for this configuration?\n");
+#endif /* CONFIG_SPARSE_IRQ */
+ goto release;
+ }
+ }
+
+ return 0;
+
+ release:
+ while (i-- > 0)
+ free_irq(kbdev->irqs[i].irq, kbase_tag(kbdev, i));
+
+ return err;
+}
+
+void kbase_release_interrupts(struct kbase_device *kbdev)
+{
+ u32 nr = ARRAY_SIZE(kbase_handler_table);
+ u32 i;
+
+ for (i = 0; i < nr; i++) {
+ if (kbdev->irqs[i].irq)
+ free_irq(kbdev->irqs[i].irq, kbase_tag(kbdev, i));
+ }
+}
+
+void kbase_synchronize_irqs(struct kbase_device *kbdev)
+{
+ u32 nr = ARRAY_SIZE(kbase_handler_table);
+ u32 i;
+
+ for (i = 0; i < nr; i++) {
+ if (kbdev->irqs[i].irq)
+ synchronize_irq(kbdev->irqs[i].irq);
+ }
+}
+
+#endif /* !defined(CONFIG_MALI_NO_MALI) */
diff --git a/midgard-0.r2p0/backend/gpu/mali_kbase_jm_as.c b/midgard-0.r2p0/backend/gpu/mali_kbase_jm_as.c
new file mode 100755
index 0000000..202dcfa
--- /dev/null
+++ b/midgard-0.r2p0/backend/gpu/mali_kbase_jm_as.c
@@ -0,0 +1,378 @@
+/*
+ *
+ * (C) COPYRIGHT 2014-2016 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained
+ * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
+ * Boston, MA 02110-1301, USA.
+ *
+ */
+
+
+
+
+/*
+ * Register backend context / address space management
+ */
+
+#include <mali_kbase.h>
+#include <mali_kbase_hwaccess_jm.h>
+
+/**
+ * assign_and_activate_kctx_addr_space - Assign an AS to a context
+ * @kbdev: Kbase device
+ * @kctx: Kbase context
+ * @current_as: Address Space to assign
+ *
+ * Assign an Address Space (AS) to a context, and add the context to the Policy.
+ *
+ * This includes:
+ * - setting up the global runpool_irq structure and the context on the AS,
+ * - activating the MMU on the AS,
+ * - allowing jobs to be submitted on the AS.
+ *
+ * Context:
+ * kbasep_js_kctx_info.jsctx_mutex held,
+ * kbasep_js_device_data.runpool_mutex held,
+ * AS transaction mutex held,
+ * Runpool IRQ lock held
+ */
+static void assign_and_activate_kctx_addr_space(struct kbase_device *kbdev,
+ struct kbase_context *kctx,
+ struct kbase_as *current_as)
+{
+ struct kbasep_js_device_data *js_devdata = &kbdev->js_data;
+ struct kbasep_js_per_as_data *js_per_as_data;
+ int as_nr = current_as->number;
+
+ lockdep_assert_held(&kctx->jctx.sched_info.ctx.jsctx_mutex);
+ lockdep_assert_held(&js_devdata->runpool_mutex);
+ lockdep_assert_held(&kbdev->hwaccess_lock);
+
+ js_per_as_data = &js_devdata->runpool_irq.per_as_data[as_nr];
+
+ /* Attribute handling */
+ kbasep_js_ctx_attr_runpool_retain_ctx(kbdev, kctx);
+
+ /* Assign addr space */
+ kctx->as_nr = as_nr;
+
+ /* If the GPU is currently powered, activate this address space on the
+ * MMU */
+ if (kbdev->pm.backend.gpu_powered)
+ kbase_mmu_update(kctx);
+ /* If the GPU was not powered then the MMU will be reprogrammed on the
+ * next pm_context_active() */
+
+ /* Allow it to run jobs */
+ kbasep_js_set_submit_allowed(js_devdata, kctx);
+
+ /* Book-keeping */
+ js_per_as_data->kctx = kctx;
+ js_per_as_data->as_busy_refcount = 0;
+
+ kbase_js_runpool_inc_context_count(kbdev, kctx);
+}
+
+/**
+ * release_addr_space - Release an address space
+ * @kbdev: Kbase device
+ * @kctx_as_nr: Address space of context to release
+ * @kctx: Context being released
+ *
+ * Context: kbasep_js_device_data.runpool_mutex must be held
+ *
+ * Release an address space, making it available for being picked again.
+ */
+static void release_addr_space(struct kbase_device *kbdev, int kctx_as_nr,
+ struct kbase_context *kctx)
+{
+ struct kbasep_js_device_data *js_devdata;
+ u16 as_bit = (1u << kctx_as_nr);
+
+ js_devdata = &kbdev->js_data;
+ lockdep_assert_held(&js_devdata->runpool_mutex);
+
+ /* The address space must not already be free */
+ KBASE_DEBUG_ASSERT(!(js_devdata->as_free & as_bit));
+
+ js_devdata->as_free |= as_bit;
+
+ kbase_js_runpool_dec_context_count(kbdev, kctx);
+}
+
+bool kbase_backend_use_ctx_sched(struct kbase_device *kbdev,
+ struct kbase_context *kctx)
+{
+ int i;
+
+ if (kbdev->hwaccess.active_kctx == kctx) {
+ /* Context is already active */
+ return true;
+ }
+
+ for (i = 0; i < kbdev->nr_hw_address_spaces; i++) {
+ struct kbasep_js_per_as_data *js_per_as_data =
+ &kbdev->js_data.runpool_irq.per_as_data[i];
+
+ if (js_per_as_data->kctx == kctx) {
+ /* Context already has ASID - mark as active */
+ return true;
+ }
+ }
+
+ /* Context does not have address space assigned */
+ return false;
+}
+
+void kbase_backend_release_ctx_irq(struct kbase_device *kbdev,
+ struct kbase_context *kctx)
+{
+ struct kbasep_js_per_as_data *js_per_as_data;
+ int as_nr = kctx->as_nr;
+
+ if (as_nr == KBASEP_AS_NR_INVALID) {
+ WARN(1, "Attempting to release context without ASID\n");
+ return;
+ }
+
+ lockdep_assert_held(&kbdev->hwaccess_lock);
+
+ js_per_as_data = &kbdev->js_data.runpool_irq.per_as_data[kctx->as_nr];
+ if (js_per_as_data->as_busy_refcount != 0) {
+ WARN(1, "Attempting to release active ASID\n");
+ return;
+ }
+
+ /* Release context from address space */
+ js_per_as_data->kctx = NULL;
+
+ kbasep_js_clear_submit_allowed(&kbdev->js_data, kctx);
+ /* If the GPU is currently powered, de-activate this address space on
+ * the MMU */
+ if (kbdev->pm.backend.gpu_powered)
+ kbase_mmu_disable(kctx);
+ /* If the GPU was not powered then the MMU will be reprogrammed on the
+ * next pm_context_active() */
+
+ release_addr_space(kbdev, as_nr, kctx);
+ kctx->as_nr = KBASEP_AS_NR_INVALID;
+}
+
+void kbase_backend_release_ctx_noirq(struct kbase_device *kbdev,
+ struct kbase_context *kctx)
+{
+}
+
+void kbase_backend_release_free_address_space(struct kbase_device *kbdev,
+ int as_nr)
+{
+ struct kbasep_js_device_data *js_devdata;
+
+ js_devdata = &kbdev->js_data;
+
+ lockdep_assert_held(&js_devdata->runpool_mutex);
+
+ js_devdata->as_free |= (1 << as_nr);
+}
+
+/**
+ * check_is_runpool_full - check whether the runpool is full for a specified
+ * context
+ * @kbdev: Kbase device
+ * @kctx: Kbase context
+ *
+ * If kctx == NULL, then this makes the least restrictive check on the
+ * runpool. A specific context that is supplied immediately after could fail
+ * the check, even under the same conditions.
+ *
+ * Therefore, once a context is obtained you \b must re-check it with this
+ * function, since the return value could change to false.
+ *
+ * Context:
+ * In all cases, the caller must hold kbasep_js_device_data.runpool_mutex.
+ * When kctx != NULL the caller must hold the
+ * kbasep_js_kctx_info.ctx.jsctx_mutex.
+ * When kctx == NULL, then the caller need not hold any jsctx_mutex locks (but
+ * it doesn't do any harm to do so).
+ *
+ * Return: true if the runpool is full
+ */
+static bool check_is_runpool_full(struct kbase_device *kbdev,
+ struct kbase_context *kctx)
+{
+ struct kbasep_js_device_data *js_devdata;
+ bool is_runpool_full;
+
+ js_devdata = &kbdev->js_data;
+ lockdep_assert_held(&js_devdata->runpool_mutex);
+
+ /* Regardless of whether a context is submitting or not, we can't have more
+ * contexts running than there are HW address spaces */
+ is_runpool_full = (bool) (js_devdata->nr_all_contexts_running >=
+ kbdev->nr_hw_address_spaces);
+
+ if (kctx && !kbase_ctx_flag(kctx, KCTX_SUBMIT_DISABLED)) {
+ lockdep_assert_held(&kctx->jctx.sched_info.ctx.jsctx_mutex);
+ /* Contexts that submit might use fewer of the available address
+ * spaces due to HW workarounds, in which case the runpool is also
+ * full when the number of submitting contexts exceeds the number
+ * of submittable address spaces.
+ *
+ * Both checks must be made: we can have nr_user_address_spaces ==
+ * nr_hw_address_spaces, and at the same time
+ * nr_user_contexts_running < nr_all_contexts_running. */
+ is_runpool_full |= (bool)
+ (js_devdata->nr_user_contexts_running >=
+ kbdev->nr_user_address_spaces);
+ }
+
+ return is_runpool_full;
+}
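As an aside, the two-part fullness rule above reduces to plain integers: the total number of running contexts is capped by the HW address spaces, and submitting contexts are additionally capped by the submittable address spaces. The following is a minimal, self-contained sketch of that rule only (hypothetical names, no locking, not driver code):

    #include <stdbool.h>
    #include <stdio.h>

    struct runpool_counts {
        int nr_all_contexts_running;   /* every context currently in the runpool */
        int nr_user_contexts_running;  /* only those allowed to submit */
        int nr_hw_address_spaces;      /* address spaces the GPU provides */
        int nr_user_address_spaces;    /* address spaces usable for submission */
    };

    /* ctx_can_submit corresponds to a context without submission disabled */
    static bool runpool_is_full(const struct runpool_counts *c, bool ctx_can_submit)
    {
        bool full = c->nr_all_contexts_running >= c->nr_hw_address_spaces;

        if (ctx_can_submit)
            full |= c->nr_user_contexts_running >= c->nr_user_address_spaces;

        return full;
    }

    int main(void)
    {
        struct runpool_counts c = { 3, 3, 4, 3 };

        printf("%d\n", runpool_is_full(&c, true));  /* 1: user limit reached */
        printf("%d\n", runpool_is_full(&c, false)); /* 0: HW limit not reached */
        return 0;
    }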
+
+int kbase_backend_find_free_address_space(struct kbase_device *kbdev,
+ struct kbase_context *kctx)
+{
+ struct kbasep_js_device_data *js_devdata;
+ struct kbasep_js_kctx_info *js_kctx_info;
+ unsigned long flags;
+ int i;
+
+ js_devdata = &kbdev->js_data;
+ js_kctx_info = &kctx->jctx.sched_info;
+
+ mutex_lock(&js_kctx_info->ctx.jsctx_mutex);
+ mutex_lock(&js_devdata->runpool_mutex);
+
+ /* First try to find a free address space */
+ if (check_is_runpool_full(kbdev, kctx))
+ i = -1;
+ else
+ i = ffs(js_devdata->as_free) - 1;
+
+ if (i >= 0 && i < kbdev->nr_hw_address_spaces) {
+ js_devdata->as_free &= ~(1 << i);
+
+ mutex_unlock(&js_devdata->runpool_mutex);
+ mutex_unlock(&js_kctx_info->ctx.jsctx_mutex);
+
+ return i;
+ }
+
+ spin_lock_irqsave(&kbdev->hwaccess_lock, flags);
+
+ /* No address space currently free, see if we can release one */
+ for (i = 0; i < kbdev->nr_hw_address_spaces; i++) {
+ struct kbasep_js_per_as_data *js_per_as_data;
+ struct kbasep_js_kctx_info *as_js_kctx_info;
+ struct kbase_context *as_kctx;
+
+ js_per_as_data = &kbdev->js_data.runpool_irq.per_as_data[i];
+ as_kctx = js_per_as_data->kctx;
+ as_js_kctx_info = &as_kctx->jctx.sched_info;
+
+ /* Don't release privileged or active contexts, or contexts with
+ * jobs running */
+ if (as_kctx && !kbase_ctx_flag(as_kctx, KCTX_PRIVILEGED) &&
+ js_per_as_data->as_busy_refcount == 0) {
+ if (!kbasep_js_runpool_retain_ctx_nolock(kbdev,
+ as_kctx)) {
+ WARN(1, "Failed to retain active context\n");
+
+ spin_unlock_irqrestore(&kbdev->hwaccess_lock,
+ flags);
+ mutex_unlock(&js_devdata->runpool_mutex);
+ mutex_unlock(&js_kctx_info->ctx.jsctx_mutex);
+
+ return KBASEP_AS_NR_INVALID;
+ }
+
+ kbasep_js_clear_submit_allowed(js_devdata, as_kctx);
+
+ /* Drop and retake locks to take the jsctx_mutex on the
+ * context we're about to release without violating lock
+ * ordering
+ */
+ spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);
+ mutex_unlock(&js_devdata->runpool_mutex);
+ mutex_unlock(&js_kctx_info->ctx.jsctx_mutex);
+
+
+ /* Release context from address space */
+ mutex_lock(&as_js_kctx_info->ctx.jsctx_mutex);
+ mutex_lock(&js_devdata->runpool_mutex);
+
+ kbasep_js_runpool_release_ctx_nolock(kbdev, as_kctx);
+
+ if (!kbase_ctx_flag(as_kctx, KCTX_SCHEDULED)) {
+ kbasep_js_runpool_requeue_or_kill_ctx(kbdev,
+ as_kctx,
+ true);
+
+ js_devdata->as_free &= ~(1 << i);
+
+ mutex_unlock(&js_devdata->runpool_mutex);
+ mutex_unlock(&as_js_kctx_info->ctx.jsctx_mutex);
+
+ return i;
+ }
+
+ /* Context was retained while locks were dropped,
+ * continue looking for free AS */
+
+ mutex_unlock(&js_devdata->runpool_mutex);
+ mutex_unlock(&as_js_kctx_info->ctx.jsctx_mutex);
+
+ mutex_lock(&js_kctx_info->ctx.jsctx_mutex);
+ mutex_lock(&js_devdata->runpool_mutex);
+ spin_lock_irqsave(&kbdev->hwaccess_lock, flags);
+ }
+ }
+
+ spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);
+
+ mutex_unlock(&js_devdata->runpool_mutex);
+ mutex_unlock(&js_kctx_info->ctx.jsctx_mutex);
+
+ return KBASEP_AS_NR_INVALID;
+}
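The address-space pool managed above is a plain bitmask: a set bit in as_free means that AS is available, ffs() picks the lowest free one, and releasing an AS sets its bit again. A standalone sketch of that allocation pattern (illustrative only; invented names, no locking and no context eviction):

    #include <stdio.h>
    #include <strings.h>                /* ffs() */

    static unsigned int as_free_mask;   /* bit n set => address space n is free */

    /* Pick the lowest-numbered free address space, or -1 if none is free */
    static int as_alloc(void)
    {
        int i = ffs(as_free_mask) - 1;  /* ffs() returns 0 when no bit is set */

        if (i < 0)
            return -1;
        as_free_mask &= ~(1u << i);
        return i;
    }

    /* Return an address space to the pool */
    static void as_release(int i)
    {
        as_free_mask |= (1u << i);
    }

    int main(void)
    {
        as_free_mask = 0xF;             /* four address spaces, all free */
        printf("%d\n", as_alloc());     /* 0 */
        printf("%d\n", as_alloc());     /* 1 */
        as_release(0);
        printf("%d\n", as_alloc());     /* 0 again */
        return 0;
    }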
+
+bool kbase_backend_use_ctx(struct kbase_device *kbdev,
+ struct kbase_context *kctx,
+ int as_nr)
+{
+ struct kbasep_js_device_data *js_devdata;
+ struct kbasep_js_kctx_info *js_kctx_info;
+ struct kbase_as *new_address_space = NULL;
+
+ js_devdata = &kbdev->js_data;
+ js_kctx_info = &kctx->jctx.sched_info;
+
+ if (kbdev->hwaccess.active_kctx == kctx ||
+ kctx->as_nr != KBASEP_AS_NR_INVALID ||
+ as_nr == KBASEP_AS_NR_INVALID) {
+ WARN(1, "Invalid parameters to use_ctx()\n");
+ return false;
+ }
+
+ new_address_space = &kbdev->as[as_nr];
+
+ lockdep_assert_held(&js_devdata->runpool_mutex);
+ lockdep_assert_held(&kbdev->hwaccess_lock);
+
+ assign_and_activate_kctx_addr_space(kbdev, kctx, new_address_space);
+
+ if (kbase_ctx_flag(kctx, KCTX_PRIVILEGED)) {
+ /* We need to retain it to keep the corresponding address space
+ */
+ kbasep_js_runpool_retain_ctx_nolock(kbdev, kctx);
+ }
+
+ return true;
+}
+
diff --git a/midgard-0.r2p0/backend/gpu/mali_kbase_jm_defs.h b/midgard-0.r2p0/backend/gpu/mali_kbase_jm_defs.h
new file mode 100755
index 0000000..08a7400
--- /dev/null
+++ b/midgard-0.r2p0/backend/gpu/mali_kbase_jm_defs.h
@@ -0,0 +1,123 @@
+/*
+ *
+ * (C) COPYRIGHT 2014-2016 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained
+ * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
+ * Boston, MA 02110-1301, USA.
+ *
+ */
+
+
+
+
+/*
+ * Register-based HW access backend specific definitions
+ */
+
+#ifndef _KBASE_HWACCESS_GPU_DEFS_H_
+#define _KBASE_HWACCESS_GPU_DEFS_H_
+
+/* SLOT_RB_SIZE must be < 256 */
+#define SLOT_RB_SIZE 2
+#define SLOT_RB_MASK (SLOT_RB_SIZE - 1)
+
+/**
+ * struct rb_entry - Ringbuffer entry
+ * @katom: Atom associated with this entry
+ */
+struct rb_entry {
+ struct kbase_jd_atom *katom;
+};
+
+/**
+ * struct slot_rb - Slot ringbuffer
+ * @entries: Ringbuffer entries
+ * @last_context: The last context to submit a job on this slot
+ * @read_idx: Current read index of buffer
+ * @write_idx: Current write index of buffer
+ * @job_chain_flag: Flag used to implement jobchain disambiguation
+ */
+struct slot_rb {
+ struct rb_entry entries[SLOT_RB_SIZE];
+
+ struct kbase_context *last_context;
+
+ u8 read_idx;
+ u8 write_idx;
+
+ u8 job_chain_flag;
+};
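SLOT_RB_MASK being SLOT_RB_SIZE - 1 suggests the usual power-of-two ringbuffer scheme: the u8 read/write indices wrap freely, entries are addressed modulo the size via the mask, and occupancy is simply the index difference. A self-contained sketch of that scheme follows (illustrative only, not the driver's ringbuffer code; assumes the size stays a power of two below 256):

    #include <stdint.h>
    #include <stdio.h>

    #define RB_SIZE 2                   /* must be a power of two and < 256 */
    #define RB_MASK (RB_SIZE - 1)

    struct ring {
        int entries[RB_SIZE];
        uint8_t read_idx;
        uint8_t write_idx;
    };

    static int rb_used(const struct ring *rb)
    {
        /* u8 arithmetic wraps, so this stays correct across index wrap-around */
        return (uint8_t)(rb->write_idx - rb->read_idx);
    }

    static int rb_push(struct ring *rb, int v)
    {
        if (rb_used(rb) == RB_SIZE)
            return -1;                  /* full */
        rb->entries[rb->write_idx & RB_MASK] = v;
        rb->write_idx++;
        return 0;
    }

    static int rb_pop(struct ring *rb, int *v)
    {
        if (rb_used(rb) == 0)
            return -1;                  /* empty */
        *v = rb->entries[rb->read_idx & RB_MASK];
        rb->read_idx++;
        return 0;
    }

    int main(void)
    {
        struct ring rb = { 0 };
        int v;

        rb_push(&rb, 10);
        rb_push(&rb, 20);
        rb_pop(&rb, &v);
        printf("%d\n", v);              /* 10 */
        return 0;
    }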
+
+/**
+ * struct kbase_backend_data - GPU backend specific data for HW access layer
+ * @slot_rb: Slot ringbuffers
+ * @rmu_workaround_flag: When PRLAM-8987 is present, this flag determines
+ * whether slots 0/1 or slot 2 are currently being
+ * pulled from
+ * @scheduling_timer: The timer tick used for rescheduling jobs
+ * @timer_running: Is the timer running? The runpool_mutex must be
+ * held whilst modifying this.
+ * @suspend_timer: Is the timer suspended? Set when a suspend
+ * occurs and cleared on resume. The runpool_mutex
+ * must be held whilst modifying this.
+ * @reset_gpu: Set to a KBASE_RESET_xxx value (see comments)
+ * @reset_workq: Work queue for performing the reset
+ * @reset_work: Work item for performing the reset
+ * @reset_wait: Wait event signalled when the reset is complete
+ * @reset_timer: Timeout for soft-stops before the reset
+ * @timeouts_updated: Have timeout values just been updated?
+ *
+ * The hwaccess_lock (a spinlock) must be held when accessing this structure
+ */
+struct kbase_backend_data {
+ struct slot_rb slot_rb[BASE_JM_MAX_NR_SLOTS];
+
+ bool rmu_workaround_flag;
+
+ struct hrtimer scheduling_timer;
+
+ bool timer_running;
+ bool suspend_timer;
+
+ atomic_t reset_gpu;
+
+/* The GPU reset isn't pending */
+#define KBASE_RESET_GPU_NOT_PENDING 0
+/* kbase_prepare_to_reset_gpu has been called */
+#define KBASE_RESET_GPU_PREPARED 1
+/* kbase_reset_gpu has been called - the reset will now definitely happen
+ * within the timeout period */
+#define KBASE_RESET_GPU_COMMITTED 2
+/* The GPU reset process is currently occurring (timeout has expired or
+ * kbasep_try_reset_gpu_early was called) */
+#define KBASE_RESET_GPU_HAPPENING 3
+/* Reset the GPU silently, used when resetting the GPU as part of normal
+ * behavior (e.g. when exiting protected mode). */
+#define KBASE_RESET_GPU_SILENT 4
+ struct workqueue_struct *reset_workq;
+ struct work_struct reset_work;
+ wait_queue_head_t reset_wait;
+ struct hrtimer reset_timer;
+
+ bool timeouts_updated;
+};
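The KBASE_RESET_GPU_* values form a small state machine driven by atomic compare-and-exchange: only the caller that wins the NOT_PENDING -> PREPARED transition may commit a reset, and only one of the timeout path and the early path wins COMMITTED -> HAPPENING (see kbase_prepare_to_reset_gpu_locked() and kbasep_try_reset_gpu_early_locked() later in this patch). A hypothetical userspace sketch of just those transitions, using C11 atomics rather than the kernel's atomic_t:

    #include <stdatomic.h>
    #include <stdbool.h>
    #include <stdio.h>

    enum reset_state {
        RESET_NOT_PENDING,
        RESET_PREPARED,
        RESET_COMMITTED,
        RESET_HAPPENING,
    };

    static _Atomic int reset_state = RESET_NOT_PENDING;

    /* Returns true only for the single caller that wins the transition */
    static bool prepare_to_reset(void)
    {
        int expected = RESET_NOT_PENDING;

        return atomic_compare_exchange_strong(&reset_state, &expected,
                                              RESET_PREPARED);
    }

    static void commit_reset(void)
    {
        atomic_store(&reset_state, RESET_COMMITTED);
    }

    /* Called from both the timeout and the "all jobs done" early path;
     * only one of them actually starts the reset work. */
    static bool try_start_reset(void)
    {
        int expected = RESET_COMMITTED;

        return atomic_compare_exchange_strong(&reset_state, &expected,
                                              RESET_HAPPENING);
    }

    int main(void)
    {
        if (prepare_to_reset()) {
            commit_reset();
            printf("started: %d\n", try_start_reset());        /* 1 */
            printf("started again: %d\n", try_start_reset());  /* 0 */
        }
        return 0;
    }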
+
+/**
+ * struct kbase_jd_atom_backend - GPU backend specific katom data
+ */
+struct kbase_jd_atom_backend {
+};
+
+/**
+ * struct kbase_context_backend - GPU backend specific context data
+ */
+struct kbase_context_backend {
+};
+
+#endif /* _KBASE_HWACCESS_GPU_DEFS_H_ */
diff --git a/midgard-0.r2p0/backend/gpu/mali_kbase_jm_hw.c b/midgard-0.r2p0/backend/gpu/mali_kbase_jm_hw.c
new file mode 100755
index 0000000..668258b
--- /dev/null
+++ b/midgard-0.r2p0/backend/gpu/mali_kbase_jm_hw.c
@@ -0,0 +1,1560 @@
+/*
+ *
+ * (C) COPYRIGHT 2010-2016 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained
+ * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
+ * Boston, MA 02110-1301, USA.
+ *
+ */
+
+
+
+/*
+ * Base kernel job manager APIs
+ */
+
+#include <mali_kbase.h>
+#include <mali_kbase_config.h>
+#include <mali_midg_regmap.h>
+#if defined(CONFIG_MALI_GATOR_SUPPORT)
+#include <mali_kbase_gator.h>
+#endif
+#include <mali_kbase_tlstream.h>
+#include <mali_kbase_vinstr.h>
+#include <mali_kbase_hw.h>
+#include <mali_kbase_hwaccess_jm.h>
+#include <backend/gpu/mali_kbase_device_internal.h>
+#include <backend/gpu/mali_kbase_irq_internal.h>
+#include <backend/gpu/mali_kbase_js_affinity.h>
+#include <backend/gpu/mali_kbase_jm_internal.h>
+
+#define beenthere(kctx, f, a...) \
+ dev_dbg(kctx->kbdev->dev, "%s:" f, __func__, ##a)
+
+#if KBASE_GPU_RESET_EN
+static void kbasep_try_reset_gpu_early(struct kbase_device *kbdev);
+static void kbasep_reset_timeout_worker(struct work_struct *data);
+static enum hrtimer_restart kbasep_reset_timer_callback(struct hrtimer *timer);
+#endif /* KBASE_GPU_RESET_EN */
+
+static inline int kbasep_jm_is_js_free(struct kbase_device *kbdev, int js,
+ struct kbase_context *kctx)
+{
+ return !kbase_reg_read(kbdev, JOB_SLOT_REG(js, JS_COMMAND_NEXT), kctx);
+}
+
+void kbase_job_hw_submit(struct kbase_device *kbdev,
+ struct kbase_jd_atom *katom,
+ int js)
+{
+ struct kbase_context *kctx;
+ u32 cfg;
+ u64 jc_head = katom->jc;
+
+ KBASE_DEBUG_ASSERT(kbdev);
+ KBASE_DEBUG_ASSERT(katom);
+
+ kctx = katom->kctx;
+
+ /* Command register must be available */
+ KBASE_DEBUG_ASSERT(kbasep_jm_is_js_free(kbdev, js, kctx));
+ /* The affinity must not be violated */
+ kbase_js_debug_log_current_affinities(kbdev);
+ KBASE_DEBUG_ASSERT(!kbase_js_affinity_would_violate(kbdev, js,
+ katom->affinity));
+
+ kbase_reg_write(kbdev, JOB_SLOT_REG(js, JS_HEAD_NEXT_LO),
+ jc_head & 0xFFFFFFFF, kctx);
+ kbase_reg_write(kbdev, JOB_SLOT_REG(js, JS_HEAD_NEXT_HI),
+ jc_head >> 32, kctx);
+
+ kbase_reg_write(kbdev, JOB_SLOT_REG(js, JS_AFFINITY_NEXT_LO),
+ katom->affinity & 0xFFFFFFFF, kctx);
+ kbase_reg_write(kbdev, JOB_SLOT_REG(js, JS_AFFINITY_NEXT_HI),
+ katom->affinity >> 32, kctx);
+
+ /* start MMU, medium priority, cache clean/flush on end, clean/flush on
+ * start */
+ cfg = kctx->as_nr;
+
+ if (kbase_hw_has_feature(kbdev, BASE_HW_FEATURE_FLUSH_REDUCTION))
+ cfg |= JS_CONFIG_ENABLE_FLUSH_REDUCTION;
+
+#ifndef CONFIG_MALI_COH_GPU
+ if (0 != (katom->core_req & BASE_JD_REQ_SKIP_CACHE_START))
+ cfg |= JS_CONFIG_START_FLUSH_NO_ACTION;
+ else
+ cfg |= JS_CONFIG_START_FLUSH_CLEAN_INVALIDATE;
+
+ if (0 != (katom->core_req & BASE_JD_REQ_SKIP_CACHE_END))
+ cfg |= JS_CONFIG_END_FLUSH_NO_ACTION;
+ else
+ cfg |= JS_CONFIG_END_FLUSH_CLEAN_INVALIDATE;
+#endif /* CONFIG_MALI_COH_GPU */
+
+ if (kbase_hw_has_issue(kbdev, BASE_HW_ISSUE_10649) ||
+ !kbase_hw_has_issue(kbdev, BASE_HW_ISSUE_T76X_3982))
+ cfg |= JS_CONFIG_START_MMU;
+
+ cfg |= JS_CONFIG_THREAD_PRI(8);
+
+ if (kbase_hw_has_feature(kbdev, BASE_HW_FEATURE_PROTECTED_MODE) &&
+ (katom->atom_flags & KBASE_KATOM_FLAG_PROTECTED))
+ cfg |= JS_CONFIG_DISABLE_DESCRIPTOR_WR_BK;
+
+ if (kbase_hw_has_feature(kbdev,
+ BASE_HW_FEATURE_JOBCHAIN_DISAMBIGUATION)) {
+ if (!kbdev->hwaccess.backend.slot_rb[js].job_chain_flag) {
+ cfg |= JS_CONFIG_JOB_CHAIN_FLAG;
+ katom->atom_flags |= KBASE_KATOM_FLAGS_JOBCHAIN;
+ kbdev->hwaccess.backend.slot_rb[js].job_chain_flag =
+ true;
+ } else {
+ katom->atom_flags &= ~KBASE_KATOM_FLAGS_JOBCHAIN;
+ kbdev->hwaccess.backend.slot_rb[js].job_chain_flag =
+ false;
+ }
+ }
+
+ kbase_reg_write(kbdev, JOB_SLOT_REG(js, JS_CONFIG_NEXT), cfg, kctx);
+
+ if (kbase_hw_has_feature(kbdev, BASE_HW_FEATURE_FLUSH_REDUCTION))
+ kbase_reg_write(kbdev, JOB_SLOT_REG(js, JS_FLUSH_ID_NEXT),
+ katom->flush_id, kctx);
+
+ /* Write an approximate start timestamp.
+ * It's approximate because there might be a job in the HEAD register.
+ * In such cases, we'll try to make a better approximation in the IRQ
+ * handler (up to the KBASE_JS_IRQ_THROTTLE_TIME_US). */
+ katom->start_timestamp = ktime_get();
+
+ /* GO ! */
+ dev_dbg(kbdev->dev, "JS: Submitting atom %p from ctx %p to js[%d] with head=0x%llx, affinity=0x%llx",
+ katom, kctx, js, jc_head, katom->affinity);
+
+ KBASE_TRACE_ADD_SLOT_INFO(kbdev, JM_SUBMIT, kctx, katom, jc_head, js,
+ (u32) katom->affinity);
+
+#if defined(CONFIG_MALI_GATOR_SUPPORT)
+ kbase_trace_mali_job_slots_event(
+ GATOR_MAKE_EVENT(GATOR_JOB_SLOT_START, js),
+ kctx, kbase_jd_atom_id(kctx, katom));
+#endif
+ kbase_tlstream_tl_attrib_atom_config(katom, jc_head,
+ katom->affinity, cfg);
+ kbase_tlstream_tl_ret_ctx_lpu(
+ kctx,
+ &kbdev->gpu_props.props.raw_props.js_features[
+ katom->slot_nr]);
+ kbase_tlstream_tl_ret_atom_as(katom, &kbdev->as[kctx->as_nr]);
+ kbase_tlstream_tl_ret_atom_lpu(
+ katom,
+ &kbdev->gpu_props.props.raw_props.js_features[js],
+ "ctx_nr,atom_nr");
+#ifdef CONFIG_GPU_TRACEPOINTS
+ if (!kbase_backend_nr_atoms_submitted(kbdev, js)) {
+ /* If this is the only job on the slot, trace it as starting */
+ char js_string[16];
+
+ trace_gpu_sched_switch(
+ kbasep_make_job_slot_string(js, js_string),
+ ktime_to_ns(katom->start_timestamp),
+ (u32)katom->kctx->id, 0, katom->work_id);
+ kbdev->hwaccess.backend.slot_rb[js].last_context = katom->kctx;
+ }
+#endif
+ kbase_timeline_job_slot_submit(kbdev, kctx, katom, js);
+
+ kbase_reg_write(kbdev, JOB_SLOT_REG(js, JS_COMMAND_NEXT),
+ JS_COMMAND_START, katom->kctx);
+}
+
+/**
+ * kbasep_job_slot_update_head_start_timestamp - Update timestamp
+ * @kbdev: kbase device
+ * @js: job slot
+ * @end_timestamp: timestamp
+ *
+ * Update the start_timestamp of the job currently in the HEAD, based on the
+ * fact that we got an IRQ for the previous set of completed jobs.
+ *
+ * The estimate also takes into account the %KBASE_JS_IRQ_THROTTLE_TIME_US and
+ * the time the job was submitted, to work out the best estimate (which might
+ * still result in an over-estimate of the calculated time spent)
+ */
+static void kbasep_job_slot_update_head_start_timestamp(
+ struct kbase_device *kbdev,
+ int js,
+ ktime_t end_timestamp)
+{
+ if (kbase_backend_nr_atoms_on_slot(kbdev, js) > 0) {
+ struct kbase_jd_atom *katom;
+ ktime_t new_timestamp;
+ ktime_t timestamp_diff;
+ /* The atom in the HEAD */
+ katom = kbase_gpu_inspect(kbdev, js, 0);
+
+ KBASE_DEBUG_ASSERT(katom != NULL);
+
+ /* Account for any IRQ Throttle time - makes an overestimate of
+ * the time spent by the job */
+ new_timestamp = ktime_sub_ns(end_timestamp,
+ KBASE_JS_IRQ_THROTTLE_TIME_US * 1000);
+ timestamp_diff = ktime_sub(new_timestamp,
+ katom->start_timestamp);
+ if (ktime_to_ns(timestamp_diff) >= 0) {
+ /* Only update the timestamp if it's a better estimate
+ * than what's currently stored. This is because our
+ * estimate that accounts for the throttle time may be
+ * too much of an overestimate */
+ katom->start_timestamp = new_timestamp;
+ }
+ }
+}
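The update above only ever moves the stored start time forwards: the throttle-compensated estimate is adopted only when it is no earlier than the currently stored value, so a poor estimate can never push the start time into the past. A minimal sketch of that rule with plain nanosecond values (hypothetical constant and names, not driver code):

    #include <stdint.h>
    #include <stdio.h>

    #define IRQ_THROTTLE_TIME_NS (20 * 1000)    /* hypothetical throttle window */

    /* Return the (possibly updated) start timestamp for the job now in HEAD,
     * given the time at which the IRQ for the previous job was handled. */
    static int64_t update_head_start(int64_t start_ns, int64_t irq_end_ns)
    {
        int64_t estimate = irq_end_ns - IRQ_THROTTLE_TIME_NS;

        /* Only adopt the estimate when it is a later (better) start time */
        return (estimate - start_ns >= 0) ? estimate : start_ns;
    }

    int main(void)
    {
        /* IRQ handled at 1.05 ms, 20 us throttle => start moves up to 1.03 ms */
        printf("%lld\n", (long long)update_head_start(1000000, 1050000));
        return 0;
    }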
+
+/**
+ * kbasep_trace_tl_nret_atom_lpu - Call nret_atom_lpu timeline tracepoint
+ * @kbdev: kbase device
+ * @js: job slot
+ *
+ * Get kbase atom by calling kbase_gpu_inspect for given job slot.
+ * Then use obtained katom and name of slot associated with the given
+ * job slot number in tracepoint call to the instrumentation module
+ * informing that given atom is no longer executed on given lpu (job slot).
+ */
+static void kbasep_trace_tl_nret_atom_lpu(struct kbase_device *kbdev, int js)
+{
+ int i;
+ for (i = 0;
+ i < kbase_backend_nr_atoms_submitted(kbdev, js);
+ i++) {
+ struct kbase_jd_atom *katom = kbase_gpu_inspect(kbdev, js, i);
+
+ kbase_tlstream_tl_nret_atom_lpu(katom,
+ &kbdev->gpu_props.props.raw_props.js_features[js]);
+ }
+}
+
+/**
+ * kbasep_trace_tl_event_lpu_softstop - Call event_lpu_softstop timeline
+ * tracepoint
+ * @kbdev: kbase device
+ * @js: job slot
+ *
+ * Make a tracepoint call to the instrumentation module informing that
+ * softstop happened on given lpu (job slot).
+ */
+static void kbasep_trace_tl_event_lpu_softstop(struct kbase_device *kbdev,
+ int js)
+{
+ kbase_tlstream_tl_event_lpu_softstop(
+ &kbdev->gpu_props.props.raw_props.js_features[js]);
+}
+
+void kbase_job_done(struct kbase_device *kbdev, u32 done)
+{
+ unsigned long flags;
+ int i;
+ u32 count = 0;
+ ktime_t end_timestamp = ktime_get();
+ struct kbasep_js_device_data *js_devdata;
+
+ KBASE_DEBUG_ASSERT(kbdev);
+ js_devdata = &kbdev->js_data;
+
+ KBASE_TRACE_ADD(kbdev, JM_IRQ, NULL, NULL, 0, done);
+
+ memset(&kbdev->slot_submit_count_irq[0], 0,
+ sizeof(kbdev->slot_submit_count_irq));
+
+ /* write irq throttle register, this will prevent irqs from occurring
+ * until the given number of gpu clock cycles have passed */
+ {
+ int irq_throttle_cycles =
+ atomic_read(&kbdev->irq_throttle_cycles);
+
+ kbase_reg_write(kbdev, JOB_CONTROL_REG(JOB_IRQ_THROTTLE),
+ irq_throttle_cycles, NULL);
+ }
+
+ spin_lock_irqsave(&kbdev->hwaccess_lock, flags);
+
+ while (done) {
+ u32 failed = done >> 16;
+
+ /* treat failed slots as finished slots */
+ u32 finished = (done & 0xFFFF) | failed;
+
+ /* Note: This is inherently unfair, as we always check
+ * for lower numbered interrupts before the higher
+ * numbered ones.*/
+ i = ffs(finished) - 1;
+ KBASE_DEBUG_ASSERT(i >= 0);
+
+ do {
+ int nr_done;
+ u32 active;
+ u32 completion_code = BASE_JD_EVENT_DONE;/* assume OK */
+ u64 job_tail = 0;
+
+ if (failed & (1u << i)) {
+ /* read out the job slot status code if the job
+ * slot reported failure */
+ completion_code = kbase_reg_read(kbdev,
+ JOB_SLOT_REG(i, JS_STATUS), NULL);
+
+ switch (completion_code) {
+ case BASE_JD_EVENT_STOPPED:
+#if defined(CONFIG_MALI_GATOR_SUPPORT)
+ kbase_trace_mali_job_slots_event(
+ GATOR_MAKE_EVENT(
+ GATOR_JOB_SLOT_SOFT_STOPPED, i),
+ NULL, 0);
+#endif
+
+ kbasep_trace_tl_event_lpu_softstop(
+ kbdev, i);
+
+ kbasep_trace_tl_nret_atom_lpu(
+ kbdev, i);
+
+ /* Soft-stopped job - read the value of
+ * JS<n>_TAIL so that the job chain can
+ * be resumed */
+ job_tail = (u64)kbase_reg_read(kbdev,
+ JOB_SLOT_REG(i, JS_TAIL_LO),
+ NULL) |
+ ((u64)kbase_reg_read(kbdev,
+ JOB_SLOT_REG(i, JS_TAIL_HI),
+ NULL) << 32);
+ break;
+ case BASE_JD_EVENT_NOT_STARTED:
+ /* PRLAM-10673 can cause a TERMINATED
+ * job to come back as NOT_STARTED, but
+ * the error interrupt helps us detect
+ * it */
+ completion_code =
+ BASE_JD_EVENT_TERMINATED;
+ /* fall through */
+ default:
+ dev_warn(kbdev->dev, "error detected from slot %d, job status 0x%08x (%s)",
+ i, completion_code,
+ kbase_exception_name
+ (kbdev,
+ completion_code));
+ }
+
+ kbase_gpu_irq_evict(kbdev, i);
+ }
+
+ kbase_reg_write(kbdev, JOB_CONTROL_REG(JOB_IRQ_CLEAR),
+ done & ((1 << i) | (1 << (i + 16))),
+ NULL);
+ active = kbase_reg_read(kbdev,
+ JOB_CONTROL_REG(JOB_IRQ_JS_STATE),
+ NULL);
+
+ if (((active >> i) & 1) == 0 &&
+ (((done >> (i + 16)) & 1) == 0)) {
+ /* There is a potential race we must work
+ * around:
+ *
+ * 1. A job slot has a job in both current and
+ * next registers
+ * 2. The job in current completes
+ * successfully, the IRQ handler reads
+ * RAWSTAT and calls this function with the
+ * relevant bit set in "done"
+ * 3. The job in the next registers becomes the
+ * current job on the GPU
+ * 4. Sometime before the JOB_IRQ_CLEAR line
+ * above the job on the GPU _fails_
+ * 5. The IRQ_CLEAR clears the done bit but not
+ * the failed bit. This atomically sets
+ * JOB_IRQ_JS_STATE. However since both jobs
+ * have now completed the relevant bits for
+ * the slot are set to 0.
+ *
+ * If we now did nothing then we'd incorrectly
+ * assume that _both_ jobs had completed
+ * successfully (since we haven't yet observed
+ * the fail bit being set in RAWSTAT).
+ *
+ * So at this point if there are no active jobs
+ * left we check to see if RAWSTAT has a failure
+ * bit set for the job slot. If it does we know
+ * that there has been a new failure that we
+ * didn't previously know about, so we make sure
+ * that we record this in active (but we wait
+ * for the next loop to deal with it).
+ *
+ * If we were handling a job failure (i.e. done
+ * has the relevant high bit set) then we know
+ * that the value read back from
+ * JOB_IRQ_JS_STATE is the correct number of
+ * remaining jobs because the failed job will
+ * have prevented any further jobs from starting
+ * execution.
+ */
+ u32 rawstat = kbase_reg_read(kbdev,
+ JOB_CONTROL_REG(JOB_IRQ_RAWSTAT), NULL);
+
+ if ((rawstat >> (i + 16)) & 1) {
+ /* There is a failed job that we've
+ * missed - add it back to active */
+ active |= (1u << i);
+ }
+ }
+
+ dev_dbg(kbdev->dev, "Job ended with status 0x%08X\n",
+ completion_code);
+
+ nr_done = kbase_backend_nr_atoms_submitted(kbdev, i);
+ nr_done -= (active >> i) & 1;
+ nr_done -= (active >> (i + 16)) & 1;
+
+ if (nr_done <= 0) {
+ dev_warn(kbdev->dev, "Spurious interrupt on slot %d",
+ i);
+
+ goto spurious;
+ }
+
+ count += nr_done;
+
+ while (nr_done) {
+ if (nr_done == 1) {
+ kbase_gpu_complete_hw(kbdev, i,
+ completion_code,
+ job_tail,
+ &end_timestamp);
+ kbase_jm_try_kick_all(kbdev);
+ } else {
+ /* More than one job has completed.
+ * Since this is not the last job being
+ * reported this time it must have
+ * passed. This is because the hardware
+ * will not allow further jobs in a job
+ * slot to complete until the failed job
+ * is cleared from the IRQ status.
+ */
+ kbase_gpu_complete_hw(kbdev, i,
+ BASE_JD_EVENT_DONE,
+ 0,
+ &end_timestamp);
+ }
+ nr_done--;
+ }
+ spurious:
+ done = kbase_reg_read(kbdev,
+ JOB_CONTROL_REG(JOB_IRQ_RAWSTAT), NULL);
+
+ if (kbase_hw_has_issue(kbdev, BASE_HW_ISSUE_10883)) {
+ /* Workaround for missing interrupt caused by
+ * PRLAM-10883 */
+ if (((active >> i) & 1) && (0 ==
+ kbase_reg_read(kbdev,
+ JOB_SLOT_REG(i,
+ JS_STATUS), NULL))) {
+ /* Force job slot to be processed again
+ */
+ done |= (1u << i);
+ }
+ }
+
+ failed = done >> 16;
+ finished = (done & 0xFFFF) | failed;
+ if (done)
+ end_timestamp = ktime_get();
+ } while (finished & (1 << i));
+
+ kbasep_job_slot_update_head_start_timestamp(kbdev, i,
+ end_timestamp);
+ }
+
+ spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);
+#if KBASE_GPU_RESET_EN
+ if (atomic_read(&kbdev->hwaccess.backend.reset_gpu) ==
+ KBASE_RESET_GPU_COMMITTED) {
+ /* If we're trying to reset the GPU then we might be able to do
+ * it early (without waiting for a timeout) because some jobs
+ * have completed
+ */
+ kbasep_try_reset_gpu_early(kbdev);
+ }
+#endif /* KBASE_GPU_RESET_EN */
+ KBASE_TRACE_ADD(kbdev, JM_IRQ_END, NULL, NULL, 0, count);
+}
+KBASE_EXPORT_TEST_API(kbase_job_done);
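For reference, kbase_job_done() treats the 32-bit done word as two 16-bit halves: bit n means slot n finished, bit n+16 means slot n failed, and failed slots are folded into the finished set so they are still processed, lowest-numbered slot first. A standalone sketch of just that decode (illustrative only):

    #include <stdio.h>
    #include <strings.h>                /* ffs() */

    static void decode_done(unsigned int done)
    {
        unsigned int failed = done >> 16;
        unsigned int finished = (done & 0xFFFF) | failed;  /* failed implies finished */

        while (finished) {
            int slot = ffs(finished) - 1;   /* lowest-numbered slot first */

            printf("slot %d finished%s\n", slot,
                   (failed >> slot) & 1 ? " (failed)" : "");
            finished &= ~(1u << slot);
        }
    }

    int main(void)
    {
        decode_done(0x00010002);        /* slot 1 completed, slot 0 failed */
        return 0;
    }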
+
+static bool kbasep_soft_stop_allowed(struct kbase_device *kbdev,
+ struct kbase_jd_atom *katom)
+{
+ bool soft_stops_allowed = true;
+
+ if (kbase_jd_katom_is_protected(katom)) {
+ soft_stops_allowed = false;
+ } else if (kbase_hw_has_issue(kbdev, BASE_HW_ISSUE_8408)) {
+ if ((katom->core_req & BASE_JD_REQ_T) != 0)
+ soft_stops_allowed = false;
+ }
+ return soft_stops_allowed;
+}
+
+static bool kbasep_hard_stop_allowed(struct kbase_device *kbdev,
+ base_jd_core_req core_reqs)
+{
+ bool hard_stops_allowed = true;
+
+ if (kbase_hw_has_issue(kbdev, BASE_HW_ISSUE_8394)) {
+ if ((core_reqs & BASE_JD_REQ_T) != 0)
+ hard_stops_allowed = false;
+ }
+ return hard_stops_allowed;
+}
+
+void kbasep_job_slot_soft_or_hard_stop_do_action(struct kbase_device *kbdev,
+ int js,
+ u32 action,
+ base_jd_core_req core_reqs,
+ struct kbase_jd_atom *target_katom)
+{
+ struct kbase_context *kctx = target_katom->kctx;
+#if KBASE_TRACE_ENABLE
+ u32 status_reg_before;
+ u64 job_in_head_before;
+ u32 status_reg_after;
+
+ KBASE_DEBUG_ASSERT(!(action & (~JS_COMMAND_MASK)));
+
+ /* Check the head pointer */
+ job_in_head_before = ((u64) kbase_reg_read(kbdev,
+ JOB_SLOT_REG(js, JS_HEAD_LO), NULL))
+ | (((u64) kbase_reg_read(kbdev,
+ JOB_SLOT_REG(js, JS_HEAD_HI), NULL))
+ << 32);
+ status_reg_before = kbase_reg_read(kbdev, JOB_SLOT_REG(js, JS_STATUS),
+ NULL);
+#endif
+
+ if (action == JS_COMMAND_SOFT_STOP) {
+ bool soft_stop_allowed = kbasep_soft_stop_allowed(kbdev,
+ target_katom);
+
+ if (!soft_stop_allowed) {
+#ifdef CONFIG_MALI_DEBUG
+ dev_dbg(kbdev->dev,
+ "Attempt made to soft-stop a job that cannot be soft-stopped. core_reqs = 0x%X",
+ (unsigned int)core_reqs);
+#endif /* CONFIG_MALI_DEBUG */
+ return;
+ }
+
+ /* We are about to issue a soft stop, so mark the atom as having
+ * been soft stopped */
+ target_katom->atom_flags |= KBASE_KATOM_FLAG_BEEN_SOFT_STOPPPED;
+
+ /* Mark the point where we issue the soft-stop command */
+ kbase_tlstream_tl_event_atom_softstop_issue(target_katom);
+
+ if (kbase_hw_has_issue(kbdev, BASE_HW_ISSUE_8316)) {
+ int i;
+
+ for (i = 0;
+ i < kbase_backend_nr_atoms_submitted(kbdev, js);
+ i++) {
+ struct kbase_jd_atom *katom;
+
+ katom = kbase_gpu_inspect(kbdev, js, i);
+
+ KBASE_DEBUG_ASSERT(katom);
+
+ /* For HW_ISSUE_8316, only 'bad' jobs attacking
+ * the system can cause this issue: normally,
+ * all memory should be allocated in multiples
+ * of 4 pages, and growable memory should be
+ * changed size in multiples of 4 pages.
+ *
+ * Whilst such 'bad' jobs can be cleared by a
+ * GPU reset, the locking up of a uTLB entry
+ * caused by the bad job could also stall other
+ * ASs, meaning that other ASs' jobs don't
+ * complete in the 'grace' period before the
+ * reset. We don't want to lose other ASs' jobs
+ * when they would normally complete fine, so we
+ * must 'poke' the MMU regularly to help other
+ * ASs complete */
+ kbase_as_poking_timer_retain_atom(
+ kbdev, katom->kctx, katom);
+ }
+ }
+
+ if (kbase_hw_has_feature(
+ kbdev,
+ BASE_HW_FEATURE_JOBCHAIN_DISAMBIGUATION)) {
+ action = (target_katom->atom_flags &
+ KBASE_KATOM_FLAGS_JOBCHAIN) ?
+ JS_COMMAND_SOFT_STOP_1 :
+ JS_COMMAND_SOFT_STOP_0;
+ }
+ } else if (action == JS_COMMAND_HARD_STOP) {
+ bool hard_stop_allowed = kbasep_hard_stop_allowed(kbdev,
+ core_reqs);
+
+ if (!hard_stop_allowed) {
+ /* Jobs can be hard-stopped for the following reasons:
+ * * CFS decides the job has been running too long (and
+ * soft-stop has not occurred). In this case the GPU
+ * will be reset by CFS if the job remains on the
+ * GPU.
+ *
+ * * The context is destroyed, kbase_jd_zap_context
+ * will attempt to hard-stop the job. However it also
+ * has a watchdog which will cause the GPU to be
+ * reset if the job remains on the GPU.
+ *
+ * * An (unhandled) MMU fault occurred. As long as
+ * BASE_HW_ISSUE_8245 is defined then the GPU will be
+ * reset.
+ *
+ * All three cases result in the GPU being reset if the
+ * hard-stop fails, so it is safe to just return and
+ * ignore the hard-stop request.
+ */
+ dev_warn(kbdev->dev,
+ "Attempt made to hard-stop a job that cannot be hard-stopped. core_reqs = 0x%X",
+ (unsigned int)core_reqs);
+ return;
+ }
+ target_katom->atom_flags |= KBASE_KATOM_FLAG_BEEN_HARD_STOPPED;
+
+ if (kbase_hw_has_feature(
+ kbdev,
+ BASE_HW_FEATURE_JOBCHAIN_DISAMBIGUATION)) {
+ action = (target_katom->atom_flags &
+ KBASE_KATOM_FLAGS_JOBCHAIN) ?
+ JS_COMMAND_HARD_STOP_1 :
+ JS_COMMAND_HARD_STOP_0;
+ }
+ }
+
+ kbase_reg_write(kbdev, JOB_SLOT_REG(js, JS_COMMAND), action, kctx);
+
+#if KBASE_TRACE_ENABLE
+ status_reg_after = kbase_reg_read(kbdev, JOB_SLOT_REG(js, JS_STATUS),
+ NULL);
+ if (status_reg_after == BASE_JD_EVENT_ACTIVE) {
+ struct kbase_jd_atom *head;
+ struct kbase_context *head_kctx;
+
+ head = kbase_gpu_inspect(kbdev, js, 0);
+ head_kctx = head->kctx;
+
+ if (status_reg_before == BASE_JD_EVENT_ACTIVE)
+ KBASE_TRACE_ADD_SLOT(kbdev, JM_CHECK_HEAD, head_kctx,
+ head, job_in_head_before, js);
+ else
+ KBASE_TRACE_ADD_SLOT(kbdev, JM_CHECK_HEAD, NULL, NULL,
+ 0, js);
+
+ switch (action) {
+ case JS_COMMAND_SOFT_STOP:
+ KBASE_TRACE_ADD_SLOT(kbdev, JM_SOFTSTOP, head_kctx,
+ head, head->jc, js);
+ break;
+ case JS_COMMAND_SOFT_STOP_0:
+ KBASE_TRACE_ADD_SLOT(kbdev, JM_SOFTSTOP_0, head_kctx,
+ head, head->jc, js);
+ break;
+ case JS_COMMAND_SOFT_STOP_1:
+ KBASE_TRACE_ADD_SLOT(kbdev, JM_SOFTSTOP_1, head_kctx,
+ head, head->jc, js);
+ break;
+ case JS_COMMAND_HARD_STOP:
+ KBASE_TRACE_ADD_SLOT(kbdev, JM_HARDSTOP, head_kctx,
+ head, head->jc, js);
+ break;
+ case JS_COMMAND_HARD_STOP_0:
+ KBASE_TRACE_ADD_SLOT(kbdev, JM_HARDSTOP_0, head_kctx,
+ head, head->jc, js);
+ break;
+ case JS_COMMAND_HARD_STOP_1:
+ KBASE_TRACE_ADD_SLOT(kbdev, JM_HARDSTOP_1, head_kctx,
+ head, head->jc, js);
+ break;
+ default:
+ BUG();
+ break;
+ }
+ } else {
+ if (status_reg_before == BASE_JD_EVENT_ACTIVE)
+ KBASE_TRACE_ADD_SLOT(kbdev, JM_CHECK_HEAD, NULL, NULL,
+ job_in_head_before, js);
+ else
+ KBASE_TRACE_ADD_SLOT(kbdev, JM_CHECK_HEAD, NULL, NULL,
+ 0, js);
+
+ switch (action) {
+ case JS_COMMAND_SOFT_STOP:
+ KBASE_TRACE_ADD_SLOT(kbdev, JM_SOFTSTOP, NULL, NULL, 0,
+ js);
+ break;
+ case JS_COMMAND_SOFT_STOP_0:
+ KBASE_TRACE_ADD_SLOT(kbdev, JM_SOFTSTOP_0, NULL, NULL,
+ 0, js);
+ break;
+ case JS_COMMAND_SOFT_STOP_1:
+ KBASE_TRACE_ADD_SLOT(kbdev, JM_SOFTSTOP_1, NULL, NULL,
+ 0, js);
+ break;
+ case JS_COMMAND_HARD_STOP:
+ KBASE_TRACE_ADD_SLOT(kbdev, JM_HARDSTOP, NULL, NULL, 0,
+ js);
+ break;
+ case JS_COMMAND_HARD_STOP_0:
+ KBASE_TRACE_ADD_SLOT(kbdev, JM_HARDSTOP_0, NULL, NULL,
+ 0, js);
+ break;
+ case JS_COMMAND_HARD_STOP_1:
+ KBASE_TRACE_ADD_SLOT(kbdev, JM_HARDSTOP_1, NULL, NULL,
+ 0, js);
+ break;
+ default:
+ BUG();
+ break;
+ }
+ }
+#endif
+}
+
+void kbase_backend_jm_kill_jobs_from_kctx(struct kbase_context *kctx)
+{
+ unsigned long flags;
+ struct kbase_device *kbdev;
+ struct kbasep_js_device_data *js_devdata;
+ int i;
+
+ KBASE_DEBUG_ASSERT(kctx != NULL);
+ kbdev = kctx->kbdev;
+ KBASE_DEBUG_ASSERT(kbdev != NULL);
+ js_devdata = &kbdev->js_data;
+
+ /* Cancel any remaining running jobs for this kctx */
+ mutex_lock(&kctx->jctx.lock);
+ spin_lock_irqsave(&kbdev->hwaccess_lock, flags);
+
+ /* Invalidate all jobs in context, to prevent re-submitting */
+ for (i = 0; i < BASE_JD_ATOM_COUNT; i++) {
+ if (!work_pending(&kctx->jctx.atoms[i].work))
+ kctx->jctx.atoms[i].event_code =
+ BASE_JD_EVENT_JOB_CANCELLED;
+ }
+
+ for (i = 0; i < kbdev->gpu_props.num_job_slots; i++)
+ kbase_job_slot_hardstop(kctx, i, NULL);
+
+ spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);
+ mutex_unlock(&kctx->jctx.lock);
+}
+
+void kbase_job_slot_ctx_priority_check_locked(struct kbase_context *kctx,
+ struct kbase_jd_atom *target_katom)
+{
+ struct kbase_device *kbdev;
+ int js = target_katom->slot_nr;
+ int priority = target_katom->sched_priority;
+ int i;
+ bool stop_sent = false;
+
+ KBASE_DEBUG_ASSERT(kctx != NULL);
+ kbdev = kctx->kbdev;
+ KBASE_DEBUG_ASSERT(kbdev != NULL);
+
+ lockdep_assert_held(&kbdev->hwaccess_lock);
+
+ for (i = 0; i < kbase_backend_nr_atoms_on_slot(kbdev, js); i++) {
+ struct kbase_jd_atom *katom;
+
+ katom = kbase_gpu_inspect(kbdev, js, i);
+ if (!katom)
+ continue;
+
+ if (katom->kctx != kctx)
+ continue;
+
+ if (katom->sched_priority > priority) {
+ if (!stop_sent)
+ kbase_tlstream_tl_attrib_atom_priority_change(
+ target_katom);
+
+ kbase_job_slot_softstop(kbdev, js, katom);
+ stop_sent = true;
+ }
+ }
+}
+
+struct zap_reset_data {
+ /* The stages are:
+ * 1. The timer has never been called
+ * 2. The zap has timed out, all slots are soft-stopped - the GPU reset
+ * will happen. The GPU has been reset when
+ * kbdev->hwaccess.backend.reset_wait is signalled
+ *
+ * (-1 - The timer has been cancelled)
+ */
+ int stage;
+ struct kbase_device *kbdev;
+ struct hrtimer timer;
+ spinlock_t lock; /* protects updates to stage member */
+};
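The stage field is a small hand-rolled handshake between the zap path and its timeout callback: 1 means the timer has not fired yet, -1 means the waiter cancelled it in time, and 2 means the timeout path ran and a GPU reset is in flight. A hypothetical sketch of that handshake, with a pthread mutex standing in for the spinlock (illustrative only):

    #include <pthread.h>
    #include <stdbool.h>

    struct zap_state {
        pthread_mutex_t lock;
        int stage;      /* 1 = timer pending, -1 = cancelled, 2 = reset started */
    };

    /* Timer callback: trigger the reset unless the waiter already cancelled */
    static void zap_timeout(struct zap_state *z)
    {
        pthread_mutex_lock(&z->lock);
        if (z->stage != -1)
            z->stage = 2;   /* the real code prepares and starts a GPU reset here */
        pthread_mutex_unlock(&z->lock);
    }

    /* Waiter: returns true when a reset was started and must be waited for */
    static bool zap_finish(struct zap_state *z)
    {
        bool reset_started;

        pthread_mutex_lock(&z->lock);
        if (z->stage == 1)
            z->stage = -1;  /* beat the timer, so cancel it */
        reset_started = (z->stage == 2);
        pthread_mutex_unlock(&z->lock);

        return reset_started;
    }

    int main(void)
    {
        struct zap_state z = { PTHREAD_MUTEX_INITIALIZER, 1 };

        zap_timeout(&z);            /* timer fires before the waiter cancels */
        return zap_finish(&z);      /* returns 1: a reset was started */
    }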
+
+static enum hrtimer_restart zap_timeout_callback(struct hrtimer *timer)
+{
+ struct zap_reset_data *reset_data = container_of(timer,
+ struct zap_reset_data, timer);
+ struct kbase_device *kbdev = reset_data->kbdev;
+ unsigned long flags;
+
+ spin_lock_irqsave(&reset_data->lock, flags);
+
+ if (reset_data->stage == -1)
+ goto out;
+
+#if KBASE_GPU_RESET_EN
+ if (kbase_prepare_to_reset_gpu(kbdev)) {
+ dev_err(kbdev->dev, "Issuing GPU soft-reset because jobs failed to be killed (within %d ms) as part of context termination (e.g. process exit)\n",
+ ZAP_TIMEOUT);
+ kbase_reset_gpu(kbdev);
+ }
+#endif /* KBASE_GPU_RESET_EN */
+ reset_data->stage = 2;
+
+ out:
+ spin_unlock_irqrestore(&reset_data->lock, flags);
+
+ return HRTIMER_NORESTART;
+}
+
+void kbase_jm_wait_for_zero_jobs(struct kbase_context *kctx)
+{
+ struct kbase_device *kbdev = kctx->kbdev;
+ struct zap_reset_data reset_data;
+ unsigned long flags;
+
+ hrtimer_init_on_stack(&reset_data.timer, CLOCK_MONOTONIC,
+ HRTIMER_MODE_REL);
+ reset_data.timer.function = zap_timeout_callback;
+
+ spin_lock_init(&reset_data.lock);
+
+ reset_data.kbdev = kbdev;
+ reset_data.stage = 1;
+
+ hrtimer_start(&reset_data.timer, HR_TIMER_DELAY_MSEC(ZAP_TIMEOUT),
+ HRTIMER_MODE_REL);
+
+ /* Wait for all jobs to finish, and for the context to be not-scheduled
+ * (due to kbase_job_zap_context(), we also guarantee it's not in the JS
+ * policy queue either) */
+ wait_event(kctx->jctx.zero_jobs_wait, kctx->jctx.job_nr == 0);
+ wait_event(kctx->jctx.sched_info.ctx.is_scheduled_wait,
+ !kbase_ctx_flag(kctx, KCTX_SCHEDULED));
+
+ spin_lock_irqsave(&reset_data.lock, flags);
+ if (reset_data.stage == 1) {
+ /* The timer hasn't run yet - so cancel it */
+ reset_data.stage = -1;
+ }
+ spin_unlock_irqrestore(&reset_data.lock, flags);
+
+ hrtimer_cancel(&reset_data.timer);
+
+ if (reset_data.stage == 2) {
+ /* The reset has already started.
+ * Wait for the reset to complete
+ */
+ wait_event(kbdev->hwaccess.backend.reset_wait,
+ atomic_read(&kbdev->hwaccess.backend.reset_gpu)
+ == KBASE_RESET_GPU_NOT_PENDING);
+ }
+ destroy_hrtimer_on_stack(&reset_data.timer);
+
+ dev_dbg(kbdev->dev, "Zap: Finished Context %p", kctx);
+
+ /* Ensure that the signallers of the waitqs have finished */
+ mutex_lock(&kctx->jctx.lock);
+ mutex_lock(&kctx->jctx.sched_info.ctx.jsctx_mutex);
+ mutex_unlock(&kctx->jctx.sched_info.ctx.jsctx_mutex);
+ mutex_unlock(&kctx->jctx.lock);
+}
+
+u32 kbase_backend_get_current_flush_id(struct kbase_device *kbdev)
+{
+ u32 flush_id = 0;
+
+ if (kbase_hw_has_feature(kbdev, BASE_HW_FEATURE_FLUSH_REDUCTION)) {
+ mutex_lock(&kbdev->pm.lock);
+ if (kbdev->pm.backend.gpu_powered)
+ flush_id = kbase_reg_read(kbdev,
+ GPU_CONTROL_REG(LATEST_FLUSH), NULL);
+ mutex_unlock(&kbdev->pm.lock);
+ }
+
+ return flush_id;
+}
+
+int kbase_job_slot_init(struct kbase_device *kbdev)
+{
+#if KBASE_GPU_RESET_EN
+ kbdev->hwaccess.backend.reset_workq = alloc_workqueue(
+ "Mali reset workqueue", 0, 1);
+ if (NULL == kbdev->hwaccess.backend.reset_workq)
+ return -EINVAL;
+
+ KBASE_DEBUG_ASSERT(0 ==
+ object_is_on_stack(&kbdev->hwaccess.backend.reset_work));
+ INIT_WORK(&kbdev->hwaccess.backend.reset_work,
+ kbasep_reset_timeout_worker);
+
+ hrtimer_init(&kbdev->hwaccess.backend.reset_timer, CLOCK_MONOTONIC,
+ HRTIMER_MODE_REL);
+ kbdev->hwaccess.backend.reset_timer.function =
+ kbasep_reset_timer_callback;
+#endif
+
+ return 0;
+}
+KBASE_EXPORT_TEST_API(kbase_job_slot_init);
+
+void kbase_job_slot_halt(struct kbase_device *kbdev)
+{
+ CSTD_UNUSED(kbdev);
+}
+
+void kbase_job_slot_term(struct kbase_device *kbdev)
+{
+#if KBASE_GPU_RESET_EN
+ destroy_workqueue(kbdev->hwaccess.backend.reset_workq);
+#endif
+}
+KBASE_EXPORT_TEST_API(kbase_job_slot_term);
+
+#if KBASE_GPU_RESET_EN
+/**
+ * kbasep_check_for_afbc_on_slot() - Check whether AFBC is in use on this slot
+ * @kbdev: kbase device pointer
+ * @kctx: context to check against
+ * @js: slot to check
+ * @target_katom: An atom to check, or NULL if all atoms from @kctx on
+ * slot @js should be checked
+ *
+ * These checks are based upon parameters that would normally be passed to
+ * kbase_job_slot_hardstop().
+ *
+ * In the event of @target_katom being NULL, this will check the last jobs that
+ * are likely to be running on the slot to see if a) they belong to kctx, and
+ * so would be stopped, and b) whether they have AFBC
+ *
+ * In that case, it's guaranteed that a job currently executing on the HW with
+ * AFBC will be detected. However, this is a conservative check because it also
+ * detects jobs that have just completed.
+ *
+ * Return: true when hard-stop _might_ stop an afbc atom, else false.
+ */
+static bool kbasep_check_for_afbc_on_slot(struct kbase_device *kbdev,
+ struct kbase_context *kctx, int js,
+ struct kbase_jd_atom *target_katom)
+{
+ bool ret = false;
+ int i;
+
+ lockdep_assert_held(&kbdev->hwaccess_lock);
+
+ /* When we have an atom the decision can be made straight away. */
+ if (target_katom)
+ return !!(target_katom->core_req & BASE_JD_REQ_FS_AFBC);
+
+ /* Otherwise, we must check the hardware to see if it has atoms from
+ * this context with AFBC. */
+ for (i = 0; i < kbase_backend_nr_atoms_on_slot(kbdev, js); i++) {
+ struct kbase_jd_atom *katom;
+
+ katom = kbase_gpu_inspect(kbdev, js, i);
+ if (!katom)
+ continue;
+
+ /* Ignore atoms from other contexts, they won't be stopped when
+ * we use this for checking if we should hard-stop them */
+ if (katom->kctx != kctx)
+ continue;
+
+ /* An atom on this slot and this context: check for AFBC */
+ if (katom->core_req & BASE_JD_REQ_FS_AFBC) {
+ ret = true;
+ break;
+ }
+ }
+
+ return ret;
+}
+#endif /* KBASE_GPU_RESET_EN */
+
+/**
+ * kbase_job_slot_softstop_swflags - Soft-stop a job with flags
+ * @kbdev: The kbase device
+ * @js: The job slot to soft-stop
+ * @target_katom: The job that should be soft-stopped (or NULL for any job)
+ * @sw_flags: Flags to pass in about the soft-stop
+ *
+ * Context:
+ * The job slot lock must be held when calling this function.
+ * The job slot must not already be in the process of being soft-stopped.
+ *
+ * Soft-stop the specified job slot, with extra information about the stop
+ *
+ * Where possible any job in the next register is evicted before the soft-stop.
+ */
+void kbase_job_slot_softstop_swflags(struct kbase_device *kbdev, int js,
+ struct kbase_jd_atom *target_katom, u32 sw_flags)
+{
+ KBASE_DEBUG_ASSERT(!(sw_flags & JS_COMMAND_MASK));
+ kbase_backend_soft_hard_stop_slot(kbdev, NULL, js, target_katom,
+ JS_COMMAND_SOFT_STOP | sw_flags);
+}
+
+/**
+ * kbase_job_slot_softstop - Soft-stop the specified job slot
+ * @kbdev: The kbase device
+ * @js: The job slot to soft-stop
+ * @target_katom: The job that should be soft-stopped (or NULL for any job)
+ * Context:
+ * The job slot lock must be held when calling this function.
+ * The job slot must not already be in the process of being soft-stopped.
+ *
+ * Where possible any job in the next register is evicted before the soft-stop.
+ */
+void kbase_job_slot_softstop(struct kbase_device *kbdev, int js,
+ struct kbase_jd_atom *target_katom)
+{
+ kbase_job_slot_softstop_swflags(kbdev, js, target_katom, 0u);
+}
+
+/**
+ * kbase_job_slot_hardstop - Hard-stop the specified job slot
+ * @kctx: The kbase context that contains the job(s) that should
+ * be hard-stopped
+ * @js: The job slot to hard-stop
+ * @target_katom: The job that should be hard-stopped (or NULL for all
+ * jobs from the context)
+ * Context:
+ * The job slot lock must be held when calling this function.
+ */
+void kbase_job_slot_hardstop(struct kbase_context *kctx, int js,
+ struct kbase_jd_atom *target_katom)
+{
+ struct kbase_device *kbdev = kctx->kbdev;
+ bool stopped;
+#if KBASE_GPU_RESET_EN
+ /* We make the check for AFBC before evicting/stopping atoms. Note
+ * that no other thread can modify the slots whilst we have the
+ * hwaccess_lock. */
+ int needs_workaround_for_afbc =
+ kbase_hw_has_issue(kbdev, BASE_HW_ISSUE_T76X_3542)
+ && kbasep_check_for_afbc_on_slot(kbdev, kctx, js,
+ target_katom);
+#endif
+
+ stopped = kbase_backend_soft_hard_stop_slot(kbdev, kctx, js,
+ target_katom,
+ JS_COMMAND_HARD_STOP);
+#if KBASE_GPU_RESET_EN
+ if (stopped && (kbase_hw_has_issue(kctx->kbdev, BASE_HW_ISSUE_8401) ||
+ kbase_hw_has_issue(kctx->kbdev, BASE_HW_ISSUE_9510) ||
+ needs_workaround_for_afbc)) {
+ /* MIDBASE-2916 if a fragment job with AFBC encoding is
+ * hardstopped, ensure to do a soft reset also in order to
+ * clear the GPU status.
+ * The workaround for HW issue 8401 has an issue, so after
+ * hard-stopping just reset the GPU. This will ensure that the
+ * jobs leave the GPU. */
+ if (kbase_prepare_to_reset_gpu_locked(kbdev)) {
+ dev_err(kbdev->dev, "Issuing GPU soft-reset after hard-stopping due to hardware issue");
+ kbase_reset_gpu_locked(kbdev);
+ }
+ }
+#endif
+}
+
+/**
+ * kbase_job_check_enter_disjoint - potentially enter disjoint mode
+ * @kbdev: kbase device
+ * @action: the event which has occurred
+ * @core_reqs: core requirements of the atom
+ * @target_katom: the atom which is being affected
+ *
+ * For a certain soft/hard-stop action, work out whether to enter disjoint
+ * state.
+ *
+ * This does not register multiple disjoint events if the atom has already
+ * started a disjoint period
+ *
+ * @core_reqs can be supplied as 0 if the atom had not started on the hardware
+ * (and so a 'real' soft/hard-stop was not required, but it still interrupted
+ * flow, perhaps on another context)
+ *
+ * kbase_job_check_leave_disjoint() should be used to end the disjoint
+ * state when the soft/hard-stop action is complete
+ */
+void kbase_job_check_enter_disjoint(struct kbase_device *kbdev, u32 action,
+ base_jd_core_req core_reqs, struct kbase_jd_atom *target_katom)
+{
+ u32 hw_action = action & JS_COMMAND_MASK;
+
+ /* For hard-stop, don't enter if hard-stop not allowed */
+ if (hw_action == JS_COMMAND_HARD_STOP &&
+ !kbasep_hard_stop_allowed(kbdev, core_reqs))
+ return;
+
+ /* For soft-stop, don't enter if soft-stop not allowed, or isn't
+ * causing disjoint */
+ if (hw_action == JS_COMMAND_SOFT_STOP &&
+ !(kbasep_soft_stop_allowed(kbdev, target_katom) &&
+ (action & JS_COMMAND_SW_CAUSES_DISJOINT)))
+ return;
+
+ /* Nothing to do if already logged disjoint state on this atom */
+ if (target_katom->atom_flags & KBASE_KATOM_FLAG_IN_DISJOINT)
+ return;
+
+ target_katom->atom_flags |= KBASE_KATOM_FLAG_IN_DISJOINT;
+ kbase_disjoint_state_up(kbdev);
+}
+
+/**
+ * kbase_job_check_leave_disjoint - potentially leave disjoint state
+ * @kbdev: kbase device
+ * @target_katom: atom which is finishing
+ *
+ * Work out whether to leave disjoint state when finishing an atom that was
+ * originated by kbase_job_check_enter_disjoint().
+ */
+void kbase_job_check_leave_disjoint(struct kbase_device *kbdev,
+ struct kbase_jd_atom *target_katom)
+{
+ if (target_katom->atom_flags & KBASE_KATOM_FLAG_IN_DISJOINT) {
+ target_katom->atom_flags &= ~KBASE_KATOM_FLAG_IN_DISJOINT;
+ kbase_disjoint_state_down(kbdev);
+ }
+}
+
+
+#if KBASE_GPU_RESET_EN
+static void kbase_debug_dump_registers(struct kbase_device *kbdev)
+{
+ int i;
+
+ kbase_io_history_dump(kbdev);
+
+ dev_err(kbdev->dev, "Register state:");
+ dev_err(kbdev->dev, " GPU_IRQ_RAWSTAT=0x%08x GPU_STATUS=0x%08x",
+ kbase_reg_read(kbdev, GPU_CONTROL_REG(GPU_IRQ_RAWSTAT), NULL),
+ kbase_reg_read(kbdev, GPU_CONTROL_REG(GPU_STATUS), NULL));
+ dev_err(kbdev->dev, " JOB_IRQ_RAWSTAT=0x%08x JOB_IRQ_JS_STATE=0x%08x JOB_IRQ_THROTTLE=0x%08x",
+ kbase_reg_read(kbdev, JOB_CONTROL_REG(JOB_IRQ_RAWSTAT), NULL),
+ kbase_reg_read(kbdev, JOB_CONTROL_REG(JOB_IRQ_JS_STATE), NULL),
+ kbase_reg_read(kbdev, JOB_CONTROL_REG(JOB_IRQ_THROTTLE), NULL));
+ for (i = 0; i < 3; i++) {
+ dev_err(kbdev->dev, " JS%d_STATUS=0x%08x JS%d_HEAD_LO=0x%08x",
+ i, kbase_reg_read(kbdev, JOB_SLOT_REG(i, JS_STATUS),
+ NULL),
+ i, kbase_reg_read(kbdev, JOB_SLOT_REG(i, JS_HEAD_LO),
+ NULL));
+ }
+ dev_err(kbdev->dev, " MMU_IRQ_RAWSTAT=0x%08x GPU_FAULTSTATUS=0x%08x",
+ kbase_reg_read(kbdev, MMU_REG(MMU_IRQ_RAWSTAT), NULL),
+ kbase_reg_read(kbdev, GPU_CONTROL_REG(GPU_FAULTSTATUS), NULL));
+ dev_err(kbdev->dev, " GPU_IRQ_MASK=0x%08x JOB_IRQ_MASK=0x%08x MMU_IRQ_MASK=0x%08x",
+ kbase_reg_read(kbdev, GPU_CONTROL_REG(GPU_IRQ_MASK), NULL),
+ kbase_reg_read(kbdev, JOB_CONTROL_REG(JOB_IRQ_MASK), NULL),
+ kbase_reg_read(kbdev, MMU_REG(MMU_IRQ_MASK), NULL));
+ dev_err(kbdev->dev, " PWR_OVERRIDE0=0x%08x PWR_OVERRIDE1=0x%08x",
+ kbase_reg_read(kbdev, GPU_CONTROL_REG(PWR_OVERRIDE0), NULL),
+ kbase_reg_read(kbdev, GPU_CONTROL_REG(PWR_OVERRIDE1), NULL));
+ dev_err(kbdev->dev, " SHADER_CONFIG=0x%08x L2_MMU_CONFIG=0x%08x",
+ kbase_reg_read(kbdev, GPU_CONTROL_REG(SHADER_CONFIG), NULL),
+ kbase_reg_read(kbdev, GPU_CONTROL_REG(L2_MMU_CONFIG), NULL));
+}
+
+static void kbasep_reset_timeout_worker(struct work_struct *data)
+{
+ unsigned long flags;
+ struct kbase_device *kbdev;
+ int i;
+ ktime_t end_timestamp = ktime_get();
+ struct kbasep_js_device_data *js_devdata;
+ bool try_schedule = false;
+ bool silent = false;
+ u32 max_loops = KBASE_CLEAN_CACHE_MAX_LOOPS;
+
+ KBASE_DEBUG_ASSERT(data);
+
+ kbdev = container_of(data, struct kbase_device,
+ hwaccess.backend.reset_work);
+
+ KBASE_DEBUG_ASSERT(kbdev);
+ js_devdata = &kbdev->js_data;
+
+ if (atomic_read(&kbdev->hwaccess.backend.reset_gpu) ==
+ KBASE_RESET_GPU_SILENT)
+ silent = true;
+
+ KBASE_TRACE_ADD(kbdev, JM_BEGIN_RESET_WORKER, NULL, NULL, 0u, 0);
+
+ /* Suspend vinstr.
+ * This call will block until vinstr is suspended. */
+ kbase_vinstr_suspend(kbdev->vinstr_ctx);
+
+ /* Make sure the timer has completed - this cannot be done from
+ * interrupt context, so this cannot be done within
+ * kbasep_try_reset_gpu_early. */
+ hrtimer_cancel(&kbdev->hwaccess.backend.reset_timer);
+
+ if (kbase_pm_context_active_handle_suspend(kbdev,
+ KBASE_PM_SUSPEND_HANDLER_DONT_REACTIVATE)) {
+ /* This would re-activate the GPU. Since it's already idle,
+ * there's no need to reset it */
+ atomic_set(&kbdev->hwaccess.backend.reset_gpu,
+ KBASE_RESET_GPU_NOT_PENDING);
+ kbase_disjoint_state_down(kbdev);
+ wake_up(&kbdev->hwaccess.backend.reset_wait);
+ return;
+ }
+
+ KBASE_DEBUG_ASSERT(kbdev->irq_reset_flush == false);
+
+ spin_lock_irqsave(&kbdev->hwcnt.lock, flags);
+ spin_lock(&kbdev->hwaccess_lock);
+ spin_lock(&kbdev->mmu_mask_change);
+ /* We're about to flush out the IRQs and their bottom halves */
+ kbdev->irq_reset_flush = true;
+
+ /* Disable IRQs to prevent IRQ handlers from kicking in after releasing the
+ * spinlock; this also clears any outstanding interrupts */
+ kbase_pm_disable_interrupts_nolock(kbdev);
+
+ spin_unlock(&kbdev->mmu_mask_change);
+ spin_unlock(&kbdev->hwaccess_lock);
+ spin_unlock_irqrestore(&kbdev->hwcnt.lock, flags);
+
+ /* Ensure that any IRQ handlers have finished.
+ * This must be done without holding any locks the IRQ handlers will take */
+ kbase_synchronize_irqs(kbdev);
+
+ /* Flush out any in-flight work items */
+ kbase_flush_mmu_wqs(kbdev);
+
+ /* The flush has completed so reset the active indicator */
+ kbdev->irq_reset_flush = false;
+
+ if (kbase_hw_has_issue(kbdev, BASE_HW_ISSUE_TMIX_8463)) {
+ /* Ensure that L2 is not transitioning when we send the reset
+ * command */
+ while (--max_loops && kbase_pm_get_trans_cores(kbdev,
+ KBASE_PM_CORE_L2))
+ ;
+
+ WARN(!max_loops, "L2 power transition timed out while trying to reset\n");
+ }
+
+ mutex_lock(&kbdev->pm.lock);
+ /* We hold the pm lock, so there ought to be a current policy */
+ KBASE_DEBUG_ASSERT(kbdev->pm.backend.pm_current_policy);
+
+ /* All slots have been soft-stopped and we've waited
+ * SOFT_STOP_RESET_TIMEOUT for the slots to clear. At this point we
+ * assume that anything that is still left on the GPU is stuck there and
+ * we'll kill it when we reset the GPU */
+
+ if (!silent)
+ dev_err(kbdev->dev, "Resetting GPU (allowing up to %d ms)",
+ RESET_TIMEOUT);
+
+ /* Output the state of some interesting registers to help in the
+ * debugging of GPU resets */
+ if (!silent)
+ kbase_debug_dump_registers(kbdev);
+
+ /* Reset the GPU */
+ kbase_pm_init_hw(kbdev, 0);
+
+ /* Complete any jobs that were still on the GPU */
+ spin_lock_irqsave(&kbdev->hwaccess_lock, flags);
+ kbase_backend_reset(kbdev, &end_timestamp);
+ kbase_pm_metrics_update(kbdev, NULL);
+ spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);
+
+ mutex_unlock(&kbdev->pm.lock);
+
+ mutex_lock(&js_devdata->runpool_mutex);
+
+ mutex_lock(&kbdev->mmu_hw_mutex);
+ /* Reprogram the GPU's MMU */
+ for (i = 0; i < kbdev->nr_hw_address_spaces; i++) {
+ spin_lock_irqsave(&kbdev->hwaccess_lock, flags);
+
+ if (js_devdata->runpool_irq.per_as_data[i].kctx)
+ kbase_mmu_update(
+ js_devdata->runpool_irq.per_as_data[i].kctx);
+ else
+ kbase_mmu_disable_as(kbdev, i);
+
+ spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);
+ }
+ mutex_unlock(&kbdev->mmu_hw_mutex);
+
+ kbase_pm_enable_interrupts(kbdev);
+
+ atomic_set(&kbdev->hwaccess.backend.reset_gpu,
+ KBASE_RESET_GPU_NOT_PENDING);
+
+ kbase_disjoint_state_down(kbdev);
+
+ wake_up(&kbdev->hwaccess.backend.reset_wait);
+ if (!silent)
+ dev_err(kbdev->dev, "Reset complete");
+
+ if (js_devdata->nr_contexts_pullable > 0 && !kbdev->poweroff_pending)
+ try_schedule = true;
+
+ mutex_unlock(&js_devdata->runpool_mutex);
+
+ mutex_lock(&kbdev->pm.lock);
+
+ /* Find out what cores are required now */
+ kbase_pm_update_cores_state(kbdev);
+
+ /* Synchronously request and wait for those cores, because if
+ * instrumentation is enabled it would need them immediately. */
+ kbase_pm_check_transitions_sync(kbdev);
+
+ mutex_unlock(&kbdev->pm.lock);
+
+ /* Try submitting some jobs to restart processing */
+ if (try_schedule) {
+ KBASE_TRACE_ADD(kbdev, JM_SUBMIT_AFTER_RESET, NULL, NULL, 0u,
+ 0);
+ kbase_js_sched_all(kbdev);
+ }
+
+ kbase_pm_context_idle(kbdev);
+
+ /* Release vinstr */
+ kbase_vinstr_resume(kbdev->vinstr_ctx);
+
+ KBASE_TRACE_ADD(kbdev, JM_END_RESET_WORKER, NULL, NULL, 0u, 0);
+}
+
+static enum hrtimer_restart kbasep_reset_timer_callback(struct hrtimer *timer)
+{
+ struct kbase_device *kbdev = container_of(timer, struct kbase_device,
+ hwaccess.backend.reset_timer);
+
+ KBASE_DEBUG_ASSERT(kbdev);
+
+ /* Reset still pending? */
+ if (atomic_cmpxchg(&kbdev->hwaccess.backend.reset_gpu,
+ KBASE_RESET_GPU_COMMITTED, KBASE_RESET_GPU_HAPPENING) ==
+ KBASE_RESET_GPU_COMMITTED)
+ queue_work(kbdev->hwaccess.backend.reset_workq,
+ &kbdev->hwaccess.backend.reset_work);
+
+ return HRTIMER_NORESTART;
+}
+
+/*
+ * If all jobs are evicted from the GPU then we can reset the GPU
+ * immediately instead of waiting for the timeout to elapse
+ */
+
+static void kbasep_try_reset_gpu_early_locked(struct kbase_device *kbdev)
+{
+ int i;
+ int pending_jobs = 0;
+
+ KBASE_DEBUG_ASSERT(kbdev);
+
+ /* Count the number of jobs */
+ for (i = 0; i < kbdev->gpu_props.num_job_slots; i++)
+ pending_jobs += kbase_backend_nr_atoms_submitted(kbdev, i);
+
+ if (pending_jobs > 0) {
+ /* There are still jobs on the GPU - wait */
+ return;
+ }
+
+ /* To prevent getting incorrect registers when dumping a failed job,
+ * skip early reset.
+ */
+ if (kbdev->job_fault_debug != false)
+ return;
+
+ /* Check that the reset has been committed to (i.e. kbase_reset_gpu has
+ * been called), and that no other thread beat this thread to starting
+ * the reset */
+ if (atomic_cmpxchg(&kbdev->hwaccess.backend.reset_gpu,
+ KBASE_RESET_GPU_COMMITTED, KBASE_RESET_GPU_HAPPENING) !=
+ KBASE_RESET_GPU_COMMITTED) {
+ /* Reset has already occurred */
+ return;
+ }
+
+ queue_work(kbdev->hwaccess.backend.reset_workq,
+ &kbdev->hwaccess.backend.reset_work);
+}
+
+static void kbasep_try_reset_gpu_early(struct kbase_device *kbdev)
+{
+ unsigned long flags;
+ struct kbasep_js_device_data *js_devdata;
+
+ js_devdata = &kbdev->js_data;
+ spin_lock_irqsave(&kbdev->hwaccess_lock, flags);
+ kbasep_try_reset_gpu_early_locked(kbdev);
+ spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);
+}
+
+/**
+ * kbase_prepare_to_reset_gpu_locked - Prepare for resetting the GPU
+ * @kbdev: kbase device
+ *
+ * This function just soft-stops all the slots to ensure that as many jobs as
+ * possible are saved.
+ *
+ * Return:
+ * The function returns a boolean which should be interpreted as follows:
+ * true - Prepared for reset, kbase_reset_gpu_locked should be called.
+ * false - Another thread is performing a reset, kbase_reset_gpu_locked should
+ * not be called.
+ */
+bool kbase_prepare_to_reset_gpu_locked(struct kbase_device *kbdev)
+{
+ int i;
+
+ KBASE_DEBUG_ASSERT(kbdev);
+
+ if (atomic_cmpxchg(&kbdev->hwaccess.backend.reset_gpu,
+ KBASE_RESET_GPU_NOT_PENDING,
+ KBASE_RESET_GPU_PREPARED) !=
+ KBASE_RESET_GPU_NOT_PENDING) {
+ /* Some other thread is already resetting the GPU */
+ return false;
+ }
+
+ kbase_disjoint_state_up(kbdev);
+
+ for (i = 0; i < kbdev->gpu_props.num_job_slots; i++)
+ kbase_job_slot_softstop(kbdev, i, NULL);
+
+ return true;
+}
+
+bool kbase_prepare_to_reset_gpu(struct kbase_device *kbdev)
+{
+ unsigned long flags;
+ bool ret;
+ struct kbasep_js_device_data *js_devdata;
+
+ js_devdata = &kbdev->js_data;
+ spin_lock_irqsave(&kbdev->hwaccess_lock, flags);
+ ret = kbase_prepare_to_reset_gpu_locked(kbdev);
+ spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);
+
+ return ret;
+}
+KBASE_EXPORT_TEST_API(kbase_prepare_to_reset_gpu);
+
+/*
+ * This function should be called after kbase_prepare_to_reset_gpu if it
+ * returns true. It should never be called without a corresponding call to
+ * kbase_prepare_to_reset_gpu.
+ *
+ * After this function is called (or not called if kbase_prepare_to_reset_gpu
+ * returned false), the caller should wait for
+ * kbdev->hwaccess.backend.reset_wait to be signalled to know when the reset
+ * has completed.
+ */
+void kbase_reset_gpu(struct kbase_device *kbdev)
+{
+ KBASE_DEBUG_ASSERT(kbdev);
+
+ /* Note this is an assert/atomic_set because it is a software issue for
+ * a race to be occurring here */
+ KBASE_DEBUG_ASSERT(atomic_read(&kbdev->hwaccess.backend.reset_gpu) ==
+ KBASE_RESET_GPU_PREPARED);
+ atomic_set(&kbdev->hwaccess.backend.reset_gpu,
+ KBASE_RESET_GPU_COMMITTED);
+
+ dev_err(kbdev->dev, "Preparing to soft-reset GPU: Waiting (up to %d ms) for all jobs to complete soft-stop\n",
+ kbdev->reset_timeout_ms);
+
+ hrtimer_start(&kbdev->hwaccess.backend.reset_timer,
+ HR_TIMER_DELAY_MSEC(kbdev->reset_timeout_ms),
+ HRTIMER_MODE_REL);
+
+ /* Try resetting early */
+ kbasep_try_reset_gpu_early(kbdev);
+}
+KBASE_EXPORT_TEST_API(kbase_reset_gpu);
+
+void kbase_reset_gpu_locked(struct kbase_device *kbdev)
+{
+ KBASE_DEBUG_ASSERT(kbdev);
+
+ /* Note this is an assert/atomic_set because it is a software issue for
+ * a race to be occurring here */
+ KBASE_DEBUG_ASSERT(atomic_read(&kbdev->hwaccess.backend.reset_gpu) ==
+ KBASE_RESET_GPU_PREPARED);
+ atomic_set(&kbdev->hwaccess.backend.reset_gpu,
+ KBASE_RESET_GPU_COMMITTED);
+
+ dev_err(kbdev->dev, "Preparing to soft-reset GPU: Waiting (up to %d ms) for all jobs to complete soft-stop\n",
+ kbdev->reset_timeout_ms);
+ hrtimer_start(&kbdev->hwaccess.backend.reset_timer,
+ HR_TIMER_DELAY_MSEC(kbdev->reset_timeout_ms),
+ HRTIMER_MODE_REL);
+
+ /* Try resetting early */
+ kbasep_try_reset_gpu_early_locked(kbdev);
+}
+
+void kbase_reset_gpu_silent(struct kbase_device *kbdev)
+{
+ if (atomic_cmpxchg(&kbdev->hwaccess.backend.reset_gpu,
+ KBASE_RESET_GPU_NOT_PENDING,
+ KBASE_RESET_GPU_SILENT) !=
+ KBASE_RESET_GPU_NOT_PENDING) {
+ /* Some other thread is already resetting the GPU */
+ return;
+ }
+
+ kbase_disjoint_state_up(kbdev);
+
+ queue_work(kbdev->hwaccess.backend.reset_workq,
+ &kbdev->hwaccess.backend.reset_work);
+}
+
+bool kbase_reset_gpu_active(struct kbase_device *kbdev)
+{
+ if (atomic_read(&kbdev->hwaccess.backend.reset_gpu) ==
+ KBASE_RESET_GPU_NOT_PENDING)
+ return false;
+
+ return true;
+}
+#endif /* KBASE_GPU_RESET_EN */
diff --git a/midgard-0.r2p0/backend/gpu/mali_kbase_jm_internal.h b/midgard-0.r2p0/backend/gpu/mali_kbase_jm_internal.h
new file mode 100755
index 0000000..89b1288
--- /dev/null
+++ b/midgard-0.r2p0/backend/gpu/mali_kbase_jm_internal.h
@@ -0,0 +1,155 @@
+/*
+ *
+ * (C) COPYRIGHT 2011-2016 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained
+ * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
+ * Boston, MA 02110-1301, USA.
+ *
+ */
+
+
+
+
+
+/*
+ * Job Manager backend-specific low-level APIs.
+ */
+
+#ifndef _KBASE_JM_HWACCESS_H_
+#define _KBASE_JM_HWACCESS_H_
+
+#include <mali_kbase_hw.h>
+#include <mali_kbase_debug.h>
+#include <linux/atomic.h>
+
+#include <backend/gpu/mali_kbase_jm_rb.h>
+
+/**
+ * kbase_job_submit_nolock() - Submit a job to a certain job-slot
+ * @kbdev: Device pointer
+ * @katom: Atom to submit
+ * @js: Job slot to submit on
+ *
+ * The caller must check kbasep_jm_is_submit_slots_free() != false before
+ * calling this.
+ *
+ * The following locking conditions are made on the caller:
+ * - it must hold the hwaccess_lock
+ */
+void kbase_job_submit_nolock(struct kbase_device *kbdev,
+ struct kbase_jd_atom *katom, int js);
+
+/**
+ * kbase_job_done_slot() - Complete the head job on a particular job-slot
+ * @kbdev: Device pointer
+ * @s: Job slot
+ * @completion_code: Completion code of job reported by GPU
+ * @job_tail: Job tail address reported by GPU
+ * @end_timestamp: Timestamp of job completion
+ */
+void kbase_job_done_slot(struct kbase_device *kbdev, int s, u32 completion_code,
+ u64 job_tail, ktime_t *end_timestamp);
+
+#ifdef CONFIG_GPU_TRACEPOINTS
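+/*
+ * Note: kbasep_make_job_slot_string() below writes into js_string with
+ * sprintf(), so callers must pass a buffer large enough for the formatted
+ * name (the call sites in this backend appear to use char js_string[16]).
+ */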
+static inline char *kbasep_make_job_slot_string(int js, char *js_string)
+{
+ sprintf(js_string, "job_slot_%i", js);
+ return js_string;
+}
+#endif
+
+/**
+ * kbase_job_hw_submit() - Submit a job to the GPU
+ * @kbdev: Device pointer
+ * @katom: Atom to submit
+ * @js: Job slot to submit on
+ *
+ * The caller must check kbasep_jm_is_submit_slots_free() != false before
+ * calling this.
+ *
+ * The following locking conditions are made on the caller:
+ * - it must hold the hwaccess_lock
+ */
+void kbase_job_hw_submit(struct kbase_device *kbdev,
+ struct kbase_jd_atom *katom,
+ int js);
+
+/**
+ * kbasep_job_slot_soft_or_hard_stop_do_action() - Perform a soft or hard stop
+ * on the specified atom
+ * @kbdev: Device pointer
+ * @js: Job slot to stop on
+ * @action: The action to perform, either JSn_COMMAND_HARD_STOP or
+ * JSn_COMMAND_SOFT_STOP
+ * @core_reqs: Core requirements of atom to stop
+ * @target_katom: Atom to stop
+ *
+ * The following locking conditions are made on the caller:
+ * - it must hold the hwaccess_lock
+ */
+void kbasep_job_slot_soft_or_hard_stop_do_action(struct kbase_device *kbdev,
+ int js,
+ u32 action,
+ base_jd_core_req core_reqs,
+ struct kbase_jd_atom *target_katom);
+
+/**
+ * kbase_backend_soft_hard_stop_slot() - Soft or hard stop jobs on a given job
+ * slot belonging to a given context.
+ * @kbdev: Device pointer
+ * @kctx: Context pointer. May be NULL
+ * @js: Job slot to hard stop
+ * @katom: Specific atom to stop. May be NULL
+ * @action: The action to perform, either JSn_COMMAND_HARD_STOP or
+ * JSn_COMMAND_SOFT_STOP
+ *
+ * If no context is provided then all jobs on the slot will be soft or hard
+ * stopped.
+ *
+ * If a katom is provided then only that specific atom will be stopped. In this
+ * case the kctx parameter is ignored.
+ *
+ * Jobs that are on the slot but are not yet on the GPU will be unpulled and
+ * returned to the job scheduler.
+ *
+ * Return: true if an atom was stopped, false otherwise
+ */
+bool kbase_backend_soft_hard_stop_slot(struct kbase_device *kbdev,
+ struct kbase_context *kctx,
+ int js,
+ struct kbase_jd_atom *katom,
+ u32 action);
+
+/**
+ * kbase_job_slot_init - Initialise job slot framework
+ * @kbdev: Device pointer
+ *
+ * Called on driver initialisation
+ *
+ * Return: 0 on success
+ */
+int kbase_job_slot_init(struct kbase_device *kbdev);
+
+/**
+ * kbase_job_slot_halt - Halt the job slot framework
+ * @kbdev: Device pointer
+ *
+ * Should prevent any further job slot processing
+ */
+void kbase_job_slot_halt(struct kbase_device *kbdev);
+
+/**
+ * kbase_job_slot_term - Terminate job slot framework
+ * @kbdev: Device pointer
+ *
+ * Called on driver termination
+ */
+void kbase_job_slot_term(struct kbase_device *kbdev);
+
+#endif /* _KBASE_JM_HWACCESS_H_ */
diff --git a/midgard-0.r2p0/backend/gpu/mali_kbase_jm_rb.c b/midgard-0.r2p0/backend/gpu/mali_kbase_jm_rb.c
new file mode 100755
index 0000000..d7b4d3f
--- /dev/null
+++ b/midgard-0.r2p0/backend/gpu/mali_kbase_jm_rb.c
@@ -0,0 +1,1818 @@
+/*
+ *
+ * (C) COPYRIGHT 2014-2016 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained
+ * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
+ * Boston, MA 02110-1301, USA.
+ *
+ */
+
+
+
+
+/*
+ * Register-based HW access backend specific APIs
+ */
+
+#include <mali_kbase.h>
+#include <mali_kbase_hwaccess_jm.h>
+#include <mali_kbase_jm.h>
+#include <mali_kbase_js.h>
+#include <mali_kbase_tlstream.h>
+#include <mali_kbase_10969_workaround.h>
+#include <backend/gpu/mali_kbase_cache_policy_backend.h>
+#include <backend/gpu/mali_kbase_device_internal.h>
+#include <backend/gpu/mali_kbase_jm_internal.h>
+#include <backend/gpu/mali_kbase_js_affinity.h>
+#include <backend/gpu/mali_kbase_pm_internal.h>
+
+/* Return whether the specified ringbuffer is empty. HW access lock must be
+ * held */
+#define SLOT_RB_EMPTY(rb) (rb->write_idx == rb->read_idx)
+/* Return number of atoms currently in the specified ringbuffer. HW access lock
+ * must be held */
+#define SLOT_RB_ENTRIES(rb) (int)(s8)(rb->write_idx - rb->read_idx)
+
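+/*
+ * A short note on the ringbuffer scheme used here (a sketch based on the
+ * macros above): read_idx and write_idx are free-running counters, the
+ * occupancy is their difference interpreted as a signed 8-bit value (hence
+ * the s8 cast), and entries are indexed with (idx & SLOT_RB_MASK). This
+ * assumes SLOT_RB_SIZE (defined elsewhere) is a power of two with
+ * SLOT_RB_MASK == SLOT_RB_SIZE - 1, so the masking wraps correctly.
+ */
+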
+static void kbase_gpu_release_atom(struct kbase_device *kbdev,
+ struct kbase_jd_atom *katom,
+ ktime_t *end_timestamp);
+
+/**
+ * kbase_gpu_enqueue_atom - Enqueue an atom in the HW access ringbuffer
+ * @kbdev: Device pointer
+ * @katom: Atom to enqueue
+ *
+ * Context: Caller must hold the HW access lock
+ */
+static void kbase_gpu_enqueue_atom(struct kbase_device *kbdev,
+ struct kbase_jd_atom *katom)
+{
+ struct slot_rb *rb = &kbdev->hwaccess.backend.slot_rb[katom->slot_nr];
+
+ WARN_ON(SLOT_RB_ENTRIES(rb) >= SLOT_RB_SIZE);
+
+ lockdep_assert_held(&kbdev->hwaccess_lock);
+
+ rb->entries[rb->write_idx & SLOT_RB_MASK].katom = katom;
+ rb->write_idx++;
+
+ katom->gpu_rb_state = KBASE_ATOM_GPU_RB_WAITING_BLOCKED;
+}
+
+/**
+ * kbase_gpu_dequeue_atom - Remove an atom from the HW access ringbuffer, once
+ * it has been completed
+ * @kbdev: Device pointer
+ * @js: Job slot to remove atom from
+ * @end_timestamp: Pointer to timestamp of atom completion. May be NULL, in
+ * which case current time will be used.
+ *
+ * Context: Caller must hold the HW access lock
+ *
+ * Return: Atom removed from ringbuffer
+ */
+static struct kbase_jd_atom *kbase_gpu_dequeue_atom(struct kbase_device *kbdev,
+ int js,
+ ktime_t *end_timestamp)
+{
+ struct slot_rb *rb = &kbdev->hwaccess.backend.slot_rb[js];
+ struct kbase_jd_atom *katom;
+
+ if (SLOT_RB_EMPTY(rb)) {
+ WARN(1, "GPU ringbuffer unexpectedly empty\n");
+ return NULL;
+ }
+
+ lockdep_assert_held(&kbdev->hwaccess_lock);
+
+ katom = rb->entries[rb->read_idx & SLOT_RB_MASK].katom;
+
+ kbase_gpu_release_atom(kbdev, katom, end_timestamp);
+
+ rb->read_idx++;
+
+ katom->gpu_rb_state = KBASE_ATOM_GPU_RB_NOT_IN_SLOT_RB;
+
+ kbase_js_debug_log_current_affinities(kbdev);
+
+ return katom;
+}
+
+struct kbase_jd_atom *kbase_gpu_inspect(struct kbase_device *kbdev, int js,
+ int idx)
+{
+ struct slot_rb *rb = &kbdev->hwaccess.backend.slot_rb[js];
+
+ lockdep_assert_held(&kbdev->hwaccess_lock);
+
+ if ((SLOT_RB_ENTRIES(rb) - 1) < idx)
+ return NULL; /* idx out of range */
+
+ return rb->entries[(rb->read_idx + idx) & SLOT_RB_MASK].katom;
+}
+
+struct kbase_jd_atom *kbase_backend_inspect_head(struct kbase_device *kbdev,
+ int js)
+{
+ return kbase_gpu_inspect(kbdev, js, 0);
+}
+
+struct kbase_jd_atom *kbase_backend_inspect_tail(struct kbase_device *kbdev,
+ int js)
+{
+ struct slot_rb *rb = &kbdev->hwaccess.backend.slot_rb[js];
+
+ if (SLOT_RB_EMPTY(rb))
+ return NULL;
+
+ return rb->entries[(rb->write_idx - 1) & SLOT_RB_MASK].katom;
+}
+
+/**
+ * kbase_gpu_atoms_submitted - Inspect whether a slot has any atoms currently
+ * on the GPU
+ * @kbdev: Device pointer
+ * @js: Job slot to inspect
+ *
+ * Return: true if there are atoms on the GPU for slot js,
+ * false otherwise
+ */
+static bool kbase_gpu_atoms_submitted(struct kbase_device *kbdev, int js)
+{
+ int i;
+
+ lockdep_assert_held(&kbdev->hwaccess_lock);
+
+ for (i = 0; i < SLOT_RB_SIZE; i++) {
+ struct kbase_jd_atom *katom = kbase_gpu_inspect(kbdev, js, i);
+
+ if (!katom)
+ return false;
+ if (katom->gpu_rb_state == KBASE_ATOM_GPU_RB_SUBMITTED ||
+ katom->gpu_rb_state == KBASE_ATOM_GPU_RB_READY)
+ return true;
+ }
+
+ return false;
+}
+
+/**
+ * kbase_gpu_atoms_submitted_any() - Inspect whether there are any atoms
+ * currently on the GPU
+ * @kbdev: Device pointer
+ *
+ * Return: true if there are any atoms on the GPU, false otherwise
+ */
+static bool kbase_gpu_atoms_submitted_any(struct kbase_device *kbdev)
+{
+ int js;
+ int i;
+
+ lockdep_assert_held(&kbdev->hwaccess_lock);
+
+ for (js = 0; js < kbdev->gpu_props.num_job_slots; js++) {
+ for (i = 0; i < SLOT_RB_SIZE; i++) {
+ struct kbase_jd_atom *katom = kbase_gpu_inspect(kbdev, js, i);
+
+ if (katom && katom->gpu_rb_state == KBASE_ATOM_GPU_RB_SUBMITTED)
+ return true;
+ }
+ }
+ return false;
+}
+
+int kbase_backend_nr_atoms_submitted(struct kbase_device *kbdev, int js)
+{
+ int nr = 0;
+ int i;
+
+ lockdep_assert_held(&kbdev->hwaccess_lock);
+
+ for (i = 0; i < SLOT_RB_SIZE; i++) {
+ struct kbase_jd_atom *katom = kbase_gpu_inspect(kbdev, js, i);
+
+ if (katom && (katom->gpu_rb_state ==
+ KBASE_ATOM_GPU_RB_SUBMITTED))
+ nr++;
+ }
+
+ return nr;
+}
+
+int kbase_backend_nr_atoms_on_slot(struct kbase_device *kbdev, int js)
+{
+ int nr = 0;
+ int i;
+
+ lockdep_assert_held(&kbdev->hwaccess_lock);
+
+ for (i = 0; i < SLOT_RB_SIZE; i++) {
+ if (kbase_gpu_inspect(kbdev, js, i))
+ nr++;
+ }
+
+ return nr;
+}
+
+static int kbase_gpu_nr_atoms_on_slot_min(struct kbase_device *kbdev, int js,
+ enum kbase_atom_gpu_rb_state min_rb_state)
+{
+ int nr = 0;
+ int i;
+
+ lockdep_assert_held(&kbdev->hwaccess_lock);
+
+ for (i = 0; i < SLOT_RB_SIZE; i++) {
+ struct kbase_jd_atom *katom = kbase_gpu_inspect(kbdev, js, i);
+
+ if (katom && (katom->gpu_rb_state >= min_rb_state))
+ nr++;
+ }
+
+ return nr;
+}
+
+/**
+ * check_secure_atom - Check if the given atom is in the given secure state and
+ * has a ringbuffer state of at least
+ * KBASE_ATOM_GPU_RB_WAITING_PROTECTED_MODE_TRANSITION
+ * @katom: Atom pointer
+ * @secure: Desired secure state
+ *
+ * Return: true if atom is in the given state, false otherwise
+ */
+static bool check_secure_atom(struct kbase_jd_atom *katom, bool secure)
+{
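+ /* For boolean inputs the check below is effectively
+ * "kbase_jd_katom_is_protected(katom) == secure", written out
+ * explicitly. */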
+ if (katom->gpu_rb_state >=
+ KBASE_ATOM_GPU_RB_WAITING_PROTECTED_MODE_TRANSITION &&
+ ((kbase_jd_katom_is_protected(katom) && secure) ||
+ (!kbase_jd_katom_is_protected(katom) && !secure)))
+ return true;
+
+ return false;
+}
+
+/**
+ * kbase_gpu_check_secure_atoms - Check if there are any atoms in the given
+ * secure state in the ringbuffers of at least
+ * state
+ * KBASE_ATOM_GPU_RB_WAITING_PROTECTED_MODE_TRANSITION
+ * @kbdev: Device pointer
+ * @secure: Desired secure state
+ *
+ * Return: true if any atoms are in the given state, false otherwise
+ */
+static bool kbase_gpu_check_secure_atoms(struct kbase_device *kbdev,
+ bool secure)
+{
+ int js, i;
+
+ for (js = 0; js < kbdev->gpu_props.num_job_slots; js++) {
+ for (i = 0; i < SLOT_RB_SIZE; i++) {
+ struct kbase_jd_atom *katom = kbase_gpu_inspect(kbdev,
+ js, i);
+
+ if (katom) {
+ if (check_secure_atom(katom, secure))
+ return true;
+ }
+ }
+ }
+
+ return false;
+}
+
+int kbase_backend_slot_free(struct kbase_device *kbdev, int js)
+{
+ if (atomic_read(&kbdev->hwaccess.backend.reset_gpu) !=
+ KBASE_RESET_GPU_NOT_PENDING) {
+ /* The GPU is being reset - so prevent submission */
+ return 0;
+ }
+
+ return SLOT_RB_SIZE - kbase_backend_nr_atoms_on_slot(kbdev, js);
+}
+
+
+static void kbasep_js_job_check_deref_cores(struct kbase_device *kbdev,
+ struct kbase_jd_atom *katom);
+
+static bool kbasep_js_job_check_ref_cores(struct kbase_device *kbdev,
+ int js,
+ struct kbase_jd_atom *katom)
+{
+ /* The most recently checked affinity. Having this at this scope allows
+ * us to guarantee that we've checked the affinity in this function
+ * call.
+ */
+ u64 recently_chosen_affinity = 0;
+ bool chosen_affinity = false;
+ bool retry;
+
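+ /* Sketch of the intended progression through the switch below:
+ * NO_CORES_REQUESTED -> WAITING_FOR_REQUESTED_CORES ->
+ * RECHECK_AFFINITY -> CHECK_AFFINITY_VIOLATIONS -> READY, dropping
+ * back to an earlier state (and possibly retrying) whenever cores or
+ * a valid affinity are not yet available. */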
+ do {
+ retry = false;
+
+ /* NOTE: The following uses a number of FALLTHROUGHs to optimize
+ * the calls to this function. Ending of the function is
+ * indicated by BREAK OUT */
+ switch (katom->coreref_state) {
+ /* State when job is first attempted to be run */
+ case KBASE_ATOM_COREREF_STATE_NO_CORES_REQUESTED:
+ KBASE_DEBUG_ASSERT(katom->affinity == 0);
+
+ /* Compute affinity */
+ if (false == kbase_js_choose_affinity(
+ &recently_chosen_affinity, kbdev, katom,
+ js)) {
+ /* No cores are currently available */
+ /* *** BREAK OUT: No state transition *** */
+ break;
+ }
+
+ chosen_affinity = true;
+
+ /* Request the cores */
+ kbase_pm_request_cores(kbdev,
+ katom->core_req & BASE_JD_REQ_T,
+ recently_chosen_affinity);
+
+ katom->affinity = recently_chosen_affinity;
+
+ /* Proceed to next state */
+ katom->coreref_state =
+ KBASE_ATOM_COREREF_STATE_WAITING_FOR_REQUESTED_CORES;
+
+ /* ***FALLTHROUGH: TRANSITION TO HIGHER STATE*** */
+
+ case KBASE_ATOM_COREREF_STATE_WAITING_FOR_REQUESTED_CORES:
+ {
+ enum kbase_pm_cores_ready cores_ready;
+
+ KBASE_DEBUG_ASSERT(katom->affinity != 0 ||
+ (katom->core_req & BASE_JD_REQ_T));
+
+ cores_ready = kbase_pm_register_inuse_cores(
+ kbdev,
+ katom->core_req & BASE_JD_REQ_T,
+ katom->affinity);
+ if (cores_ready == KBASE_NEW_AFFINITY) {
+ /* Affinity no longer valid - return to
+ * previous state */
+ kbasep_js_job_check_deref_cores(kbdev,
+ katom);
+ KBASE_TRACE_ADD_SLOT_INFO(kbdev,
+ JS_CORE_REF_REGISTER_INUSE_FAILED,
+ katom->kctx, katom,
+ katom->jc, js,
+ (u32) katom->affinity);
+ /* *** BREAK OUT: Return to previous
+ * state, retry *** */
+ retry = true;
+ break;
+ }
+ if (cores_ready == KBASE_CORES_NOT_READY) {
+ /* Stay in this state and return, to
+ * retry at this state later */
+ KBASE_TRACE_ADD_SLOT_INFO(kbdev,
+ JS_CORE_REF_REGISTER_INUSE_FAILED,
+ katom->kctx, katom,
+ katom->jc, js,
+ (u32) katom->affinity);
+ /* *** BREAK OUT: No state transition
+ * *** */
+ break;
+ }
+ /* Proceed to next state */
+ katom->coreref_state =
+ KBASE_ATOM_COREREF_STATE_RECHECK_AFFINITY;
+ }
+
+ /* ***FALLTHROUGH: TRANSITION TO HIGHER STATE*** */
+
+ case KBASE_ATOM_COREREF_STATE_RECHECK_AFFINITY:
+ KBASE_DEBUG_ASSERT(katom->affinity != 0 ||
+ (katom->core_req & BASE_JD_REQ_T));
+
+ /* Optimize out choosing the affinity twice in the same
+ * function call */
+ if (chosen_affinity == false) {
+ /* See if the affinity changed since a previous
+ * call. */
+ if (false == kbase_js_choose_affinity(
+ &recently_chosen_affinity,
+ kbdev, katom, js)) {
+ /* No cores are currently available */
+ kbasep_js_job_check_deref_cores(kbdev,
+ katom);
+ KBASE_TRACE_ADD_SLOT_INFO(kbdev,
+ JS_CORE_REF_REQUEST_ON_RECHECK_FAILED,
+ katom->kctx, katom,
+ katom->jc, js,
+ (u32) recently_chosen_affinity);
+ /* *** BREAK OUT: Transition to lower
+ * state *** */
+ break;
+ }
+ chosen_affinity = true;
+ }
+
+ /* Now see if this requires a different set of cores */
+ if (recently_chosen_affinity != katom->affinity) {
+ enum kbase_pm_cores_ready cores_ready;
+
+ kbase_pm_request_cores(kbdev,
+ katom->core_req & BASE_JD_REQ_T,
+ recently_chosen_affinity);
+
+ /* Register new cores whilst we still hold the
+ * old ones, to minimize power transitions */
+ cores_ready =
+ kbase_pm_register_inuse_cores(kbdev,
+ katom->core_req & BASE_JD_REQ_T,
+ recently_chosen_affinity);
+ kbasep_js_job_check_deref_cores(kbdev, katom);
+
+ /* Fixup the state that was reduced by
+ * deref_cores: */
+ katom->coreref_state =
+ KBASE_ATOM_COREREF_STATE_RECHECK_AFFINITY;
+ katom->affinity = recently_chosen_affinity;
+ if (cores_ready == KBASE_NEW_AFFINITY) {
+ /* Affinity no longer valid - return to
+ * previous state */
+ katom->coreref_state =
+ KBASE_ATOM_COREREF_STATE_WAITING_FOR_REQUESTED_CORES;
+
+ kbasep_js_job_check_deref_cores(kbdev,
+ katom);
+
+ KBASE_TRACE_ADD_SLOT_INFO(kbdev,
+ JS_CORE_REF_REGISTER_INUSE_FAILED,
+ katom->kctx, katom,
+ katom->jc, js,
+ (u32) katom->affinity);
+ /* *** BREAK OUT: Return to previous
+ * state, retry *** */
+ retry = true;
+ break;
+ }
+ /* Now might be waiting for powerup again, with
+ * a new affinity */
+ if (cores_ready == KBASE_CORES_NOT_READY) {
+ /* Return to previous state */
+ katom->coreref_state =
+ KBASE_ATOM_COREREF_STATE_WAITING_FOR_REQUESTED_CORES;
+ KBASE_TRACE_ADD_SLOT_INFO(kbdev,
+ JS_CORE_REF_REGISTER_ON_RECHECK_FAILED,
+ katom->kctx, katom,
+ katom->jc, js,
+ (u32) katom->affinity);
+ /* *** BREAK OUT: Transition to lower
+ * state *** */
+ break;
+ }
+ }
+ /* Proceed to next state */
+ katom->coreref_state =
+ KBASE_ATOM_COREREF_STATE_CHECK_AFFINITY_VIOLATIONS;
+
+ /* ***FALLTHROUGH: TRANSITION TO HIGHER STATE*** */
+ case KBASE_ATOM_COREREF_STATE_CHECK_AFFINITY_VIOLATIONS:
+ KBASE_DEBUG_ASSERT(katom->affinity != 0 ||
+ (katom->core_req & BASE_JD_REQ_T));
+ KBASE_DEBUG_ASSERT(katom->affinity ==
+ recently_chosen_affinity);
+
+ /* Note: this is where the caller must've taken the
+ * hwaccess_lock */
+
+ /* Check for affinity violations - if there are any,
+ * then we just ask the caller to requeue and try again
+ * later */
+ if (kbase_js_affinity_would_violate(kbdev, js,
+ katom->affinity) != false) {
+ /* Return to previous state */
+ katom->coreref_state =
+ KBASE_ATOM_COREREF_STATE_RECHECK_AFFINITY;
+ /* *** BREAK OUT: Transition to lower state ***
+ */
+ KBASE_TRACE_ADD_SLOT_INFO(kbdev,
+ JS_CORE_REF_AFFINITY_WOULD_VIOLATE,
+ katom->kctx, katom, katom->jc, js,
+ (u32) katom->affinity);
+ break;
+ }
+
+ /* No affinity violations would result, so the cores are
+ * ready */
+ katom->coreref_state = KBASE_ATOM_COREREF_STATE_READY;
+ /* *** BREAK OUT: Cores Ready *** */
+ break;
+
+ default:
+ KBASE_DEBUG_ASSERT_MSG(false,
+ "Unhandled kbase_atom_coreref_state %d",
+ katom->coreref_state);
+ break;
+ }
+ } while (retry != false);
+
+ return (katom->coreref_state == KBASE_ATOM_COREREF_STATE_READY);
+}
+
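+/*
+ * Release or unrequest whatever core references the atom currently holds
+ * for its coreref_state (the inverse of the request path above), then drop
+ * the atom back to NO_CORES_REQUESTED with a cleared affinity.
+ */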
+static void kbasep_js_job_check_deref_cores(struct kbase_device *kbdev,
+ struct kbase_jd_atom *katom)
+{
+ KBASE_DEBUG_ASSERT(kbdev != NULL);
+ KBASE_DEBUG_ASSERT(katom != NULL);
+
+ switch (katom->coreref_state) {
+ case KBASE_ATOM_COREREF_STATE_READY:
+ /* State where atom was submitted to the HW - just proceed to
+ * power-down */
+ KBASE_DEBUG_ASSERT(katom->affinity != 0 ||
+ (katom->core_req & BASE_JD_REQ_T));
+
+ /* *** FALLTHROUGH *** */
+
+ case KBASE_ATOM_COREREF_STATE_RECHECK_AFFINITY:
+ /* State where cores were registered */
+ KBASE_DEBUG_ASSERT(katom->affinity != 0 ||
+ (katom->core_req & BASE_JD_REQ_T));
+ kbase_pm_release_cores(kbdev, katom->core_req & BASE_JD_REQ_T,
+ katom->affinity);
+
+ break;
+
+ case KBASE_ATOM_COREREF_STATE_WAITING_FOR_REQUESTED_CORES:
+ /* State where cores were requested, but not registered */
+ KBASE_DEBUG_ASSERT(katom->affinity != 0 ||
+ (katom->core_req & BASE_JD_REQ_T));
+ kbase_pm_unrequest_cores(kbdev, katom->core_req & BASE_JD_REQ_T,
+ katom->affinity);
+ break;
+
+ case KBASE_ATOM_COREREF_STATE_NO_CORES_REQUESTED:
+ /* Initial state - nothing required */
+ KBASE_DEBUG_ASSERT(katom->affinity == 0);
+ break;
+
+ default:
+ KBASE_DEBUG_ASSERT_MSG(false,
+ "Unhandled coreref_state: %d",
+ katom->coreref_state);
+ break;
+ }
+
+ katom->affinity = 0;
+ katom->coreref_state = KBASE_ATOM_COREREF_STATE_NO_CORES_REQUESTED;
+}
+
+static void kbasep_js_job_check_deref_cores_nokatom(struct kbase_device *kbdev,
+ base_jd_core_req core_req, u64 affinity,
+ enum kbase_atom_coreref_state coreref_state)
+{
+ KBASE_DEBUG_ASSERT(kbdev != NULL);
+
+ switch (coreref_state) {
+ case KBASE_ATOM_COREREF_STATE_READY:
+ /* State where atom was submitted to the HW - just proceed to
+ * power-down */
+ KBASE_DEBUG_ASSERT(affinity != 0 ||
+ (core_req & BASE_JD_REQ_T));
+
+ /* *** FALLTHROUGH *** */
+
+ case KBASE_ATOM_COREREF_STATE_RECHECK_AFFINITY:
+ /* State where cores were registered */
+ KBASE_DEBUG_ASSERT(affinity != 0 ||
+ (core_req & BASE_JD_REQ_T));
+ kbase_pm_release_cores(kbdev, core_req & BASE_JD_REQ_T,
+ affinity);
+
+ break;
+
+ case KBASE_ATOM_COREREF_STATE_WAITING_FOR_REQUESTED_CORES:
+ /* State where cores were requested, but not registered */
+ KBASE_DEBUG_ASSERT(affinity != 0 ||
+ (core_req & BASE_JD_REQ_T));
+ kbase_pm_unrequest_cores(kbdev, core_req & BASE_JD_REQ_T,
+ affinity);
+ break;
+
+ case KBASE_ATOM_COREREF_STATE_NO_CORES_REQUESTED:
+ /* Initial state - nothing required */
+ KBASE_DEBUG_ASSERT(affinity == 0);
+ break;
+
+ default:
+ KBASE_DEBUG_ASSERT_MSG(false,
+ "Unhandled coreref_state: %d",
+ coreref_state);
+ break;
+ }
+}
+
+static void kbase_gpu_release_atom(struct kbase_device *kbdev,
+ struct kbase_jd_atom *katom,
+ ktime_t *end_timestamp)
+{
+ switch (katom->gpu_rb_state) {
+ case KBASE_ATOM_GPU_RB_NOT_IN_SLOT_RB:
+ /* Should be impossible */
+ WARN(1, "Attempting to release atom not in ringbuffer\n");
+ break;
+
+ case KBASE_ATOM_GPU_RB_SUBMITTED:
+ /* Inform power management at start/finish of atom so it can
+ * update its GPU utilisation metrics. Mark atom as not
+ * submitted beforehand. */
+ katom->gpu_rb_state = KBASE_ATOM_GPU_RB_READY;
+ kbase_pm_metrics_update(kbdev, end_timestamp);
+
+ if (katom->core_req & BASE_JD_REQ_PERMON)
+ kbase_pm_release_gpu_cycle_counter_nolock(kbdev);
+ /* ***FALLTHROUGH: TRANSITION TO LOWER STATE*** */
+
+ case KBASE_ATOM_GPU_RB_READY:
+ /* ***FALLTHROUGH: TRANSITION TO LOWER STATE*** */
+
+ case KBASE_ATOM_GPU_RB_WAITING_AFFINITY:
+ kbase_js_affinity_release_slot_cores(kbdev, katom->slot_nr,
+ katom->affinity);
+ /* ***FALLTHROUGH: TRANSITION TO LOWER STATE*** */
+
+ case KBASE_ATOM_GPU_RB_WAITING_FOR_CORE_AVAILABLE:
+ break;
+
+ case KBASE_ATOM_GPU_RB_WAITING_PROTECTED_MODE_TRANSITION:
+ if (katom->protected_state.enter !=
+ KBASE_ATOM_ENTER_PROTECTED_CHECK ||
+ katom->protected_state.exit !=
+ KBASE_ATOM_EXIT_PROTECTED_CHECK)
+ kbdev->protected_mode_transition = false;
+
+ if (kbase_jd_katom_is_protected(katom) &&
+ (katom->protected_state.enter ==
+ KBASE_ATOM_ENTER_PROTECTED_IDLE_L2))
+ kbase_vinstr_resume(kbdev->vinstr_ctx);
+
+ /* ***FALLTHROUGH: TRANSITION TO LOWER STATE*** */
+
+ case KBASE_ATOM_GPU_RB_WAITING_PROTECTED_MODE_PREV:
+ /* ***FALLTHROUGH: TRANSITION TO LOWER STATE*** */
+
+ case KBASE_ATOM_GPU_RB_WAITING_BLOCKED:
+ /* ***FALLTHROUGH: TRANSITION TO LOWER STATE*** */
+
+ case KBASE_ATOM_GPU_RB_RETURN_TO_JS:
+ break;
+ }
+
+ katom->gpu_rb_state = KBASE_ATOM_GPU_RB_WAITING_BLOCKED;
+}
+
+static void kbase_gpu_mark_atom_for_return(struct kbase_device *kbdev,
+ struct kbase_jd_atom *katom)
+{
+ kbase_gpu_release_atom(kbdev, katom, NULL);
+ katom->gpu_rb_state = KBASE_ATOM_GPU_RB_RETURN_TO_JS;
+}
+
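+/*
+ * Sketch of the BASE_HW_ISSUE_8987 workaround implemented below: affected
+ * GPUs must not run slot 2 jobs concurrently with slot 0/1 jobs, so when
+ * both groups have work pending, submission alternates between them using
+ * backend->rmu_workaround_flag.
+ */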
+static inline bool kbase_gpu_rmu_workaround(struct kbase_device *kbdev, int js)
+{
+ struct kbase_backend_data *backend = &kbdev->hwaccess.backend;
+ bool slot_busy[3];
+
+ if (!kbase_hw_has_issue(kbdev, BASE_HW_ISSUE_8987))
+ return true;
+ slot_busy[0] = kbase_gpu_nr_atoms_on_slot_min(kbdev, 0,
+ KBASE_ATOM_GPU_RB_WAITING_AFFINITY);
+ slot_busy[1] = kbase_gpu_nr_atoms_on_slot_min(kbdev, 1,
+ KBASE_ATOM_GPU_RB_WAITING_AFFINITY);
+ slot_busy[2] = kbase_gpu_nr_atoms_on_slot_min(kbdev, 2,
+ KBASE_ATOM_GPU_RB_WAITING_AFFINITY);
+
+ if ((js == 2 && !(slot_busy[0] || slot_busy[1])) ||
+ (js != 2 && !slot_busy[2]))
+ return true;
+
+ /* Don't submit slot 2 atom while GPU has jobs on slots 0/1 */
+ if (js == 2 && (kbase_gpu_atoms_submitted(kbdev, 0) ||
+ kbase_gpu_atoms_submitted(kbdev, 1) ||
+ backend->rmu_workaround_flag))
+ return false;
+
+ /* Don't submit slot 0/1 atom while GPU has jobs on slot 2 */
+ if (js != 2 && (kbase_gpu_atoms_submitted(kbdev, 2) ||
+ !backend->rmu_workaround_flag))
+ return false;
+
+ backend->rmu_workaround_flag = !backend->rmu_workaround_flag;
+
+ return true;
+}
+
+static inline bool kbase_gpu_in_protected_mode(struct kbase_device *kbdev)
+{
+ return kbdev->protected_mode;
+}
+
+static int kbase_gpu_protected_mode_enter(struct kbase_device *kbdev)
+{
+ int err = -EINVAL;
+
+ lockdep_assert_held(&kbdev->hwaccess_lock);
+
+ WARN_ONCE(!kbdev->protected_ops,
+ "Cannot enter protected mode: protected callbacks not specified.\n");
+
+ /*
+ * When entering into protected mode, we must ensure that the
+ * GPU is not operating in coherent mode as well. This is to
+ * ensure that no protected memory can be leaked.
+ */
+ if (kbdev->system_coherency == COHERENCY_ACE)
+ kbase_cache_set_coherency_mode(kbdev, COHERENCY_ACE_LITE);
+
+ if (kbdev->protected_ops) {
+ /* Switch GPU to protected mode */
+ err = kbdev->protected_ops->protected_mode_enter(kbdev);
+
+ if (err)
+ dev_warn(kbdev->dev, "Failed to enable protected mode: %d\n",
+ err);
+ else
+ kbdev->protected_mode = true;
+ }
+
+ return err;
+}
+
+static int kbase_gpu_protected_mode_reset(struct kbase_device *kbdev)
+{
+ lockdep_assert_held(&kbdev->hwaccess_lock);
+
+ WARN_ONCE(!kbdev->protected_ops,
+ "Cannot exit protected mode: protected callbacks not specified.\n");
+
+ if (!kbdev->protected_ops)
+ return -EINVAL;
+
+ kbase_reset_gpu_silent(kbdev);
+
+ return 0;
+}
+
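+/*
+ * Sketch of the enter-protected-mode sequence driven by the switch below:
+ * CHECK (sanity checks, mark the transition as started) -> VINSTR (suspend
+ * vinstr, or retry later) -> IDLE_L2 (on ACE-coherent systems, wait for the
+ * L2 to power down) -> FINISHED (switch the GPU into protected mode).
+ * Returning -EAGAIN leaves the atom in its current state so the step is
+ * retried on a later slot update.
+ */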
+static int kbase_jm_enter_protected_mode(struct kbase_device *kbdev,
+ struct kbase_jd_atom **katom, int idx, int js)
+{
+ int err = 0;
+
+ switch (katom[idx]->protected_state.enter) {
+ case KBASE_ATOM_ENTER_PROTECTED_CHECK:
+ /* The checks in KBASE_ATOM_GPU_RB_WAITING_PROTECTED_MODE_PREV
+ * should ensure that we are not already transitioning, and that
+ * there are no atoms currently on the GPU. */
+ WARN_ON(kbdev->protected_mode_transition);
+ WARN_ON(kbase_gpu_atoms_submitted_any(kbdev));
+
+ kbdev->protected_mode_transition = true;
+ katom[idx]->protected_state.enter =
+ KBASE_ATOM_ENTER_PROTECTED_VINSTR;
+
+ /* ***FALLTHROUGH: TRANSITION TO HIGHER STATE*** */
+
+ case KBASE_ATOM_ENTER_PROTECTED_VINSTR:
+ if (kbase_vinstr_try_suspend(kbdev->vinstr_ctx) < 0) {
+ /*
+ * We can't switch now because
+ * the vinstr core state switch
+ * is not done yet.
+ */
+ return -EAGAIN;
+ }
+
+ /* Once this point is reached, the GPU must
+ * either be switched to protected mode or
+ * vinstr must be re-enabled. */
+
+ /*
+ * Not in correct mode, begin protected mode switch.
+ * Entering protected mode requires us to power down the L2,
+ * and drop out of fully coherent mode.
+ */
+ katom[idx]->protected_state.enter =
+ KBASE_ATOM_ENTER_PROTECTED_IDLE_L2;
+
+ kbase_pm_update_cores_state_nolock(kbdev);
+
+ /* ***FALLTHROUGH: TRANSITION TO HIGHER STATE*** */
+
+ case KBASE_ATOM_ENTER_PROTECTED_IDLE_L2:
+ /* Avoid unnecessary waiting on non-ACE platforms. */
+ if (kbdev->current_gpu_coherency_mode == COHERENCY_ACE) {
+ if (kbase_pm_get_ready_cores(kbdev, KBASE_PM_CORE_L2) ||
+ kbase_pm_get_trans_cores(kbdev, KBASE_PM_CORE_L2)) {
+ /*
+ * The L2 is still powered, wait for all the users to
+ * finish with it before doing the actual reset.
+ */
+ return -EAGAIN;
+ }
+ }
+
+ katom[idx]->protected_state.enter =
+ KBASE_ATOM_ENTER_PROTECTED_FINISHED;
+
+ /* ***FALLTHROUGH: TRANSITION TO HIGHER STATE*** */
+
+ case KBASE_ATOM_ENTER_PROTECTED_FINISHED:
+
+ /* No jobs running, so we can switch GPU mode right now. */
+ err = kbase_gpu_protected_mode_enter(kbdev);
+
+ /*
+ * Regardless of result, we are no longer transitioning
+ * the GPU.
+ */
+ kbdev->protected_mode_transition = false;
+
+ if (err) {
+ /*
+ * Failed to switch into protected mode, resume
+ * vinstr core and fail atom.
+ */
+ kbase_vinstr_resume(kbdev->vinstr_ctx);
+ katom[idx]->event_code = BASE_JD_EVENT_JOB_INVALID;
+ kbase_gpu_mark_atom_for_return(kbdev, katom[idx]);
+ /* Only return if head atom or previous atom
+ * already removed - as atoms must be returned
+ * in order. */
+ if (idx == 0 || katom[0]->gpu_rb_state ==
+ KBASE_ATOM_GPU_RB_NOT_IN_SLOT_RB) {
+ kbase_gpu_dequeue_atom(kbdev, js, NULL);
+ kbase_jm_return_atom_to_js(kbdev, katom[idx]);
+ }
+ return -EINVAL;
+ }
+
+ /* Protected mode sanity checks. */
+ KBASE_DEBUG_ASSERT_MSG(
+ kbase_jd_katom_is_protected(katom[idx]) ==
+ kbase_gpu_in_protected_mode(kbdev),
+ "Protected mode of atom (%d) doesn't match protected mode of GPU (%d)",
+ kbase_jd_katom_is_protected(katom[idx]),
+ kbase_gpu_in_protected_mode(kbdev));
+ katom[idx]->gpu_rb_state =
+ KBASE_ATOM_GPU_RB_READY;
+ }
+
+ return 0;
+}
+
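+/*
+ * Sketch of the exit-protected-mode sequence driven by the switch below:
+ * CHECK -> IDLE_L2 (wait for the L2 to power down) -> RESET (issue a
+ * silent GPU reset) -> RESET_WAIT (wait for the reset to complete before
+ * clearing the protected mode flags).
+ */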
+static int kbase_jm_exit_protected_mode(struct kbase_device *kbdev,
+ struct kbase_jd_atom **katom, int idx, int js)
+{
+ int err = 0;
+
+
+ switch (katom[idx]->protected_state.exit) {
+ case KBASE_ATOM_EXIT_PROTECTED_CHECK:
+ /* The checks in KBASE_ATOM_GPU_RB_WAITING_PROTECTED_MODE_PREV
+ * should ensure that we are not already transitioning, and that
+ * there are no atoms currently on the GPU. */
+ WARN_ON(kbdev->protected_mode_transition);
+ WARN_ON(kbase_gpu_atoms_submitted_any(kbdev));
+
+ /*
+ * Exiting protected mode requires a reset, but first the L2
+ * needs to be powered down to ensure it's not active when the
+ * reset is issued.
+ */
+ katom[idx]->protected_state.exit =
+ KBASE_ATOM_EXIT_PROTECTED_IDLE_L2;
+
+ kbdev->protected_mode_transition = true;
+ kbase_pm_update_cores_state_nolock(kbdev);
+
+ /* ***FALLTHROUGH: TRANSITION TO HIGHER STATE*** */
+ case KBASE_ATOM_EXIT_PROTECTED_IDLE_L2:
+ if (kbase_pm_get_ready_cores(kbdev, KBASE_PM_CORE_L2) ||
+ kbase_pm_get_trans_cores(kbdev, KBASE_PM_CORE_L2)) {
+ /*
+ * The L2 is still powered, wait for all the users to
+ * finish with it before doing the actual reset.
+ */
+ return -EAGAIN;
+ }
+ katom[idx]->protected_state.exit =
+ KBASE_ATOM_EXIT_PROTECTED_RESET;
+
+ /* ***FALLTHROUGH: TRANSITION TO HIGHER STATE*** */
+
+ case KBASE_ATOM_EXIT_PROTECTED_RESET:
+ /* Issue the reset to the GPU */
+ err = kbase_gpu_protected_mode_reset(kbdev);
+
+ if (err) {
+ kbdev->protected_mode_transition = false;
+
+ /* Failed to exit protected mode, fail atom */
+ katom[idx]->event_code = BASE_JD_EVENT_JOB_INVALID;
+ kbase_gpu_mark_atom_for_return(kbdev, katom[idx]);
+ /* Only return if head atom or previous atom
+ * already removed - as atoms must be returned
+ * in order */
+ if (idx == 0 || katom[0]->gpu_rb_state ==
+ KBASE_ATOM_GPU_RB_NOT_IN_SLOT_RB) {
+ kbase_gpu_dequeue_atom(kbdev, js, NULL);
+ kbase_jm_return_atom_to_js(kbdev, katom[idx]);
+ }
+
+ kbase_vinstr_resume(kbdev->vinstr_ctx);
+
+ return -EINVAL;
+ }
+
+ katom[idx]->protected_state.exit =
+ KBASE_ATOM_EXIT_PROTECTED_RESET_WAIT;
+
+ /* ***FALLTHROUGH: TRANSITION TO HIGHER STATE*** */
+
+ case KBASE_ATOM_EXIT_PROTECTED_RESET_WAIT:
+ if (kbase_reset_gpu_active(kbdev))
+ return -EAGAIN;
+
+ kbdev->protected_mode_transition = false;
+ kbdev->protected_mode = false;
+
+ /* protected mode sanity checks */
+ KBASE_DEBUG_ASSERT_MSG(
+ kbase_jd_katom_is_protected(katom[idx]) == kbase_gpu_in_protected_mode(kbdev),
+ "Protected mode of atom (%d) doesn't match protected mode of GPU (%d)",
+ kbase_jd_katom_is_protected(katom[idx]), kbase_gpu_in_protected_mode(kbdev));
+ KBASE_DEBUG_ASSERT_MSG(
+ (kbase_jd_katom_is_protected(katom[idx]) && js == 0) ||
+ !kbase_jd_katom_is_protected(katom[idx]),
+ "Protected atom on JS%d not supported", js);
+ }
+
+ return 0;
+}
+
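+/*
+ * The per-slot state machine below advances each queued atom through
+ * WAITING_BLOCKED -> WAITING_PROTECTED_MODE_PREV ->
+ * WAITING_PROTECTED_MODE_TRANSITION -> WAITING_FOR_CORE_AVAILABLE ->
+ * WAITING_AFFINITY -> READY -> SUBMITTED, falling through to the next state
+ * when the current state's conditions are met and breaking out when they
+ * are not, so a blocked atom simply waits for a later slot update.
+ */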
+void kbase_backend_slot_update(struct kbase_device *kbdev)
+{
+ int js;
+
+ lockdep_assert_held(&kbdev->hwaccess_lock);
+
+ for (js = 0; js < kbdev->gpu_props.num_job_slots; js++) {
+ struct kbase_jd_atom *katom[2];
+ int idx;
+
+ katom[0] = kbase_gpu_inspect(kbdev, js, 0);
+ katom[1] = kbase_gpu_inspect(kbdev, js, 1);
+ WARN_ON(katom[1] && !katom[0]);
+
+ for (idx = 0; idx < SLOT_RB_SIZE; idx++) {
+ bool cores_ready;
+ int ret;
+
+ if (!katom[idx])
+ continue;
+
+ switch (katom[idx]->gpu_rb_state) {
+ case KBASE_ATOM_GPU_RB_NOT_IN_SLOT_RB:
+ /* Should be impossible */
+ WARN(1, "Attempting to update atom not in ringbuffer\n");
+ break;
+
+ case KBASE_ATOM_GPU_RB_WAITING_BLOCKED:
+ if (katom[idx]->atom_flags &
+ KBASE_KATOM_FLAG_X_DEP_BLOCKED)
+ break;
+
+ katom[idx]->gpu_rb_state =
+ KBASE_ATOM_GPU_RB_WAITING_PROTECTED_MODE_PREV;
+
+ /* ***FALLTHROUGH: TRANSITION TO HIGHER STATE*** */
+
+ case KBASE_ATOM_GPU_RB_WAITING_PROTECTED_MODE_PREV:
+ if (kbase_gpu_check_secure_atoms(kbdev,
+ !kbase_jd_katom_is_protected(
+ katom[idx])))
+ break;
+
+ if (kbdev->protected_mode_transition)
+ break;
+
+ katom[idx]->gpu_rb_state =
+ KBASE_ATOM_GPU_RB_WAITING_PROTECTED_MODE_TRANSITION;
+
+ /* ***FALLTHROUGH: TRANSITION TO HIGHER STATE*** */
+
+ case KBASE_ATOM_GPU_RB_WAITING_PROTECTED_MODE_TRANSITION:
+
+ /*
+ * Exiting protected mode must be done before
+ * the references on the cores are taken, as a
+ * power-down of the L2 is required, which
+ * can't happen after the references for this
+ * atom are taken.
+ */
+
+ if (!kbase_gpu_in_protected_mode(kbdev) &&
+ kbase_jd_katom_is_protected(katom[idx])) {
+ /* Atom needs to transition into protected mode. */
+ ret = kbase_jm_enter_protected_mode(kbdev,
+ katom, idx, js);
+ if (ret)
+ break;
+ } else if (kbase_gpu_in_protected_mode(kbdev) &&
+ !kbase_jd_katom_is_protected(katom[idx])) {
+ /* Atom needs to transition out of protected mode. */
+ ret = kbase_jm_exit_protected_mode(kbdev,
+ katom, idx, js);
+ if (ret)
+ break;
+ }
+ katom[idx]->protected_state.exit =
+ KBASE_ATOM_EXIT_PROTECTED_CHECK;
+
+ /* Atom needs no protected mode transition. */
+
+ katom[idx]->gpu_rb_state =
+ KBASE_ATOM_GPU_RB_WAITING_FOR_CORE_AVAILABLE;
+
+ /* ***FALLTHROUGH: TRANSITION TO HIGHER STATE*** */
+
+ case KBASE_ATOM_GPU_RB_WAITING_FOR_CORE_AVAILABLE:
+ if (katom[idx]->will_fail_event_code) {
+ kbase_gpu_mark_atom_for_return(kbdev,
+ katom[idx]);
+ /* Set EVENT_DONE so this atom will be
+ completed, not unpulled. */
+ katom[idx]->event_code =
+ BASE_JD_EVENT_DONE;
+ /* Only return if head atom or previous
+ * atom already removed - as atoms must
+ * be returned in order. */
+ if (idx == 0 || katom[0]->gpu_rb_state ==
+ KBASE_ATOM_GPU_RB_NOT_IN_SLOT_RB) {
+ kbase_gpu_dequeue_atom(kbdev, js, NULL);
+ kbase_jm_return_atom_to_js(kbdev, katom[idx]);
+ }
+ break;
+ }
+
+ cores_ready =
+ kbasep_js_job_check_ref_cores(kbdev, js,
+ katom[idx]);
+
+ if (katom[idx]->event_code ==
+ BASE_JD_EVENT_PM_EVENT) {
+ katom[idx]->gpu_rb_state =
+ KBASE_ATOM_GPU_RB_RETURN_TO_JS;
+ break;
+ }
+
+ if (!cores_ready)
+ break;
+
+ kbase_js_affinity_retain_slot_cores(kbdev, js,
+ katom[idx]->affinity);
+ katom[idx]->gpu_rb_state =
+ KBASE_ATOM_GPU_RB_WAITING_AFFINITY;
+
+ /* ***FALLTHROUGH: TRANSITION TO HIGHER STATE*** */
+
+ case KBASE_ATOM_GPU_RB_WAITING_AFFINITY:
+ if (!kbase_gpu_rmu_workaround(kbdev, js))
+ break;
+
+ katom[idx]->gpu_rb_state =
+ KBASE_ATOM_GPU_RB_READY;
+
+ /* ***FALLTHROUGH: TRANSITION TO HIGHER STATE*** */
+
+ case KBASE_ATOM_GPU_RB_READY:
+
+ /* Only submit if head atom or previous atom
+ * already submitted */
+ if (idx == 1 &&
+ (katom[0]->gpu_rb_state !=
+ KBASE_ATOM_GPU_RB_SUBMITTED &&
+ katom[0]->gpu_rb_state !=
+ KBASE_ATOM_GPU_RB_NOT_IN_SLOT_RB))
+ break;
+
+ /* Check if this job needs the cycle counter
+ * enabled before submission */
+ if (katom[idx]->core_req & BASE_JD_REQ_PERMON)
+ kbase_pm_request_gpu_cycle_counter_l2_is_on(
+ kbdev);
+
+ kbase_job_hw_submit(kbdev, katom[idx], js);
+ katom[idx]->gpu_rb_state =
+ KBASE_ATOM_GPU_RB_SUBMITTED;
+
+ /* Inform power management at start/finish of
+ * atom so it can update its GPU utilisation
+ * metrics. */
+ kbase_pm_metrics_update(kbdev,
+ &katom[idx]->start_timestamp);
+
+ /* ***FALLTHROUGH: TRANSITION TO HIGHER STATE*** */
+
+ case KBASE_ATOM_GPU_RB_SUBMITTED:
+ /* Atom submitted to HW, nothing else to do */
+ break;
+
+ case KBASE_ATOM_GPU_RB_RETURN_TO_JS:
+ /* Only return if head atom or previous atom
+ * already removed - as atoms must be returned
+ * in order */
+ if (idx == 0 || katom[0]->gpu_rb_state ==
+ KBASE_ATOM_GPU_RB_NOT_IN_SLOT_RB) {
+ kbase_gpu_dequeue_atom(kbdev, js, NULL);
+ kbase_jm_return_atom_to_js(kbdev,
+ katom[idx]);
+ }
+ break;
+ }
+ }
+ }
+
+ /* Warn if PRLAM-8987 affinity restrictions are violated */
+ if (kbase_hw_has_issue(kbdev, BASE_HW_ISSUE_8987))
+ WARN_ON((kbase_gpu_atoms_submitted(kbdev, 0) ||
+ kbase_gpu_atoms_submitted(kbdev, 1)) &&
+ kbase_gpu_atoms_submitted(kbdev, 2));
+}
+
+
+void kbase_backend_run_atom(struct kbase_device *kbdev,
+ struct kbase_jd_atom *katom)
+{
+ lockdep_assert_held(&kbdev->hwaccess_lock);
+ kbase_gpu_enqueue_atom(kbdev, katom);
+ kbase_backend_slot_update(kbdev);
+}
+
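+/*
+ * Note on the eviction check below: the next atom can only be backed out if
+ * it is still sitting in the NEXT registers (JS_HEAD_NEXT non-zero); in
+ * that case a NOP is written to JS_COMMAND_NEXT so the job never starts,
+ * and the atom is rolled back to the READY state.
+ */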
+bool kbase_gpu_irq_evict(struct kbase_device *kbdev, int js)
+{
+ struct kbase_jd_atom *katom;
+ struct kbase_jd_atom *next_katom;
+
+ lockdep_assert_held(&kbdev->hwaccess_lock);
+
+ katom = kbase_gpu_inspect(kbdev, js, 0);
+ next_katom = kbase_gpu_inspect(kbdev, js, 1);
+
+ if (next_katom && katom->kctx == next_katom->kctx &&
+ next_katom->gpu_rb_state == KBASE_ATOM_GPU_RB_SUBMITTED &&
+ (kbase_reg_read(kbdev, JOB_SLOT_REG(js, JS_HEAD_NEXT_LO), NULL)
+ != 0 ||
+ kbase_reg_read(kbdev, JOB_SLOT_REG(js, JS_HEAD_NEXT_HI), NULL)
+ != 0)) {
+ kbase_reg_write(kbdev, JOB_SLOT_REG(js, JS_COMMAND_NEXT),
+ JS_COMMAND_NOP, NULL);
+ next_katom->gpu_rb_state = KBASE_ATOM_GPU_RB_READY;
+ return true;
+ }
+
+ return false;
+}
+
+void kbase_gpu_complete_hw(struct kbase_device *kbdev, int js,
+ u32 completion_code,
+ u64 job_tail,
+ ktime_t *end_timestamp)
+{
+ struct kbase_jd_atom *katom = kbase_gpu_inspect(kbdev, js, 0);
+ struct kbase_context *kctx = katom->kctx;
+
+ lockdep_assert_held(&kbdev->hwaccess_lock);
+
+ if (kbase_hw_has_issue(kbdev, BASE_HW_ISSUE_6787) &&
+ completion_code != BASE_JD_EVENT_DONE &&
+ !(completion_code & BASE_JD_SW_EVENT)) {
+ katom->need_cache_flush_cores_retained = katom->affinity;
+ kbase_pm_request_cores(kbdev, false, katom->affinity);
+ } else if (kbase_hw_has_issue(kbdev, BASE_HW_ISSUE_10676)) {
+ if (kbdev->gpu_props.num_core_groups > 1 &&
+ !(katom->affinity &
+ kbdev->gpu_props.props.coherency_info.group[0].core_mask
+ ) &&
+ (katom->affinity &
+ kbdev->gpu_props.props.coherency_info.group[1].core_mask
+ )) {
+ dev_info(kbdev->dev, "JD: Flushing cache due to PRLAM-10676\n");
+ katom->need_cache_flush_cores_retained =
+ katom->affinity;
+ kbase_pm_request_cores(kbdev, false,
+ katom->affinity);
+ }
+ }
+
+ katom = kbase_gpu_dequeue_atom(kbdev, js, end_timestamp);
+ kbase_timeline_job_slot_done(kbdev, katom->kctx, katom, js, 0);
+ kbase_tlstream_tl_nret_atom_lpu(
+ katom,
+ &kbdev->gpu_props.props.raw_props.js_features[
+ katom->slot_nr]);
+ kbase_tlstream_tl_nret_atom_as(katom, &kbdev->as[kctx->as_nr]);
+ kbase_tlstream_tl_nret_ctx_lpu(
+ kctx,
+ &kbdev->gpu_props.props.raw_props.js_features[
+ katom->slot_nr]);
+
+ if (completion_code == BASE_JD_EVENT_STOPPED) {
+ struct kbase_jd_atom *next_katom = kbase_gpu_inspect(kbdev, js,
+ 0);
+
+ /*
+ * Dequeue next atom from ringbuffers on same slot if required.
+ * This atom will already have been removed from the NEXT
+ * registers by kbase_backend_soft_hard_stop_slot(), to ensure that
+ * the atoms on this slot are returned in the correct order.
+ */
+ if (next_katom && katom->kctx == next_katom->kctx) {
+ kbase_gpu_dequeue_atom(kbdev, js, end_timestamp);
+ kbase_jm_return_atom_to_js(kbdev, next_katom);
+ }
+ } else if (completion_code != BASE_JD_EVENT_DONE) {
+ struct kbasep_js_device_data *js_devdata = &kbdev->js_data;
+ int i;
+
+#if KBASE_TRACE_DUMP_ON_JOB_SLOT_ERROR != 0
+ KBASE_TRACE_DUMP(kbdev);
+#endif
+ kbasep_js_clear_submit_allowed(js_devdata, katom->kctx);
+
+ /*
+ * Remove all atoms on the same context from ringbuffers. This
+ * will not remove atoms that are already on the GPU, as these
+ * are guaranteed not to have fail dependencies on the failed
+ * atom.
+ */
+ for (i = 0; i < kbdev->gpu_props.num_job_slots; i++) {
+ struct kbase_jd_atom *katom_idx0 =
+ kbase_gpu_inspect(kbdev, i, 0);
+ struct kbase_jd_atom *katom_idx1 =
+ kbase_gpu_inspect(kbdev, i, 1);
+
+ if (katom_idx0 && katom_idx0->kctx == katom->kctx &&
+ katom_idx0->gpu_rb_state !=
+ KBASE_ATOM_GPU_RB_SUBMITTED) {
+ /* Dequeue katom_idx0 from ringbuffer */
+ kbase_gpu_dequeue_atom(kbdev, i, end_timestamp);
+
+ if (katom_idx1 &&
+ katom_idx1->kctx == katom->kctx &&
+ katom_idx0->gpu_rb_state !=
+ KBASE_ATOM_GPU_RB_SUBMITTED) {
+ /* Dequeue katom_idx1 from ringbuffer */
+ kbase_gpu_dequeue_atom(kbdev, i,
+ end_timestamp);
+
+ katom_idx1->event_code =
+ BASE_JD_EVENT_STOPPED;
+ kbase_jm_return_atom_to_js(kbdev,
+ katom_idx1);
+ }
+ katom_idx0->event_code = BASE_JD_EVENT_STOPPED;
+ kbase_jm_return_atom_to_js(kbdev, katom_idx0);
+
+ } else if (katom_idx1 &&
+ katom_idx1->kctx == katom->kctx &&
+ katom_idx1->gpu_rb_state !=
+ KBASE_ATOM_GPU_RB_SUBMITTED) {
+ /* Cannot dequeue this atom yet - it will be
+ * dequeued when the atom at idx0 completes */
+ katom_idx1->event_code = BASE_JD_EVENT_STOPPED;
+ kbase_gpu_mark_atom_for_return(kbdev,
+ katom_idx1);
+ }
+ }
+ }
+
+ KBASE_TRACE_ADD_SLOT_INFO(kbdev, JM_JOB_DONE, kctx, katom, katom->jc,
+ js, completion_code);
+
+ if (job_tail != 0 && job_tail != katom->jc) {
+ bool was_updated = (job_tail != katom->jc);
+
+ /* Some of the job has been executed, so we update the job chain
+ * address to where we should resume from */
+ katom->jc = job_tail;
+ if (was_updated)
+ KBASE_TRACE_ADD_SLOT(kbdev, JM_UPDATE_HEAD, katom->kctx,
+ katom, job_tail, js);
+ }
+
+ /* Only update the event code for jobs that weren't cancelled */
+ if (katom->event_code != BASE_JD_EVENT_JOB_CANCELLED)
+ katom->event_code = (base_jd_event_code)completion_code;
+
+ kbase_device_trace_register_access(kctx, REG_WRITE,
+ JOB_CONTROL_REG(JOB_IRQ_CLEAR),
+ 1 << js);
+
+ /* Complete the job, and start new ones
+ *
+ * Also defer remaining work onto the workqueue:
+ * - Re-queue Soft-stopped jobs
+ * - For any other jobs, queue the job back into the dependency system
+ * - Schedule out the parent context if necessary, and schedule a new
+ * one in.
+ */
+#ifdef CONFIG_GPU_TRACEPOINTS
+ {
+ /* The atom in the HEAD */
+ struct kbase_jd_atom *next_katom = kbase_gpu_inspect(kbdev, js,
+ 0);
+
+ if (next_katom && next_katom->gpu_rb_state ==
+ KBASE_ATOM_GPU_RB_SUBMITTED) {
+ char js_string[16];
+
+ trace_gpu_sched_switch(kbasep_make_job_slot_string(js,
+ js_string),
+ ktime_to_ns(*end_timestamp),
+ (u32)next_katom->kctx->id, 0,
+ next_katom->work_id);
+ kbdev->hwaccess.backend.slot_rb[js].last_context =
+ next_katom->kctx;
+ } else {
+ char js_string[16];
+
+ trace_gpu_sched_switch(kbasep_make_job_slot_string(js,
+ js_string),
+ ktime_to_ns(ktime_get()), 0, 0,
+ 0);
+ kbdev->hwaccess.backend.slot_rb[js].last_context = 0;
+ }
+ }
+#endif
+
+ if (completion_code == BASE_JD_EVENT_STOPPED)
+ katom = kbase_jm_return_atom_to_js(kbdev, katom);
+ else
+ katom = kbase_jm_complete(kbdev, katom, end_timestamp);
+
+ if (katom) {
+ /* Cross-slot dependency has now become runnable. Try to submit
+ * it. */
+
+ /* Check if there are lower priority jobs to soft stop */
+ kbase_job_slot_ctx_priority_check_locked(kctx, katom);
+
+ kbase_jm_try_kick(kbdev, 1 << katom->slot_nr);
+ }
+
+ /* Job completion may have unblocked other atoms. Try to update all job
+ * slots */
+ kbase_backend_slot_update(kbdev);
+}
+
+void kbase_backend_reset(struct kbase_device *kbdev, ktime_t *end_timestamp)
+{
+ int js;
+
+ lockdep_assert_held(&kbdev->hwaccess_lock);
+
+ for (js = 0; js < kbdev->gpu_props.num_job_slots; js++) {
+ int atom_idx = 0;
+ int idx;
+
+ for (idx = 0; idx < SLOT_RB_SIZE; idx++) {
+ struct kbase_jd_atom *katom = kbase_gpu_inspect(kbdev,
+ js, atom_idx);
+ bool keep_in_jm_rb = false;
+
+ if (!katom)
+ break;
+
+ if (katom->gpu_rb_state < KBASE_ATOM_GPU_RB_SUBMITTED)
+ keep_in_jm_rb = true;
+
+ kbase_gpu_release_atom(kbdev, katom, NULL);
+
+ /*
+ * If the atom wasn't on HW when the reset was issued
+ * then leave it in the RB and next time we're kicked
+ * it will be processed again from the starting state.
+ */
+ if (keep_in_jm_rb) {
+ katom->coreref_state = KBASE_ATOM_COREREF_STATE_NO_CORES_REQUESTED;
+ katom->affinity = 0;
+ katom->protected_state.exit = KBASE_ATOM_EXIT_PROTECTED_CHECK;
+ /* As the atom was not removed, increment the
+ * index so that we read the correct atom in the
+ * next iteration. */
+ atom_idx++;
+ continue;
+ }
+
+ /*
+ * The atom was on the HW when the reset was issued
+ * all we can do is fail the atom.
+ */
+ kbase_gpu_dequeue_atom(kbdev, js, NULL);
+ katom->event_code = BASE_JD_EVENT_JOB_CANCELLED;
+ kbase_jm_complete(kbdev, katom, end_timestamp);
+ }
+ }
+
+ kbdev->protected_mode_transition = false;
+ kbdev->protected_mode = false;
+}
+
+static inline void kbase_gpu_stop_atom(struct kbase_device *kbdev,
+ int js,
+ struct kbase_jd_atom *katom,
+ u32 action)
+{
+ struct kbasep_js_device_data *js_devdata = &kbdev->js_data;
+ u32 hw_action = action & JS_COMMAND_MASK;
+
+ kbase_job_check_enter_disjoint(kbdev, action, katom->core_req, katom);
+ kbasep_job_slot_soft_or_hard_stop_do_action(kbdev, js, hw_action,
+ katom->core_req, katom);
+ kbasep_js_clear_submit_allowed(js_devdata, katom->kctx);
+}
+
+static inline void kbase_gpu_remove_atom(struct kbase_device *kbdev,
+ struct kbase_jd_atom *katom,
+ u32 action,
+ bool disjoint)
+{
+ struct kbasep_js_device_data *js_devdata = &kbdev->js_data;
+
+ katom->event_code = BASE_JD_EVENT_REMOVED_FROM_NEXT;
+ kbase_gpu_mark_atom_for_return(kbdev, katom);
+ kbasep_js_clear_submit_allowed(js_devdata, katom->kctx);
+
+ if (disjoint)
+ kbase_job_check_enter_disjoint(kbdev, action, katom->core_req,
+ katom);
+}
+
+static int should_stop_x_dep_slot(struct kbase_jd_atom *katom)
+{
+ if (katom->x_post_dep) {
+ struct kbase_jd_atom *dep_atom = katom->x_post_dep;
+
+ if (dep_atom->gpu_rb_state !=
+ KBASE_ATOM_GPU_RB_NOT_IN_SLOT_RB &&
+ dep_atom->gpu_rb_state !=
+ KBASE_ATOM_GPU_RB_RETURN_TO_JS)
+ return dep_atom->slot_nr;
+ }
+ return -1;
+}
+
+static void kbase_job_evicted(struct kbase_jd_atom *katom)
+{
+ kbase_timeline_job_slot_done(katom->kctx->kbdev, katom->kctx, katom,
+ katom->slot_nr, KBASE_JS_ATOM_DONE_EVICTED_FROM_NEXT);
+}
+
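+/*
+ * Overview of the case analysis below: the slot ringbuffer holds at most
+ * two atoms (idx0, idx1). Atoms not yet submitted to the hardware are
+ * simply dequeued or marked for return to the job scheduler, atoms already
+ * running are soft/hard-stopped, and an atom sitting in the NEXT registers
+ * is first backed out by writing a NOP to JS_COMMAND_NEXT, falling back to
+ * a stop if the GPU has already moved it to HEAD. Any cross-slot
+ * dependencies found via should_stop_x_dep_slot() are stopped recursively
+ * at the end.
+ */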
+bool kbase_backend_soft_hard_stop_slot(struct kbase_device *kbdev,
+ struct kbase_context *kctx,
+ int js,
+ struct kbase_jd_atom *katom,
+ u32 action)
+{
+ struct kbasep_js_device_data *js_devdata = &kbdev->js_data;
+
+ struct kbase_jd_atom *katom_idx0;
+ struct kbase_jd_atom *katom_idx1;
+
+ bool katom_idx0_valid, katom_idx1_valid;
+
+ bool ret = false;
+
+ int stop_x_dep_idx0 = -1, stop_x_dep_idx1 = -1;
+
+ lockdep_assert_held(&kbdev->hwaccess_lock);
+
+ katom_idx0 = kbase_gpu_inspect(kbdev, js, 0);
+ katom_idx1 = kbase_gpu_inspect(kbdev, js, 1);
+
+ if (katom) {
+ katom_idx0_valid = (katom_idx0 == katom);
+ /* If idx0 is to be removed and idx1 is on the same context,
+ * then idx1 must also be removed otherwise the atoms might be
+ * returned out of order */
+ if (katom_idx1)
+ katom_idx1_valid = (katom_idx1 == katom) ||
+ (katom_idx0_valid &&
+ (katom_idx0->kctx ==
+ katom_idx1->kctx));
+ else
+ katom_idx1_valid = false;
+ } else {
+ katom_idx0_valid = (katom_idx0 &&
+ (!kctx || katom_idx0->kctx == kctx));
+ katom_idx1_valid = (katom_idx1 &&
+ (!kctx || katom_idx1->kctx == kctx));
+ }
+
+ if (katom_idx0_valid)
+ stop_x_dep_idx0 = should_stop_x_dep_slot(katom_idx0);
+ if (katom_idx1_valid)
+ stop_x_dep_idx1 = should_stop_x_dep_slot(katom_idx1);
+
+ if (katom_idx0_valid) {
+ if (katom_idx0->gpu_rb_state != KBASE_ATOM_GPU_RB_SUBMITTED) {
+ /* Simple case - just dequeue and return */
+ kbase_gpu_dequeue_atom(kbdev, js, NULL);
+ if (katom_idx1_valid) {
+ kbase_gpu_dequeue_atom(kbdev, js, NULL);
+ katom_idx1->event_code =
+ BASE_JD_EVENT_REMOVED_FROM_NEXT;
+ kbase_jm_return_atom_to_js(kbdev, katom_idx1);
+ kbasep_js_clear_submit_allowed(js_devdata,
+ katom_idx1->kctx);
+ }
+
+ katom_idx0->event_code =
+ BASE_JD_EVENT_REMOVED_FROM_NEXT;
+ kbase_jm_return_atom_to_js(kbdev, katom_idx0);
+ kbasep_js_clear_submit_allowed(js_devdata,
+ katom_idx0->kctx);
+ } else {
+ /* katom_idx0 is on GPU */
+ if (katom_idx1 && katom_idx1->gpu_rb_state ==
+ KBASE_ATOM_GPU_RB_SUBMITTED) {
+ /* katom_idx0 and katom_idx1 are on GPU */
+
+ if (kbase_reg_read(kbdev, JOB_SLOT_REG(js,
+ JS_COMMAND_NEXT), NULL) == 0) {
+ /* idx0 has already completed - stop
+ * idx1 if needed */
+ if (katom_idx1_valid) {
+ kbase_gpu_stop_atom(kbdev, js,
+ katom_idx1,
+ action);
+ ret = true;
+ }
+ } else {
+ /* idx1 is in NEXT registers - attempt
+ * to remove */
+ kbase_reg_write(kbdev,
+ JOB_SLOT_REG(js,
+ JS_COMMAND_NEXT),
+ JS_COMMAND_NOP, NULL);
+
+ if (kbase_reg_read(kbdev,
+ JOB_SLOT_REG(js,
+ JS_HEAD_NEXT_LO), NULL)
+ != 0 ||
+ kbase_reg_read(kbdev,
+ JOB_SLOT_REG(js,
+ JS_HEAD_NEXT_HI), NULL)
+ != 0) {
+ /* idx1 removed successfully,
+ * will be handled in IRQ */
+ kbase_job_evicted(katom_idx1);
+ kbase_gpu_remove_atom(kbdev,
+ katom_idx1,
+ action, true);
+ stop_x_dep_idx1 =
+ should_stop_x_dep_slot(katom_idx1);
+
+ /* stop idx0 if still on GPU */
+ kbase_gpu_stop_atom(kbdev, js,
+ katom_idx0,
+ action);
+ ret = true;
+ } else if (katom_idx1_valid) {
+ /* idx0 has already completed,
+ * stop idx1 if needed */
+ kbase_gpu_stop_atom(kbdev, js,
+ katom_idx1,
+ action);
+ ret = true;
+ }
+ }
+ } else if (katom_idx1_valid) {
+ /* idx1 not on GPU but must be dequeued */
+
+ /* idx1 will be handled in IRQ */
+ kbase_gpu_remove_atom(kbdev, katom_idx1, action,
+ false);
+ /* stop idx0 */
+ /* This will be repeated for anything removed
+ * from the NEXT registers, since their normal
+ * flow was also interrupted, and this function
+ * might not otherwise enter the disjoint state,
+ * e.g. if we don't actually do a hard stop on
+ * the head atom */
+ kbase_gpu_stop_atom(kbdev, js, katom_idx0,
+ action);
+ ret = true;
+ } else {
+ /* no atom in idx1 */
+ /* just stop idx0 */
+ kbase_gpu_stop_atom(kbdev, js, katom_idx0,
+ action);
+ ret = true;
+ }
+ }
+ } else if (katom_idx1_valid) {
+ if (katom_idx1->gpu_rb_state != KBASE_ATOM_GPU_RB_SUBMITTED) {
+ /* Mark for return */
+ /* idx1 will be returned once idx0 completes */
+ kbase_gpu_remove_atom(kbdev, katom_idx1, action,
+ false);
+ } else {
+ /* idx1 is on GPU */
+ if (kbase_reg_read(kbdev, JOB_SLOT_REG(js,
+ JS_COMMAND_NEXT), NULL) == 0) {
+ /* idx0 has already completed - stop idx1 */
+ kbase_gpu_stop_atom(kbdev, js, katom_idx1,
+ action);
+ ret = true;
+ } else {
+ /* idx1 is in NEXT registers - attempt to
+ * remove */
+ kbase_reg_write(kbdev, JOB_SLOT_REG(js,
+ JS_COMMAND_NEXT),
+ JS_COMMAND_NOP, NULL);
+
+ if (kbase_reg_read(kbdev, JOB_SLOT_REG(js,
+ JS_HEAD_NEXT_LO), NULL) != 0 ||
+ kbase_reg_read(kbdev, JOB_SLOT_REG(js,
+ JS_HEAD_NEXT_HI), NULL) != 0) {
+ /* idx1 removed successfully, will be
+ * handled in IRQ once idx0 completes */
+ kbase_job_evicted(katom_idx1);
+ kbase_gpu_remove_atom(kbdev, katom_idx1,
+ action,
+ false);
+ } else {
+ /* idx0 has already completed - stop
+ * idx1 */
+ kbase_gpu_stop_atom(kbdev, js,
+ katom_idx1,
+ action);
+ ret = true;
+ }
+ }
+ }
+ }
+
+
+ if (stop_x_dep_idx0 != -1)
+ kbase_backend_soft_hard_stop_slot(kbdev, kctx, stop_x_dep_idx0,
+ NULL, action);
+
+ if (stop_x_dep_idx1 != -1)
+ kbase_backend_soft_hard_stop_slot(kbdev, kctx, stop_x_dep_idx1,
+ NULL, action);
+
+ return ret;
+}
+
+void kbase_gpu_cacheclean(struct kbase_device *kbdev,
+ struct kbase_jd_atom *katom)
+{
+ /* Limit the number of loops to avoid a hang if the interrupt is missed
+ */
+ u32 max_loops = KBASE_CLEAN_CACHE_MAX_LOOPS;
+ unsigned long flags;
+
+ mutex_lock(&kbdev->cacheclean_lock);
+
+ /* use GPU_COMMAND completion solution */
+ /* clean & invalidate the caches */
+ KBASE_TRACE_ADD(kbdev, CORE_GPU_CLEAN_INV_CACHES, NULL, NULL, 0u, 0);
+ kbase_reg_write(kbdev, GPU_CONTROL_REG(GPU_COMMAND),
+ GPU_COMMAND_CLEAN_INV_CACHES, NULL);
+
+ /* wait for cache flush to complete before continuing */
+ while (--max_loops &&
+ (kbase_reg_read(kbdev, GPU_CONTROL_REG(GPU_IRQ_RAWSTAT), NULL) &
+ CLEAN_CACHES_COMPLETED) == 0)
+ ;
+
+ /* clear the CLEAN_CACHES_COMPLETED irq */
+ KBASE_TRACE_ADD(kbdev, CORE_GPU_IRQ_CLEAR, NULL, NULL, 0u,
+ CLEAN_CACHES_COMPLETED);
+ kbase_reg_write(kbdev, GPU_CONTROL_REG(GPU_IRQ_CLEAR),
+ CLEAN_CACHES_COMPLETED, NULL);
+ KBASE_DEBUG_ASSERT_MSG(kbdev->hwcnt.backend.state !=
+ KBASE_INSTR_STATE_CLEANING,
+ "Instrumentation code was cleaning caches, but Job Management code cleared their IRQ - Instrumentation code will now hang.");
+
+ mutex_unlock(&kbdev->cacheclean_lock);
+
+ spin_lock_irqsave(&kbdev->hwaccess_lock, flags);
+ kbase_pm_unrequest_cores(kbdev, false,
+ katom->need_cache_flush_cores_retained);
+ spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);
+}
+
+void kbase_backend_complete_wq(struct kbase_device *kbdev,
+ struct kbase_jd_atom *katom)
+{
+ /*
+ * If cache flush required due to HW workaround then perform the flush
+ * now
+ */
+ if (katom->need_cache_flush_cores_retained) {
+ kbase_gpu_cacheclean(kbdev, katom);
+ katom->need_cache_flush_cores_retained = 0;
+ }
+
+ if (kbase_hw_has_issue(kbdev, BASE_HW_ISSUE_10969) &&
+ (katom->core_req & BASE_JD_REQ_FS) &&
+ katom->event_code == BASE_JD_EVENT_TILE_RANGE_FAULT &&
+ (katom->atom_flags & KBASE_KATOM_FLAG_BEEN_SOFT_STOPPPED) &&
+ !(katom->atom_flags & KBASE_KATOM_FLAGS_RERUN)) {
+ dev_dbg(kbdev->dev, "Soft-stopped fragment shader job got a TILE_RANGE_FAULT. Possible HW issue, trying SW workaround\n");
+ if (kbasep_10969_workaround_clamp_coordinates(katom)) {
+			/* The job had a TILE_RANGE_FAULT after it was
+			 * soft-stopped. Due to an HW issue we try to execute
+			 * the job again.
+			 */
+ dev_dbg(kbdev->dev,
+ "Clamping has been executed, try to rerun the job\n"
+ );
+ katom->event_code = BASE_JD_EVENT_STOPPED;
+ katom->atom_flags |= KBASE_KATOM_FLAGS_RERUN;
+ }
+ }
+
+ /* Clear the coreref_state now - while check_deref_cores() may not have
+ * been called yet, the caller will have taken a copy of this field. If
+ * this is not done, then if the atom is re-scheduled (following a soft
+ * stop) then the core reference would not be retaken. */
+ katom->coreref_state = KBASE_ATOM_COREREF_STATE_NO_CORES_REQUESTED;
+ katom->affinity = 0;
+}
+
+void kbase_backend_complete_wq_post_sched(struct kbase_device *kbdev,
+ base_jd_core_req core_req, u64 affinity,
+ enum kbase_atom_coreref_state coreref_state)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&kbdev->hwaccess_lock, flags);
+ kbasep_js_job_check_deref_cores_nokatom(kbdev, core_req, affinity,
+ coreref_state);
+ spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);
+
+ if (!kbdev->pm.active_count) {
+ mutex_lock(&kbdev->js_data.runpool_mutex);
+ mutex_lock(&kbdev->pm.lock);
+ kbase_pm_update_active(kbdev);
+ mutex_unlock(&kbdev->pm.lock);
+ mutex_unlock(&kbdev->js_data.runpool_mutex);
+ }
+}
+
+void kbase_gpu_dump_slots(struct kbase_device *kbdev)
+{
+ struct kbasep_js_device_data *js_devdata;
+ unsigned long flags;
+ int js;
+
+ js_devdata = &kbdev->js_data;
+
+ spin_lock_irqsave(&kbdev->hwaccess_lock, flags);
+
+ dev_info(kbdev->dev, "kbase_gpu_dump_slots:\n");
+
+ for (js = 0; js < kbdev->gpu_props.num_job_slots; js++) {
+ int idx;
+
+ for (idx = 0; idx < SLOT_RB_SIZE; idx++) {
+ struct kbase_jd_atom *katom = kbase_gpu_inspect(kbdev,
+ js,
+ idx);
+
+ if (katom)
+ dev_info(kbdev->dev,
+ " js%d idx%d : katom=%p gpu_rb_state=%d\n",
+ js, idx, katom, katom->gpu_rb_state);
+ else
+ dev_info(kbdev->dev, " js%d idx%d : empty\n",
+ js, idx);
+ }
+ }
+
+ spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);
+}
+
+
+
diff --git a/midgard-0.r2p0/backend/gpu/mali_kbase_jm_rb.h b/midgard-0.r2p0/backend/gpu/mali_kbase_jm_rb.h
new file mode 100755
index 0000000..1e0e05a
--- /dev/null
+++ b/midgard-0.r2p0/backend/gpu/mali_kbase_jm_rb.h
@@ -0,0 +1,76 @@
+/*
+ *
+ * (C) COPYRIGHT 2014-2016 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained
+ * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
+ * Boston, MA 02110-1301, USA.
+ *
+ */
+
+
+
+
+/*
+ * Register-based HW access backend specific APIs
+ */
+
+#ifndef _KBASE_HWACCESS_GPU_H_
+#define _KBASE_HWACCESS_GPU_H_
+
+#include <backend/gpu/mali_kbase_pm_internal.h>
+
+/**
+ * kbase_gpu_irq_evict - Evict an atom from a NEXT slot
+ *
+ * @kbdev: Device pointer
+ * @js: Job slot to evict from
+ *
+ * Evict the atom in the NEXT slot for the specified job slot. This function is
+ * called from the job complete IRQ handler when the previous job has failed.
+ *
+ * Return: true if job evicted from NEXT registers, false otherwise
+ */
+bool kbase_gpu_irq_evict(struct kbase_device *kbdev, int js);
+
+/**
+ * kbase_gpu_complete_hw - Complete an atom on job slot js
+ *
+ * @kbdev: Device pointer
+ * @js: Job slot that has completed
+ * @completion_code: Event code from job that has completed
+ * @job_tail: The tail address from the hardware if the job has partially
+ * completed
+ * @end_timestamp: Time of completion
+ */
+void kbase_gpu_complete_hw(struct kbase_device *kbdev, int js,
+ u32 completion_code,
+ u64 job_tail,
+ ktime_t *end_timestamp);
+
+/**
+ * kbase_gpu_inspect - Inspect the contents of the HW access ringbuffer
+ *
+ * @kbdev: Device pointer
+ * @js: Job slot to inspect
+ * @idx: Index into ringbuffer. 0 is the job currently running on
+ * the slot, 1 is the job waiting, all other values are invalid.
+ * Return: The atom at that position in the ringbuffer
+ * or NULL if no atom present
+ */
+struct kbase_jd_atom *kbase_gpu_inspect(struct kbase_device *kbdev, int js,
+ int idx);
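+
+/*
+ * Minimal usage sketch (illustration only, not an additional interface):
+ * callers hold hwaccess_lock, as kbase_gpu_dump_slots() does, and then peek
+ * at the running and waiting atoms on a slot:
+ *
+ *   struct kbase_jd_atom *running = kbase_gpu_inspect(kbdev, js, 0);
+ *   struct kbase_jd_atom *waiting = kbase_gpu_inspect(kbdev, js, 1);
+ *
+ * Either pointer may be NULL if that ringbuffer entry is empty.
+ */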
+
+/**
+ * kbase_gpu_dump_slots - Print the contents of the slot ringbuffers
+ *
+ * @kbdev: Device pointer
+ */
+void kbase_gpu_dump_slots(struct kbase_device *kbdev);
+
+#endif /* _KBASE_HWACCESS_GPU_H_ */
diff --git a/midgard-0.r2p0/backend/gpu/mali_kbase_js_affinity.c b/midgard-0.r2p0/backend/gpu/mali_kbase_js_affinity.c
new file mode 100755
index 0000000..54d8ddd
--- /dev/null
+++ b/midgard-0.r2p0/backend/gpu/mali_kbase_js_affinity.c
@@ -0,0 +1,303 @@
+/*
+ *
+ * (C) COPYRIGHT 2010-2016 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained
+ * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
+ * Boston, MA 02110-1301, USA.
+ *
+ */
+
+
+
+
+
+/*
+ * Base kernel affinity manager APIs
+ */
+
+#include <mali_kbase.h>
+#include "mali_kbase_js_affinity.h"
+#include "mali_kbase_hw.h"
+
+#include <backend/gpu/mali_kbase_pm_internal.h>
+
+
+bool kbase_js_can_run_job_on_slot_no_lock(struct kbase_device *kbdev,
+ int js)
+{
+ /*
+ * Here are the reasons for using job slot 2:
+ * - BASE_HW_ISSUE_8987 (which is entirely used for that purpose)
+ * - In absence of the above, then:
+ * - Atoms with BASE_JD_REQ_COHERENT_GROUP
+ * - But, only when there aren't contexts with
+ * KBASEP_JS_CTX_ATTR_COMPUTE_ALL_CORES, because the atoms that run on
+ * all cores on slot 1 could be blocked by those using a coherent group
+ * on slot 2
+ * - And, only when you actually have 2 or more coregroups - if you
+ * only have 1 coregroup, then having jobs for slot 2 implies they'd
+ * also be for slot 1, meaning you'll get interference from them. Jobs
+ * able to run on slot 2 could also block jobs that can only run on
+ * slot 1 (tiler jobs)
+ */
+ if (kbase_hw_has_issue(kbdev, BASE_HW_ISSUE_8987))
+ return true;
+
+ if (js != 2)
+ return true;
+
+ /* Only deal with js==2 now: */
+ if (kbdev->gpu_props.num_core_groups > 1) {
+ /* Only use slot 2 in the 2+ coregroup case */
+ if (kbasep_js_ctx_attr_is_attr_on_runpool(kbdev,
+ KBASEP_JS_CTX_ATTR_COMPUTE_ALL_CORES) ==
+ false) {
+ /* ...But only when we *don't* have atoms that run on
+ * all cores */
+
+ /* No specific check for BASE_JD_REQ_COHERENT_GROUP
+ * atoms - the policy will sort that out */
+ return true;
+ }
+ }
+
+	/* The above checks failed, so we should not use slot 2 */
+ return false;
+}
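+
+/*
+ * Summary of the decision above (illustrative restatement, no new policy):
+ *
+ *   BASE_HW_ISSUE_8987 present                            -> allowed
+ *   js != 2                                               -> allowed
+ *   js == 2, more than one core group and no context with
+ *   KBASEP_JS_CTX_ATTR_COMPUTE_ALL_CORES in the runpool   -> allowed
+ *   any other js == 2 case                                -> not allowed
+ */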
+
+/*
+ * Until a deeper modification of what the job scheduler, power manager and
+ * affinity manager will implement has been decided, this function is just an
+ * intermediate step that assumes:
+ * - all working cores will be powered on when this is called.
+ * - the largest current configuration is 2 core groups.
+ * - It has been decided not to have hardcoded values, so the low
+ *   and high cores in a core split will be evenly distributed.
+ * - Odd combinations of core requirements have been filtered out
+ *   and do not get to this function (e.g. CS+T+NSS is not
+ *   supported here).
+ * - This function is frequently called and can be optimized
+ *   (see notes in loops), but as the functionality will likely
+ *   be modified, optimization has not been addressed.
+ */
+bool kbase_js_choose_affinity(u64 * const affinity,
+ struct kbase_device *kbdev,
+ struct kbase_jd_atom *katom, int js)
+{
+ base_jd_core_req core_req = katom->core_req;
+ unsigned int num_core_groups = kbdev->gpu_props.num_core_groups;
+ u64 core_availability_mask;
+
+ lockdep_assert_held(&kbdev->hwaccess_lock);
+
+ core_availability_mask = kbase_pm_ca_get_core_mask(kbdev);
+
+ /*
+ * If no cores are currently available (core availability policy is
+ * transitioning) then fail.
+ */
+ if (0 == core_availability_mask) {
+ *affinity = 0;
+ return false;
+ }
+
+ KBASE_DEBUG_ASSERT(js >= 0);
+
+ if ((core_req & (BASE_JD_REQ_FS | BASE_JD_REQ_CS | BASE_JD_REQ_T)) ==
+ BASE_JD_REQ_T) {
+ /* If the hardware supports XAFFINITY then we'll only enable
+ * the tiler (which is the default so this is a no-op),
+ * otherwise enable shader core 0. */
+ if (!kbase_hw_has_feature(kbdev, BASE_HW_FEATURE_XAFFINITY))
+ *affinity = 1;
+ else
+ *affinity = 0;
+
+ return true;
+ }
+
+ if (1 == kbdev->gpu_props.num_cores) {
+ /* trivial case only one core, nothing to do */
+ *affinity = core_availability_mask &
+ kbdev->pm.debug_core_mask[js];
+ } else {
+ if ((core_req & (BASE_JD_REQ_COHERENT_GROUP |
+ BASE_JD_REQ_SPECIFIC_COHERENT_GROUP))) {
+ if (js == 0 || num_core_groups == 1) {
+ /* js[0] and single-core-group systems just get
+ * the first core group */
+ *affinity =
+ kbdev->gpu_props.props.coherency_info.group[0].core_mask
+ & core_availability_mask &
+ kbdev->pm.debug_core_mask[js];
+ } else {
+ /* js[1], js[2] use core groups 0, 1 for
+ * dual-core-group systems */
+ u32 core_group_idx = ((u32) js) - 1;
+
+ KBASE_DEBUG_ASSERT(core_group_idx <
+ num_core_groups);
+ *affinity =
+ kbdev->gpu_props.props.coherency_info.group[core_group_idx].core_mask
+ & core_availability_mask &
+ kbdev->pm.debug_core_mask[js];
+
+ /* If the job is specifically targeting core
+ * group 1 and the core availability policy is
+ * keeping that core group off, then fail */
+ if (*affinity == 0 && core_group_idx == 1 &&
+ kbdev->pm.backend.cg1_disabled
+ == true)
+ katom->event_code =
+ BASE_JD_EVENT_PM_EVENT;
+ }
+ } else {
+ /* All cores are available when no core split is
+ * required */
+ *affinity = core_availability_mask &
+ kbdev->pm.debug_core_mask[js];
+ }
+ }
+
+ /*
+ * If no cores are currently available in the desired core group(s)
+ * (core availability policy is transitioning) then fail.
+ */
+ if (*affinity == 0)
+ return false;
+
+ /* Enable core 0 if tiler required for hardware without XAFFINITY
+ * support (notes above) */
+ if (core_req & BASE_JD_REQ_T) {
+ if (!kbase_hw_has_feature(kbdev, BASE_HW_FEATURE_XAFFINITY))
+ *affinity = *affinity | 1;
+ }
+
+ return true;
+}
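+
+/*
+ * Two illustrative paths through kbase_js_choose_affinity() (sketches of the
+ * logic above, not additional behaviour):
+ *
+ *  - A tiler-only atom (core_req selects BASE_JD_REQ_T but neither FS nor CS)
+ *    on hardware without BASE_HW_FEATURE_XAFFINITY gets *affinity = 1, i.e.
+ *    shader core 0 is enabled alongside the tiler; with XAFFINITY the mask is
+ *    left at 0 and only the tiler is used.
+ *
+ *  - On a GPU with two core groups, a BASE_JD_REQ_COHERENT_GROUP atom
+ *    submitted to js = 2 uses core_group_idx = 1, so *affinity becomes
+ *    group[1].core_mask masked by the core availability policy and by
+ *    debug_core_mask[2].
+ */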
+
+static inline bool kbase_js_affinity_is_violating(
+ struct kbase_device *kbdev,
+ u64 *affinities)
+{
+ /* This implementation checks whether the two slots involved in Generic
+ * thread creation have intersecting affinity. This is due to micro-
+	 * architectural issues where a job in slot A targeting cores used by
+ * slot B could prevent the job in slot B from making progress until the
+ * job in slot A has completed.
+ */
+ u64 affinity_set_left;
+ u64 affinity_set_right;
+ u64 intersection;
+
+ KBASE_DEBUG_ASSERT(affinities != NULL);
+
+ affinity_set_left = affinities[1];
+
+ affinity_set_right = affinities[2];
+
+ /* A violation occurs when any bit in the left_set is also in the
+ * right_set */
+ intersection = affinity_set_left & affinity_set_right;
+
+ return (bool) (intersection != (u64) 0u);
+}
+
+bool kbase_js_affinity_would_violate(struct kbase_device *kbdev, int js,
+ u64 affinity)
+{
+ struct kbasep_js_device_data *js_devdata;
+ u64 new_affinities[BASE_JM_MAX_NR_SLOTS];
+
+ KBASE_DEBUG_ASSERT(kbdev != NULL);
+ KBASE_DEBUG_ASSERT(js < BASE_JM_MAX_NR_SLOTS);
+ js_devdata = &kbdev->js_data;
+
+ memcpy(new_affinities, js_devdata->runpool_irq.slot_affinities,
+ sizeof(js_devdata->runpool_irq.slot_affinities));
+
+ new_affinities[js] |= affinity;
+
+ return kbase_js_affinity_is_violating(kbdev, new_affinities);
+}
+
+void kbase_js_affinity_retain_slot_cores(struct kbase_device *kbdev, int js,
+ u64 affinity)
+{
+ struct kbasep_js_device_data *js_devdata;
+ u64 cores;
+
+ KBASE_DEBUG_ASSERT(kbdev != NULL);
+ KBASE_DEBUG_ASSERT(js < BASE_JM_MAX_NR_SLOTS);
+ js_devdata = &kbdev->js_data;
+
+ KBASE_DEBUG_ASSERT(kbase_js_affinity_would_violate(kbdev, js, affinity)
+ == false);
+
+ cores = affinity;
+ while (cores) {
+ int bitnum = fls64(cores) - 1;
+ u64 bit = 1ULL << bitnum;
+ s8 cnt;
+
+ cnt =
+ ++(js_devdata->runpool_irq.slot_affinity_refcount[js][bitnum]);
+
+ if (cnt == 1)
+ js_devdata->runpool_irq.slot_affinities[js] |= bit;
+
+ cores &= ~bit;
+ }
+}
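+
+/*
+ * Worked example of the loop above, assuming slot js previously held no
+ * reference to these cores: with affinity = 0x5 (cores 0 and 2), the highest
+ * set bit is peeled off on each iteration:
+ *
+ *   iteration 1: fls64(0x5) = 3 -> bitnum = 2, bit = 0x4;
+ *                refcount[js][2] becomes 1, so slot_affinities[js] |= 0x4;
+ *                cores becomes 0x1
+ *   iteration 2: fls64(0x1) = 1 -> bitnum = 0, bit = 0x1;
+ *                refcount[js][0] becomes 1, so slot_affinities[js] |= 0x1;
+ *                cores becomes 0 and the loop exits
+ *
+ * kbase_js_affinity_release_slot_cores() below walks the same bits and clears
+ * each core from slot_affinities[js] once its refcount drops back to zero.
+ */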
+
+void kbase_js_affinity_release_slot_cores(struct kbase_device *kbdev, int js,
+ u64 affinity)
+{
+ struct kbasep_js_device_data *js_devdata;
+ u64 cores;
+
+ KBASE_DEBUG_ASSERT(kbdev != NULL);
+ KBASE_DEBUG_ASSERT(js < BASE_JM_MAX_NR_SLOTS);
+ js_devdata = &kbdev->js_data;
+
+ cores = affinity;
+ while (cores) {
+ int bitnum = fls64(cores) - 1;
+ u64 bit = 1ULL << bitnum;
+ s8 cnt;
+
+ KBASE_DEBUG_ASSERT(
+ js_devdata->runpool_irq.slot_affinity_refcount[js][bitnum] > 0);
+
+ cnt =
+ --(js_devdata->runpool_irq.slot_affinity_refcount[js][bitnum]);
+
+ if (0 == cnt)
+ js_devdata->runpool_irq.slot_affinities[js] &= ~bit;
+
+ cores &= ~bit;
+ }
+}
+
+#if KBASE_TRACE_ENABLE
+void kbase_js_debug_log_current_affinities(struct kbase_device *kbdev)
+{
+ struct kbasep_js_device_data *js_devdata;
+ int slot_nr;
+
+ KBASE_DEBUG_ASSERT(kbdev != NULL);
+ js_devdata = &kbdev->js_data;
+
+ for (slot_nr = 0; slot_nr < 3; ++slot_nr)
+ KBASE_TRACE_ADD_SLOT_INFO(kbdev, JS_AFFINITY_CURRENT, NULL,
+ NULL, 0u, slot_nr,
+ (u32) js_devdata->runpool_irq.slot_affinities[slot_nr]);
+}
+#endif /* KBASE_TRACE_ENABLE */
diff --git a/midgard-0.r2p0/backend/gpu/mali_kbase_js_affinity.h b/midgard-0.r2p0/backend/gpu/mali_kbase_js_affinity.h
new file mode 100755
index 0000000..35d9781
--- /dev/null
+++ b/midgard-0.r2p0/backend/gpu/mali_kbase_js_affinity.h
@@ -0,0 +1,129 @@
+/*
+ *
+ * (C) COPYRIGHT 2011-2016 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained
+ * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
+ * Boston, MA 02110-1301, USA.
+ *
+ */
+
+
+
+
+
+/*
+ * Affinity Manager internal APIs.
+ */
+
+#ifndef _KBASE_JS_AFFINITY_H_
+#define _KBASE_JS_AFFINITY_H_
+
+/**
+ * kbase_js_can_run_job_on_slot_no_lock - Decide whether it is possible to
+ * submit a job to a particular job slot in the current status
+ *
+ * @kbdev: The kbase device structure of the device
+ * @js: Job slot number to check for allowance
+ *
+ * Will check if submitting to the given job slot is allowed in the current
+ * status. For example using job slot 2 while in soft-stoppable state and only
+ * having 1 coregroup is not allowed by the policy. This function should be
+ * called prior to submitting a job to a slot to make sure policy rules are not
+ * violated.
+ *
+ * The following locking conditions are made on the caller
+ * - it must hold hwaccess_lock
+ */
+bool kbase_js_can_run_job_on_slot_no_lock(struct kbase_device *kbdev, int js);
+
+/**
+ * kbase_js_choose_affinity - Compute affinity for a given job.
+ *
+ * @affinity: Affinity bitmap computed
+ * @kbdev: The kbase device structure of the device
+ * @katom: Job chain of which affinity is going to be found
+ * @js: Slot the job chain is being submitted
+ *
+ * Currently assumes an all-on/all-off power management policy.
+ * Also assumes there is at least one core with tiler available.
+ *
+ * Returns true if a valid affinity was chosen, false if
+ * no cores were available.
+ */
+bool kbase_js_choose_affinity(u64 * const affinity,
+ struct kbase_device *kbdev,
+ struct kbase_jd_atom *katom,
+ int js);
+
+/**
+ * kbase_js_affinity_would_violate - Determine whether a proposed affinity on
+ * job slot @js would cause a violation of affinity restrictions.
+ *
+ * @kbdev: Kbase device structure
+ * @js: The job slot to test
+ * @affinity: The affinity mask to test
+ *
+ * The following locks must be held by the caller
+ * - hwaccess_lock
+ *
+ * Return: true if the affinity would violate the restrictions
+ */
+bool kbase_js_affinity_would_violate(struct kbase_device *kbdev, int js,
+ u64 affinity);
+
+/**
+ * kbase_js_affinity_retain_slot_cores - Affinity tracking: retain cores used by
+ * a slot
+ *
+ * @kbdev: Kbase device structure
+ * @js: The job slot retaining the cores
+ * @affinity: The cores to retain
+ *
+ * The following locks must be held by the caller
+ * - hwaccess_lock
+ */
+void kbase_js_affinity_retain_slot_cores(struct kbase_device *kbdev, int js,
+ u64 affinity);
+
+/**
+ * kbase_js_affinity_release_slot_cores - Affinity tracking: release cores used
+ * by a slot
+ *
+ * @kbdev: Kbase device structure
+ * @js: Job slot
+ * @affinity: Bit mask of core to be released
+ *
+ * Cores must be released as soon as a job is dequeued from a slot's 'submit
+ * slots', and before another job is submitted to those slots. Otherwise, the
+ * refcount could exceed the maximum number submittable to a slot,
+ * %BASE_JM_SUBMIT_SLOTS.
+ *
+ * The following locks must be held by the caller
+ * - hwaccess_lock
+ */
+void kbase_js_affinity_release_slot_cores(struct kbase_device *kbdev, int js,
+ u64 affinity);
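+
+/*
+ * Minimal usage sketch (illustration of the intended call order, assuming the
+ * caller already holds hwaccess_lock and has computed an affinity mask, e.g.
+ * via kbase_js_choose_affinity()):
+ *
+ *   if (!kbase_js_affinity_would_violate(kbdev, js, affinity)) {
+ *           kbase_js_affinity_retain_slot_cores(kbdev, js, affinity);
+ *           ... submit the atom to slot js ...
+ *   }
+ *   ...
+ *   kbase_js_affinity_release_slot_cores(kbdev, js, affinity);
+ *       (called as soon as the atom leaves the slot's submit slots)
+ */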
+
+/**
+ * kbase_js_debug_log_current_affinities - log the current affinities
+ *
+ * @kbdev: Kbase device structure
+ *
+ * Output to the Trace log the current tracked affinities on all slots
+ */
+#if KBASE_TRACE_ENABLE
+void kbase_js_debug_log_current_affinities(struct kbase_device *kbdev);
+#else /* KBASE_TRACE_ENABLE */
+static inline void
+kbase_js_debug_log_current_affinities(struct kbase_device *kbdev)
+{
+}
+#endif /* KBASE_TRACE_ENABLE */
+
+#endif /* _KBASE_JS_AFFINITY_H_ */
diff --git a/midgard-0.r2p0/backend/gpu/mali_kbase_js_backend.c b/midgard-0.r2p0/backend/gpu/mali_kbase_js_backend.c
new file mode 100755
index 0000000..b09d491
--- /dev/null
+++ b/midgard-0.r2p0/backend/gpu/mali_kbase_js_backend.c
@@ -0,0 +1,357 @@
+/*
+ *
+ * (C) COPYRIGHT 2014-2016 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained
+ * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
+ * Boston, MA 02110-1301, USA.
+ *
+ */
+
+
+
+
+/*
+ * Register-based HW access backend specific job scheduler APIs
+ */
+
+#include <mali_kbase.h>
+#include <mali_kbase_hwaccess_jm.h>
+#include <backend/gpu/mali_kbase_jm_internal.h>
+#include <backend/gpu/mali_kbase_js_internal.h>
+
+/*
+ * Define for when dumping is enabled.
+ * This should not be based on the instrumentation level as whether dumping is
+ * enabled for a particular level is down to the integrator. However this is
+ * being used for now as otherwise the cinstr headers would be needed.
+ */
+#define CINSTR_DUMPING_ENABLED (2 == MALI_INSTRUMENTATION_LEVEL)
+
+/*
+ * Hold the runpool_mutex for this
+ */
+static inline bool timer_callback_should_run(struct kbase_device *kbdev)
+{
+ struct kbase_backend_data *backend = &kbdev->hwaccess.backend;
+ s8 nr_running_ctxs;
+
+ lockdep_assert_held(&kbdev->js_data.runpool_mutex);
+
+ /* Timer must stop if we are suspending */
+ if (backend->suspend_timer)
+ return false;
+
+ /* nr_contexts_pullable is updated with the runpool_mutex. However, the
+ * locking in the caller gives us a barrier that ensures
+ * nr_contexts_pullable is up-to-date for reading */
+ nr_running_ctxs = atomic_read(&kbdev->js_data.nr_contexts_runnable);
+
+#ifdef CONFIG_MALI_DEBUG
+ if (kbdev->js_data.softstop_always) {
+ /* Debug support for allowing soft-stop on a single context */
+ return true;
+ }
+#endif /* CONFIG_MALI_DEBUG */
+
+ if (kbase_hw_has_issue(kbdev, BASE_HW_ISSUE_9435)) {
+ /* Timeouts would have to be 4x longer (due to micro-
+ * architectural design) to support OpenCL conformance tests, so
+ * only run the timer when there's:
+ * - 2 or more CL contexts
+ * - 1 or more GLES contexts
+ *
+		 * NOTE: A context that has both Compute and Non-Compute jobs
+		 * will be treated as an OpenCL context (hence, we don't check
+		 * KBASEP_JS_CTX_ATTR_NON_COMPUTE).
+ */
+ {
+ s8 nr_compute_ctxs =
+ kbasep_js_ctx_attr_count_on_runpool(kbdev,
+ KBASEP_JS_CTX_ATTR_COMPUTE);
+ s8 nr_noncompute_ctxs = nr_running_ctxs -
+ nr_compute_ctxs;
+
+ return (bool) (nr_compute_ctxs >= 2 ||
+ nr_noncompute_ctxs > 0);
+ }
+ } else {
+ /* Run the timer callback whenever you have at least 1 context
+ */
+ return (bool) (nr_running_ctxs > 0);
+ }
+}
+
+static enum hrtimer_restart timer_callback(struct hrtimer *timer)
+{
+ unsigned long flags;
+ struct kbase_device *kbdev;
+ struct kbasep_js_device_data *js_devdata;
+ struct kbase_backend_data *backend;
+ int s;
+ bool reset_needed = false;
+
+ KBASE_DEBUG_ASSERT(timer != NULL);
+
+ backend = container_of(timer, struct kbase_backend_data,
+ scheduling_timer);
+ kbdev = container_of(backend, struct kbase_device, hwaccess.backend);
+ js_devdata = &kbdev->js_data;
+
+ /* Loop through the slots */
+ spin_lock_irqsave(&kbdev->hwaccess_lock, flags);
+ for (s = 0; s < kbdev->gpu_props.num_job_slots; s++) {
+ struct kbase_jd_atom *atom = NULL;
+
+ if (kbase_backend_nr_atoms_on_slot(kbdev, s) > 0) {
+ atom = kbase_gpu_inspect(kbdev, s, 0);
+ KBASE_DEBUG_ASSERT(atom != NULL);
+ }
+
+ if (atom != NULL) {
+ /* The current version of the model doesn't support
+ * Soft-Stop */
+ if (!kbase_hw_has_issue(kbdev, BASE_HW_ISSUE_5736)) {
+ u32 ticks = atom->sched_info.cfs.ticks++;
+
+#if !CINSTR_DUMPING_ENABLED
+ u32 soft_stop_ticks, hard_stop_ticks,
+ gpu_reset_ticks;
+ if (atom->core_req & BASE_JD_REQ_ONLY_COMPUTE) {
+ soft_stop_ticks =
+ js_devdata->soft_stop_ticks_cl;
+ hard_stop_ticks =
+ js_devdata->hard_stop_ticks_cl;
+ gpu_reset_ticks =
+ js_devdata->gpu_reset_ticks_cl;
+ } else {
+ soft_stop_ticks =
+ js_devdata->soft_stop_ticks;
+ hard_stop_ticks =
+ js_devdata->hard_stop_ticks_ss;
+ gpu_reset_ticks =
+ js_devdata->gpu_reset_ticks_ss;
+ }
+
+ /* If timeouts have been changed then ensure
+ * that atom tick count is not greater than the
+ * new soft_stop timeout. This ensures that
+ * atoms do not miss any of the timeouts due to
+ * races between this worker and the thread
+ * changing the timeouts. */
+ if (backend->timeouts_updated &&
+ ticks > soft_stop_ticks)
+ ticks = atom->sched_info.cfs.ticks =
+ soft_stop_ticks;
+
+ /* Job is Soft-Stoppable */
+ if (ticks == soft_stop_ticks) {
+ int disjoint_threshold =
+ KBASE_DISJOINT_STATE_INTERLEAVED_CONTEXT_COUNT_THRESHOLD;
+ u32 softstop_flags = 0u;
+ /* Job has been scheduled for at least
+ * js_devdata->soft_stop_ticks ticks.
+ * Soft stop the slot so we can run
+ * other jobs.
+ */
+ dev_dbg(kbdev->dev, "Soft-stop");
+#if !KBASE_DISABLE_SCHEDULING_SOFT_STOPS
+ /* nr_user_contexts_running is updated
+ * with the runpool_mutex, but we can't
+ * take that here.
+ *
+ * However, if it's about to be
+ * increased then the new context can't
+ * run any jobs until they take the
+ * hwaccess_lock, so it's OK to observe
+ * the older value.
+ *
+ * Similarly, if it's about to be
+ * decreased, the last job from another
+ * context has already finished, so it's
+ * not too bad that we observe the older
+ * value and register a disjoint event
+ * when we try soft-stopping */
+ if (js_devdata->nr_user_contexts_running
+ >= disjoint_threshold)
+ softstop_flags |=
+ JS_COMMAND_SW_CAUSES_DISJOINT;
+
+ kbase_job_slot_softstop_swflags(kbdev,
+ s, atom, softstop_flags);
+#endif
+ } else if (ticks == hard_stop_ticks) {
+ /* Job has been scheduled for at least
+ * js_devdata->hard_stop_ticks_ss ticks.
+ * It should have been soft-stopped by
+ * now. Hard stop the slot.
+ */
+#if !KBASE_DISABLE_SCHEDULING_HARD_STOPS
+ int ms =
+ js_devdata->scheduling_period_ns
+ / 1000000u;
+ dev_warn(kbdev->dev, "JS: Job Hard-Stopped (took more than %lu ticks at %lu ms/tick)",
+ (unsigned long)ticks,
+ (unsigned long)ms);
+ kbase_job_slot_hardstop(atom->kctx, s,
+ atom);
+#endif
+ } else if (ticks == gpu_reset_ticks) {
+ /* Job has been scheduled for at least
+ * js_devdata->gpu_reset_ticks_ss ticks.
+ * It should have left the GPU by now.
+ * Signal that the GPU needs to be
+ * reset.
+ */
+ reset_needed = true;
+ }
+#else /* !CINSTR_DUMPING_ENABLED */
+ /* NOTE: During CINSTR_DUMPING_ENABLED, we use
+ * the alternate timeouts, which makes the hard-
+ * stop and GPU reset timeout much longer. We
+ * also ensure that we don't soft-stop at all.
+ */
+ if (ticks == js_devdata->soft_stop_ticks) {
+ /* Job has been scheduled for at least
+ * js_devdata->soft_stop_ticks. We do
+ * not soft-stop during
+ * CINSTR_DUMPING_ENABLED, however.
+ */
+ dev_dbg(kbdev->dev, "Soft-stop");
+ } else if (ticks ==
+ js_devdata->hard_stop_ticks_dumping) {
+ /* Job has been scheduled for at least
+ * js_devdata->hard_stop_ticks_dumping
+ * ticks. Hard stop the slot.
+ */
+#if !KBASE_DISABLE_SCHEDULING_HARD_STOPS
+ int ms =
+ js_devdata->scheduling_period_ns
+ / 1000000u;
+ dev_warn(kbdev->dev, "JS: Job Hard-Stopped (took more than %lu ticks at %lu ms/tick)",
+ (unsigned long)ticks,
+ (unsigned long)ms);
+ kbase_job_slot_hardstop(atom->kctx, s,
+ atom);
+#endif
+ } else if (ticks ==
+ js_devdata->gpu_reset_ticks_dumping) {
+ /* Job has been scheduled for at least
+ * js_devdata->gpu_reset_ticks_dumping
+ * ticks. It should have left the GPU by
+ * now. Signal that the GPU needs to be
+ * reset.
+ */
+ reset_needed = true;
+ }
+#endif /* !CINSTR_DUMPING_ENABLED */
+ }
+ }
+ }
+#if KBASE_GPU_RESET_EN
+ if (reset_needed) {
+		dev_err(kbdev->dev, "JS: Job has been on the GPU for too long (JS_RESET_TICKS_SS/DUMPING timeout hit). Issuing GPU soft-reset to resolve.");
+
+ if (kbase_prepare_to_reset_gpu_locked(kbdev))
+ kbase_reset_gpu_locked(kbdev);
+ }
+#endif /* KBASE_GPU_RESET_EN */
+	/* the timer is re-issued if there are contexts in the run-pool */
+
+ if (backend->timer_running)
+ hrtimer_start(&backend->scheduling_timer,
+ HR_TIMER_DELAY_NSEC(js_devdata->scheduling_period_ns),
+ HRTIMER_MODE_REL);
+
+ backend->timeouts_updated = false;
+
+ spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);
+
+ return HRTIMER_NORESTART;
+}
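+
+/*
+ * Illustrative threshold ladder for the callback above, using hypothetical
+ * values soft_stop_ticks = 1, hard_stop_ticks_ss = 5, gpu_reset_ticks_ss = 8
+ * (the real values come from js_devdata and may differ):
+ *
+ *   ticks == 1 -> kbase_job_slot_softstop_swflags()  (ask the atom to yield)
+ *   ticks == 5 -> kbase_job_slot_hardstop()          (atom ignored soft-stop)
+ *   ticks == 8 -> reset_needed = true                (atom stuck, reset GPU)
+ *
+ * ticks advances by one per scheduling_period_ns, because the callback
+ * re-arms the hrtimer for that period while backend->timer_running is set.
+ * A CINSTR_DUMPING_ENABLED build uses the *_dumping thresholds instead and
+ * never soft-stops.
+ */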
+
+void kbase_backend_ctx_count_changed(struct kbase_device *kbdev)
+{
+ struct kbasep_js_device_data *js_devdata = &kbdev->js_data;
+ struct kbase_backend_data *backend = &kbdev->hwaccess.backend;
+ unsigned long flags;
+
+ lockdep_assert_held(&js_devdata->runpool_mutex);
+
+ if (!timer_callback_should_run(kbdev)) {
+ /* Take spinlock to force synchronisation with timer */
+ spin_lock_irqsave(&kbdev->hwaccess_lock, flags);
+ backend->timer_running = false;
+ spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);
+ /* From now on, return value of timer_callback_should_run() will
+ * also cause the timer to not requeue itself. Its return value
+ * cannot change, because it depends on variables updated with
+ * the runpool_mutex held, which the caller of this must also
+ * hold */
+ hrtimer_cancel(&backend->scheduling_timer);
+ }
+
+ if (timer_callback_should_run(kbdev) && !backend->timer_running) {
+ /* Take spinlock to force synchronisation with timer */
+ spin_lock_irqsave(&kbdev->hwaccess_lock, flags);
+ backend->timer_running = true;
+ spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);
+ hrtimer_start(&backend->scheduling_timer,
+ HR_TIMER_DELAY_NSEC(js_devdata->scheduling_period_ns),
+ HRTIMER_MODE_REL);
+
+ KBASE_TRACE_ADD(kbdev, JS_POLICY_TIMER_START, NULL, NULL, 0u,
+ 0u);
+ }
+}
+
+int kbase_backend_timer_init(struct kbase_device *kbdev)
+{
+ struct kbase_backend_data *backend = &kbdev->hwaccess.backend;
+
+ hrtimer_init(&backend->scheduling_timer, CLOCK_MONOTONIC,
+ HRTIMER_MODE_REL);
+ backend->scheduling_timer.function = timer_callback;
+
+ backend->timer_running = false;
+
+ return 0;
+}
+
+void kbase_backend_timer_term(struct kbase_device *kbdev)
+{
+ struct kbase_backend_data *backend = &kbdev->hwaccess.backend;
+
+ hrtimer_cancel(&backend->scheduling_timer);
+}
+
+void kbase_backend_timer_suspend(struct kbase_device *kbdev)
+{
+ struct kbase_backend_data *backend = &kbdev->hwaccess.backend;
+
+ backend->suspend_timer = true;
+
+ kbase_backend_ctx_count_changed(kbdev);
+}
+
+void kbase_backend_timer_resume(struct kbase_device *kbdev)
+{
+ struct kbase_backend_data *backend = &kbdev->hwaccess.backend;
+
+ backend->suspend_timer = false;
+
+ kbase_backend_ctx_count_changed(kbdev);
+}
+
+void kbase_backend_timeouts_changed(struct kbase_device *kbdev)
+{
+ struct kbase_backend_data *backend = &kbdev->hwaccess.backend;
+
+ backend->timeouts_updated = true;
+}
+
diff --git a/midgard-0.r2p0/backend/gpu/mali_kbase_js_internal.h b/midgard-0.r2p0/backend/gpu/mali_kbase_js_internal.h
new file mode 100755
index 0000000..3f53779
--- /dev/null
+++ b/midgard-0.r2p0/backend/gpu/mali_kbase_js_internal.h
@@ -0,0 +1,69 @@
+/*
+ *
+ * (C) COPYRIGHT 2014-2015 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained
+ * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
+ * Boston, MA 02110-1301, USA.
+ *
+ */
+
+
+
+
+/*
+ * Register-based HW access backend specific job scheduler APIs
+ */
+
+#ifndef _KBASE_JS_BACKEND_H_
+#define _KBASE_JS_BACKEND_H_
+
+/**
+ * kbase_backend_timer_init() - Initialise the JS scheduling timer
+ * @kbdev: Device pointer
+ *
+ * This function should be called at driver initialisation
+ *
+ * Return: 0 on success
+ */
+int kbase_backend_timer_init(struct kbase_device *kbdev);
+
+/**
+ * kbase_backend_timer_term() - Terminate the JS scheduling timer
+ * @kbdev: Device pointer
+ *
+ * This function should be called at driver termination
+ */
+void kbase_backend_timer_term(struct kbase_device *kbdev);
+
+/**
+ * kbase_backend_timer_suspend - Suspend is happening, stop the JS scheduling
+ * timer
+ * @kbdev: Device pointer
+ *
+ * This function should be called on suspend, after the active count has reached
+ * zero. This is required as the timer may have been started on job submission
+ * to the job scheduler, but before jobs are submitted to the GPU.
+ *
+ * Caller must hold runpool_mutex.
+ */
+void kbase_backend_timer_suspend(struct kbase_device *kbdev);
+
+/**
+ * kbase_backend_timer_resume - Resume is happening, re-evaluate the JS
+ * scheduling timer
+ * @kbdev: Device pointer
+ *
+ * This function should be called on resume. Note that it is not guaranteed to
+ * re-start the timer, only to evaluate whether it should be re-started.
+ *
+ * Caller must hold runpool_mutex.
+ */
+void kbase_backend_timer_resume(struct kbase_device *kbdev);
+
+#endif /* _KBASE_JS_BACKEND_H_ */
diff --git a/midgard-0.r2p0/backend/gpu/mali_kbase_mmu_hw_direct.c b/midgard-0.r2p0/backend/gpu/mali_kbase_mmu_hw_direct.c
new file mode 100755
index 0000000..08eea1c
--- /dev/null
+++ b/midgard-0.r2p0/backend/gpu/mali_kbase_mmu_hw_direct.c
@@ -0,0 +1,403 @@
+/*
+ *
+ * (C) COPYRIGHT 2014-2016 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained
+ * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
+ * Boston, MA 02110-1301, USA.
+ *
+ */
+
+
+
+#include <linux/bitops.h>
+
+#include <mali_kbase.h>
+#include <mali_kbase_mem.h>
+#include <mali_kbase_mmu_hw.h>
+#include <mali_kbase_tlstream.h>
+#include <backend/gpu/mali_kbase_device_internal.h>
+#include <mali_kbase_as_fault_debugfs.h>
+
+static inline u64 lock_region(struct kbase_device *kbdev, u64 pfn,
+ u32 num_pages)
+{
+ u64 region;
+
+ /* can't lock a zero sized range */
+ KBASE_DEBUG_ASSERT(num_pages);
+
+ region = pfn << PAGE_SHIFT;
+ /*
+ * fls returns (given the ASSERT above):
+ * 1 .. 32
+ *
+ * 10 + fls(num_pages)
+ * results in the range (11 .. 42)
+ */
+
+ /* gracefully handle num_pages being zero */
+ if (0 == num_pages) {
+ region |= 11;
+ } else {
+ u8 region_width;
+
+ region_width = 10 + fls(num_pages);
+ if (num_pages != (1ul << (region_width - 11))) {
+ /* not pow2, so must go up to the next pow2 */
+ region_width += 1;
+ }
+ KBASE_DEBUG_ASSERT(region_width <= KBASE_LOCK_REGION_MAX_SIZE);
+ KBASE_DEBUG_ASSERT(region_width >= KBASE_LOCK_REGION_MIN_SIZE);
+ region |= region_width;
+ }
+
+ return region;
+}
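+
+/*
+ * Worked example of the width calculation above (arithmetic only; the
+ * hardware interpretation of the encoded field is out of scope here):
+ * for num_pages = 3, fls(3) = 2 gives region_width = 12, and since 3 is not
+ * a power of two (1 << (12 - 11) == 2) the width is bumped to 13.  For
+ * num_pages = 4, fls(4) = 3 gives region_width = 13 directly, because
+ * 1 << (13 - 11) == 4.  The width is then OR'd into the low bits of
+ * pfn << PAGE_SHIFT to form the value written to AS_LOCKADDR.
+ */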
+
+static int wait_ready(struct kbase_device *kbdev,
+ unsigned int as_nr, struct kbase_context *kctx)
+{
+ unsigned int max_loops = KBASE_AS_INACTIVE_MAX_LOOPS;
+ u32 val = kbase_reg_read(kbdev, MMU_AS_REG(as_nr, AS_STATUS), kctx);
+
+ /* Wait for the MMU status to indicate there is no active command, in
+ * case one is pending. Do not log remaining register accesses. */
+ while (--max_loops && (val & AS_STATUS_AS_ACTIVE))
+ val = kbase_reg_read(kbdev, MMU_AS_REG(as_nr, AS_STATUS), NULL);
+
+ if (max_loops == 0) {
+ dev_err(kbdev->dev, "AS_ACTIVE bit stuck\n");
+ return -1;
+ }
+
+	/* If the waiting loop was entered, log the last value read. */
+ if (KBASE_AS_INACTIVE_MAX_LOOPS - 1 > max_loops)
+ kbase_reg_read(kbdev, MMU_AS_REG(as_nr, AS_STATUS), kctx);
+
+ return 0;
+}
+
+static int write_cmd(struct kbase_device *kbdev, int as_nr, u32 cmd,
+ struct kbase_context *kctx)
+{
+ int status;
+
+ /* write AS_COMMAND when MMU is ready to accept another command */
+ status = wait_ready(kbdev, as_nr, kctx);
+ if (status == 0)
+ kbase_reg_write(kbdev, MMU_AS_REG(as_nr, AS_COMMAND), cmd,
+ kctx);
+
+ return status;
+}
+
+static void validate_protected_page_fault(struct kbase_device *kbdev,
+ struct kbase_context *kctx)
+{
+ /* GPUs which support (native) protected mode shall not report page
+ * fault addresses unless it has protected debug mode and protected
+ * debug mode is turned on */
+ u32 protected_debug_mode = 0;
+
+ if (!kbase_hw_has_feature(kbdev, BASE_HW_FEATURE_PROTECTED_MODE))
+ return;
+
+ if (kbase_hw_has_feature(kbdev, BASE_HW_FEATURE_PROTECTED_DEBUG_MODE)) {
+ protected_debug_mode = kbase_reg_read(kbdev,
+ GPU_CONTROL_REG(GPU_STATUS),
+ kctx) & GPU_DBGEN;
+ }
+
+ if (!protected_debug_mode) {
+ /* fault_addr should never be reported in protected mode.
+ * However, we just continue by printing an error message */
+ dev_err(kbdev->dev, "Fault address reported in protected mode\n");
+ }
+}
+
+void kbase_mmu_interrupt(struct kbase_device *kbdev, u32 irq_stat)
+{
+ const int num_as = 16;
+ const int busfault_shift = MMU_PAGE_FAULT_FLAGS;
+ const int pf_shift = 0;
+ const unsigned long as_bit_mask = (1UL << num_as) - 1;
+ unsigned long flags;
+ u32 new_mask;
+ u32 tmp;
+
+ /* bus faults */
+ u32 bf_bits = (irq_stat >> busfault_shift) & as_bit_mask;
+ /* page faults (note: Ignore ASes with both pf and bf) */
+ u32 pf_bits = ((irq_stat >> pf_shift) & as_bit_mask) & ~bf_bits;
+
+ KBASE_DEBUG_ASSERT(NULL != kbdev);
+
+ /* remember current mask */
+ spin_lock_irqsave(&kbdev->mmu_mask_change, flags);
+ new_mask = kbase_reg_read(kbdev, MMU_REG(MMU_IRQ_MASK), NULL);
+ /* mask interrupts for now */
+ kbase_reg_write(kbdev, MMU_REG(MMU_IRQ_MASK), 0, NULL);
+ spin_unlock_irqrestore(&kbdev->mmu_mask_change, flags);
+
+ while (bf_bits | pf_bits) {
+ struct kbase_as *as;
+ int as_no;
+ struct kbase_context *kctx;
+
+ /*
+ * the while logic ensures we have a bit set, no need to check
+ * for not-found here
+ */
+ as_no = ffs(bf_bits | pf_bits) - 1;
+ as = &kbdev->as[as_no];
+
+ /*
+ * Refcount the kctx ASAP - it shouldn't disappear anyway, since
+ * Bus/Page faults _should_ only occur whilst jobs are running,
+ * and a job causing the Bus/Page fault shouldn't complete until
+ * the MMU is updated
+ */
+ kctx = kbasep_js_runpool_lookup_ctx(kbdev, as_no);
+
+
+ /* find faulting address */
+ as->fault_addr = kbase_reg_read(kbdev,
+ MMU_AS_REG(as_no,
+ AS_FAULTADDRESS_HI),
+ kctx);
+ as->fault_addr <<= 32;
+ as->fault_addr |= kbase_reg_read(kbdev,
+ MMU_AS_REG(as_no,
+ AS_FAULTADDRESS_LO),
+ kctx);
+
+ /* Mark the fault protected or not */
+ as->protected_mode = kbdev->protected_mode;
+
+ if (kbdev->protected_mode && as->fault_addr)
+ {
+ /* check if address reporting is allowed */
+ validate_protected_page_fault(kbdev, kctx);
+ }
+
+ /* report the fault to debugfs */
+ kbase_as_fault_debugfs_new(kbdev, as_no);
+
+ /* record the fault status */
+ as->fault_status = kbase_reg_read(kbdev,
+ MMU_AS_REG(as_no,
+ AS_FAULTSTATUS),
+ kctx);
+
+ /* find the fault type */
+ as->fault_type = (bf_bits & (1 << as_no)) ?
+ KBASE_MMU_FAULT_TYPE_BUS :
+ KBASE_MMU_FAULT_TYPE_PAGE;
+
+#ifdef CONFIG_MALI_GPU_MMU_AARCH64
+ as->fault_extra_addr = kbase_reg_read(kbdev,
+ MMU_AS_REG(as_no, AS_FAULTEXTRA_HI),
+ kctx);
+ as->fault_extra_addr <<= 32;
+ as->fault_extra_addr |= kbase_reg_read(kbdev,
+ MMU_AS_REG(as_no, AS_FAULTEXTRA_LO),
+ kctx);
+#endif /* CONFIG_MALI_GPU_MMU_AARCH64 */
+
+ if (kbase_as_has_bus_fault(as)) {
+ /* Mark bus fault as handled.
+			 * Note that a bus fault is processed first in the case
+			 * where both a bus fault and a page fault occur.
+ */
+ bf_bits &= ~(1UL << as_no);
+
+ /* remove the queued BF (and PF) from the mask */
+ new_mask &= ~(MMU_BUS_ERROR(as_no) |
+ MMU_PAGE_FAULT(as_no));
+ } else {
+ /* Mark page fault as handled */
+ pf_bits &= ~(1UL << as_no);
+
+ /* remove the queued PF from the mask */
+ new_mask &= ~MMU_PAGE_FAULT(as_no);
+ }
+
+ /* Process the interrupt for this address space */
+ spin_lock_irqsave(&kbdev->hwaccess_lock, flags);
+ kbase_mmu_interrupt_process(kbdev, kctx, as);
+ spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);
+ }
+
+ /* reenable interrupts */
+ spin_lock_irqsave(&kbdev->mmu_mask_change, flags);
+ tmp = kbase_reg_read(kbdev, MMU_REG(MMU_IRQ_MASK), NULL);
+ new_mask |= tmp;
+ kbase_reg_write(kbdev, MMU_REG(MMU_IRQ_MASK), new_mask, NULL);
+ spin_unlock_irqrestore(&kbdev->mmu_mask_change, flags);
+}
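+
+/*
+ * Worked example of the bit split at the top of the handler above, assuming
+ * MMU_PAGE_FAULT_FLAGS is 16 (one page-fault bit per address space in the
+ * low half of the status register, bus-fault bits in the high half):
+ * irq_stat = 0x00010003 gives
+ *
+ *   bf_bits = (0x00010003 >> 16) & 0xFFFF      = 0x0001  (bus fault on AS0)
+ *   pf_bits = (0x00010003 & 0xFFFF) & ~bf_bits = 0x0002  (page fault on AS1)
+ *
+ * AS0's page-fault bit is discarded because its bus fault takes precedence,
+ * so the loop services AS0 as a bus fault and AS1 as a page fault.
+ */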
+
+void kbase_mmu_hw_configure(struct kbase_device *kbdev, struct kbase_as *as,
+ struct kbase_context *kctx)
+{
+ struct kbase_mmu_setup *current_setup = &as->current_setup;
+ u32 transcfg = 0;
+
+#ifdef CONFIG_MALI_GPU_MMU_AARCH64
+ transcfg = current_setup->transcfg & 0xFFFFFFFFUL;
+
+ /* Set flag AS_TRANSCFG_PTW_MEMATTR_WRITE_BACK */
+ /* Clear PTW_MEMATTR bits */
+ transcfg &= ~AS_TRANSCFG_PTW_MEMATTR_MASK;
+ /* Enable correct PTW_MEMATTR bits */
+ transcfg |= AS_TRANSCFG_PTW_MEMATTR_WRITE_BACK;
+
+ if (kbdev->system_coherency == COHERENCY_ACE) {
+ /* Set flag AS_TRANSCFG_PTW_SH_OS (outer shareable) */
+ /* Clear PTW_SH bits */
+ transcfg = (transcfg & ~AS_TRANSCFG_PTW_SH_MASK);
+ /* Enable correct PTW_SH bits */
+ transcfg = (transcfg | AS_TRANSCFG_PTW_SH_OS);
+ }
+
+ kbase_reg_write(kbdev, MMU_AS_REG(as->number, AS_TRANSCFG_LO),
+ transcfg, kctx);
+ kbase_reg_write(kbdev, MMU_AS_REG(as->number, AS_TRANSCFG_HI),
+ (current_setup->transcfg >> 32) & 0xFFFFFFFFUL, kctx);
+
+#else /* CONFIG_MALI_GPU_MMU_AARCH64 */
+
+ if (kbdev->system_coherency == COHERENCY_ACE)
+ current_setup->transtab |= AS_TRANSTAB_LPAE_SHARE_OUTER;
+
+#endif /* CONFIG_MALI_GPU_MMU_AARCH64 */
+
+ kbase_reg_write(kbdev, MMU_AS_REG(as->number, AS_TRANSTAB_LO),
+ current_setup->transtab & 0xFFFFFFFFUL, kctx);
+ kbase_reg_write(kbdev, MMU_AS_REG(as->number, AS_TRANSTAB_HI),
+ (current_setup->transtab >> 32) & 0xFFFFFFFFUL, kctx);
+
+ kbase_reg_write(kbdev, MMU_AS_REG(as->number, AS_MEMATTR_LO),
+ current_setup->memattr & 0xFFFFFFFFUL, kctx);
+ kbase_reg_write(kbdev, MMU_AS_REG(as->number, AS_MEMATTR_HI),
+ (current_setup->memattr >> 32) & 0xFFFFFFFFUL, kctx);
+
+ kbase_tlstream_tl_attrib_as_config(as,
+ current_setup->transtab,
+ current_setup->memattr,
+ transcfg);
+
+ write_cmd(kbdev, as->number, AS_COMMAND_UPDATE, kctx);
+}
+
+int kbase_mmu_hw_do_operation(struct kbase_device *kbdev, struct kbase_as *as,
+ struct kbase_context *kctx, u64 vpfn, u32 nr, u32 op,
+ unsigned int handling_irq)
+{
+ int ret;
+
+ lockdep_assert_held(&kbdev->mmu_hw_mutex);
+
+ if (op == AS_COMMAND_UNLOCK) {
+ /* Unlock doesn't require a lock first */
+ ret = write_cmd(kbdev, as->number, AS_COMMAND_UNLOCK, kctx);
+ } else {
+ u64 lock_addr = lock_region(kbdev, vpfn, nr);
+
+ /* Lock the region that needs to be updated */
+ kbase_reg_write(kbdev, MMU_AS_REG(as->number, AS_LOCKADDR_LO),
+ lock_addr & 0xFFFFFFFFUL, kctx);
+ kbase_reg_write(kbdev, MMU_AS_REG(as->number, AS_LOCKADDR_HI),
+ (lock_addr >> 32) & 0xFFFFFFFFUL, kctx);
+ write_cmd(kbdev, as->number, AS_COMMAND_LOCK, kctx);
+
+ /* Run the MMU operation */
+ write_cmd(kbdev, as->number, op, kctx);
+
+ /* Wait for the flush to complete */
+ ret = wait_ready(kbdev, as->number, kctx);
+
+ if (kbase_hw_has_issue(kbdev, BASE_HW_ISSUE_9630)) {
+			/* Issue an UNLOCK command to ensure that valid page
+			 * tables are re-read by the GPU after an update.
+			 * Note that the FLUSH command should perform all the
+			 * actions necessary; however, the bus logs show that
+			 * if multiple page faults occur within an 8-page
+			 * region the MMU does not always re-read the updated
+			 * page table entries for later faults, or reads them
+			 * only partially, and subsequently raises the page
+			 * fault IRQ for the same addresses again. The UNLOCK
+			 * ensures that the MMU cache is flushed so the
+			 * updates can be re-read. As the region is now
+			 * unlocked, we need to issue 2 UNLOCK commands in
+			 * order to flush the MMU/uTLB, see PRLAM-8812.
+			 */
+ write_cmd(kbdev, as->number, AS_COMMAND_UNLOCK, kctx);
+ write_cmd(kbdev, as->number, AS_COMMAND_UNLOCK, kctx);
+ }
+ }
+
+ return ret;
+}
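+
+/*
+ * Sketch of the register sequence performed above for a non-UNLOCK op
+ * (names as used in this file):
+ *
+ *   AS_LOCKADDR_LO/HI <- lock_region(kbdev, vpfn, nr)
+ *   AS_COMMAND        <- AS_COMMAND_LOCK
+ *   AS_COMMAND        <- op (the flush/update command passed by the caller)
+ *   wait_ready()         poll AS_STATUS until the active bit clears
+ *   AS_COMMAND        <- AS_COMMAND_UNLOCK, issued twice, only on GPUs with
+ *                        BASE_HW_ISSUE_9630
+ */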
+
+void kbase_mmu_hw_clear_fault(struct kbase_device *kbdev, struct kbase_as *as,
+ struct kbase_context *kctx, enum kbase_mmu_fault_type type)
+{
+ unsigned long flags;
+ u32 pf_bf_mask;
+
+ spin_lock_irqsave(&kbdev->mmu_mask_change, flags);
+
+ /*
+ * A reset is in-flight and we're flushing the IRQ + bottom half
+ * so don't update anything as it could race with the reset code.
+ */
+ if (kbdev->irq_reset_flush)
+ goto unlock;
+
+ /* Clear the page (and bus fault IRQ as well in case one occurred) */
+ pf_bf_mask = MMU_PAGE_FAULT(as->number);
+ if (type == KBASE_MMU_FAULT_TYPE_BUS ||
+ type == KBASE_MMU_FAULT_TYPE_BUS_UNEXPECTED)
+ pf_bf_mask |= MMU_BUS_ERROR(as->number);
+
+ kbase_reg_write(kbdev, MMU_REG(MMU_IRQ_CLEAR), pf_bf_mask, kctx);
+
+unlock:
+ spin_unlock_irqrestore(&kbdev->mmu_mask_change, flags);
+}
+
+void kbase_mmu_hw_enable_fault(struct kbase_device *kbdev, struct kbase_as *as,
+ struct kbase_context *kctx, enum kbase_mmu_fault_type type)
+{
+ unsigned long flags;
+ u32 irq_mask;
+
+ /* Enable the page fault IRQ (and bus fault IRQ as well in case one
+ * occurred) */
+ spin_lock_irqsave(&kbdev->mmu_mask_change, flags);
+
+ /*
+ * A reset is in-flight and we're flushing the IRQ + bottom half
+ * so don't update anything as it could race with the reset code.
+ */
+ if (kbdev->irq_reset_flush)
+ goto unlock;
+
+ irq_mask = kbase_reg_read(kbdev, MMU_REG(MMU_IRQ_MASK), kctx) |
+ MMU_PAGE_FAULT(as->number);
+
+ if (type == KBASE_MMU_FAULT_TYPE_BUS ||
+ type == KBASE_MMU_FAULT_TYPE_BUS_UNEXPECTED)
+ irq_mask |= MMU_BUS_ERROR(as->number);
+
+ kbase_reg_write(kbdev, MMU_REG(MMU_IRQ_MASK), irq_mask, kctx);
+
+unlock:
+ spin_unlock_irqrestore(&kbdev->mmu_mask_change, flags);
+}
diff --git a/midgard-0.r2p0/backend/gpu/mali_kbase_mmu_hw_direct.h b/midgard-0.r2p0/backend/gpu/mali_kbase_mmu_hw_direct.h
new file mode 100755
index 0000000..c02253c
--- /dev/null
+++ b/midgard-0.r2p0/backend/gpu/mali_kbase_mmu_hw_direct.h
@@ -0,0 +1,42 @@
+/*
+ *
+ * (C) COPYRIGHT 2014-2015 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained
+ * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
+ * Boston, MA 02110-1301, USA.
+ *
+ */
+
+
+
+/*
+ * Interface file for the direct implementation for MMU hardware access
+ *
+ * Direct MMU hardware interface
+ *
+ * This module provides the interface(s) that are required by the direct
+ * register access implementation of the MMU hardware interface
+ */
+
+#ifndef _MALI_KBASE_MMU_HW_DIRECT_H_
+#define _MALI_KBASE_MMU_HW_DIRECT_H_
+
+#include <mali_kbase_defs.h>
+
+/**
+ * kbase_mmu_interrupt - Process an MMU interrupt.
+ *
+ * Process the MMU interrupt that was reported by the &kbase_device.
+ *
+ * @kbdev: Pointer to the kbase device on which the MMU interrupt occurred.
+ * @irq_stat: Value of the MMU_IRQ_STATUS register
+ */
+void kbase_mmu_interrupt(struct kbase_device *kbdev, u32 irq_stat);
+
+#endif /* _MALI_KBASE_MMU_HW_DIRECT_H_ */
diff --git a/midgard-0.r2p0/backend/gpu/mali_kbase_pm_always_on.c b/midgard-0.r2p0/backend/gpu/mali_kbase_pm_always_on.c
new file mode 100755
index 0000000..0614348
--- /dev/null
+++ b/midgard-0.r2p0/backend/gpu/mali_kbase_pm_always_on.c
@@ -0,0 +1,63 @@
+/*
+ *
+ * (C) COPYRIGHT 2010-2015 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained
+ * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
+ * Boston, MA 02110-1301, USA.
+ *
+ */
+
+
+
+
+
+/*
+ * "Always on" power management policy
+ */
+
+#include <mali_kbase.h>
+#include <mali_kbase_pm.h>
+
+static u64 always_on_get_core_mask(struct kbase_device *kbdev)
+{
+ return kbdev->gpu_props.props.raw_props.shader_present;
+}
+
+static bool always_on_get_core_active(struct kbase_device *kbdev)
+{
+ return true;
+}
+
+static void always_on_init(struct kbase_device *kbdev)
+{
+ CSTD_UNUSED(kbdev);
+}
+
+static void always_on_term(struct kbase_device *kbdev)
+{
+ CSTD_UNUSED(kbdev);
+}
+
+/*
+ * The struct kbase_pm_policy structure for the "always on" power policy.
+ *
+ * This is the static structure that defines the "always on" power policy's
+ * callbacks and name.
+ */
+const struct kbase_pm_policy kbase_pm_always_on_policy_ops = {
+ "always_on", /* name */
+ always_on_init, /* init */
+ always_on_term, /* term */
+ always_on_get_core_mask, /* get_core_mask */
+ always_on_get_core_active, /* get_core_active */
+ 0u, /* flags */
+ KBASE_PM_POLICY_ID_ALWAYS_ON, /* id */
+};
+
+KBASE_EXPORT_TEST_API(kbase_pm_always_on_policy_ops);
diff --git a/midgard-0.r2p0/backend/gpu/mali_kbase_pm_always_on.h b/midgard-0.r2p0/backend/gpu/mali_kbase_pm_always_on.h
new file mode 100755
index 0000000..f9d244b
--- /dev/null
+++ b/midgard-0.r2p0/backend/gpu/mali_kbase_pm_always_on.h
@@ -0,0 +1,77 @@
+
+/*
+ *
+ * (C) COPYRIGHT 2011-2015 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained
+ * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
+ * Boston, MA 02110-1301, USA.
+ *
+ */
+
+
+
+
+
+/*
+ * "Always on" power management policy
+ */
+
+#ifndef MALI_KBASE_PM_ALWAYS_ON_H
+#define MALI_KBASE_PM_ALWAYS_ON_H
+
+/**
+ * DOC:
+ * The "Always on" power management policy has the following
+ * characteristics:
+ *
+ * - When KBase indicates that the GPU will be powered up, but we don't yet
+ * know which Job Chains are to be run:
+ * All Shader Cores are powered up, regardless of whether or not they will
+ * be needed later.
+ *
+ * - When KBase indicates that a set of Shader Cores are needed to submit the
+ * currently queued Job Chains:
+ * All Shader Cores are kept powered, regardless of whether or not they will
+ * be needed
+ *
+ * - When KBase indicates that the GPU need not be powered:
+ * The Shader Cores are kept powered, regardless of whether or not they will
+ * be needed. The GPU itself is also kept powered, even though it is not
+ * needed.
+ *
+ * This policy is automatically overridden during system suspend: the desired
+ * core state is ignored, and the cores are forced off regardless of what the
+ * policy requests. After resuming from suspend, new changes to the desired
+ * core state made by the policy are honored.
+ *
+ * Note:
+ *
+ * - KBase indicates the GPU will be powered up when it has a User Process that
+ * has just started to submit Job Chains.
+ *
+ * - KBase indicates the GPU need not be powered when all the Job Chains from
+ * User Processes have finished, and it is waiting for a User Process to
+ * submit some more Job Chains.
+ */
+
+/**
+ * struct kbasep_pm_policy_always_on - Private struct for policy instance data
+ * @dummy: unused dummy variable
+ *
+ * This contains data that is private to the particular power policy that is
+ * active.
+ */
+struct kbasep_pm_policy_always_on {
+ int dummy;
+};
+
+extern const struct kbase_pm_policy kbase_pm_always_on_policy_ops;
+
+#endif /* MALI_KBASE_PM_ALWAYS_ON_H */
+
diff --git a/midgard-0.r2p0/backend/gpu/mali_kbase_pm_backend.c b/midgard-0.r2p0/backend/gpu/mali_kbase_pm_backend.c
new file mode 100755
index 0000000..7690ec5
--- /dev/null
+++ b/midgard-0.r2p0/backend/gpu/mali_kbase_pm_backend.c
@@ -0,0 +1,460 @@
+/*
+ *
+ * (C) COPYRIGHT 2010-2016 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained
+ * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
+ * Boston, MA 02110-1301, USA.
+ *
+ */
+
+
+
+
+/*
+ * GPU backend implementation of base kernel power management APIs
+ */
+
+#include <mali_kbase.h>
+#include <mali_midg_regmap.h>
+#include <mali_kbase_config_defaults.h>
+#ifdef CONFIG_MALI_PLATFORM_DEVICETREE
+#include <linux/pm_runtime.h>
+#endif /* CONFIG_MALI_PLATFORM_DEVICETREE */
+
+#include <mali_kbase_pm.h>
+#include <mali_kbase_hwaccess_jm.h>
+#include <backend/gpu/mali_kbase_js_internal.h>
+#include <backend/gpu/mali_kbase_pm_internal.h>
+
+static void kbase_pm_gpu_poweroff_wait_wq(struct work_struct *data);
+
+void kbase_pm_register_access_enable(struct kbase_device *kbdev)
+{
+ struct kbase_pm_callback_conf *callbacks;
+
+ callbacks = (struct kbase_pm_callback_conf *)POWER_MANAGEMENT_CALLBACKS;
+
+ if (callbacks)
+ callbacks->power_on_callback(kbdev);
+
+ kbdev->pm.backend.gpu_powered = true;
+}
+
+void kbase_pm_register_access_disable(struct kbase_device *kbdev)
+{
+ struct kbase_pm_callback_conf *callbacks;
+
+ callbacks = (struct kbase_pm_callback_conf *)POWER_MANAGEMENT_CALLBACKS;
+
+ if (callbacks)
+ callbacks->power_off_callback(kbdev);
+
+ kbdev->pm.backend.gpu_powered = false;
+}
+
+int kbase_hwaccess_pm_init(struct kbase_device *kbdev)
+{
+ int ret = 0;
+ struct kbase_pm_callback_conf *callbacks;
+
+ KBASE_DEBUG_ASSERT(kbdev != NULL);
+
+ mutex_init(&kbdev->pm.lock);
+
+ kbdev->pm.backend.gpu_poweroff_wait_wq = alloc_workqueue("kbase_pm_poweroff_wait",
+ WQ_HIGHPRI | WQ_UNBOUND, 1);
+ if (!kbdev->pm.backend.gpu_poweroff_wait_wq)
+ return -ENOMEM;
+
+ INIT_WORK(&kbdev->pm.backend.gpu_poweroff_wait_work,
+ kbase_pm_gpu_poweroff_wait_wq);
+
+ kbdev->pm.backend.gpu_powered = false;
+ kbdev->pm.suspending = false;
+#ifdef CONFIG_MALI_DEBUG
+ kbdev->pm.backend.driver_ready_for_irqs = false;
+#endif /* CONFIG_MALI_DEBUG */
+ kbdev->pm.backend.gpu_in_desired_state = true;
+ init_waitqueue_head(&kbdev->pm.backend.gpu_in_desired_state_wait);
+
+ callbacks = (struct kbase_pm_callback_conf *)POWER_MANAGEMENT_CALLBACKS;
+ if (callbacks) {
+ kbdev->pm.backend.callback_power_on =
+ callbacks->power_on_callback;
+ kbdev->pm.backend.callback_power_off =
+ callbacks->power_off_callback;
+ kbdev->pm.backend.callback_power_suspend =
+ callbacks->power_suspend_callback;
+ kbdev->pm.backend.callback_power_resume =
+ callbacks->power_resume_callback;
+ kbdev->pm.callback_power_runtime_init =
+ callbacks->power_runtime_init_callback;
+ kbdev->pm.callback_power_runtime_term =
+ callbacks->power_runtime_term_callback;
+ kbdev->pm.backend.callback_power_runtime_on =
+ callbacks->power_runtime_on_callback;
+ kbdev->pm.backend.callback_power_runtime_off =
+ callbacks->power_runtime_off_callback;
+ kbdev->pm.backend.callback_power_runtime_idle =
+ callbacks->power_runtime_idle_callback;
+ } else {
+ kbdev->pm.backend.callback_power_on = NULL;
+ kbdev->pm.backend.callback_power_off = NULL;
+ kbdev->pm.backend.callback_power_suspend = NULL;
+ kbdev->pm.backend.callback_power_resume = NULL;
+ kbdev->pm.callback_power_runtime_init = NULL;
+ kbdev->pm.callback_power_runtime_term = NULL;
+ kbdev->pm.backend.callback_power_runtime_on = NULL;
+ kbdev->pm.backend.callback_power_runtime_off = NULL;
+ kbdev->pm.backend.callback_power_runtime_idle = NULL;
+ }
+
+ /* Initialise the metrics subsystem */
+ ret = kbasep_pm_metrics_init(kbdev);
+ if (ret)
+ return ret;
+
+ init_waitqueue_head(&kbdev->pm.backend.l2_powered_wait);
+ kbdev->pm.backend.l2_powered = 0;
+
+ init_waitqueue_head(&kbdev->pm.backend.reset_done_wait);
+ kbdev->pm.backend.reset_done = false;
+
+ init_waitqueue_head(&kbdev->pm.zero_active_count_wait);
+ kbdev->pm.active_count = 0;
+
+ spin_lock_init(&kbdev->pm.backend.gpu_cycle_counter_requests_lock);
+ spin_lock_init(&kbdev->pm.backend.gpu_powered_lock);
+
+ init_waitqueue_head(&kbdev->pm.backend.poweroff_wait);
+
+ if (kbase_pm_ca_init(kbdev) != 0)
+ goto workq_fail;
+
+ if (kbase_pm_policy_init(kbdev) != 0)
+ goto pm_policy_fail;
+
+ return 0;
+
+pm_policy_fail:
+ kbase_pm_ca_term(kbdev);
+workq_fail:
+ kbasep_pm_metrics_term(kbdev);
+ return -EINVAL;
+}
+
+void kbase_pm_do_poweron(struct kbase_device *kbdev, bool is_resume)
+{
+ lockdep_assert_held(&kbdev->pm.lock);
+
+ /* Turn clocks and interrupts on - no-op if we haven't done a previous
+ * kbase_pm_clock_off() */
+ kbase_pm_clock_on(kbdev, is_resume);
+
+ /* Update core status as required by the policy */
+ KBASE_TIMELINE_PM_CHECKTRANS(kbdev,
+ SW_FLOW_PM_CHECKTRANS_PM_DO_POWERON_START);
+ kbase_pm_update_cores_state(kbdev);
+ KBASE_TIMELINE_PM_CHECKTRANS(kbdev,
+ SW_FLOW_PM_CHECKTRANS_PM_DO_POWERON_END);
+
+ /* NOTE: We don't wait to reach the desired state, since running atoms
+ * will wait for that state to be reached anyway */
+}
+
+static void kbase_pm_gpu_poweroff_wait_wq(struct work_struct *data)
+{
+ struct kbase_device *kbdev = container_of(data, struct kbase_device,
+ pm.backend.gpu_poweroff_wait_work);
+ struct kbase_pm_device_data *pm = &kbdev->pm;
+ struct kbase_pm_backend_data *backend = &pm->backend;
+ struct kbasep_js_device_data *js_devdata = &kbdev->js_data;
+ unsigned long flags;
+
+ /* Wait for power transitions to complete. We do this with no locks held
+ * so that we don't deadlock with any pending workqueues */
+ KBASE_TIMELINE_PM_CHECKTRANS(kbdev,
+ SW_FLOW_PM_CHECKTRANS_PM_DO_POWEROFF_START);
+ kbase_pm_check_transitions_sync(kbdev);
+ KBASE_TIMELINE_PM_CHECKTRANS(kbdev,
+ SW_FLOW_PM_CHECKTRANS_PM_DO_POWEROFF_END);
+
+ mutex_lock(&js_devdata->runpool_mutex);
+ mutex_lock(&kbdev->pm.lock);
+
+ if (!backend->poweron_required) {
+ WARN_ON(kbdev->l2_available_bitmap ||
+ kbdev->shader_available_bitmap ||
+ kbdev->tiler_available_bitmap);
+
+ /* Consume any change-state events */
+ kbase_timeline_pm_check_handle_event(kbdev,
+ KBASE_TIMELINE_PM_EVENT_GPU_STATE_CHANGED);
+
+ /* Disable interrupts and turn the clock off */
+ if (!kbase_pm_clock_off(kbdev, backend->poweroff_is_suspend)) {
+ /*
+ * Page/bus faults are pending, must drop locks to
+ * process. Interrupts are disabled so no more faults
+ * should be generated at this point.
+ */
+ mutex_unlock(&kbdev->pm.lock);
+ mutex_unlock(&js_devdata->runpool_mutex);
+ kbase_flush_mmu_wqs(kbdev);
+ mutex_lock(&js_devdata->runpool_mutex);
+ mutex_lock(&kbdev->pm.lock);
+
+ /* Turn off clock now that faults have been handled. We
+ * dropped locks so poweron_required may have changed -
+ * power back on if this is the case. */
+ if (backend->poweron_required)
+ kbase_pm_clock_on(kbdev, false);
+ else
+ WARN_ON(!kbase_pm_clock_off(kbdev,
+ backend->poweroff_is_suspend));
+ }
+ }
+
+ spin_lock_irqsave(&kbdev->hwaccess_lock, flags);
+ backend->poweroff_wait_in_progress = false;
+ if (backend->poweron_required) {
+ backend->poweron_required = false;
+ kbase_pm_update_cores_state_nolock(kbdev);
+ }
+ spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);
+
+ mutex_unlock(&kbdev->pm.lock);
+ mutex_unlock(&js_devdata->runpool_mutex);
+
+ wake_up(&kbdev->pm.backend.poweroff_wait);
+}
+
+void kbase_pm_do_poweroff(struct kbase_device *kbdev, bool is_suspend)
+{
+ unsigned long flags;
+
+ lockdep_assert_held(&kbdev->pm.lock);
+
+ spin_lock_irqsave(&kbdev->hwaccess_lock, flags);
+ if (!kbdev->pm.backend.poweroff_wait_in_progress) {
+ /* Force all cores off */
+ kbdev->pm.backend.desired_shader_state = 0;
+ kbdev->pm.backend.desired_tiler_state = 0;
+
+ /* Force all cores to be unavailable, in the situation where
+ * transitions are in progress for some cores but not others,
+ * and kbase_pm_check_transitions_nolock cannot immediately
+ * power off the cores */
+ kbdev->shader_available_bitmap = 0;
+ kbdev->tiler_available_bitmap = 0;
+ kbdev->l2_available_bitmap = 0;
+
+ kbdev->pm.backend.poweroff_wait_in_progress = true;
+ kbdev->pm.backend.poweroff_is_suspend = is_suspend;
+
+ spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);
+ /* Kick off wq here. Callers will have to wait */
+ queue_work(kbdev->pm.backend.gpu_poweroff_wait_wq,
+ &kbdev->pm.backend.gpu_poweroff_wait_work);
+ } else {
+ spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);
+ }
+}
+
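+/*
+ * Note: despite its name, this helper returns true once the poweroff wait
+ * work has completed (poweroff_wait_in_progress cleared); it is the
+ * wait_event predicate used by kbase_pm_wait_for_poweroff_complete() below.
+ */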
+static bool is_poweroff_in_progress(struct kbase_device *kbdev)
+{
+ bool ret;
+ unsigned long flags;
+
+ spin_lock_irqsave(&kbdev->hwaccess_lock, flags);
+ ret = (kbdev->pm.backend.poweroff_wait_in_progress == false);
+ spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);
+
+ return ret;
+}
+
+void kbase_pm_wait_for_poweroff_complete(struct kbase_device *kbdev)
+{
+ wait_event_killable(kbdev->pm.backend.poweroff_wait,
+ is_poweroff_in_progress(kbdev));
+}
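+
+/*
+ * Usage sketch (illustrative only; names match the functions in this file):
+ * kbase_pm_do_poweroff() only queues the poweroff work, so callers that need
+ * the GPU to actually be off pair it with
+ * kbase_pm_wait_for_poweroff_complete(). Simplified outline (the real
+ * suspend path further down also takes the runpool mutex and cancels any
+ * deferred poweroff first):
+ *
+ *	mutex_lock(&kbdev->pm.lock);
+ *	kbase_pm_do_poweroff(kbdev, false);
+ *	mutex_unlock(&kbdev->pm.lock);
+ *	kbase_pm_wait_for_poweroff_complete(kbdev);
+ */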
+
+int kbase_hwaccess_pm_powerup(struct kbase_device *kbdev,
+ unsigned int flags)
+{
+ struct kbasep_js_device_data *js_devdata = &kbdev->js_data;
+ unsigned long irq_flags;
+ int ret;
+
+ KBASE_DEBUG_ASSERT(kbdev != NULL);
+
+ mutex_lock(&js_devdata->runpool_mutex);
+ mutex_lock(&kbdev->pm.lock);
+
+ /* A suspend won't happen during startup/insmod */
+ KBASE_DEBUG_ASSERT(!kbase_pm_is_suspending(kbdev));
+
+ /* Power up the GPU, but don't enable IRQs as we are not yet ready to
+ * receive them. */
+ ret = kbase_pm_init_hw(kbdev, flags);
+ if (ret) {
+ mutex_unlock(&kbdev->pm.lock);
+ mutex_unlock(&js_devdata->runpool_mutex);
+ return ret;
+ }
+
+ kbasep_pm_read_present_cores(kbdev);
+
+ kbdev->pm.debug_core_mask_all = kbdev->pm.debug_core_mask[0] =
+ kbdev->pm.debug_core_mask[1] =
+ kbdev->pm.debug_core_mask[2] =
+ kbdev->gpu_props.props.raw_props.shader_present;
+
+ /* Pretend the GPU is active to prevent a power policy turning the GPU
+ * cores off */
+ kbdev->pm.active_count = 1;
+
+ spin_lock_irqsave(&kbdev->pm.backend.gpu_cycle_counter_requests_lock,
+ irq_flags);
+ /* Ensure cycle counter is off */
+ kbdev->pm.backend.gpu_cycle_counter_requests = 0;
+ spin_unlock_irqrestore(
+ &kbdev->pm.backend.gpu_cycle_counter_requests_lock,
+ irq_flags);
+
+ /* We are ready to receive IRQs now that the power policy is set up,
+ * so enable them. */
+#ifdef CONFIG_MALI_DEBUG
+ spin_lock_irqsave(&kbdev->pm.backend.gpu_powered_lock, irq_flags);
+ kbdev->pm.backend.driver_ready_for_irqs = true;
+ spin_unlock_irqrestore(&kbdev->pm.backend.gpu_powered_lock, irq_flags);
+#endif
+ kbase_pm_enable_interrupts(kbdev);
+
+ /* Turn on the GPU and any cores needed by the policy */
+ kbase_pm_do_poweron(kbdev, false);
+ mutex_unlock(&kbdev->pm.lock);
+ mutex_unlock(&js_devdata->runpool_mutex);
+
+ /* Idle the GPU and/or cores, if the policy wants it to */
+ kbase_pm_context_idle(kbdev);
+
+ return 0;
+}
+
+void kbase_hwaccess_pm_halt(struct kbase_device *kbdev)
+{
+ KBASE_DEBUG_ASSERT(kbdev != NULL);
+
+ mutex_lock(&kbdev->pm.lock);
+ kbase_pm_cancel_deferred_poweroff(kbdev);
+ kbase_pm_do_poweroff(kbdev, false);
+ mutex_unlock(&kbdev->pm.lock);
+}
+
+KBASE_EXPORT_TEST_API(kbase_hwaccess_pm_halt);
+
+void kbase_hwaccess_pm_term(struct kbase_device *kbdev)
+{
+ KBASE_DEBUG_ASSERT(kbdev != NULL);
+ KBASE_DEBUG_ASSERT(kbdev->pm.active_count == 0);
+ KBASE_DEBUG_ASSERT(kbdev->pm.backend.gpu_cycle_counter_requests == 0);
+
+ /* Free any resources the policy allocated */
+ kbase_pm_policy_term(kbdev);
+ kbase_pm_ca_term(kbdev);
+
+ /* Shut down the metrics subsystem */
+ kbasep_pm_metrics_term(kbdev);
+
+ destroy_workqueue(kbdev->pm.backend.gpu_poweroff_wait_wq);
+}
+
+void kbase_pm_power_changed(struct kbase_device *kbdev)
+{
+ bool cores_are_available;
+ unsigned long flags;
+
+ KBASE_TIMELINE_PM_CHECKTRANS(kbdev,
+ SW_FLOW_PM_CHECKTRANS_GPU_INTERRUPT_START);
+ spin_lock_irqsave(&kbdev->hwaccess_lock, flags);
+ cores_are_available = kbase_pm_check_transitions_nolock(kbdev);
+ KBASE_TIMELINE_PM_CHECKTRANS(kbdev,
+ SW_FLOW_PM_CHECKTRANS_GPU_INTERRUPT_END);
+
+ if (cores_are_available) {
+ /* Log timelining information that a change in state has
+ * completed */
+ kbase_timeline_pm_handle_event(kbdev,
+ KBASE_TIMELINE_PM_EVENT_GPU_STATE_CHANGED);
+
+ kbase_backend_slot_update(kbdev);
+ }
+ spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);
+}
+
+void kbase_pm_set_debug_core_mask(struct kbase_device *kbdev,
+ u64 new_core_mask_js0, u64 new_core_mask_js1,
+ u64 new_core_mask_js2)
+{
+ kbdev->pm.debug_core_mask[0] = new_core_mask_js0;
+ kbdev->pm.debug_core_mask[1] = new_core_mask_js1;
+ kbdev->pm.debug_core_mask[2] = new_core_mask_js2;
+ kbdev->pm.debug_core_mask_all = new_core_mask_js0 | new_core_mask_js1 |
+ new_core_mask_js2;
+
+ kbase_pm_update_cores_state_nolock(kbdev);
+}
+
+void kbase_hwaccess_pm_gpu_active(struct kbase_device *kbdev)
+{
+ kbase_pm_update_active(kbdev);
+}
+
+void kbase_hwaccess_pm_gpu_idle(struct kbase_device *kbdev)
+{
+ kbase_pm_update_active(kbdev);
+}
+
+void kbase_hwaccess_pm_suspend(struct kbase_device *kbdev)
+{
+ struct kbasep_js_device_data *js_devdata = &kbdev->js_data;
+
+ /* Force power off the GPU and all cores (regardless of policy), only
+ * after the PM active count reaches zero (otherwise, we risk turning it
+ * off prematurely) */
+ mutex_lock(&js_devdata->runpool_mutex);
+ mutex_lock(&kbdev->pm.lock);
+
+ kbase_pm_cancel_deferred_poweroff(kbdev);
+ kbase_pm_do_poweroff(kbdev, true);
+
+ kbase_backend_timer_suspend(kbdev);
+
+ mutex_unlock(&kbdev->pm.lock);
+ mutex_unlock(&js_devdata->runpool_mutex);
+
+ kbase_pm_wait_for_poweroff_complete(kbdev);
+}
+
+void kbase_hwaccess_pm_resume(struct kbase_device *kbdev)
+{
+ struct kbasep_js_device_data *js_devdata = &kbdev->js_data;
+
+ mutex_lock(&js_devdata->runpool_mutex);
+ mutex_lock(&kbdev->pm.lock);
+
+ kbdev->pm.suspending = false;
+ kbase_pm_do_poweron(kbdev, true);
+
+ kbase_backend_timer_resume(kbdev);
+
+ mutex_unlock(&kbdev->pm.lock);
+ mutex_unlock(&js_devdata->runpool_mutex);
+}
diff --git a/midgard-0.r2p0/backend/gpu/mali_kbase_pm_ca.c b/midgard-0.r2p0/backend/gpu/mali_kbase_pm_ca.c
new file mode 100755
index 0000000..e8cd8cb
--- /dev/null
+++ b/midgard-0.r2p0/backend/gpu/mali_kbase_pm_ca.c
@@ -0,0 +1,179 @@
+/*
+ *
+ * (C) COPYRIGHT 2013-2016 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained
+ * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
+ * Boston, MA 02110-1301, USA.
+ *
+ */
+
+
+
+/*
+ * Base kernel core availability APIs
+ */
+
+#include <mali_kbase.h>
+#include <mali_kbase_pm.h>
+#include <backend/gpu/mali_kbase_pm_internal.h>
+
+static const struct kbase_pm_ca_policy *const policy_list[] = {
+ &kbase_pm_ca_fixed_policy_ops,
+#if !MALI_CUSTOMER_RELEASE
+ &kbase_pm_ca_random_policy_ops
+#endif
+};
+
+/**
+ * POLICY_COUNT - The number of policies available in the system.
+ *
+ * This is derived from the number of functions listed in policy_list.
+ */
+#define POLICY_COUNT (sizeof(policy_list)/sizeof(*policy_list))
+
+int kbase_pm_ca_init(struct kbase_device *kbdev)
+{
+ KBASE_DEBUG_ASSERT(kbdev != NULL);
+
+ kbdev->pm.backend.ca_current_policy = policy_list[0];
+
+ kbdev->pm.backend.ca_current_policy->init(kbdev);
+
+ return 0;
+}
+
+void kbase_pm_ca_term(struct kbase_device *kbdev)
+{
+ kbdev->pm.backend.ca_current_policy->term(kbdev);
+}
+
+int kbase_pm_ca_list_policies(const struct kbase_pm_ca_policy * const **list)
+{
+ if (!list)
+ return POLICY_COUNT;
+
+ *list = policy_list;
+
+ return POLICY_COUNT;
+}
+
+KBASE_EXPORT_TEST_API(kbase_pm_ca_list_policies);
+
+const struct kbase_pm_ca_policy
+*kbase_pm_ca_get_policy(struct kbase_device *kbdev)
+{
+ KBASE_DEBUG_ASSERT(kbdev != NULL);
+
+ return kbdev->pm.backend.ca_current_policy;
+}
+
+KBASE_EXPORT_TEST_API(kbase_pm_ca_get_policy);
+
+void kbase_pm_ca_set_policy(struct kbase_device *kbdev,
+ const struct kbase_pm_ca_policy *new_policy)
+{
+ const struct kbase_pm_ca_policy *old_policy;
+ unsigned long flags;
+
+ KBASE_DEBUG_ASSERT(kbdev != NULL);
+ KBASE_DEBUG_ASSERT(new_policy != NULL);
+
+ KBASE_TRACE_ADD(kbdev, PM_CA_SET_POLICY, NULL, NULL, 0u,
+ new_policy->id);
+
+ /* During a policy change we pretend the GPU is active */
+ /* A suspend won't happen here, because we're in a syscall from a
+ * userspace thread */
+ kbase_pm_context_active(kbdev);
+
+ mutex_lock(&kbdev->pm.lock);
+
+ /* Remove the policy to prevent IRQ handlers from working on it */
+ spin_lock_irqsave(&kbdev->hwaccess_lock, flags);
+ old_policy = kbdev->pm.backend.ca_current_policy;
+ kbdev->pm.backend.ca_current_policy = NULL;
+ spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);
+
+ if (old_policy->term)
+ old_policy->term(kbdev);
+
+ if (new_policy->init)
+ new_policy->init(kbdev);
+
+ spin_lock_irqsave(&kbdev->hwaccess_lock, flags);
+ kbdev->pm.backend.ca_current_policy = new_policy;
+
+ /* If any core power state changes were previously attempted, but
+ * couldn't be made because the policy was changing (current_policy was
+ * NULL), then re-try them here. */
+ kbase_pm_update_cores_state_nolock(kbdev);
+
+ kbdev->pm.backend.ca_current_policy->update_core_status(kbdev,
+ kbdev->shader_ready_bitmap,
+ kbdev->shader_transitioning_bitmap);
+
+ spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);
+
+ mutex_unlock(&kbdev->pm.lock);
+
+ /* Now the policy change is finished, we release our fake context active
+ * reference */
+ kbase_pm_context_idle(kbdev);
+}
+
+KBASE_EXPORT_TEST_API(kbase_pm_ca_set_policy);
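+
+/*
+ * Usage sketch (illustrative only): a caller (for example a sysfs attribute
+ * handler, which is an assumption here) can combine
+ * kbase_pm_ca_list_policies() and kbase_pm_ca_set_policy() to select a core
+ * availability policy by name:
+ *
+ *	const struct kbase_pm_ca_policy *const *policies;
+ *	int i, count = kbase_pm_ca_list_policies(&policies);
+ *
+ *	for (i = 0; i < count; i++) {
+ *		if (!strcmp(policies[i]->name, "fixed")) {
+ *			kbase_pm_ca_set_policy(kbdev, policies[i]);
+ *			break;
+ *		}
+ *	}
+ */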
+
+u64 kbase_pm_ca_get_core_mask(struct kbase_device *kbdev)
+{
+ lockdep_assert_held(&kbdev->hwaccess_lock);
+
+ /* All cores must be enabled when instrumentation is in use */
+ if (kbdev->pm.backend.instr_enabled)
+ return kbdev->gpu_props.props.raw_props.shader_present &
+ kbdev->pm.debug_core_mask_all;
+
+ if (kbdev->pm.backend.ca_current_policy == NULL)
+ return kbdev->gpu_props.props.raw_props.shader_present &
+ kbdev->pm.debug_core_mask_all;
+
+ return kbdev->pm.backend.ca_current_policy->get_core_mask(kbdev) &
+ kbdev->pm.debug_core_mask_all;
+}
+
+KBASE_EXPORT_TEST_API(kbase_pm_ca_get_core_mask);
+
+void kbase_pm_ca_update_core_status(struct kbase_device *kbdev, u64 cores_ready,
+ u64 cores_transitioning)
+{
+ lockdep_assert_held(&kbdev->hwaccess_lock);
+
+ if (kbdev->pm.backend.ca_current_policy != NULL)
+ kbdev->pm.backend.ca_current_policy->update_core_status(kbdev,
+ cores_ready,
+ cores_transitioning);
+}
+
+void kbase_pm_ca_instr_enable(struct kbase_device *kbdev)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&kbdev->hwaccess_lock, flags);
+ kbdev->pm.backend.instr_enabled = true;
+
+ kbase_pm_update_cores_state_nolock(kbdev);
+ spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);
+}
+
+void kbase_pm_ca_instr_disable(struct kbase_device *kbdev)
+{
+ lockdep_assert_held(&kbdev->hwaccess_lock);
+ kbdev->pm.backend.instr_enabled = false;
+
+ kbase_pm_update_cores_state_nolock(kbdev);
+}
diff --git a/midgard-0.r2p0/backend/gpu/mali_kbase_pm_ca.h b/midgard-0.r2p0/backend/gpu/mali_kbase_pm_ca.h
new file mode 100755
index 0000000..ee9e751
--- /dev/null
+++ b/midgard-0.r2p0/backend/gpu/mali_kbase_pm_ca.h
@@ -0,0 +1,92 @@
+/*
+ *
+ * (C) COPYRIGHT 2011-2015 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained
+ * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
+ * Boston, MA 02110-1301, USA.
+ *
+ */
+
+
+
+/*
+ * Base kernel core availability APIs
+ */
+
+#ifndef _KBASE_PM_CA_H_
+#define _KBASE_PM_CA_H_
+
+/**
+ * kbase_pm_ca_init - Initialize core availability framework
+ *
+ * Must be called before calling any other core availability function
+ *
+ * @kbdev: The kbase device structure for the device (must be a valid pointer)
+ *
+ * Return: 0 if the core availability framework was successfully initialized,
+ * -errno otherwise
+ */
+int kbase_pm_ca_init(struct kbase_device *kbdev);
+
+/**
+ * kbase_pm_ca_term - Terminate core availability framework
+ *
+ * @kbdev: The kbase device structure for the device (must be a valid pointer)
+ */
+void kbase_pm_ca_term(struct kbase_device *kbdev);
+
+/**
+ * kbase_pm_ca_get_core_mask - Get currently available shaders core mask
+ *
+ * @kbdev: The kbase device structure for the device (must be a valid pointer)
+ *
+ * Returns a mask of the currently available shader cores.
+ * Calls into the core availability policy
+ *
+ * Return: The bit mask of available cores
+ */
+u64 kbase_pm_ca_get_core_mask(struct kbase_device *kbdev);
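+
+/*
+ * Usage sketch (illustrative only): kbase_pm_ca_get_core_mask() asserts that
+ * the hwaccess spinlock is held, so a caller looks like:
+ *
+ *	unsigned long flags;
+ *	u64 mask;
+ *
+ *	spin_lock_irqsave(&kbdev->hwaccess_lock, flags);
+ *	mask = kbase_pm_ca_get_core_mask(kbdev);
+ *	spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);
+ */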
+
+/**
+ * kbase_pm_ca_update_core_status - Update core status
+ *
+ * @kbdev: The kbase device structure for the device (must be
+ * a valid pointer)
+ * @cores_ready: The bit mask of cores ready for job submission
+ * @cores_transitioning: The bit mask of cores that are transitioning power
+ * state
+ *
+ * Update core availability policy with current core power status
+ *
+ * Calls into the core availability policy
+ */
+void kbase_pm_ca_update_core_status(struct kbase_device *kbdev, u64 cores_ready,
+ u64 cores_transitioning);
+
+/**
+ * kbase_pm_ca_instr_enable - Enable override for instrumentation
+ *
+ * @kbdev: The kbase device structure for the device (must be a valid pointer)
+ *
+ * This overrides the output of the core availability policy, ensuring that all
+ * cores are available
+ */
+void kbase_pm_ca_instr_enable(struct kbase_device *kbdev);
+
+/**
+ * kbase_pm_ca_instr_disable - Disable override for instrumentation
+ *
+ * @kbdev: The kbase device structure for the device (must be a valid pointer)
+ *
+ * This disables any previously enabled override, and resumes normal policy
+ * functionality
+ */
+void kbase_pm_ca_instr_disable(struct kbase_device *kbdev);
+
+#endif /* _KBASE_PM_CA_H_ */
diff --git a/midgard-0.r2p0/backend/gpu/mali_kbase_pm_ca_fixed.c b/midgard-0.r2p0/backend/gpu/mali_kbase_pm_ca_fixed.c
new file mode 100755
index 0000000..864612d
--- /dev/null
+++ b/midgard-0.r2p0/backend/gpu/mali_kbase_pm_ca_fixed.c
@@ -0,0 +1,65 @@
+/*
+ *
+ * (C) COPYRIGHT 2013-2015 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained
+ * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
+ * Boston, MA 02110-1301, USA.
+ *
+ */
+
+
+
+/*
+ * A power policy implementing fixed core availability
+ */
+
+#include <mali_kbase.h>
+#include <mali_kbase_pm.h>
+
+static void fixed_init(struct kbase_device *kbdev)
+{
+ kbdev->pm.backend.ca_in_transition = false;
+}
+
+static void fixed_term(struct kbase_device *kbdev)
+{
+ CSTD_UNUSED(kbdev);
+}
+
+static u64 fixed_get_core_mask(struct kbase_device *kbdev)
+{
+ return kbdev->gpu_props.props.raw_props.shader_present;
+}
+
+static void fixed_update_core_status(struct kbase_device *kbdev,
+ u64 cores_ready,
+ u64 cores_transitioning)
+{
+ CSTD_UNUSED(kbdev);
+ CSTD_UNUSED(cores_ready);
+ CSTD_UNUSED(cores_transitioning);
+}
+
+/*
+ * The struct kbase_pm_policy structure for the fixed power policy.
+ *
+ * This is the static structure that defines the fixed power policy's callback
+ * and name.
+ */
+const struct kbase_pm_ca_policy kbase_pm_ca_fixed_policy_ops = {
+ "fixed", /* name */
+ fixed_init, /* init */
+ fixed_term, /* term */
+ fixed_get_core_mask, /* get_core_mask */
+ fixed_update_core_status, /* update_core_status */
+ 0u, /* flags */
+ KBASE_PM_CA_POLICY_ID_FIXED, /* id */
+};
+
+KBASE_EXPORT_TEST_API(kbase_pm_ca_fixed_policy_ops);
diff --git a/midgard-0.r2p0/backend/gpu/mali_kbase_pm_ca_fixed.h b/midgard-0.r2p0/backend/gpu/mali_kbase_pm_ca_fixed.h
new file mode 100755
index 0000000..a763155
--- /dev/null
+++ b/midgard-0.r2p0/backend/gpu/mali_kbase_pm_ca_fixed.h
@@ -0,0 +1,40 @@
+/*
+ *
+ * (C) COPYRIGHT 2013-2015 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained
+ * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
+ * Boston, MA 02110-1301, USA.
+ *
+ */
+
+
+
+/*
+ * A power policy implementing fixed core availability
+ */
+
+#ifndef MALI_KBASE_PM_CA_FIXED_H
+#define MALI_KBASE_PM_CA_FIXED_H
+
+/**
+ * struct kbasep_pm_ca_policy_fixed - Private structure for policy instance data
+ *
+ * @dummy: Dummy member - no state is needed
+ *
+ * This contains data that is private to the particular power policy that is
+ * active.
+ */
+struct kbasep_pm_ca_policy_fixed {
+ int dummy;
+};
+
+extern const struct kbase_pm_ca_policy kbase_pm_ca_fixed_policy_ops;
+
+#endif /* MALI_KBASE_PM_CA_FIXED_H */
+
diff --git a/midgard-0.r2p0/backend/gpu/mali_kbase_pm_coarse_demand.c b/midgard-0.r2p0/backend/gpu/mali_kbase_pm_coarse_demand.c
new file mode 100755
index 0000000..f891fa2
--- /dev/null
+++ b/midgard-0.r2p0/backend/gpu/mali_kbase_pm_coarse_demand.c
@@ -0,0 +1,70 @@
+/*
+ *
+ * (C) COPYRIGHT 2012-2016 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained
+ * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
+ * Boston, MA 02110-1301, USA.
+ *
+ */
+
+
+
+
+
+/*
+ * "Coarse Demand" power management policy
+ */
+
+#include <mali_kbase.h>
+#include <mali_kbase_pm.h>
+
+static u64 coarse_demand_get_core_mask(struct kbase_device *kbdev)
+{
+ if (kbdev->pm.active_count == 0)
+ return 0;
+
+ return kbdev->gpu_props.props.raw_props.shader_present;
+}
+
+static bool coarse_demand_get_core_active(struct kbase_device *kbdev)
+{
+ if (0 == kbdev->pm.active_count && !(kbdev->shader_needed_bitmap |
+ kbdev->shader_inuse_bitmap) && !kbdev->tiler_needed_cnt
+ && !kbdev->tiler_inuse_cnt)
+ return false;
+
+ return true;
+}
+
+static void coarse_demand_init(struct kbase_device *kbdev)
+{
+ CSTD_UNUSED(kbdev);
+}
+
+static void coarse_demand_term(struct kbase_device *kbdev)
+{
+ CSTD_UNUSED(kbdev);
+}
+
+/* The struct kbase_pm_policy structure for the coarse demand power policy.
+ *
+ * This is the static structure that defines the coarse demand power policy's
+ * callbacks and name.
+ */
+const struct kbase_pm_policy kbase_pm_coarse_demand_policy_ops = {
+ "coarse_demand", /* name */
+ coarse_demand_init, /* init */
+ coarse_demand_term, /* term */
+ coarse_demand_get_core_mask, /* get_core_mask */
+ coarse_demand_get_core_active, /* get_core_active */
+ 0u, /* flags */
+ KBASE_PM_POLICY_ID_COARSE_DEMAND, /* id */
+};
+
+KBASE_EXPORT_TEST_API(kbase_pm_coarse_demand_policy_ops);
diff --git a/midgard-0.r2p0/backend/gpu/mali_kbase_pm_coarse_demand.h b/midgard-0.r2p0/backend/gpu/mali_kbase_pm_coarse_demand.h
new file mode 100755
index 0000000..749d305
--- /dev/null
+++ b/midgard-0.r2p0/backend/gpu/mali_kbase_pm_coarse_demand.h
@@ -0,0 +1,64 @@
+/*
+ *
+ * (C) COPYRIGHT 2012-2015 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained
+ * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
+ * Boston, MA 02110-1301, USA.
+ *
+ */
+
+
+
+
+
+/*
+ * "Coarse Demand" power management policy
+ */
+
+#ifndef MALI_KBASE_PM_COARSE_DEMAND_H
+#define MALI_KBASE_PM_COARSE_DEMAND_H
+
+/**
+ * DOC: Coarse Demand power management policy
+ * The "Coarse" demand power management policy has the following
+ * characteristics:
+ * - When KBase indicates that the GPU will be powered up, but we don't yet
+ * know which Job Chains are to be run:
+ * - All Shader Cores are powered up, regardless of whether or not they will
+ * be needed later.
+ * - When KBase indicates that a set of Shader Cores are needed to submit the
+ * currently queued Job Chains:
+ * - All Shader Cores are kept powered, regardless of whether or not they will
+ * be needed
+ * - When KBase indicates that the GPU need not be powered:
+ * - The Shader Cores are powered off, and the GPU itself is powered off too.
+ *
+ * Note:
+ * - KBase indicates the GPU will be powered up when it has a User Process that
+ * has just started to submit Job Chains.
+ * - KBase indicates the GPU need not be powered when all the Job Chains from
+ * User Processes have finished, and it is waiting for a User Process to
+ * submit some more Job Chains.
+ */
+
+/**
+ * struct kbasep_pm_policy_coarse_demand - Private structure for coarse demand
+ * policy
+ *
+ * This contains data that is private to the coarse demand power policy.
+ *
+ * @dummy: Dummy member - no state needed
+ */
+struct kbasep_pm_policy_coarse_demand {
+ int dummy;
+};
+
+extern const struct kbase_pm_policy kbase_pm_coarse_demand_policy_ops;
+
+#endif /* MALI_KBASE_PM_COARSE_DEMAND_H */
diff --git a/midgard-0.r2p0/backend/gpu/mali_kbase_pm_defs.h b/midgard-0.r2p0/backend/gpu/mali_kbase_pm_defs.h
new file mode 100755
index 0000000..99fb62d
--- /dev/null
+++ b/midgard-0.r2p0/backend/gpu/mali_kbase_pm_defs.h
@@ -0,0 +1,504 @@
+/*
+ *
+ * (C) COPYRIGHT 2014-2016 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained
+ * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
+ * Boston, MA 02110-1301, USA.
+ *
+ */
+
+
+
+/*
+ * Backend-specific Power Manager definitions
+ */
+
+#ifndef _KBASE_PM_HWACCESS_DEFS_H_
+#define _KBASE_PM_HWACCESS_DEFS_H_
+
+#include "mali_kbase_pm_ca_fixed.h"
+#if !MALI_CUSTOMER_RELEASE
+#include "mali_kbase_pm_ca_random.h"
+#endif
+
+#include "mali_kbase_pm_always_on.h"
+#include "mali_kbase_pm_coarse_demand.h"
+#include "mali_kbase_pm_demand.h"
+#if !MALI_CUSTOMER_RELEASE
+#include "mali_kbase_pm_demand_always_powered.h"
+#include "mali_kbase_pm_fast_start.h"
+#endif
+
+/* Forward definition - see mali_kbase.h */
+struct kbase_device;
+struct kbase_jd_atom;
+
+/**
+ * enum kbase_pm_core_type - The types of core in a GPU.
+ *
+ * These enumerated values are used in calls to
+ * - kbase_pm_get_present_cores()
+ * - kbase_pm_get_active_cores()
+ * - kbase_pm_get_trans_cores()
+ * - kbase_pm_get_ready_cores().
+ *
+ * They specify which type of core should be acted on. These values are set in
+ * a manner that allows the core_type_to_reg() function to be simpler and more
+ * efficient.
+ *
+ * @KBASE_PM_CORE_L2: The L2 cache
+ * @KBASE_PM_CORE_SHADER: Shader cores
+ * @KBASE_PM_CORE_TILER: Tiler cores
+ */
+enum kbase_pm_core_type {
+ KBASE_PM_CORE_L2 = L2_PRESENT_LO,
+ KBASE_PM_CORE_SHADER = SHADER_PRESENT_LO,
+ KBASE_PM_CORE_TILER = TILER_PRESENT_LO
+};
+
+/**
+ * struct kbasep_pm_metrics_data - Metrics data collected for use by the power
+ * management framework.
+ *
+ * @time_period_start: time at which busy/idle measurements started
+ * @time_busy: number of ns the GPU was busy executing jobs since the
+ * @time_period_start timestamp.
+ * @time_idle: number of ns the GPU was not executing jobs since the
+ * @time_period_start timestamp.
+ * @prev_busy: busy time in ns of previous time period.
+ * Updated when metrics are reset.
+ * @prev_idle: idle time in ns of previous time period
+ * Updated when metrics are reset.
+ * @gpu_active: true when the GPU is executing jobs. false when
+ * not. Updated when the job scheduler informs us a job is submitted
+ * or removed from a GPU slot.
+ * @busy_cl: number of ns the GPU was busy executing CL jobs. Note that
+ * if two CL jobs were active for 400ns, this value would be updated
+ * with 800.
+ * @busy_gl: number of ns the GPU was busy executing GL jobs. Note that
+ * if two GL jobs were active for 400ns, this value would be updated
+ * with 800.
+ * @active_cl_ctx: number of CL jobs active on the GPU. Array is per-device.
+ * @active_gl_ctx: number of GL jobs active on the GPU. Array is per-slot. As
+ * GL jobs never run on slot 2 this slot is not recorded.
+ * @lock: spinlock protecting the kbasep_pm_metrics_data structure
+ * @timer: timer to regularly make DVFS decisions based on the power
+ * management metrics.
+ * @timer_active: boolean indicating @timer is running
+ * @platform_data: pointer to data controlled by platform specific code
+ * @kbdev: pointer to kbase device for which metrics are collected
+ *
+ */
+struct kbasep_pm_metrics_data {
+ ktime_t time_period_start;
+ u32 time_busy;
+ u32 time_idle;
+ u32 prev_busy;
+ u32 prev_idle;
+ bool gpu_active;
+ u32 busy_cl[2];
+ u32 busy_gl;
+ u32 active_cl_ctx[2];
+ u32 active_gl_ctx[2]; /* GL jobs can only run on 2 of the 3 job slots */
+ spinlock_t lock;
+
+#ifdef CONFIG_MALI_MIDGARD_DVFS
+ struct hrtimer timer;
+ bool timer_active;
+#endif
+
+ void *platform_data;
+ struct kbase_device *kbdev;
+};
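+
+/*
+ * Illustrative sketch (not from this file; the helper name is hypothetical):
+ * a DVFS-style consumer of these counters could derive a utilisation figure
+ * from the busy/idle times accumulated since @time_period_start:
+ *
+ *	static int metrics_utilisation(struct kbasep_pm_metrics_data *m)
+ *	{
+ *		u64 total = (u64)m->time_busy + m->time_idle;
+ *
+ *		if (!total)
+ *			return 0;
+ *		return div64_u64(100ULL * m->time_busy, total);
+ *	}
+ */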
+
+union kbase_pm_policy_data {
+ struct kbasep_pm_policy_always_on always_on;
+ struct kbasep_pm_policy_coarse_demand coarse_demand;
+ struct kbasep_pm_policy_demand demand;
+#if !MALI_CUSTOMER_RELEASE
+ struct kbasep_pm_policy_demand_always_powered demand_always_powered;
+ struct kbasep_pm_policy_fast_start fast_start;
+#endif
+};
+
+union kbase_pm_ca_policy_data {
+ struct kbasep_pm_ca_policy_fixed fixed;
+#if !MALI_CUSTOMER_RELEASE
+ struct kbasep_pm_ca_policy_random random;
+#endif
+};
+
+/**
+ * struct kbase_pm_backend_data - Data stored per device for power management.
+ *
+ * This structure contains data for the power management framework. There is one
+ * instance of this structure per device in the system.
+ *
+ * @ca_current_policy: The policy that is currently actively controlling core
+ * availability.
+ * @pm_current_policy: The policy that is currently actively controlling the
+ * power state.
+ * @ca_policy_data: Private data for current CA policy
+ * @pm_policy_data: Private data for current PM policy
+ * @ca_in_transition: Flag indicating when core availability policy is
+ * transitioning cores. The core availability policy must
+ * set this when a change in core availability is occurring.
+ * power_change_lock must be held when accessing this.
+ * @reset_done: Flag when a reset is complete
+ * @reset_done_wait: Wait queue to wait for changes to @reset_done
+ * @l2_powered_wait: Wait queue for whether the l2 cache has been powered as
+ * requested
+ * @l2_powered: State indicating whether all the l2 caches are powered.
+ * Non-zero indicates they're *all* powered;
+ * zero indicates that some (or all) are not powered.
+ * @gpu_cycle_counter_requests: The reference count of active gpu cycle counter
+ * users
+ * @gpu_cycle_counter_requests_lock: Lock to protect @gpu_cycle_counter_requests
+ * @desired_shader_state: A bit mask identifying the shader cores that the
+ * power policy would like to be on. The current state
+ * of the cores may be different, but there should be
+ * transitions in progress that will eventually achieve
+ * this state (assuming that the policy doesn't change
+ * its mind in the mean time).
+ * @powering_on_shader_state: A bit mask indicating which shader cores are
+ * currently in a power-on transition
+ * @desired_tiler_state: A bit mask identifying the tiler cores that the power
+ * policy would like to be on. See @desired_shader_state
+ * @powering_on_tiler_state: A bit mask indicating which tiler core are
+ * currently in a power-on transition
+ * @powering_on_l2_state: A bit mask indicating which l2-caches are currently
+ * in a power-on transition
+ * @gpu_in_desired_state: This flag is set if the GPU is powered as requested
+ * by the desired_xxx_state variables
+ * @gpu_in_desired_state_wait: Wait queue set when @gpu_in_desired_state != 0
+ * @gpu_powered: Set to true when the GPU is powered and register
+ * accesses are possible, false otherwise
+ * @instr_enabled: Set to true when instrumentation is enabled,
+ * false otherwise
+ * @cg1_disabled: Set if the policy wants to keep the second core group
+ * powered off
+ * @driver_ready_for_irqs: Debug state indicating whether sufficient
+ * initialization of the driver has occurred to handle
+ * IRQs
+ * @gpu_powered_lock: Spinlock that must be held when writing @gpu_powered or
+ * accessing @driver_ready_for_irqs
+ * @metrics: Structure to hold metrics for the GPU
+ * @gpu_poweroff_pending: number of poweroff timer ticks until the GPU is
+ * powered off
+ * @shader_poweroff_pending_time: number of poweroff timer ticks until shaders
+ * and/or tilers are powered off
+ * @gpu_poweroff_timer: Timer for powering off GPU
+ * @gpu_poweroff_wq: Workqueue to power off GPU on when timer fires
+ * @gpu_poweroff_work: Workitem used on @gpu_poweroff_wq
+ * @shader_poweroff_pending: Bit mask of shaders to be powered off on next
+ * timer callback
+ * @tiler_poweroff_pending: Bit mask of tilers to be powered off on next timer
+ * callback
+ * @poweroff_timer_needed: true if the poweroff timer is currently required,
+ * false otherwise
+ * @poweroff_timer_running: true if the poweroff timer is currently running,
+ * false otherwise
+ * power_change_lock should be held when accessing,
+ * unless there is no way the timer can be running (e.g.
+ * hrtimer_cancel() was called immediately before)
+ * @poweroff_wait_in_progress: true if a wait for GPU power off is in progress.
+ * hwaccess_lock must be held when accessing
+ * @poweron_required: true if a GPU power on is required. Should only be set
+ * when poweroff_wait_in_progress is true, and therefore the
+ * GPU can not immediately be powered on. pm.lock must be
+ * held when accessing
+ * @poweroff_is_suspend: true if the GPU is being powered off due to a suspend
+ * request. pm.lock must be held when accessing
+ * @gpu_poweroff_wait_wq: workqueue for waiting for GPU to power off
+ * @gpu_poweroff_wait_work: work item for use with @gpu_poweroff_wait_wq
+ * @poweroff_wait: waitqueue for waiting for @gpu_poweroff_wait_work to complete
+ * @callback_power_on: Callback when the GPU needs to be turned on. See
+ * &struct kbase_pm_callback_conf
+ * @callback_power_off: Callback when the GPU may be turned off. See
+ * &struct kbase_pm_callback_conf
+ * @callback_power_suspend: Callback when a suspend occurs and the GPU needs to
+ * be turned off. See &struct kbase_pm_callback_conf
+ * @callback_power_resume: Callback when a resume occurs and the GPU needs to
+ * be turned on. See &struct kbase_pm_callback_conf
+ * @callback_power_runtime_on: Callback when the GPU needs to be turned on. See
+ * &struct kbase_pm_callback_conf
+ * @callback_power_runtime_off: Callback when the GPU may be turned off. See
+ * &struct kbase_pm_callback_conf
+ * @callback_power_runtime_idle: Optional callback when the GPU may be idle. See
+ * &struct kbase_pm_callback_conf
+ *
+ * Note:
+ * During an IRQ, @ca_current_policy or @pm_current_policy can be NULL when the
+ * policy is being changed with kbase_pm_ca_set_policy() or
+ * kbase_pm_set_policy(). The change is protected under
+ * kbase_device.pm.power_change_lock. Direct access to this
+ * from IRQ context must therefore check for NULL. If NULL, then
+ * kbase_pm_ca_set_policy() or kbase_pm_set_policy() will re-issue the policy
+ * functions that would have been done under IRQ.
+ */
+struct kbase_pm_backend_data {
+ const struct kbase_pm_ca_policy *ca_current_policy;
+ const struct kbase_pm_policy *pm_current_policy;
+ union kbase_pm_ca_policy_data ca_policy_data;
+ union kbase_pm_policy_data pm_policy_data;
+ bool ca_in_transition;
+ bool reset_done;
+ wait_queue_head_t reset_done_wait;
+ wait_queue_head_t l2_powered_wait;
+ int l2_powered;
+ int gpu_cycle_counter_requests;
+ spinlock_t gpu_cycle_counter_requests_lock;
+
+ u64 desired_shader_state;
+ u64 powering_on_shader_state;
+ u64 desired_tiler_state;
+ u64 powering_on_tiler_state;
+ u64 powering_on_l2_state;
+
+ bool gpu_in_desired_state;
+ wait_queue_head_t gpu_in_desired_state_wait;
+
+ bool gpu_powered;
+
+ bool instr_enabled;
+
+ bool cg1_disabled;
+
+#ifdef CONFIG_MALI_DEBUG
+ bool driver_ready_for_irqs;
+#endif /* CONFIG_MALI_DEBUG */
+
+ spinlock_t gpu_powered_lock;
+
+
+ struct kbasep_pm_metrics_data metrics;
+
+ int gpu_poweroff_pending;
+ int shader_poweroff_pending_time;
+
+ struct hrtimer gpu_poweroff_timer;
+ struct workqueue_struct *gpu_poweroff_wq;
+ struct work_struct gpu_poweroff_work;
+
+ u64 shader_poweroff_pending;
+ u64 tiler_poweroff_pending;
+
+ bool poweroff_timer_needed;
+ bool poweroff_timer_running;
+
+ bool poweroff_wait_in_progress;
+ bool poweron_required;
+ bool poweroff_is_suspend;
+
+ struct workqueue_struct *gpu_poweroff_wait_wq;
+ struct work_struct gpu_poweroff_wait_work;
+
+ wait_queue_head_t poweroff_wait;
+
+ int (*callback_power_on)(struct kbase_device *kbdev);
+ void (*callback_power_off)(struct kbase_device *kbdev);
+ void (*callback_power_suspend)(struct kbase_device *kbdev);
+ void (*callback_power_resume)(struct kbase_device *kbdev);
+ int (*callback_power_runtime_on)(struct kbase_device *kbdev);
+ void (*callback_power_runtime_off)(struct kbase_device *kbdev);
+ int (*callback_power_runtime_idle)(struct kbase_device *kbdev);
+};
+
+
+/* List of policy IDs */
+enum kbase_pm_policy_id {
+ KBASE_PM_POLICY_ID_DEMAND = 1,
+ KBASE_PM_POLICY_ID_ALWAYS_ON,
+ KBASE_PM_POLICY_ID_COARSE_DEMAND,
+#if !MALI_CUSTOMER_RELEASE
+ KBASE_PM_POLICY_ID_DEMAND_ALWAYS_POWERED,
+ KBASE_PM_POLICY_ID_FAST_START
+#endif
+};
+
+typedef u32 kbase_pm_policy_flags;
+
+/**
+ * struct kbase_pm_policy - Power policy structure.
+ *
+ * Each power policy exposes a (static) instance of this structure which
+ * contains function pointers to the policy's methods.
+ *
+ * @name: The name of this policy
+ * @init: Function called when the policy is selected
+ * @term: Function called when the policy is unselected
+ * @get_core_mask: Function called to get the current shader core mask
+ * @get_core_active: Function called to get the current overall GPU power
+ * state
+ * @flags: Field indicating flags for this policy
+ * @id: Field indicating an ID for this policy. This is not
+ * necessarily the same as its index in the list returned
+ * by kbase_pm_list_policies().
+ * It is used purely for debugging.
+ */
+struct kbase_pm_policy {
+ char *name;
+
+ /**
+ * Function called when the policy is selected
+ *
+ * This should initialize the kbdev->pm.pm_policy_data structure. It
+ * should not attempt to make any changes to hardware state.
+ *
+ * It is undefined what state the cores are in when the function is
+ * called.
+ *
+ * @kbdev: The kbase device structure for the device (must be a
+ * valid pointer)
+ */
+ void (*init)(struct kbase_device *kbdev);
+
+ /**
+ * Function called when the policy is unselected.
+ *
+ * @kbdev: The kbase device structure for the device (must be a
+ * valid pointer)
+ */
+ void (*term)(struct kbase_device *kbdev);
+
+ /**
+ * Function called to get the current shader core mask
+ *
+ * The returned mask should meet or exceed (kbdev->shader_needed_bitmap
+ * | kbdev->shader_inuse_bitmap).
+ *
+ * @kbdev: The kbase device structure for the device (must be a
+ * valid pointer)
+ *
+ * Return: The mask of shader cores to be powered
+ */
+ u64 (*get_core_mask)(struct kbase_device *kbdev);
+
+ /**
+ * Function called to get the current overall GPU power state
+ *
+ * This function should consider the state of kbdev->pm.active_count. If
+ * this count is greater than 0 then there is at least one active
+ * context on the device and the GPU should be powered. If it is equal
+ * to 0 then there are no active contexts and the GPU could be powered
+ * off if desired.
+ *
+ * @kbdev: The kbase device structure for the device (must be a
+ * valid pointer)
+ *
+ * Return: true if the GPU should be powered, false otherwise
+ */
+ bool (*get_core_active)(struct kbase_device *kbdev);
+
+ kbase_pm_policy_flags flags;
+ enum kbase_pm_policy_id id;
+};
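+
+/*
+ * Illustrative sketch (not a real policy in this driver): an implementation
+ * fills in the callbacks above and exposes a static instance, as the demand
+ * and coarse_demand policies elsewhere in this driver do. An always-on style
+ * core mask callback, for instance, would simply be:
+ *
+ *	static u64 example_get_core_mask(struct kbase_device *kbdev)
+ *	{
+ *		return kbdev->gpu_props.props.raw_props.shader_present;
+ *	}
+ */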
+
+
+enum kbase_pm_ca_policy_id {
+ KBASE_PM_CA_POLICY_ID_FIXED = 1,
+ KBASE_PM_CA_POLICY_ID_RANDOM
+};
+
+typedef u32 kbase_pm_ca_policy_flags;
+
+/**
+ * struct kbase_pm_ca_policy - Core availability policy structure.
+ *
+ * Each core availability policy exposes a (static) instance of this structure
+ * which contains function pointers to the policy's methods.
+ *
+ * @name: The name of this policy
+ * @init: Function called when the policy is selected
+ * @term: Function called when the policy is unselected
+ * @get_core_mask: Function called to get the current shader core
+ * availability mask
+ * @update_core_status: Function called to update the current core status
+ * @flags: Field indicating flags for this policy
+ * @id: Field indicating an ID for this policy. This is not
+ * necessarily the same as its index in the list returned
+ * by kbase_pm_list_policies().
+ * It is used purely for debugging.
+ */
+struct kbase_pm_ca_policy {
+ char *name;
+
+ /**
+ * Function called when the policy is selected
+ *
+ * This should initialize the kbdev->pm.ca_policy_data structure. It
+ * should not attempt to make any changes to hardware state.
+ *
+ * It is undefined what state the cores are in when the function is
+ * called.
+ *
+ * @kbdev: The kbase device structure for the device (must be a
+ * valid pointer)
+ */
+ void (*init)(struct kbase_device *kbdev);
+
+ /**
+ * Function called when the policy is unselected.
+ *
+ * @kbdev: The kbase device structure for the device (must be a
+ * valid pointer)
+ */
+ void (*term)(struct kbase_device *kbdev);
+
+ /**
+ * Function called to get the current shader core availability mask
+ *
+ * When a change in core availability is occurring, the policy must set
+ * kbdev->pm.ca_in_transition to true. This is to indicate that
+ * reporting changes in power state cannot be optimized out, even if
+ * kbdev->pm.desired_shader_state remains unchanged. This must be done
+ * by any functions internal to the Core Availability Policy that change
+ * the return value of kbase_pm_ca_policy::get_core_mask.
+ *
+ * @kbdev: The kbase device structure for the device (must be a
+ * valid pointer)
+ *
+ * Return: The current core availability mask
+ */
+ u64 (*get_core_mask)(struct kbase_device *kbdev);
+
+ /**
+ * Function called to update the current core status
+ *
+ * If none of the cores in core group 0 are ready or transitioning, then
+ * the policy must ensure that the next call to get_core_mask does not
+ * return 0 for all cores in core group 0. It is an error to disable
+ * core group 0 through the core availability policy.
+ *
+ * When a change in core availability has finished, the policy must set
+ * kbdev->pm.ca_in_transition to false. This is to indicate that
+ * changes in power state can once again be optimized out when
+ * kbdev->pm.desired_shader_state is unchanged.
+ *
+ * @kbdev: The kbase device structure for the device
+ * (must be a valid pointer)
+ * @cores_ready: The mask of cores currently powered and
+ * ready to run jobs
+ * @cores_transitioning: The mask of cores currently transitioning
+ * power state
+ */
+ void (*update_core_status)(struct kbase_device *kbdev, u64 cores_ready,
+ u64 cores_transitioning);
+
+ kbase_pm_ca_policy_flags flags;
+
+ /**
+ * Field indicating an ID for this policy. This is not necessarily the
+ * same as its index in the list returned by kbase_pm_list_policies().
+ * It is used purely for debugging.
+ */
+ enum kbase_pm_ca_policy_id id;
+};
+
+#endif /* _KBASE_PM_HWACCESS_DEFS_H_ */
diff --git a/midgard-0.r2p0/backend/gpu/mali_kbase_pm_demand.c b/midgard-0.r2p0/backend/gpu/mali_kbase_pm_demand.c
new file mode 100755
index 0000000..81322fd
--- /dev/null
+++ b/midgard-0.r2p0/backend/gpu/mali_kbase_pm_demand.c
@@ -0,0 +1,73 @@
+/*
+ *
+ * (C) COPYRIGHT 2010-2016 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained
+ * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
+ * Boston, MA 02110-1301, USA.
+ *
+ */
+
+
+
+
+
+/*
+ * A simple demand based power management policy
+ */
+
+#include <mali_kbase.h>
+#include <mali_kbase_pm.h>
+
+static u64 demand_get_core_mask(struct kbase_device *kbdev)
+{
+ u64 desired = kbdev->shader_needed_bitmap | kbdev->shader_inuse_bitmap;
+
+ if (0 == kbdev->pm.active_count)
+ return 0;
+
+ return desired;
+}
+
+static bool demand_get_core_active(struct kbase_device *kbdev)
+{
+ if (0 == kbdev->pm.active_count && !(kbdev->shader_needed_bitmap |
+ kbdev->shader_inuse_bitmap) && !kbdev->tiler_needed_cnt
+ && !kbdev->tiler_inuse_cnt)
+ return false;
+
+ return true;
+}
+
+static void demand_init(struct kbase_device *kbdev)
+{
+ CSTD_UNUSED(kbdev);
+}
+
+static void demand_term(struct kbase_device *kbdev)
+{
+ CSTD_UNUSED(kbdev);
+}
+
+/*
+ * The struct kbase_pm_policy structure for the demand power policy.
+ *
+ * This is the static structure that defines the demand power policy's callback
+ * and name.
+ */
+const struct kbase_pm_policy kbase_pm_demand_policy_ops = {
+ "demand", /* name */
+ demand_init, /* init */
+ demand_term, /* term */
+ demand_get_core_mask, /* get_core_mask */
+ demand_get_core_active, /* get_core_active */
+ 0u, /* flags */
+ KBASE_PM_POLICY_ID_DEMAND, /* id */
+};
+
+KBASE_EXPORT_TEST_API(kbase_pm_demand_policy_ops);
diff --git a/midgard-0.r2p0/backend/gpu/mali_kbase_pm_demand.h b/midgard-0.r2p0/backend/gpu/mali_kbase_pm_demand.h
new file mode 100755
index 0000000..c0c84b6
--- /dev/null
+++ b/midgard-0.r2p0/backend/gpu/mali_kbase_pm_demand.h
@@ -0,0 +1,64 @@
+/*
+ *
+ * (C) COPYRIGHT 2011-2015 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained
+ * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
+ * Boston, MA 02110-1301, USA.
+ *
+ */
+
+
+
+
+
+/*
+ * A simple demand based power management policy
+ */
+
+#ifndef MALI_KBASE_PM_DEMAND_H
+#define MALI_KBASE_PM_DEMAND_H
+
+/**
+ * DOC: Demand power management policy
+ *
+ * The demand power management policy has the following characteristics:
+ * - When KBase indicates that the GPU will be powered up, but we don't yet
+ * know which Job Chains are to be run:
+ * - The Shader Cores are not powered up
+ *
+ * - When KBase indicates that a set of Shader Cores are needed to submit the
+ * currently queued Job Chains:
+ * - Only those Shader Cores are powered up
+ *
+ * - When KBase indicates that the GPU need not be powered:
+ * - The Shader Cores are powered off, and the GPU itself is powered off too.
+ *
+ * Note:
+ * - KBase indicates the GPU will be powered up when it has a User Process that
+ * has just started to submit Job Chains.
+ *
+ * - KBase indicates the GPU need not be powered when all the Job Chains from
+ * User Processes have finished, and it is waiting for a User Process to
+ * submit some more Job Chains.
+ */
+
+/**
+ * struct kbasep_pm_policy_demand - Private structure for policy instance data
+ *
+ * @dummy: No state is needed, a dummy variable
+ *
+ * This contains data that is private to the demand power policy.
+ */
+struct kbasep_pm_policy_demand {
+ int dummy;
+};
+
+extern const struct kbase_pm_policy kbase_pm_demand_policy_ops;
+
+#endif /* MALI_KBASE_PM_DEMAND_H */
diff --git a/midgard-0.r2p0/backend/gpu/mali_kbase_pm_driver.c b/midgard-0.r2p0/backend/gpu/mali_kbase_pm_driver.c
new file mode 100755
index 0000000..a162ff8
--- /dev/null
+++ b/midgard-0.r2p0/backend/gpu/mali_kbase_pm_driver.c
@@ -0,0 +1,1548 @@
+/*
+ *
+ * (C) COPYRIGHT 2010-2016 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained
+ * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
+ * Boston, MA 02110-1301, USA.
+ *
+ */
+
+
+
+
+
+/*
+ * Base kernel Power Management hardware control
+ */
+
+#include <mali_kbase.h>
+#include <mali_kbase_config_defaults.h>
+#include <mali_midg_regmap.h>
+#if defined(CONFIG_MALI_GATOR_SUPPORT)
+#include <mali_kbase_gator.h>
+#endif
+#include <mali_kbase_tlstream.h>
+#include <mali_kbase_pm.h>
+#include <mali_kbase_config_defaults.h>
+#include <mali_kbase_smc.h>
+#include <mali_kbase_hwaccess_jm.h>
+#include <backend/gpu/mali_kbase_cache_policy_backend.h>
+#include <backend/gpu/mali_kbase_device_internal.h>
+#include <backend/gpu/mali_kbase_irq_internal.h>
+#include <backend/gpu/mali_kbase_pm_internal.h>
+
+#include <linux/of.h>
+
+#if MALI_MOCK_TEST
+#define MOCKABLE(function) function##_original
+#else
+#define MOCKABLE(function) function
+#endif /* MALI_MOCK_TEST */
+
+/* Special value to indicate that the JM_CONFIG reg isn't currently used. */
+#define KBASE_JM_CONFIG_UNUSED (1<<31)
+
+/**
+ * enum kbasep_pm_action - Actions that can be performed on a core.
+ *
+ * This enumeration is private to the file. Its values are set to allow the
+ * core_type_to_reg() function, which decodes this enumeration, to be simpler
+ * and more efficient.
+ *
+ * @ACTION_PRESENT: The cores that are present
+ * @ACTION_READY: The cores that are ready
+ * @ACTION_PWRON: Power on the cores specified
+ * @ACTION_PWROFF: Power off the cores specified
+ * @ACTION_PWRTRANS: The cores that are transitioning
+ * @ACTION_PWRACTIVE: The cores that are active
+ */
+enum kbasep_pm_action {
+ ACTION_PRESENT = 0,
+ ACTION_READY = (SHADER_READY_LO - SHADER_PRESENT_LO),
+ ACTION_PWRON = (SHADER_PWRON_LO - SHADER_PRESENT_LO),
+ ACTION_PWROFF = (SHADER_PWROFF_LO - SHADER_PRESENT_LO),
+ ACTION_PWRTRANS = (SHADER_PWRTRANS_LO - SHADER_PRESENT_LO),
+ ACTION_PWRACTIVE = (SHADER_PWRACTIVE_LO - SHADER_PRESENT_LO)
+};
+
+static u64 kbase_pm_get_state(
+ struct kbase_device *kbdev,
+ enum kbase_pm_core_type core_type,
+ enum kbasep_pm_action action);
+
+/**
+ * core_type_to_reg - Decode a core type and action to a register.
+ *
+ * Given a core type (defined by kbase_pm_core_type) and an action (defined
+ * by kbasep_pm_action) this function will return the register offset that
+ * will perform the action on the core type. The register returned is the _LO
+ * register and an offset must be applied to use the _HI register.
+ *
+ * @core_type: The type of core
+ * @action: The type of action
+ *
+ * Return: The register offset of the _LO register that performs an action of
+ * type @action on a core of type @core_type.
+ */
+static u32 core_type_to_reg(enum kbase_pm_core_type core_type,
+ enum kbasep_pm_action action)
+{
+ return (u32)core_type + (u32)action;
+}
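+
+/*
+ * Usage sketch (illustrative only): the offset returned above addresses the
+ * _LO register; the matching _HI register sits 4 bytes higher, which is how
+ * kbase_pm_get_state() below assembles a full 64-bit core mask:
+ *
+ *	u32 reg = core_type_to_reg(KBASE_PM_CORE_SHADER, ACTION_READY);
+ *	u32 lo = kbase_reg_read(kbdev, GPU_CONTROL_REG(reg), NULL);
+ *	u32 hi = kbase_reg_read(kbdev, GPU_CONTROL_REG(reg + 4), NULL);
+ *	u64 ready = (((u64) hi) << 32) | ((u64) lo);
+ */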
+
+#ifdef CONFIG_ARM64
+static void mali_cci_flush_l2(struct kbase_device *kbdev)
+{
+ const u32 mask = CLEAN_CACHES_COMPLETED | RESET_COMPLETED;
+ u32 loops = KBASE_CLEAN_CACHE_MAX_LOOPS;
+ u32 raw;
+
+ /*
+ * Note that we don't take the cache flush mutex here since
+ * we expect to be the last user of the L2: all other L2 users
+ * would have dropped their references in order to initiate L2
+ * power down, and L2 power down is the only valid place for
+ * this to be called from.
+ */
+
+ kbase_reg_write(kbdev,
+ GPU_CONTROL_REG(GPU_COMMAND),
+ GPU_COMMAND_CLEAN_INV_CACHES,
+ NULL);
+
+ raw = kbase_reg_read(kbdev,
+ GPU_CONTROL_REG(GPU_IRQ_RAWSTAT),
+ NULL);
+
+ /* Wait for cache flush to complete before continuing, exit on
+ * gpu resets or loop expiry. */
+ while (((raw & mask) == 0) && --loops) {
+ raw = kbase_reg_read(kbdev,
+ GPU_CONTROL_REG(GPU_IRQ_RAWSTAT),
+ NULL);
+ }
+}
+#endif
+
+/**
+ * kbase_pm_invoke - Invokes an action on a core set
+ *
+ * This function performs the action given by @action on a set of cores of a
+ * type given by @core_type. It is a static function used by
+ * kbase_pm_transition_core_type()
+ *
+ * @kbdev: The kbase device structure of the device
+ * @core_type: The type of core that the action should be performed on
+ * @cores: A bit mask of cores to perform the action on (low 32 bits)
+ * @action: The action to perform on the cores
+ */
+static void kbase_pm_invoke(struct kbase_device *kbdev,
+ enum kbase_pm_core_type core_type,
+ u64 cores,
+ enum kbasep_pm_action action)
+{
+ u32 reg;
+ u32 lo = cores & 0xFFFFFFFF;
+ u32 hi = (cores >> 32) & 0xFFFFFFFF;
+
+ lockdep_assert_held(&kbdev->hwaccess_lock);
+
+ reg = core_type_to_reg(core_type, action);
+
+ KBASE_DEBUG_ASSERT(reg);
+#if defined(CONFIG_MALI_GATOR_SUPPORT)
+ if (cores) {
+ if (action == ACTION_PWRON)
+ kbase_trace_mali_pm_power_on(core_type, cores);
+ else if (action == ACTION_PWROFF)
+ kbase_trace_mali_pm_power_off(core_type, cores);
+ }
+#endif
+
+ if (cores) {
+ u64 state = kbase_pm_get_state(kbdev, core_type, ACTION_READY);
+
+ if (action == ACTION_PWRON)
+ state |= cores;
+ else if (action == ACTION_PWROFF)
+ state &= ~cores;
+ kbase_tlstream_aux_pm_state(core_type, state);
+ }
+
+ /* Tracing */
+ if (cores) {
+ if (action == ACTION_PWRON)
+ switch (core_type) {
+ case KBASE_PM_CORE_SHADER:
+ KBASE_TRACE_ADD(kbdev, PM_PWRON, NULL, NULL, 0u,
+ lo);
+ break;
+ case KBASE_PM_CORE_TILER:
+ KBASE_TRACE_ADD(kbdev, PM_PWRON_TILER, NULL,
+ NULL, 0u, lo);
+ break;
+ case KBASE_PM_CORE_L2:
+ KBASE_TRACE_ADD(kbdev, PM_PWRON_L2, NULL, NULL,
+ 0u, lo);
+ break;
+ default:
+ break;
+ }
+ else if (action == ACTION_PWROFF)
+ switch (core_type) {
+ case KBASE_PM_CORE_SHADER:
+ KBASE_TRACE_ADD(kbdev, PM_PWROFF, NULL, NULL,
+ 0u, lo);
+ break;
+ case KBASE_PM_CORE_TILER:
+ KBASE_TRACE_ADD(kbdev, PM_PWROFF_TILER, NULL,
+ NULL, 0u, lo);
+ break;
+ case KBASE_PM_CORE_L2:
+ KBASE_TRACE_ADD(kbdev, PM_PWROFF_L2, NULL, NULL,
+ 0u, lo);
+ /* disable snoops before L2 is turned off */
+ kbase_pm_cache_snoop_disable(kbdev);
+ break;
+ default:
+ break;
+ }
+ }
+
+ if (lo != 0)
+ kbase_reg_write(kbdev, GPU_CONTROL_REG(reg), lo, NULL);
+
+ if (hi != 0)
+ kbase_reg_write(kbdev, GPU_CONTROL_REG(reg + 4), hi, NULL);
+}
+
+/**
+ * kbase_pm_get_state - Get information about a core set
+ *
+ * This function gets information (chosen by @action) about a set of cores of
+ * a type given by @core_type. It is a static function used by
+ * kbase_pm_get_present_cores(), kbase_pm_get_active_cores(),
+ * kbase_pm_get_trans_cores() and kbase_pm_get_ready_cores().
+ *
+ * @kbdev: The kbase device structure of the device
+ * @core_type: The type of core that should be queried
+ * @action: The property of the cores to query
+ *
+ * Return: A bit mask specifying the state of the cores
+ */
+static u64 kbase_pm_get_state(struct kbase_device *kbdev,
+ enum kbase_pm_core_type core_type,
+ enum kbasep_pm_action action)
+{
+ u32 reg;
+ u32 lo, hi;
+
+ reg = core_type_to_reg(core_type, action);
+
+ KBASE_DEBUG_ASSERT(reg);
+
+ lo = kbase_reg_read(kbdev, GPU_CONTROL_REG(reg), NULL);
+ hi = kbase_reg_read(kbdev, GPU_CONTROL_REG(reg + 4), NULL);
+
+ return (((u64) hi) << 32) | ((u64) lo);
+}
+
+void kbasep_pm_read_present_cores(struct kbase_device *kbdev)
+{
+ kbdev->shader_inuse_bitmap = 0;
+ kbdev->shader_needed_bitmap = 0;
+ kbdev->shader_available_bitmap = 0;
+ kbdev->tiler_available_bitmap = 0;
+ kbdev->l2_users_count = 0;
+ kbdev->l2_available_bitmap = 0;
+ kbdev->tiler_needed_cnt = 0;
+ kbdev->tiler_inuse_cnt = 0;
+
+ memset(kbdev->shader_needed_cnt, 0, sizeof(kbdev->shader_needed_cnt));
+}
+
+KBASE_EXPORT_TEST_API(kbasep_pm_read_present_cores);
+
+/**
+ * kbase_pm_get_present_cores - Get the cores that are present
+ *
+ * @kbdev: Kbase device
+ * @type: The type of cores to query
+ *
+ * Return: Bitmask of the cores that are present
+ */
+u64 kbase_pm_get_present_cores(struct kbase_device *kbdev,
+ enum kbase_pm_core_type type)
+{
+ KBASE_DEBUG_ASSERT(kbdev != NULL);
+
+ switch (type) {
+ case KBASE_PM_CORE_L2:
+ return kbdev->gpu_props.props.raw_props.l2_present;
+ case KBASE_PM_CORE_SHADER:
+ return kbdev->gpu_props.props.raw_props.shader_present;
+ case KBASE_PM_CORE_TILER:
+ return kbdev->gpu_props.props.raw_props.tiler_present;
+ }
+ KBASE_DEBUG_ASSERT(0);
+ return 0;
+}
+
+KBASE_EXPORT_TEST_API(kbase_pm_get_present_cores);
+
+/**
+ * kbase_pm_get_active_cores - Get the cores that are "active"
+ * (busy processing work)
+ *
+ * @kbdev: Kbase device
+ * @type: The type of cores to query
+ *
+ * Return: Bitmask of cores that are active
+ */
+u64 kbase_pm_get_active_cores(struct kbase_device *kbdev,
+ enum kbase_pm_core_type type)
+{
+ return kbase_pm_get_state(kbdev, type, ACTION_PWRACTIVE);
+}
+
+KBASE_EXPORT_TEST_API(kbase_pm_get_active_cores);
+
+/**
+ * kbase_pm_get_trans_cores - Get the cores that are transitioning between
+ * power states
+ *
+ * @kbdev: Kbase device
+ * @type: The type of cores to query
+ *
+ * Return: Bitmask of cores that are transitioning
+ */
+u64 kbase_pm_get_trans_cores(struct kbase_device *kbdev,
+ enum kbase_pm_core_type type)
+{
+ return kbase_pm_get_state(kbdev, type, ACTION_PWRTRANS);
+}
+
+KBASE_EXPORT_TEST_API(kbase_pm_get_trans_cores);
+
+/**
+ * kbase_pm_get_ready_cores - Get the cores that are powered on
+ *
+ * @kbdev: Kbase device
+ * @type: The type of cores to query
+ *
+ * Return: Bitmask of cores that are ready (powered on)
+ */
+u64 kbase_pm_get_ready_cores(struct kbase_device *kbdev,
+ enum kbase_pm_core_type type)
+{
+ u64 result;
+
+ result = kbase_pm_get_state(kbdev, type, ACTION_READY);
+
+ switch (type) {
+ case KBASE_PM_CORE_SHADER:
+ KBASE_TRACE_ADD(kbdev, PM_CORES_POWERED, NULL, NULL, 0u,
+ (u32) result);
+ break;
+ case KBASE_PM_CORE_TILER:
+ KBASE_TRACE_ADD(kbdev, PM_CORES_POWERED_TILER, NULL, NULL, 0u,
+ (u32) result);
+ break;
+ case KBASE_PM_CORE_L2:
+ KBASE_TRACE_ADD(kbdev, PM_CORES_POWERED_L2, NULL, NULL, 0u,
+ (u32) result);
+ break;
+ default:
+ break;
+ }
+
+ return result;
+}
+
+KBASE_EXPORT_TEST_API(kbase_pm_get_ready_cores);
+
+/**
+ * kbase_pm_transition_core_type - Perform power transitions for a particular
+ * core type.
+ *
+ * This function will perform any available power transitions to make the actual
+ * hardware state closer to the desired state. If a core is currently
+ * transitioning then changes to the power state of that core cannot be made
+ * until the transition has finished. Cores which are not present in the
+ * hardware are ignored if they are specified in the desired_state bitmask,
+ * however the return value will always be false in this case.
+ *
+ * @kbdev: The kbase device
+ * @type: The core type to perform transitions for
+ * @desired_state: A bit mask of the desired state of the cores
+ * @in_use: A bit mask of the cores that are currently running
+ * jobs. These cores have to be kept powered up because
+ * there are jobs running (or about to run) on them.
+ * @available: Receives a bit mask of the cores that the job
+ * scheduler can use to submit jobs to. May be NULL if
+ * this is not needed.
+ * @powering_on: Bit mask to update with cores that are
+ * transitioning to a power-on state.
+ *
+ * Return: true if the desired state has been reached, false otherwise
+ */
+static bool kbase_pm_transition_core_type(struct kbase_device *kbdev,
+ enum kbase_pm_core_type type,
+ u64 desired_state,
+ u64 in_use,
+ u64 * const available,
+ u64 *powering_on)
+{
+ u64 present;
+ u64 ready;
+ u64 trans;
+ u64 powerup;
+ u64 powerdown;
+ u64 powering_on_trans;
+ u64 desired_state_in_use;
+
+ lockdep_assert_held(&kbdev->hwaccess_lock);
+
+ /* Get current state */
+ present = kbase_pm_get_present_cores(kbdev, type);
+ trans = kbase_pm_get_trans_cores(kbdev, type);
+ ready = kbase_pm_get_ready_cores(kbdev, type);
+ /* mask off ready from trans in case transitions finished between the
+ * register reads */
+ trans &= ~ready;
+
+ powering_on_trans = trans & *powering_on;
+ *powering_on = powering_on_trans;
+
+ if (available != NULL)
+ *available = (ready | powering_on_trans) & desired_state;
+
+ /* Update desired state to include the in-use cores. These have to be
+ * kept powered up because there are jobs running or about to run on
+ * these cores
+ */
+ desired_state_in_use = desired_state | in_use;
+
+ /* Update state of whether l2 caches are powered */
+ if (type == KBASE_PM_CORE_L2) {
+ if ((ready == present) && (desired_state_in_use == ready) &&
+ (trans == 0)) {
+ /* All are ready, none will be turned off, and none are
+ * transitioning */
+ kbdev->pm.backend.l2_powered = 1;
+ /*
+ * Ensure snoops are enabled after L2 is powered up,
+ * note that kbase keeps track of the snoop state, so
+ * safe to repeatedly call.
+ */
+ kbase_pm_cache_snoop_enable(kbdev);
+ if (kbdev->l2_users_count > 0) {
+ /* Notify any registered l2 cache users
+ * (optimized out when no users waiting) */
+ wake_up(&kbdev->pm.backend.l2_powered_wait);
+ }
+ } else
+ kbdev->pm.backend.l2_powered = 0;
+ }
+
+ if (desired_state == ready && (trans == 0))
+ return true;
+
+ /* Restrict the cores to those that are actually present */
+ powerup = desired_state_in_use & present;
+ powerdown = (~desired_state_in_use) & present;
+
+ /* Restrict to cores that are not already in the desired state */
+ powerup &= ~ready;
+ powerdown &= ready;
+
+ /* Don't transition any cores that are already transitioning, except for
+ * Mali cores that support the following case:
+ *
+ * If the SHADER_PWRON or TILER_PWRON registers are written to turn on
+ * a core that is currently transitioning to power off, then this is
+ * remembered and the shader core is automatically powered up again once
+ * the original transition completes. Once the automatic power on is
+ * complete any job scheduled on the shader core should start.
+ */
+ powerdown &= ~trans;
+
+ if (kbase_hw_has_feature(kbdev,
+ BASE_HW_FEATURE_PWRON_DURING_PWROFF_TRANS))
+ if (KBASE_PM_CORE_SHADER == type || KBASE_PM_CORE_TILER == type)
+ trans = powering_on_trans; /* for exception cases, only
+ * mask off cores in power on
+ * transitions */
+
+ powerup &= ~trans;
+
+ /* Perform transitions if any */
+ kbase_pm_invoke(kbdev, type, powerup, ACTION_PWRON);
+ kbase_pm_invoke(kbdev, type, powerdown, ACTION_PWROFF);
+
+ /* Recalculate cores transitioning on, and re-evaluate our state */
+ powering_on_trans |= powerup;
+ *powering_on = powering_on_trans;
+ if (available != NULL)
+ *available = (ready | powering_on_trans) & desired_state;
+
+ return false;
+}
+
+KBASE_EXPORT_TEST_API(kbase_pm_transition_core_type);
+
+/**
+ * get_desired_cache_status - Determine which caches should be on for a
+ * particular core state
+ *
+ * This function takes a bit mask of the present caches and the cores (or
+ * caches) that are attached to the caches that will be powered. It then
+ * computes which caches should be turned on to allow the cores requested to be
+ * powered up.
+ *
+ * @present: The bit mask of present caches
+ * @cores_powered: A bit mask of cores (or L2 caches) that are desired to
+ * be powered
+ * @tilers_powered: The bit mask of tilers that are desired to be powered
+ *
+ * Return: A bit mask of the caches that should be turned on
+ */
+static u64 get_desired_cache_status(u64 present, u64 cores_powered,
+ u64 tilers_powered)
+{
+ u64 desired = 0;
+
+ while (present) {
+ /* Find out which is the highest set bit */
+ u64 bit = fls64(present) - 1;
+ u64 bit_mask = 1ull << bit;
+ /* Create a mask which has all bits from 'bit' upwards set */
+
+ u64 mask = ~(bit_mask - 1);
+
+ /* If there are any cores powered at this bit or above (that
+ * haven't previously been processed) then we need this cache on
+ */
+ if (cores_powered & mask)
+ desired |= bit_mask;
+
+ /* Remove bits from cores_powered and present */
+ cores_powered &= ~mask;
+ present &= ~bit_mask;
+ }
+
+ /* Power up the required L2(s) for the tiler */
+ if (tilers_powered)
+ desired |= 1;
+
+ return desired;
+}
+
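+/*
+ * Worked example (illustrative only, not part of the original driver): with
+ * present = 0x11 (L2 slices at bits 0 and 4), cores_powered = 0x60 (cores at
+ * bits 5 and 6) and tilers_powered = 0, the loop first looks at bit 4, sees
+ * powered cores at or above it, and selects that slice; bit 0 then covers
+ * only cores 0-3, none of which are powered, so it is left off. The result
+ * is 0x10. A non-zero tilers_powered would additionally set bit 0.
+ */
+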
+KBASE_EXPORT_TEST_API(get_desired_cache_status);
+
+bool
+MOCKABLE(kbase_pm_check_transitions_nolock) (struct kbase_device *kbdev)
+{
+ bool cores_are_available = false;
+ bool in_desired_state = true;
+ u64 desired_l2_state;
+ u64 cores_powered;
+ u64 tilers_powered;
+ u64 tiler_available_bitmap;
+ u64 shader_available_bitmap;
+ u64 shader_ready_bitmap;
+ u64 shader_transitioning_bitmap;
+ u64 l2_available_bitmap;
+ u64 prev_l2_available_bitmap;
+
+ KBASE_DEBUG_ASSERT(NULL != kbdev);
+ lockdep_assert_held(&kbdev->hwaccess_lock);
+
+ spin_lock(&kbdev->pm.backend.gpu_powered_lock);
+ if (kbdev->pm.backend.gpu_powered == false) {
+ spin_unlock(&kbdev->pm.backend.gpu_powered_lock);
+ if (kbdev->pm.backend.desired_shader_state == 0 &&
+ kbdev->pm.backend.desired_tiler_state == 0)
+ return true;
+ return false;
+ }
+
+ /* Trace that a change-state is being requested, and that it took
+ * (effectively) no time to start it. This is useful for counting how
+ * many state changes occurred, in a way that's backwards-compatible
+ * with processing the trace data */
+ kbase_timeline_pm_send_event(kbdev,
+ KBASE_TIMELINE_PM_EVENT_CHANGE_GPU_STATE);
+ kbase_timeline_pm_handle_event(kbdev,
+ KBASE_TIMELINE_PM_EVENT_CHANGE_GPU_STATE);
+
+ /* If any cores are already powered then, we must keep the caches on */
+ cores_powered = kbase_pm_get_ready_cores(kbdev, KBASE_PM_CORE_SHADER);
+
+ cores_powered |= kbdev->pm.backend.desired_shader_state;
+
+ /* Work out which tilers want to be powered */
+ tilers_powered = kbase_pm_get_ready_cores(kbdev, KBASE_PM_CORE_TILER);
+ tilers_powered |= kbdev->pm.backend.desired_tiler_state;
+
+ /* If there are l2 cache users registered, keep all l2s powered even if
+ * all other cores are off. */
+ if (kbdev->l2_users_count > 0)
+ cores_powered |= kbdev->gpu_props.props.raw_props.l2_present;
+
+ desired_l2_state = get_desired_cache_status(
+ kbdev->gpu_props.props.raw_props.l2_present,
+ cores_powered, tilers_powered);
+
+ /* If any l2 cache is on, then enable l2 #0, for use by job manager */
+ if (0 != desired_l2_state)
+ desired_l2_state |= 1;
+
+ prev_l2_available_bitmap = kbdev->l2_available_bitmap;
+ in_desired_state &= kbase_pm_transition_core_type(kbdev,
+ KBASE_PM_CORE_L2, desired_l2_state, 0,
+ &l2_available_bitmap,
+ &kbdev->pm.backend.powering_on_l2_state);
+
+ if (kbdev->l2_available_bitmap != l2_available_bitmap)
+ KBASE_TIMELINE_POWER_L2(kbdev, l2_available_bitmap);
+
+ kbdev->l2_available_bitmap = l2_available_bitmap;
+
+ if (in_desired_state) {
+ in_desired_state &= kbase_pm_transition_core_type(kbdev,
+ KBASE_PM_CORE_TILER,
+ kbdev->pm.backend.desired_tiler_state,
+ 0, &tiler_available_bitmap,
+ &kbdev->pm.backend.powering_on_tiler_state);
+ in_desired_state &= kbase_pm_transition_core_type(kbdev,
+ KBASE_PM_CORE_SHADER,
+ kbdev->pm.backend.desired_shader_state,
+ kbdev->shader_inuse_bitmap,
+ &shader_available_bitmap,
+ &kbdev->pm.backend.powering_on_shader_state);
+
+ if (kbdev->shader_available_bitmap != shader_available_bitmap) {
+ KBASE_TRACE_ADD(kbdev, PM_CORES_CHANGE_AVAILABLE, NULL,
+ NULL, 0u,
+ (u32) shader_available_bitmap);
+ KBASE_TIMELINE_POWER_SHADER(kbdev,
+ shader_available_bitmap);
+ }
+
+ kbdev->shader_available_bitmap = shader_available_bitmap;
+
+ if (kbdev->tiler_available_bitmap != tiler_available_bitmap) {
+ KBASE_TRACE_ADD(kbdev, PM_CORES_CHANGE_AVAILABLE_TILER,
+ NULL, NULL, 0u,
+ (u32) tiler_available_bitmap);
+ KBASE_TIMELINE_POWER_TILER(kbdev,
+ tiler_available_bitmap);
+ }
+
+ kbdev->tiler_available_bitmap = tiler_available_bitmap;
+
+ } else if ((l2_available_bitmap &
+ kbdev->gpu_props.props.raw_props.tiler_present) !=
+ kbdev->gpu_props.props.raw_props.tiler_present) {
+ tiler_available_bitmap = 0;
+
+ if (kbdev->tiler_available_bitmap != tiler_available_bitmap)
+ KBASE_TIMELINE_POWER_TILER(kbdev,
+ tiler_available_bitmap);
+
+ kbdev->tiler_available_bitmap = tiler_available_bitmap;
+ }
+
+ /* State updated for slow-path waiters */
+ kbdev->pm.backend.gpu_in_desired_state = in_desired_state;
+
+ shader_ready_bitmap = kbase_pm_get_ready_cores(kbdev,
+ KBASE_PM_CORE_SHADER);
+ shader_transitioning_bitmap = kbase_pm_get_trans_cores(kbdev,
+ KBASE_PM_CORE_SHADER);
+
+ /* Determine whether the cores are now available (even if the set of
+ * available cores is empty). Note that they can be available even if
+ * we've not finished transitioning to the desired state */
+ if ((kbdev->shader_available_bitmap &
+ kbdev->pm.backend.desired_shader_state)
+ == kbdev->pm.backend.desired_shader_state &&
+ (kbdev->tiler_available_bitmap &
+ kbdev->pm.backend.desired_tiler_state)
+ == kbdev->pm.backend.desired_tiler_state) {
+ cores_are_available = true;
+
+ KBASE_TRACE_ADD(kbdev, PM_CORES_AVAILABLE, NULL, NULL, 0u,
+ (u32)(kbdev->shader_available_bitmap &
+ kbdev->pm.backend.desired_shader_state));
+ KBASE_TRACE_ADD(kbdev, PM_CORES_AVAILABLE_TILER, NULL, NULL, 0u,
+ (u32)(kbdev->tiler_available_bitmap &
+ kbdev->pm.backend.desired_tiler_state));
+
+ /* Log timelining information about handling events that power
+ * up cores, to match up with either immediate submission (because
+ * cores are already available) or the PM IRQ */
+ if (!in_desired_state)
+ kbase_timeline_pm_send_event(kbdev,
+ KBASE_TIMELINE_PM_EVENT_GPU_STATE_CHANGED);
+ }
+
+ if (in_desired_state) {
+ KBASE_DEBUG_ASSERT(cores_are_available);
+
+#if defined(CONFIG_MALI_GATOR_SUPPORT)
+ kbase_trace_mali_pm_status(KBASE_PM_CORE_L2,
+ kbase_pm_get_ready_cores(kbdev,
+ KBASE_PM_CORE_L2));
+ kbase_trace_mali_pm_status(KBASE_PM_CORE_SHADER,
+ kbase_pm_get_ready_cores(kbdev,
+ KBASE_PM_CORE_SHADER));
+ kbase_trace_mali_pm_status(KBASE_PM_CORE_TILER,
+ kbase_pm_get_ready_cores(kbdev,
+ KBASE_PM_CORE_TILER));
+#endif
+
+ kbase_tlstream_aux_pm_state(
+ KBASE_PM_CORE_L2,
+ kbase_pm_get_ready_cores(
+ kbdev, KBASE_PM_CORE_L2));
+ kbase_tlstream_aux_pm_state(
+ KBASE_PM_CORE_SHADER,
+ kbase_pm_get_ready_cores(
+ kbdev, KBASE_PM_CORE_SHADER));
+ kbase_tlstream_aux_pm_state(
+ KBASE_PM_CORE_TILER,
+ kbase_pm_get_ready_cores(
+ kbdev,
+ KBASE_PM_CORE_TILER));
+
+ KBASE_TRACE_ADD(kbdev, PM_DESIRED_REACHED, NULL, NULL,
+ kbdev->pm.backend.gpu_in_desired_state,
+ (u32)kbdev->pm.backend.desired_shader_state);
+ KBASE_TRACE_ADD(kbdev, PM_DESIRED_REACHED_TILER, NULL, NULL, 0u,
+ (u32)kbdev->pm.backend.desired_tiler_state);
+
+ /* Log timelining information for synchronous waiters */
+ kbase_timeline_pm_send_event(kbdev,
+ KBASE_TIMELINE_PM_EVENT_GPU_STATE_CHANGED);
+ /* Wake slow-path waiters. Job scheduler does not use this. */
+ KBASE_TRACE_ADD(kbdev, PM_WAKE_WAITERS, NULL, NULL, 0u, 0);
+
+ wake_up(&kbdev->pm.backend.gpu_in_desired_state_wait);
+ }
+
+ spin_unlock(&kbdev->pm.backend.gpu_powered_lock);
+
+ /* kbase_pm_ca_update_core_status can cause one-level recursion into
+ * this function, so it must only be called once all changes to kbdev
+ * have been committed, and after the gpu_powered_lock has been
+ * dropped. */
+ if (kbdev->shader_ready_bitmap != shader_ready_bitmap ||
+ kbdev->shader_transitioning_bitmap != shader_transitioning_bitmap) {
+ kbdev->shader_ready_bitmap = shader_ready_bitmap;
+ kbdev->shader_transitioning_bitmap =
+ shader_transitioning_bitmap;
+
+ kbase_pm_ca_update_core_status(kbdev, shader_ready_bitmap,
+ shader_transitioning_bitmap);
+ }
+
+ /* The core availability policy is not allowed to keep core group 0
+ * turned off (unless it was changing the l2 power state) */
+ if (!((shader_ready_bitmap | shader_transitioning_bitmap) &
+ kbdev->gpu_props.props.coherency_info.group[0].core_mask) &&
+ (prev_l2_available_bitmap == desired_l2_state) &&
+ !(kbase_pm_ca_get_core_mask(kbdev) &
+ kbdev->gpu_props.props.coherency_info.group[0].core_mask))
+ BUG();
+
+ /* The core availability policy is allowed to keep core group 1 off,
+ * but all jobs specifically targeting CG1 must fail */
+ if (!((shader_ready_bitmap | shader_transitioning_bitmap) &
+ kbdev->gpu_props.props.coherency_info.group[1].core_mask) &&
+ !(kbase_pm_ca_get_core_mask(kbdev) &
+ kbdev->gpu_props.props.coherency_info.group[1].core_mask))
+ kbdev->pm.backend.cg1_disabled = true;
+ else
+ kbdev->pm.backend.cg1_disabled = false;
+
+ return cores_are_available;
+}
+KBASE_EXPORT_TEST_API(kbase_pm_check_transitions_nolock);
+
+/* Timeout for kbase_pm_check_transitions_sync when wait_event_killable has
+ * aborted due to a fatal signal. If the time spent waiting has exceeded this
+ * threshold then there is most likely a hardware issue. */
+#define PM_TIMEOUT (5*HZ) /* 5s */
+
+void kbase_pm_check_transitions_sync(struct kbase_device *kbdev)
+{
+ unsigned long flags;
+ unsigned long timeout;
+ bool cores_are_available;
+ int ret;
+
+ /* Force the transition to be checked and reported - the cores may be
+ * 'available' (for job submission) but not fully powered up. */
+ spin_lock_irqsave(&kbdev->hwaccess_lock, flags);
+
+ cores_are_available = kbase_pm_check_transitions_nolock(kbdev);
+
+ /* Don't need 'cores_are_available', because we don't return anything */
+ CSTD_UNUSED(cores_are_available);
+ spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);
+
+ timeout = jiffies + PM_TIMEOUT;
+
+ /* Wait for cores */
+ ret = wait_event_killable(kbdev->pm.backend.gpu_in_desired_state_wait,
+ kbdev->pm.backend.gpu_in_desired_state);
+
+ if (ret < 0 && time_after(jiffies, timeout)) {
+ dev_err(kbdev->dev, "Power transition timed out unexpectedly\n");
+ dev_err(kbdev->dev, "Desired state :\n");
+ dev_err(kbdev->dev, "\tShader=%016llx\n",
+ kbdev->pm.backend.desired_shader_state);
+ dev_err(kbdev->dev, "\tTiler =%016llx\n",
+ kbdev->pm.backend.desired_tiler_state);
+ dev_err(kbdev->dev, "Current state :\n");
+ dev_err(kbdev->dev, "\tShader=%08x%08x\n",
+ kbase_reg_read(kbdev,
+ GPU_CONTROL_REG(SHADER_READY_HI), NULL),
+ kbase_reg_read(kbdev,
+ GPU_CONTROL_REG(SHADER_READY_LO),
+ NULL));
+ dev_err(kbdev->dev, "\tTiler =%08x%08x\n",
+ kbase_reg_read(kbdev,
+ GPU_CONTROL_REG(TILER_READY_HI), NULL),
+ kbase_reg_read(kbdev,
+ GPU_CONTROL_REG(TILER_READY_LO), NULL));
+ dev_err(kbdev->dev, "\tL2 =%08x%08x\n",
+ kbase_reg_read(kbdev,
+ GPU_CONTROL_REG(L2_READY_HI), NULL),
+ kbase_reg_read(kbdev,
+ GPU_CONTROL_REG(L2_READY_LO), NULL));
+ dev_err(kbdev->dev, "Cores transitioning :\n");
+ dev_err(kbdev->dev, "\tShader=%08x%08x\n",
+ kbase_reg_read(kbdev, GPU_CONTROL_REG(
+ SHADER_PWRTRANS_HI), NULL),
+ kbase_reg_read(kbdev, GPU_CONTROL_REG(
+ SHADER_PWRTRANS_LO), NULL));
+ dev_err(kbdev->dev, "\tTiler =%08x%08x\n",
+ kbase_reg_read(kbdev, GPU_CONTROL_REG(
+ TILER_PWRTRANS_HI), NULL),
+ kbase_reg_read(kbdev, GPU_CONTROL_REG(
+ TILER_PWRTRANS_LO), NULL));
+ dev_err(kbdev->dev, "\tL2 =%08x%08x\n",
+ kbase_reg_read(kbdev, GPU_CONTROL_REG(
+ L2_PWRTRANS_HI), NULL),
+ kbase_reg_read(kbdev, GPU_CONTROL_REG(
+ L2_PWRTRANS_LO), NULL));
+#if KBASE_GPU_RESET_EN
+ dev_err(kbdev->dev, "Sending reset to GPU - all running jobs will be lost\n");
+ if (kbase_prepare_to_reset_gpu(kbdev))
+ kbase_reset_gpu(kbdev);
+#endif /* KBASE_GPU_RESET_EN */
+ } else {
+ /* Log timelining information that a change in state has
+ * completed */
+ kbase_timeline_pm_handle_event(kbdev,
+ KBASE_TIMELINE_PM_EVENT_GPU_STATE_CHANGED);
+ }
+}
+KBASE_EXPORT_TEST_API(kbase_pm_check_transitions_sync);
+
+void kbase_pm_enable_interrupts(struct kbase_device *kbdev)
+{
+ unsigned long flags;
+
+ KBASE_DEBUG_ASSERT(NULL != kbdev);
+ /*
+ * Clear all interrupts,
+ * and unmask them all.
+ */
+ spin_lock_irqsave(&kbdev->hwaccess_lock, flags);
+ kbase_reg_write(kbdev, GPU_CONTROL_REG(GPU_IRQ_CLEAR), GPU_IRQ_REG_ALL,
+ NULL);
+ kbase_reg_write(kbdev, GPU_CONTROL_REG(GPU_IRQ_MASK), GPU_IRQ_REG_ALL,
+ NULL);
+ spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);
+
+ kbase_reg_write(kbdev, JOB_CONTROL_REG(JOB_IRQ_CLEAR), 0xFFFFFFFF,
+ NULL);
+ kbase_reg_write(kbdev, JOB_CONTROL_REG(JOB_IRQ_MASK), 0xFFFFFFFF, NULL);
+
+ kbase_reg_write(kbdev, MMU_REG(MMU_IRQ_CLEAR), 0xFFFFFFFF, NULL);
+ kbase_reg_write(kbdev, MMU_REG(MMU_IRQ_MASK), 0xFFFFFFFF, NULL);
+}
+
+KBASE_EXPORT_TEST_API(kbase_pm_enable_interrupts);
+
+void kbase_pm_disable_interrupts_nolock(struct kbase_device *kbdev)
+{
+ KBASE_DEBUG_ASSERT(NULL != kbdev);
+ /*
+ * Mask all interrupts,
+ * and clear them all.
+ */
+ lockdep_assert_held(&kbdev->hwaccess_lock);
+
+ kbase_reg_write(kbdev, GPU_CONTROL_REG(GPU_IRQ_MASK), 0, NULL);
+ kbase_reg_write(kbdev, GPU_CONTROL_REG(GPU_IRQ_CLEAR), GPU_IRQ_REG_ALL,
+ NULL);
+ kbase_reg_write(kbdev, JOB_CONTROL_REG(JOB_IRQ_MASK), 0, NULL);
+ kbase_reg_write(kbdev, JOB_CONTROL_REG(JOB_IRQ_CLEAR), 0xFFFFFFFF,
+ NULL);
+
+ kbase_reg_write(kbdev, MMU_REG(MMU_IRQ_MASK), 0, NULL);
+ kbase_reg_write(kbdev, MMU_REG(MMU_IRQ_CLEAR), 0xFFFFFFFF, NULL);
+}
+
+void kbase_pm_disable_interrupts(struct kbase_device *kbdev)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&kbdev->hwaccess_lock, flags);
+ kbase_pm_disable_interrupts_nolock(kbdev);
+ spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);
+}
+
+KBASE_EXPORT_TEST_API(kbase_pm_disable_interrupts);
+
+
+/*
+ * pmu layout:
+ * 0x0000: PMU TAG (RO) (0xCAFECAFE)
+ * 0x0004: PMU VERSION ID (RO) (0x00000000)
+ * 0x0008: CLOCK ENABLE (RW) (31:1 SBZ, 0 CLOCK STATE)
+ */
+void kbase_pm_clock_on(struct kbase_device *kbdev, bool is_resume)
+{
+ bool reset_required = is_resume;
+ struct kbasep_js_device_data *js_devdata = &kbdev->js_data;
+ unsigned long flags;
+ int i;
+
+ KBASE_DEBUG_ASSERT(NULL != kbdev);
+ lockdep_assert_held(&js_devdata->runpool_mutex);
+ lockdep_assert_held(&kbdev->pm.lock);
+
+ if (kbdev->pm.backend.gpu_powered) {
+ /* Already turned on */
+ if (kbdev->poweroff_pending)
+ kbase_pm_enable_interrupts(kbdev);
+ kbdev->poweroff_pending = false;
+ KBASE_DEBUG_ASSERT(!is_resume);
+ return;
+ }
+
+ kbdev->poweroff_pending = false;
+
+ KBASE_TRACE_ADD(kbdev, PM_GPU_ON, NULL, NULL, 0u, 0u);
+
+ if (is_resume && kbdev->pm.backend.callback_power_resume) {
+ kbdev->pm.backend.callback_power_resume(kbdev);
+ return;
+ } else if (kbdev->pm.backend.callback_power_on) {
+ kbdev->pm.backend.callback_power_on(kbdev);
+ /* If your platform properly keeps the GPU state you may use the
+ * return value of the callback_power_on function to
+ * conditionally reset the GPU on power up. Currently we are
+ * conservative and always reset the GPU. */
+ reset_required = true;
+ }
+
+ spin_lock_irqsave(&kbdev->pm.backend.gpu_powered_lock, flags);
+ kbdev->pm.backend.gpu_powered = true;
+ spin_unlock_irqrestore(&kbdev->pm.backend.gpu_powered_lock, flags);
+
+ if (reset_required) {
+ /* GPU state was lost, reset GPU to ensure it is in a
+ * consistent state */
+ kbase_pm_init_hw(kbdev, PM_ENABLE_IRQS);
+ }
+
+ mutex_lock(&kbdev->mmu_hw_mutex);
+ /* Reprogram the GPU's MMU */
+ for (i = 0; i < kbdev->nr_hw_address_spaces; i++) {
+ spin_lock_irqsave(&kbdev->hwaccess_lock, flags);
+
+ if (js_devdata->runpool_irq.per_as_data[i].kctx)
+ kbase_mmu_update(
+ js_devdata->runpool_irq.per_as_data[i].kctx);
+ else
+ kbase_mmu_disable_as(kbdev, i);
+
+ spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);
+ }
+ mutex_unlock(&kbdev->mmu_hw_mutex);
+
+ /* Lastly, enable the interrupts */
+ kbase_pm_enable_interrupts(kbdev);
+}
+
+KBASE_EXPORT_TEST_API(kbase_pm_clock_on);
+
+bool kbase_pm_clock_off(struct kbase_device *kbdev, bool is_suspend)
+{
+ unsigned long flags;
+
+ KBASE_DEBUG_ASSERT(NULL != kbdev);
+ lockdep_assert_held(&kbdev->pm.lock);
+
+ /* ASSERT that the cores should now be unavailable. No lock needed. */
+ KBASE_DEBUG_ASSERT(kbdev->shader_available_bitmap == 0u);
+
+ kbdev->poweroff_pending = true;
+
+ if (!kbdev->pm.backend.gpu_powered) {
+ /* Already turned off */
+ if (is_suspend && kbdev->pm.backend.callback_power_suspend)
+ kbdev->pm.backend.callback_power_suspend(kbdev);
+ return true;
+ }
+
+ KBASE_TRACE_ADD(kbdev, PM_GPU_OFF, NULL, NULL, 0u, 0u);
+
+ /* Disable interrupts. This also clears any outstanding interrupts */
+ kbase_pm_disable_interrupts(kbdev);
+ /* Ensure that any IRQ handlers have finished */
+ kbase_synchronize_irqs(kbdev);
+
+ spin_lock_irqsave(&kbdev->pm.backend.gpu_powered_lock, flags);
+
+ if (atomic_read(&kbdev->faults_pending)) {
+ /* Page/bus faults are still being processed. The GPU cannot
+ * be powered off until they have completed */
+ spin_unlock_irqrestore(&kbdev->pm.backend.gpu_powered_lock,
+ flags);
+ return false;
+ }
+
+ kbase_pm_cache_snoop_disable(kbdev);
+
+ /* The GPU power may be turned off from this point */
+ kbdev->pm.backend.gpu_powered = false;
+ spin_unlock_irqrestore(&kbdev->pm.backend.gpu_powered_lock, flags);
+
+ if (is_suspend && kbdev->pm.backend.callback_power_suspend)
+ kbdev->pm.backend.callback_power_suspend(kbdev);
+ else if (kbdev->pm.backend.callback_power_off)
+ kbdev->pm.backend.callback_power_off(kbdev);
+ return true;
+}
+
+KBASE_EXPORT_TEST_API(kbase_pm_clock_off);
+
+struct kbasep_reset_timeout_data {
+ struct hrtimer timer;
+ bool timed_out;
+ struct kbase_device *kbdev;
+};
+
+void kbase_pm_reset_done(struct kbase_device *kbdev)
+{
+ KBASE_DEBUG_ASSERT(kbdev != NULL);
+ kbdev->pm.backend.reset_done = true;
+ wake_up(&kbdev->pm.backend.reset_done_wait);
+}
+
+/**
+ * kbase_pm_wait_for_reset - Wait for a reset to happen
+ *
+ * Wait for the %RESET_COMPLETED IRQ to occur, then reset the waiting state.
+ *
+ * @kbdev: Kbase device
+ */
+static void kbase_pm_wait_for_reset(struct kbase_device *kbdev)
+{
+ lockdep_assert_held(&kbdev->pm.lock);
+
+ wait_event(kbdev->pm.backend.reset_done_wait,
+ (kbdev->pm.backend.reset_done));
+ kbdev->pm.backend.reset_done = false;
+}
+
+KBASE_EXPORT_TEST_API(kbase_pm_reset_done);
+
+static enum hrtimer_restart kbasep_reset_timeout(struct hrtimer *timer)
+{
+ struct kbasep_reset_timeout_data *rtdata =
+ container_of(timer, struct kbasep_reset_timeout_data, timer);
+
+ rtdata->timed_out = 1;
+
+ /* Set the wait queue to wake up kbase_pm_init_hw even though the reset
+ * hasn't completed */
+ kbase_pm_reset_done(rtdata->kbdev);
+
+ return HRTIMER_NORESTART;
+}
+
+static void kbase_pm_hw_issues_detect(struct kbase_device *kbdev)
+{
+ struct device_node *np = kbdev->dev->of_node;
+ u32 jm_values[4];
+ const u32 gpu_id = kbdev->gpu_props.props.raw_props.gpu_id;
+ const u32 prod_id = (gpu_id & GPU_ID_VERSION_PRODUCT_ID) >>
+ GPU_ID_VERSION_PRODUCT_ID_SHIFT;
+ const u32 major = (gpu_id & GPU_ID_VERSION_MAJOR) >>
+ GPU_ID_VERSION_MAJOR_SHIFT;
+
+ kbdev->hw_quirks_sc = 0;
+
+ /* Needed due to MIDBASE-1494: LS_PAUSEBUFFER_DISABLE (see PRLAM-8443),
+ * and due to MIDGLES-3539 (see PRLAM-11035). */
+ if (kbase_hw_has_issue(kbdev, BASE_HW_ISSUE_8443) ||
+ kbase_hw_has_issue(kbdev, BASE_HW_ISSUE_11035))
+ kbdev->hw_quirks_sc |= SC_LS_PAUSEBUFFER_DISABLE;
+
+ /* Needed due to MIDBASE-2054: SDC_DISABLE_OQ_DISCARD. See PRLAM-10327.
+ */
+ if (kbase_hw_has_issue(kbdev, BASE_HW_ISSUE_10327))
+ kbdev->hw_quirks_sc |= SC_SDC_DISABLE_OQ_DISCARD;
+
+#ifdef CONFIG_MALI_PRFCNT_SET_SECONDARY
+ /* Enable alternative hardware counter selection if configured. */
+ if (!GPU_ID_IS_NEW_FORMAT(prod_id))
+ kbdev->hw_quirks_sc |= SC_ALT_COUNTERS;
+#endif
+
+ /* Needed due to MIDBASE-2795. ENABLE_TEXGRD_FLAGS. See PRLAM-10797. */
+ if (kbase_hw_has_issue(kbdev, BASE_HW_ISSUE_10797))
+ kbdev->hw_quirks_sc |= SC_ENABLE_TEXGRD_FLAGS;
+
+ if (!kbase_hw_has_issue(kbdev, GPUCORE_1619)) {
+ if (prod_id < 0x750 || prod_id == 0x6956) /* T60x, T62x, T72x */
+ kbdev->hw_quirks_sc |= SC_LS_ATTR_CHECK_DISABLE;
+ else if (prod_id >= 0x750 && prod_id <= 0x880) /* T76x, T8xx */
+ kbdev->hw_quirks_sc |= SC_LS_ALLOW_ATTR_TYPES;
+ }
+
+ kbdev->hw_quirks_tiler = kbase_reg_read(kbdev,
+ GPU_CONTROL_REG(TILER_CONFIG), NULL);
+
+ /* Set tiler clock gate override if required */
+ if (kbase_hw_has_issue(kbdev, BASE_HW_ISSUE_T76X_3953))
+ kbdev->hw_quirks_tiler |= TC_CLOCK_GATE_OVERRIDE;
+
+ /* Limit the GPU bus bandwidth if the platform needs this. */
+ kbdev->hw_quirks_mmu = kbase_reg_read(kbdev,
+ GPU_CONTROL_REG(L2_MMU_CONFIG), NULL);
+
+ /* Limit read ID width for AXI */
+ kbdev->hw_quirks_mmu &= ~(L2_MMU_CONFIG_LIMIT_EXTERNAL_READS);
+ kbdev->hw_quirks_mmu |= (DEFAULT_ARID_LIMIT & 0x3) <<
+ L2_MMU_CONFIG_LIMIT_EXTERNAL_READS_SHIFT;
+
+ /* Limit write ID width for AXI */
+ kbdev->hw_quirks_mmu &= ~(L2_MMU_CONFIG_LIMIT_EXTERNAL_WRITES);
+ kbdev->hw_quirks_mmu |= (DEFAULT_AWID_LIMIT & 0x3) <<
+ L2_MMU_CONFIG_LIMIT_EXTERNAL_WRITES_SHIFT;
+
+ if (kbdev->system_coherency == COHERENCY_ACE) {
+ /* Allow memory configuration disparity to be ignored, we
+ * optimize the use of shared memory and thus we expect
+ * some disparity in the memory configuration */
+ kbdev->hw_quirks_mmu |= L2_MMU_CONFIG_ALLOW_SNOOP_DISPARITY;
+ }
+
+ /* Only for T86x/T88x-based products after r2p0 */
+ if (prod_id >= 0x860 && prod_id <= 0x880 && major >= 2) {
+ /* The JM_CONFIG register is specified in the T86x/T88x
+ * Engineering Specification Supplement. The four values are read
+ * from the device tree in the order defined below.
+ */
+#define TIMESTAMP_OVERRIDE 1
+#define CLOCK_GATE_OVERRIDE (1<<1)
+#define JOB_THROTTLE_ENABLE (1<<2)
+#define JOB_THROTTLE_LIMIT_SHIFT 3
+
+ /* 6 bits in the register */
+ const u32 jm_max_limit = 0x3F;
+
+ if (of_property_read_u32_array(np,
+ "jm_config",
+ &jm_values[0],
+ ARRAY_SIZE(jm_values))) {
+ /* Entry not in device tree, use defaults */
+ jm_values[0] = 0;
+ jm_values[1] = 0;
+ jm_values[2] = 0;
+ jm_values[3] = jm_max_limit; /* Max value */
+ }
+
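+ /*
+ * Illustrative device-tree fragment (hypothetical values, shown here
+ * only as a sketch of the expected "jm_config" format):
+ *
+ * jm_config = <1 0 1 20>;
+ *
+ * i.e. timestamp override on, clock gate override off, job throttle
+ * enabled with a throttle limit of 20.
+ */
+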
+ /* Clamp the throttle limit to 6 bits */
+ if (jm_values[3] > jm_max_limit) {
+ dev_dbg(kbdev->dev, "JOB_THROTTLE_LIMIT supplied in device tree is too large. Limiting to MAX (63).");
+ jm_values[3] = jm_max_limit;
+ }
+
+ /* Aggregate to one integer. */
+ kbdev->hw_quirks_jm = (jm_values[0] ? TIMESTAMP_OVERRIDE : 0);
+ kbdev->hw_quirks_jm |= (jm_values[1] ? CLOCK_GATE_OVERRIDE : 0);
+ kbdev->hw_quirks_jm |= (jm_values[2] ? JOB_THROTTLE_ENABLE : 0);
+ kbdev->hw_quirks_jm |= (jm_values[3] <<
+ JOB_THROTTLE_LIMIT_SHIFT);
+ } else {
+ kbdev->hw_quirks_jm = KBASE_JM_CONFIG_UNUSED;
+ }
+
+
+}
+
+static void kbase_pm_hw_issues_apply(struct kbase_device *kbdev)
+{
+ if (kbdev->hw_quirks_sc)
+ kbase_reg_write(kbdev, GPU_CONTROL_REG(SHADER_CONFIG),
+ kbdev->hw_quirks_sc, NULL);
+
+ kbase_reg_write(kbdev, GPU_CONTROL_REG(TILER_CONFIG),
+ kbdev->hw_quirks_tiler, NULL);
+
+ kbase_reg_write(kbdev, GPU_CONTROL_REG(L2_MMU_CONFIG),
+ kbdev->hw_quirks_mmu, NULL);
+
+
+ if (kbdev->hw_quirks_jm != KBASE_JM_CONFIG_UNUSED)
+ kbase_reg_write(kbdev, GPU_CONTROL_REG(JM_CONFIG),
+ kbdev->hw_quirks_jm, NULL);
+
+}
+
+void kbase_pm_cache_snoop_enable(struct kbase_device *kbdev)
+{
+ if ((kbdev->current_gpu_coherency_mode == COHERENCY_ACE) &&
+ !kbdev->cci_snoop_enabled) {
+#ifdef CONFIG_ARM64
+ if (kbdev->snoop_enable_smc != 0)
+ kbase_invoke_smc_fid(kbdev->snoop_enable_smc, 0, 0, 0);
+#endif /* CONFIG_ARM64 */
+ dev_dbg(kbdev->dev, "MALI - CCI Snoops - Enabled\n");
+ kbdev->cci_snoop_enabled = true;
+ }
+}
+
+void kbase_pm_cache_snoop_disable(struct kbase_device *kbdev)
+{
+ if (kbdev->cci_snoop_enabled) {
+#ifdef CONFIG_ARM64
+ if (kbdev->snoop_disable_smc != 0) {
+ mali_cci_flush_l2(kbdev);
+ kbase_invoke_smc_fid(kbdev->snoop_disable_smc, 0, 0, 0);
+ }
+#endif /* CONFIG_ARM64 */
+ dev_dbg(kbdev->dev, "MALI - CCI Snoops Disabled\n");
+ kbdev->cci_snoop_enabled = false;
+ }
+}
+
+static int kbase_pm_reset_do_normal(struct kbase_device *kbdev)
+{
+ struct kbasep_reset_timeout_data rtdata;
+
+ KBASE_TRACE_ADD(kbdev, CORE_GPU_SOFT_RESET, NULL, NULL, 0u, 0);
+
+ kbase_tlstream_jd_gpu_soft_reset(kbdev);
+
+ kbase_reg_write(kbdev, GPU_CONTROL_REG(GPU_COMMAND),
+ GPU_COMMAND_SOFT_RESET, NULL);
+
+ /* Unmask the reset complete interrupt only */
+ kbase_reg_write(kbdev, GPU_CONTROL_REG(GPU_IRQ_MASK), RESET_COMPLETED,
+ NULL);
+
+ /* Initialize a structure for tracking the status of the reset */
+ rtdata.kbdev = kbdev;
+ rtdata.timed_out = 0;
+
+ /* Create a timer to use as a timeout on the reset */
+ hrtimer_init_on_stack(&rtdata.timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
+ rtdata.timer.function = kbasep_reset_timeout;
+
+ hrtimer_start(&rtdata.timer, HR_TIMER_DELAY_MSEC(RESET_TIMEOUT),
+ HRTIMER_MODE_REL);
+
+ /* Wait for the RESET_COMPLETED interrupt to be raised */
+ kbase_pm_wait_for_reset(kbdev);
+
+ if (rtdata.timed_out == 0) {
+ /* GPU has been reset */
+ hrtimer_cancel(&rtdata.timer);
+ destroy_hrtimer_on_stack(&rtdata.timer);
+ return 0;
+ }
+
+ /* No interrupt has been received - check if the RAWSTAT register says
+ * the reset has completed */
+ if (kbase_reg_read(kbdev, GPU_CONTROL_REG(GPU_IRQ_RAWSTAT), NULL) &
+ RESET_COMPLETED) {
+ /* The interrupt is set in the RAWSTAT; this suggests that the
+ * interrupts are not getting to the CPU */
+ dev_err(kbdev->dev, "Reset interrupt didn't reach CPU. Check interrupt assignments.\n");
+ /* If interrupts aren't working we can't continue. */
+ destroy_hrtimer_on_stack(&rtdata.timer);
+ return -EINVAL;
+ }
+
+ /* The GPU doesn't seem to be responding to the reset so try a hard
+ * reset */
+ dev_err(kbdev->dev, "Failed to soft-reset GPU (timed out after %d ms), now attempting a hard reset\n",
+ RESET_TIMEOUT);
+ KBASE_TRACE_ADD(kbdev, CORE_GPU_HARD_RESET, NULL, NULL, 0u, 0);
+ kbase_reg_write(kbdev, GPU_CONTROL_REG(GPU_COMMAND),
+ GPU_COMMAND_HARD_RESET, NULL);
+
+ /* Restart the timer to wait for the hard reset to complete */
+ rtdata.timed_out = 0;
+
+ hrtimer_start(&rtdata.timer, HR_TIMER_DELAY_MSEC(RESET_TIMEOUT),
+ HRTIMER_MODE_REL);
+
+ /* Wait for the RESET_COMPLETED interrupt to be raised */
+ kbase_pm_wait_for_reset(kbdev);
+
+ if (rtdata.timed_out == 0) {
+ /* GPU has been reset */
+ hrtimer_cancel(&rtdata.timer);
+ destroy_hrtimer_on_stack(&rtdata.timer);
+ return 0;
+ }
+
+ destroy_hrtimer_on_stack(&rtdata.timer);
+
+ dev_err(kbdev->dev, "Failed to hard-reset the GPU (timed out after %d ms)\n",
+ RESET_TIMEOUT);
+
+ return -EINVAL;
+}
+
+static int kbase_pm_reset_do_protected(struct kbase_device *kbdev)
+{
+ KBASE_TRACE_ADD(kbdev, CORE_GPU_SOFT_RESET, NULL, NULL, 0u, 0);
+ kbase_tlstream_jd_gpu_soft_reset(kbdev);
+
+ return kbdev->protected_ops->protected_mode_reset(kbdev);
+}
+
+int kbase_pm_init_hw(struct kbase_device *kbdev, unsigned int flags)
+{
+ unsigned long irq_flags;
+ int err;
+ bool resume_vinstr = false;
+
+ KBASE_DEBUG_ASSERT(NULL != kbdev);
+ lockdep_assert_held(&kbdev->pm.lock);
+
+ /* Ensure the clock is on before attempting to access the hardware */
+ if (!kbdev->pm.backend.gpu_powered) {
+ if (kbdev->pm.backend.callback_power_on)
+ kbdev->pm.backend.callback_power_on(kbdev);
+
+ spin_lock_irqsave(&kbdev->pm.backend.gpu_powered_lock,
+ irq_flags);
+ kbdev->pm.backend.gpu_powered = true;
+ spin_unlock_irqrestore(&kbdev->pm.backend.gpu_powered_lock,
+ irq_flags);
+ }
+
+ /* Ensure interrupts are off to begin with, this also clears any
+ * outstanding interrupts */
+ kbase_pm_disable_interrupts(kbdev);
+ /* Ensure cache snoops are disabled before reset. */
+ kbase_pm_cache_snoop_disable(kbdev);
+ /* Prepare for the soft-reset */
+ kbdev->pm.backend.reset_done = false;
+
+ /* The cores should be made unavailable due to the reset */
+ spin_lock_irqsave(&kbdev->hwaccess_lock, irq_flags);
+ if (kbdev->shader_available_bitmap != 0u)
+ KBASE_TRACE_ADD(kbdev, PM_CORES_CHANGE_AVAILABLE, NULL,
+ NULL, 0u, (u32)0u);
+ if (kbdev->tiler_available_bitmap != 0u)
+ KBASE_TRACE_ADD(kbdev, PM_CORES_CHANGE_AVAILABLE_TILER,
+ NULL, NULL, 0u, (u32)0u);
+ kbdev->shader_available_bitmap = 0u;
+ kbdev->tiler_available_bitmap = 0u;
+ kbdev->l2_available_bitmap = 0u;
+ spin_unlock_irqrestore(&kbdev->hwaccess_lock, irq_flags);
+
+ /* Soft reset the GPU */
+ if (kbdev->protected_mode_support &&
+ kbdev->protected_ops->protected_mode_reset)
+ err = kbase_pm_reset_do_protected(kbdev);
+ else
+ err = kbase_pm_reset_do_normal(kbdev);
+
+ spin_lock_irqsave(&kbdev->hwaccess_lock, irq_flags);
+ if (kbdev->protected_mode)
+ resume_vinstr = true;
+ kbdev->protected_mode = false;
+ spin_unlock_irqrestore(&kbdev->hwaccess_lock, irq_flags);
+
+ if (err)
+ goto exit;
+
+ if (flags & PM_HW_ISSUES_DETECT)
+ kbase_pm_hw_issues_detect(kbdev);
+
+ kbase_pm_hw_issues_apply(kbdev);
+ kbase_cache_set_coherency_mode(kbdev, kbdev->system_coherency);
+
+ /* Sanity check protected mode was left after reset */
+ if (kbase_hw_has_feature(kbdev, BASE_HW_FEATURE_PROTECTED_MODE)) {
+ u32 gpu_status = kbase_reg_read(kbdev,
+ GPU_CONTROL_REG(GPU_STATUS), NULL);
+
+ WARN_ON(gpu_status & GPU_STATUS_PROTECTED_MODE_ACTIVE);
+ }
+
+ /* If the cycle counter was in use, re-enable it. The PM_ENABLE_IRQS
+ * flag will only be unset when called from kbase_pm_powerup */
+ if (kbdev->pm.backend.gpu_cycle_counter_requests &&
+ (flags & PM_ENABLE_IRQS)) {
+ /* enable interrupts as the L2 may have to be powered on */
+ kbase_pm_enable_interrupts(kbdev);
+ kbase_pm_request_l2_caches(kbdev);
+
+ /* Re-enable the counters if we need to */
+ spin_lock_irqsave(
+ &kbdev->pm.backend.gpu_cycle_counter_requests_lock,
+ irq_flags);
+ if (kbdev->pm.backend.gpu_cycle_counter_requests)
+ kbase_reg_write(kbdev, GPU_CONTROL_REG(GPU_COMMAND),
+ GPU_COMMAND_CYCLE_COUNT_START, NULL);
+ spin_unlock_irqrestore(
+ &kbdev->pm.backend.gpu_cycle_counter_requests_lock,
+ irq_flags);
+
+ spin_lock_irqsave(&kbdev->hwaccess_lock, irq_flags);
+ kbase_pm_release_l2_caches(kbdev);
+ spin_unlock_irqrestore(&kbdev->hwaccess_lock, irq_flags);
+
+ kbase_pm_disable_interrupts(kbdev);
+ }
+
+ if (flags & PM_ENABLE_IRQS)
+ kbase_pm_enable_interrupts(kbdev);
+
+exit:
+ /* If GPU is leaving protected mode resume vinstr operation. */
+ if (kbdev->vinstr_ctx && resume_vinstr)
+ kbase_vinstr_resume(kbdev->vinstr_ctx);
+
+ return err;
+}
+
+/**
+ * kbase_pm_request_gpu_cycle_counter_do_request - Request cycle counters
+ *
+ * Increase the count of cycle counter users and turn the cycle counters on if
+ * they were previously off
+ *
+ * This function is designed to be called by
+ * kbase_pm_request_gpu_cycle_counter() or
+ * kbase_pm_request_gpu_cycle_counter_l2_is_on() only
+ *
+ * When this function is called the l2 cache must be on and the l2 cache users
+ * count must have been incremented by a call to either
+ * kbase_pm_request_l2_caches() or kbase_pm_request_l2_caches_l2_is_on().
+ *
+ * @kbdev: The kbase device structure of the device
+ */
+static void
+kbase_pm_request_gpu_cycle_counter_do_request(struct kbase_device *kbdev)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&kbdev->pm.backend.gpu_cycle_counter_requests_lock,
+ flags);
+
+ ++kbdev->pm.backend.gpu_cycle_counter_requests;
+
+ if (1 == kbdev->pm.backend.gpu_cycle_counter_requests)
+ kbase_reg_write(kbdev, GPU_CONTROL_REG(GPU_COMMAND),
+ GPU_COMMAND_CYCLE_COUNT_START, NULL);
+
+ spin_unlock_irqrestore(
+ &kbdev->pm.backend.gpu_cycle_counter_requests_lock,
+ flags);
+}
+
+void kbase_pm_request_gpu_cycle_counter(struct kbase_device *kbdev)
+{
+ KBASE_DEBUG_ASSERT(kbdev != NULL);
+
+ KBASE_DEBUG_ASSERT(kbdev->pm.backend.gpu_powered);
+
+ KBASE_DEBUG_ASSERT(kbdev->pm.backend.gpu_cycle_counter_requests <
+ INT_MAX);
+
+ kbase_pm_request_l2_caches(kbdev);
+
+ kbase_pm_request_gpu_cycle_counter_do_request(kbdev);
+}
+
+KBASE_EXPORT_TEST_API(kbase_pm_request_gpu_cycle_counter);
+
+void kbase_pm_request_gpu_cycle_counter_l2_is_on(struct kbase_device *kbdev)
+{
+ KBASE_DEBUG_ASSERT(kbdev != NULL);
+
+ KBASE_DEBUG_ASSERT(kbdev->pm.backend.gpu_powered);
+
+ KBASE_DEBUG_ASSERT(kbdev->pm.backend.gpu_cycle_counter_requests <
+ INT_MAX);
+
+ kbase_pm_request_l2_caches_l2_is_on(kbdev);
+
+ kbase_pm_request_gpu_cycle_counter_do_request(kbdev);
+}
+
+KBASE_EXPORT_TEST_API(kbase_pm_request_gpu_cycle_counter_l2_is_on);
+
+void kbase_pm_release_gpu_cycle_counter_nolock(struct kbase_device *kbdev)
+{
+ unsigned long flags;
+
+ KBASE_DEBUG_ASSERT(kbdev != NULL);
+
+ lockdep_assert_held(&kbdev->hwaccess_lock);
+
+ spin_lock_irqsave(&kbdev->pm.backend.gpu_cycle_counter_requests_lock,
+ flags);
+
+ KBASE_DEBUG_ASSERT(kbdev->pm.backend.gpu_cycle_counter_requests > 0);
+
+ --kbdev->pm.backend.gpu_cycle_counter_requests;
+
+ if (0 == kbdev->pm.backend.gpu_cycle_counter_requests)
+ kbase_reg_write(kbdev, GPU_CONTROL_REG(GPU_COMMAND),
+ GPU_COMMAND_CYCLE_COUNT_STOP, NULL);
+
+ spin_unlock_irqrestore(
+ &kbdev->pm.backend.gpu_cycle_counter_requests_lock,
+ flags);
+
+ kbase_pm_release_l2_caches(kbdev);
+}
+
+void kbase_pm_release_gpu_cycle_counter(struct kbase_device *kbdev)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&kbdev->hwaccess_lock, flags);
+
+ kbase_pm_release_gpu_cycle_counter_nolock(kbdev);
+
+ spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);
+}
+
+KBASE_EXPORT_TEST_API(kbase_pm_release_gpu_cycle_counter);
diff --git a/midgard-0.r2p0/backend/gpu/mali_kbase_pm_internal.h b/midgard-0.r2p0/backend/gpu/mali_kbase_pm_internal.h
new file mode 100755
index 0000000..ad2667a
--- /dev/null
+++ b/midgard-0.r2p0/backend/gpu/mali_kbase_pm_internal.h
@@ -0,0 +1,550 @@
+/*
+ *
+ * (C) COPYRIGHT 2010-2016 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained
+ * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
+ * Boston, MA 02110-1301, USA.
+ *
+ */
+
+
+
+
+
+/*
+ * Power management API definitions used internally by GPU backend
+ */
+
+#ifndef _KBASE_BACKEND_PM_INTERNAL_H_
+#define _KBASE_BACKEND_PM_INTERNAL_H_
+
+#include <mali_kbase_hwaccess_pm.h>
+
+#include "mali_kbase_pm_ca.h"
+#include "mali_kbase_pm_policy.h"
+
+
+/**
+ * kbase_pm_dev_idle - The GPU is idle.
+ *
+ * The OS may choose to turn off idle devices
+ *
+ * @kbdev: The kbase device structure for the device (must be a valid pointer)
+ */
+void kbase_pm_dev_idle(struct kbase_device *kbdev);
+
+/**
+ * kbase_pm_dev_activate - The GPU is active.
+ *
+ * The OS should avoid opportunistically turning off the GPU while it is active
+ *
+ * @kbdev: The kbase device structure for the device (must be a valid pointer)
+ */
+void kbase_pm_dev_activate(struct kbase_device *kbdev);
+
+/**
+ * kbase_pm_get_present_cores - Get details of the cores that are present in
+ * the device.
+ *
+ * This function can be called by the active power policy to return a bitmask
+ * of the cores (of a specified type) that are present in the GPU device.
+ *
+ * @kbdev: The kbase device structure for the device (must be a valid
+ * pointer)
+ * @type: The type of core (see the enum kbase_pm_core_type enumeration)
+ *
+ * Return: The bit mask of cores present
+ */
+u64 kbase_pm_get_present_cores(struct kbase_device *kbdev,
+ enum kbase_pm_core_type type);
+
+/**
+ * kbase_pm_get_active_cores - Get details of the cores that are currently
+ * active in the device.
+ *
+ * This function can be called by the active power policy to return a bitmask of
+ * the cores (of a specified type) that are actively processing work (i.e.
+ * turned on *and* busy).
+ *
+ * @kbdev: The kbase device structure for the device (must be a valid pointer)
+ * @type: The type of core (see the enum kbase_pm_core_type enumeration)
+ *
+ * Return: The bit mask of active cores
+ */
+u64 kbase_pm_get_active_cores(struct kbase_device *kbdev,
+ enum kbase_pm_core_type type);
+
+/**
+ * kbase_pm_get_trans_cores - Get details of the cores that are currently
+ * transitioning between power states.
+ *
+ * This function can be called by the active power policy to return a bitmask of
+ * the cores (of a specified type) that are currently transitioning between
+ * power states.
+ *
+ * @kbdev: The kbase device structure for the device (must be a valid pointer)
+ * @type: The type of core (see the enum kbase_pm_core_type enumeration)
+ *
+ * Return: The bit mask of transitioning cores
+ */
+u64 kbase_pm_get_trans_cores(struct kbase_device *kbdev,
+ enum kbase_pm_core_type type);
+
+/**
+ * kbase_pm_get_ready_cores - Get details of the cores that are currently
+ * powered and ready for jobs.
+ *
+ * This function can be called by the active power policy to return a bitmask of
+ * the cores (of a specified type) that are powered and ready for jobs (they may
+ * or may not be currently executing jobs).
+ *
+ * @kbdev: The kbase device structure for the device (must be a valid pointer)
+ * @type: The type of core (see the enum kbase_pm_core_type enumeration)
+ *
+ * Return: The bit mask of ready cores
+ */
+u64 kbase_pm_get_ready_cores(struct kbase_device *kbdev,
+ enum kbase_pm_core_type type);
+
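+/*
+ * Usage sketch (illustrative, not taken from the driver itself): a power
+ * policy comparing what exists against what is currently usable might do:
+ *
+ *     u64 present = kbase_pm_get_present_cores(kbdev, KBASE_PM_CORE_SHADER);
+ *     u64 ready = kbase_pm_get_ready_cores(kbdev, KBASE_PM_CORE_SHADER);
+ *     u64 trans = kbase_pm_get_trans_cores(kbdev, KBASE_PM_CORE_SHADER);
+ *
+ * Bits set in 'present' but clear in both 'ready' and 'trans' are shader
+ * cores that are currently powered off and not changing state.
+ */
+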
+/**
+ * kbase_pm_clock_on - Turn the clock for the device on, and enable device
+ * interrupts.
+ *
+ * This function can be used by a power policy to turn the clock for the GPU on.
+ * It should be modified during integration to perform the necessary actions to
+ * ensure that the GPU is fully powered and clocked.
+ *
+ * @kbdev: The kbase device structure for the device (must be a valid
+ * pointer)
+ * @is_resume: true if clock on due to resume after suspend, false otherwise
+ */
+void kbase_pm_clock_on(struct kbase_device *kbdev, bool is_resume);
+
+/**
+ * kbase_pm_clock_off - Disable device interrupts, and turn the clock for the
+ * device off.
+ *
+ * This function can be used by a power policy to turn the clock for the GPU
+ * off. It should be modified during integration to perform the necessary
+ * actions to turn the clock off (if this is possible in the integration).
+ *
+ * @kbdev: The kbase device structure for the device (must be a valid
+ * pointer)
+ * @is_suspend: true if clock off due to suspend, false otherwise
+ *
+ * Return: true if clock was turned off, or
+ * false if clock can not be turned off due to pending page/bus fault
+ * workers. Caller must flush MMU workqueues and retry
+ */
+bool kbase_pm_clock_off(struct kbase_device *kbdev, bool is_suspend);
+
+/**
+ * kbase_pm_enable_interrupts - Enable interrupts on the device.
+ *
+ * Interrupts are also enabled after a call to kbase_pm_clock_on().
+ *
+ * @kbdev: The kbase device structure for the device (must be a valid pointer)
+ */
+void kbase_pm_enable_interrupts(struct kbase_device *kbdev);
+
+/**
+ * kbase_pm_disable_interrupts - Disable interrupts on the device.
+ *
+ * This prevents delivery of Power Management interrupts to the CPU so that
+ * kbase_pm_check_transitions_nolock() will not be called from the IRQ handler
+ * until kbase_pm_enable_interrupts() or kbase_pm_clock_on() is called.
+ *
+ * Interrupts are also disabled after a call to kbase_pm_clock_off().
+ *
+ * @kbdev: The kbase device structure for the device (must be a valid pointer)
+ */
+void kbase_pm_disable_interrupts(struct kbase_device *kbdev);
+
+/**
+ * kbase_pm_disable_interrupts_nolock - Version of kbase_pm_disable_interrupts()
+ * that does not take the hwaccess_lock
+ *
+ * Caller must hold the hwaccess_lock.
+ *
+ * @kbdev: The kbase device structure for the device (must be a valid pointer)
+ */
+void kbase_pm_disable_interrupts_nolock(struct kbase_device *kbdev);
+
+/**
+ * kbase_pm_init_hw - Initialize the hardware.
+ * @kbdev: The kbase device structure for the device (must be a valid pointer)
+ * @flags: Flags specifying the type of PM init
+ *
+ * This function checks the GPU ID register to ensure that the GPU is supported
+ * by the driver and performs a reset on the device so that it is in a known
+ * state before the device is used.
+ *
+ * Return: 0 if the device is supported and successfully reset.
+ */
+int kbase_pm_init_hw(struct kbase_device *kbdev, unsigned int flags);
+
+/**
+ * kbase_pm_reset_done - The GPU has been reset successfully.
+ *
+ * This function must be called by the GPU interrupt handler when the
+ * RESET_COMPLETED bit is set. It signals to the power management initialization
+ * code that the GPU has been successfully reset.
+ *
+ * @kbdev: The kbase device structure for the device (must be a valid pointer)
+ */
+void kbase_pm_reset_done(struct kbase_device *kbdev);
+
+
+/**
+ * kbase_pm_check_transitions_nolock - Check if there are any power transitions
+ * to make, and if so start them.
+ *
+ * This function will check the desired_xx_state members of
+ * struct kbase_pm_device_data and the actual status of the hardware to see if
+ * any power transitions can be made at this time to make the hardware state
+ * closer to the state desired by the power policy.
+ *
+ * The return value can be used to check whether all the desired cores are
+ * available, and so whether it's worth submitting a job (e.g. from a Power
+ * Management IRQ).
+ *
+ * Note that this still returns true when desired_xx_state has no
+ * cores. That is: when no cores are desired, none of them can be unavailable.
+ * In this case, the caller may still need to try submitting jobs. This is
+ * because the Core Availability Policy might have taken us to an intermediate
+ * state where no cores are powered, before powering on more cores (e.g. for
+ * core rotation).
+ *
+ * The caller must hold the hwaccess_lock.
+ *
+ * @kbdev: The kbase device structure for the device (must be a valid pointer)
+ *
+ * Return: true when all desired cores are available, i.e. it is worthwhile
+ * for the caller to submit a job; false otherwise.
+ */
+bool kbase_pm_check_transitions_nolock(struct kbase_device *kbdev);
+
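+/*
+ * Minimal calling sketch (illustrative; assumes the caller is in a context
+ * where it may take the hwaccess_lock, which the implementation asserts is
+ * held):
+ *
+ *     spin_lock_irqsave(&kbdev->hwaccess_lock, flags);
+ *     cores_ok = kbase_pm_check_transitions_nolock(kbdev);
+ *     spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);
+ *
+ * A true return means all desired cores are available and it is worth
+ * submitting a job immediately.
+ */
+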
+/**
+ * kbase_pm_check_transitions_sync - Synchronous and locking variant of
+ * kbase_pm_check_transitions_nolock()
+ *
+ * On returning, the desired state at the time of the call will have been met.
+ *
+ * There is nothing to stop the core being switched off by calls to
+ * kbase_pm_release_cores() or kbase_pm_unrequest_cores(). Therefore, the
+ * caller must have already made a call to
+ * kbase_pm_request_cores()/kbase_pm_request_cores_sync() previously.
+ *
+ * The usual use-case for this is to ensure cores are 'READY' after performing
+ * a GPU Reset.
+ *
+ * Unlike kbase_pm_check_transitions_nolock(), the caller must not hold the
+ * hwaccess_lock, because this function will take that lock itself.
+ *
+ * @kbdev: The kbase device structure for the device (must be a valid pointer)
+ */
+void kbase_pm_check_transitions_sync(struct kbase_device *kbdev);
+
+/**
+ * kbase_pm_update_cores_state_nolock - Variant of kbase_pm_update_cores_state()
+ * where the caller must hold
+ * kbase_device.pm.power_change_lock
+ *
+ * @kbdev: The kbase device structure for the device (must be a valid pointer)
+ */
+void kbase_pm_update_cores_state_nolock(struct kbase_device *kbdev);
+
+/**
+ * kbase_pm_update_cores_state - Update the desired state of shader cores from
+ * the Power Policy, and begin any power
+ * transitions.
+ *
+ * This function will update the desired_xx_state members of
+ * struct kbase_pm_device_data by calling into the current Power Policy. It will
+ * then begin power transitions to make the hardware achieve the desired shader
+ * core state.
+ *
+ * @kbdev: The kbase device structure for the device (must be a valid pointer)
+ */
+void kbase_pm_update_cores_state(struct kbase_device *kbdev);
+
+/**
+ * kbase_pm_cancel_deferred_poweroff - Cancel any pending requests to power off
+ * the GPU and/or shader cores.
+ *
+ * This should be called by any functions which directly power off the GPU.
+ *
+ * @kbdev: The kbase device structure for the device (must be a valid pointer)
+ */
+void kbase_pm_cancel_deferred_poweroff(struct kbase_device *kbdev);
+
+/**
+ * kbasep_pm_read_present_cores - Read the bitmasks of present cores.
+ *
+ * This information is cached to avoid having to perform register reads whenever
+ * the information is required.
+ *
+ * @kbdev: The kbase device structure for the device (must be a valid pointer)
+ */
+void kbasep_pm_read_present_cores(struct kbase_device *kbdev);
+
+/**
+ * kbasep_pm_metrics_init - Initialize the metrics gathering framework.
+ *
+ * This must be called before other metric gathering APIs are called.
+ *
+ * @kbdev: The kbase device structure for the device (must be a valid pointer)
+ *
+ * Return: 0 on success, error code on error
+ */
+int kbasep_pm_metrics_init(struct kbase_device *kbdev);
+
+/**
+ * kbasep_pm_metrics_term - Terminate the metrics gathering framework.
+ *
+ * This must be called when metric gathering is no longer required. It is an
+ * error to call any metrics gathering function (other than
+ * kbasep_pm_metrics_init()) after calling this function.
+ *
+ * @kbdev: The kbase device structure for the device (must be a valid pointer)
+ */
+void kbasep_pm_metrics_term(struct kbase_device *kbdev);
+
+/**
+ * kbase_pm_report_vsync - Function to be called by the frame buffer driver to
+ * update the vsync metric.
+ *
+ * This function should be called by the frame buffer driver to update whether
+ * the system is hitting the vsync target or not. buffer_updated should be true
+ * if the vsync corresponded with a new frame being displayed, otherwise it
+ * should be false. This function does not need to be called every vsync, but
+ * only when the value of @buffer_updated differs from a previous call.
+ *
+ * @kbdev: The kbase device structure for the device (must be a
+ * valid pointer)
+ * @buffer_updated: True if the buffer has been updated on this VSync,
+ * false otherwise
+ */
+void kbase_pm_report_vsync(struct kbase_device *kbdev, int buffer_updated);
+
+/**
+ * kbase_pm_get_dvfs_action - Determine whether the DVFS system should change
+ * the clock speed of the GPU.
+ *
+ * @kbdev: The kbase device structure for the device (must be a valid pointer)
+ *
+ * This function should be called regularly by the DVFS system to check whether
+ * the clock speed of the GPU needs updating.
+ */
+void kbase_pm_get_dvfs_action(struct kbase_device *kbdev);
+
+/**
+ * kbase_pm_request_gpu_cycle_counter - Mark that the GPU cycle counter is
+ * needed
+ *
+ * If the caller is the first caller then the GPU cycle counters will be enabled
+ * along with the l2 cache
+ *
+ * The GPU must be powered when calling this function (i.e.
+ * kbase_pm_context_active() must have been called).
+ *
+ * @kbdev: The kbase device structure for the device (must be a valid pointer)
+ */
+void kbase_pm_request_gpu_cycle_counter(struct kbase_device *kbdev);
+
+/**
+ * kbase_pm_request_gpu_cycle_counter_l2_is_on - Mark GPU cycle counter is
+ * needed (l2 cache already on)
+ *
+ * This is a version of kbase_pm_request_gpu_cycle_counter() suitable for
+ * being called when the l2 cache is known to be on, and assured to stay on
+ * until the subsequent call to kbase_pm_release_gpu_cycle_counter(), such as
+ * when a job is submitted. It does not sleep and can be called from atomic
+ * functions.
+ *
+ * The GPU must be powered when calling this function (i.e.
+ * kbase_pm_context_active() must have been called) and the l2 cache must be
+ * powered on.
+ *
+ * @kbdev: The kbase device structure for the device (must be a valid pointer)
+ */
+void kbase_pm_request_gpu_cycle_counter_l2_is_on(struct kbase_device *kbdev);
+
+/**
+ * kbase_pm_release_gpu_cycle_counter - Mark that the GPU cycle counter is no
+ * longer in use
+ *
+ * If the caller is the last caller then the GPU cycle counters will be
+ * disabled. A request must have been made before a call to this.
+ *
+ * Caller must not hold the hwaccess_lock, as it will be taken in this function.
+ * If the caller is already holding this lock then
+ * kbase_pm_release_gpu_cycle_counter_nolock() must be used instead.
+ *
+ * @kbdev: The kbase device structure for the device (must be a valid pointer)
+ */
+void kbase_pm_release_gpu_cycle_counter(struct kbase_device *kbdev);
+
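+/*
+ * Illustrative pairing (a sketch, not driver code): a caller that needs the
+ * cycle counter running while it samples GPU registers brackets its work
+ * like this, assuming the GPU is already powered up:
+ *
+ *     kbase_pm_request_gpu_cycle_counter(kbdev);
+ *     ... read the cycle count registers ...
+ *     kbase_pm_release_gpu_cycle_counter(kbdev);
+ */
+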
+/**
+ * kbase_pm_release_gpu_cycle_counter_nolock - Version of kbase_pm_release_gpu_cycle_counter()
+ * that does not take hwaccess_lock
+ *
+ * Caller must hold the hwaccess_lock.
+ *
+ * @kbdev: The kbase device structure for the device (must be a valid pointer)
+ */
+void kbase_pm_release_gpu_cycle_counter_nolock(struct kbase_device *kbdev);
+
+/**
+ * kbase_pm_wait_for_poweroff_complete - Wait for the poweroff workqueue to
+ * complete
+ *
+ * @kbdev: The kbase device structure for the device (must be a valid pointer)
+ */
+void kbase_pm_wait_for_poweroff_complete(struct kbase_device *kbdev);
+
+/**
+ * kbase_pm_register_access_enable - Enable access to GPU registers
+ *
+ * Enables access to the GPU registers before power management has powered up
+ * the GPU with kbase_pm_powerup().
+ *
+ * Access to registers should be done using kbase_os_reg_read()/write() at this
+ * stage, not kbase_reg_read()/write().
+ *
+ * This results in the power management callbacks provided in the driver
+ * configuration being called to turn on power and/or clocks to the GPU. See
+ * kbase_pm_callback_conf.
+ *
+ * This should only be used before power management is powered up with
+ * kbase_pm_powerup()
+ *
+ * @kbdev: The kbase device structure for the device (must be a valid pointer)
+ */
+void kbase_pm_register_access_enable(struct kbase_device *kbdev);
+
+/**
+ * kbase_pm_register_access_disable - Disable early register access
+ *
+ * Disables access to the GPU registers enabled earlier by a call to
+ * kbase_pm_register_access_enable().
+ *
+ * This results in the power management callbacks provided in the driver
+ * configuration being called to turn off power and/or clocks to the GPU. See
+ * kbase_pm_callback_conf.
+ *
+ * This should only be used before power management is powered up with
+ * kbase_pm_powerup()
+ *
+ * @kbdev: The kbase device structure for the device (must be a valid pointer)
+ */
+void kbase_pm_register_access_disable(struct kbase_device *kbdev);
+
+/* NOTE: kbase_pm_is_suspending is in mali_kbase.h, because it is an inline
+ * function */
+
+/**
+ * kbase_pm_metrics_is_active - Check if the power management metrics
+ * collection is active.
+ *
+ * Note that this returns whether the power management metrics collection was
+ * active at the time of calling; it is possible that after the call the metrics
+ * collection enable may have changed state.
+ *
+ * The caller must handle the consequence that the state may have changed.
+ *
+ * @kbdev: The kbase device structure for the device (must be a valid pointer)
+ * Return: true if metrics collection was active else false.
+ */
+bool kbase_pm_metrics_is_active(struct kbase_device *kbdev);
+
+/**
+ * kbase_pm_do_poweron - Power on the GPU, and any cores that are requested.
+ *
+ * @kbdev: The kbase device structure for the device (must be a valid
+ * pointer)
+ * @is_resume: true if power on due to resume after suspend,
+ * false otherwise
+ */
+void kbase_pm_do_poweron(struct kbase_device *kbdev, bool is_resume);
+
+/**
+ * kbase_pm_do_poweroff - Power off the GPU, and any cores that have been
+ * requested.
+ *
+ * @kbdev: The kbase device structure for the device (must be a valid
+ * pointer)
+ * @is_suspend: true if power off due to suspend,
+ * false otherwise
+ */
+void kbase_pm_do_poweroff(struct kbase_device *kbdev, bool is_suspend);
+
+#ifdef CONFIG_PM_DEVFREQ
+void kbase_pm_get_dvfs_utilisation(struct kbase_device *kbdev,
+ unsigned long *total, unsigned long *busy);
+void kbase_pm_reset_dvfs_utilisation(struct kbase_device *kbdev);
+#endif
+
+#ifdef CONFIG_MALI_MIDGARD_DVFS
+
+/**
+ * kbase_platform_dvfs_event - Report utilisation to DVFS code
+ *
+ * Function provided by platform specific code when DVFS is enabled to allow
+ * the power management metrics system to report utilisation.
+ *
+ * @kbdev: The kbase device structure for the device (must be a
+ * valid pointer)
+ * @utilisation: The current calculated utilisation by the metrics system.
+ * @util_gl_share: The current calculated gl share of utilisation.
+ * @util_cl_share: The current calculated cl share of utilisation per core
+ * group.
+ * Return: 0 on failure, non-zero on success.
+ */
+
+int kbase_platform_dvfs_event(struct kbase_device *kbdev, u32 utilisation,
+ u32 util_gl_share, u32 util_cl_share[2]);
+#endif
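+
+/*
+ * Illustrative sketch (not part of this patch): a platform integration built
+ * with CONFIG_MALI_MIDGARD_DVFS would provide an implementation along these
+ * lines. The thresholds and the example_gpu_clock_step_up()/_down() helpers
+ * are hypothetical; only the prototype and the "0 on failure, non-zero on
+ * success" convention come from the declaration above.
+ *
+ *	int kbase_platform_dvfs_event(struct kbase_device *kbdev,
+ *			u32 utilisation, u32 util_gl_share,
+ *			u32 util_cl_share[2])
+ *	{
+ *		if (utilisation > 90)
+ *			example_gpu_clock_step_up(kbdev);
+ *		else if (utilisation < 10)
+ *			example_gpu_clock_step_down(kbdev);
+ *
+ *		return 1;
+ *	}
+ */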
+
+void kbase_pm_power_changed(struct kbase_device *kbdev);
+
+/**
+ * kbase_pm_metrics_update - Inform the metrics system that an atom is either
+ * about to be run or has just completed.
+ * @kbdev: The kbase device structure for the device (must be a valid pointer)
+ * @now: Pointer to the timestamp of the change, or NULL to use current time
+ *
+ * Caller must hold hwaccess_lock
+ */
+void kbase_pm_metrics_update(struct kbase_device *kbdev,
+ ktime_t *now);
+
+/**
+ * kbase_pm_cache_snoop_enable - Allow CPU snoops on the GPU
+ * If the GPU does not have coherency this is a no-op
+ * @kbdev: Device pointer
+ *
+ * This function should be called after L2 power up.
+ */
+
+void kbase_pm_cache_snoop_enable(struct kbase_device *kbdev);
+
+/**
+ * kbase_pm_cache_snoop_disable - Prevent CPU snoops on the GPU
+ * If the GPU does not have coherency this is a no-op
+ * @kbdev: Device pointer
+ *
+ * This function should be called before L2 power off.
+ */
+void kbase_pm_cache_snoop_disable(struct kbase_device *kbdev);
+
+#endif /* _KBASE_BACKEND_PM_INTERNAL_H_ */
diff --git a/midgard-0.r2p0/backend/gpu/mali_kbase_pm_metrics.c b/midgard-0.r2p0/backend/gpu/mali_kbase_pm_metrics.c
new file mode 100755
index 0000000..7613e1d
--- /dev/null
+++ b/midgard-0.r2p0/backend/gpu/mali_kbase_pm_metrics.c
@@ -0,0 +1,401 @@
+/*
+ *
+ * (C) COPYRIGHT 2011-2016 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained
+ * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
+ * Boston, MA 02110-1301, USA.
+ *
+ */
+
+
+
+
+
+/*
+ * Metrics for power management
+ */
+
+#include <mali_kbase.h>
+#include <mali_kbase_pm.h>
+#include <backend/gpu/mali_kbase_pm_internal.h>
+#include <backend/gpu/mali_kbase_jm_rb.h>
+
+/* When VSync is being hit aim for utilisation between 70-90% */
+#define KBASE_PM_VSYNC_MIN_UTILISATION 70
+#define KBASE_PM_VSYNC_MAX_UTILISATION 90
+/* Otherwise aim for 10-40% */
+#define KBASE_PM_NO_VSYNC_MIN_UTILISATION 10
+#define KBASE_PM_NO_VSYNC_MAX_UTILISATION 40
+
+/* Shift used for kbasep_pm_metrics_data.time_busy/idle - units of (1 << 8) ns
+ * This gives a maximum period between samples of 2^(32+8)/100 ns = slightly
+ * under 11s. Exceeding this will cause overflow */
+#define KBASE_PM_TIME_SHIFT 8
+
+/* Maximum time between sampling of utilization data, without resetting the
+ * counters. Expressed in the same (1 << KBASE_PM_TIME_SHIFT) ns units as
+ * time_busy/time_idle. */
+#define MALI_UTILIZATION_MAX_PERIOD 100000
+
+#ifdef CONFIG_MALI_MIDGARD_DVFS
+static enum hrtimer_restart dvfs_callback(struct hrtimer *timer)
+{
+ unsigned long flags;
+ struct kbasep_pm_metrics_data *metrics;
+
+ KBASE_DEBUG_ASSERT(timer != NULL);
+
+ metrics = container_of(timer, struct kbasep_pm_metrics_data, timer);
+ kbase_pm_get_dvfs_action(metrics->kbdev);
+
+ spin_lock_irqsave(&metrics->lock, flags);
+
+ if (metrics->timer_active)
+ hrtimer_start(timer,
+ HR_TIMER_DELAY_MSEC(metrics->kbdev->pm.dvfs_period),
+ HRTIMER_MODE_REL);
+
+ spin_unlock_irqrestore(&metrics->lock, flags);
+
+ return HRTIMER_NORESTART;
+}
+#endif /* CONFIG_MALI_MIDGARD_DVFS */
+
+int kbasep_pm_metrics_init(struct kbase_device *kbdev)
+{
+ KBASE_DEBUG_ASSERT(kbdev != NULL);
+
+ kbdev->pm.backend.metrics.kbdev = kbdev;
+
+ kbdev->pm.backend.metrics.time_period_start = ktime_get();
+ kbdev->pm.backend.metrics.time_busy = 0;
+ kbdev->pm.backend.metrics.time_idle = 0;
+ kbdev->pm.backend.metrics.prev_busy = 0;
+ kbdev->pm.backend.metrics.prev_idle = 0;
+ kbdev->pm.backend.metrics.gpu_active = false;
+ kbdev->pm.backend.metrics.active_cl_ctx[0] = 0;
+ kbdev->pm.backend.metrics.active_cl_ctx[1] = 0;
+ kbdev->pm.backend.metrics.active_gl_ctx[0] = 0;
+ kbdev->pm.backend.metrics.active_gl_ctx[1] = 0;
+ kbdev->pm.backend.metrics.busy_cl[0] = 0;
+ kbdev->pm.backend.metrics.busy_cl[1] = 0;
+ kbdev->pm.backend.metrics.busy_gl = 0;
+
+ spin_lock_init(&kbdev->pm.backend.metrics.lock);
+
+#ifdef CONFIG_MALI_MIDGARD_DVFS
+ kbdev->pm.backend.metrics.timer_active = true;
+ hrtimer_init(&kbdev->pm.backend.metrics.timer, CLOCK_MONOTONIC,
+ HRTIMER_MODE_REL);
+ kbdev->pm.backend.metrics.timer.function = dvfs_callback;
+
+ hrtimer_start(&kbdev->pm.backend.metrics.timer,
+ HR_TIMER_DELAY_MSEC(kbdev->pm.dvfs_period),
+ HRTIMER_MODE_REL);
+#endif /* CONFIG_MALI_MIDGARD_DVFS */
+
+ return 0;
+}
+
+KBASE_EXPORT_TEST_API(kbasep_pm_metrics_init);
+
+void kbasep_pm_metrics_term(struct kbase_device *kbdev)
+{
+#ifdef CONFIG_MALI_MIDGARD_DVFS
+ unsigned long flags;
+
+ KBASE_DEBUG_ASSERT(kbdev != NULL);
+
+ spin_lock_irqsave(&kbdev->pm.backend.metrics.lock, flags);
+ kbdev->pm.backend.metrics.timer_active = false;
+ spin_unlock_irqrestore(&kbdev->pm.backend.metrics.lock, flags);
+
+ hrtimer_cancel(&kbdev->pm.backend.metrics.timer);
+#endif /* CONFIG_MALI_MIDGARD_DVFS */
+}
+
+KBASE_EXPORT_TEST_API(kbasep_pm_metrics_term);
+
+/* caller needs to hold kbdev->pm.backend.metrics.lock before calling this
+ * function
+ */
+static void kbase_pm_get_dvfs_utilisation_calc(struct kbase_device *kbdev,
+ ktime_t now)
+{
+ ktime_t diff;
+
+ lockdep_assert_held(&kbdev->pm.backend.metrics.lock);
+
+ diff = ktime_sub(now, kbdev->pm.backend.metrics.time_period_start);
+ if (ktime_to_ns(diff) < 0)
+ return;
+
+ if (kbdev->pm.backend.metrics.gpu_active) {
+ u32 ns_time = (u32) (ktime_to_ns(diff) >> KBASE_PM_TIME_SHIFT);
+
+ kbdev->pm.backend.metrics.time_busy += ns_time;
+ if (kbdev->pm.backend.metrics.active_cl_ctx[0])
+ kbdev->pm.backend.metrics.busy_cl[0] += ns_time;
+ if (kbdev->pm.backend.metrics.active_cl_ctx[1])
+ kbdev->pm.backend.metrics.busy_cl[1] += ns_time;
+ if (kbdev->pm.backend.metrics.active_gl_ctx[0])
+ kbdev->pm.backend.metrics.busy_gl += ns_time;
+ if (kbdev->pm.backend.metrics.active_gl_ctx[1])
+ kbdev->pm.backend.metrics.busy_gl += ns_time;
+ } else {
+ kbdev->pm.backend.metrics.time_idle += (u32) (ktime_to_ns(diff)
+ >> KBASE_PM_TIME_SHIFT);
+ }
+
+ kbdev->pm.backend.metrics.time_period_start = now;
+}
+
+#if defined(CONFIG_PM_DEVFREQ) || defined(CONFIG_MALI_MIDGARD_DVFS)
+/* Caller needs to hold kbdev->pm.backend.metrics.lock before calling this
+ * function.
+ */
+static void kbase_pm_reset_dvfs_utilisation_unlocked(struct kbase_device *kbdev,
+ ktime_t now)
+{
+ /* Store previous value */
+ kbdev->pm.backend.metrics.prev_idle =
+ kbdev->pm.backend.metrics.time_idle;
+ kbdev->pm.backend.metrics.prev_busy =
+ kbdev->pm.backend.metrics.time_busy;
+
+ /* Reset current values */
+ kbdev->pm.backend.metrics.time_period_start = now;
+ kbdev->pm.backend.metrics.time_idle = 0;
+ kbdev->pm.backend.metrics.time_busy = 0;
+ kbdev->pm.backend.metrics.busy_cl[0] = 0;
+ kbdev->pm.backend.metrics.busy_cl[1] = 0;
+ kbdev->pm.backend.metrics.busy_gl = 0;
+}
+
+void kbase_pm_reset_dvfs_utilisation(struct kbase_device *kbdev)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&kbdev->pm.backend.metrics.lock, flags);
+ kbase_pm_reset_dvfs_utilisation_unlocked(kbdev, ktime_get());
+ spin_unlock_irqrestore(&kbdev->pm.backend.metrics.lock, flags);
+}
+
+void kbase_pm_get_dvfs_utilisation(struct kbase_device *kbdev,
+ unsigned long *total_out, unsigned long *busy_out)
+{
+ ktime_t now = ktime_get();
+ unsigned long flags, busy, total;
+
+ spin_lock_irqsave(&kbdev->pm.backend.metrics.lock, flags);
+ kbase_pm_get_dvfs_utilisation_calc(kbdev, now);
+
+ busy = kbdev->pm.backend.metrics.time_busy;
+ total = busy + kbdev->pm.backend.metrics.time_idle;
+
+	/* Reset stats if older than MALI_UTILIZATION_MAX_PERIOD */
+ if (total >= MALI_UTILIZATION_MAX_PERIOD) {
+ kbase_pm_reset_dvfs_utilisation_unlocked(kbdev, now);
+ } else if (total < (MALI_UTILIZATION_MAX_PERIOD / 2)) {
+ total += kbdev->pm.backend.metrics.prev_idle +
+ kbdev->pm.backend.metrics.prev_busy;
+ busy += kbdev->pm.backend.metrics.prev_busy;
+ }
+
+ *total_out = total;
+ *busy_out = busy;
+ spin_unlock_irqrestore(&kbdev->pm.backend.metrics.lock, flags);
+}
+#endif
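+
+/*
+ * Illustrative sketch (not part of this patch): how a devfreq
+ * ->get_dev_status() style consumer could use the two helpers above. The
+ * wrapper name is hypothetical and whether to reset the counters after each
+ * read is the consumer's choice; struct devfreq_dev_status comes from the
+ * devfreq framework.
+ *
+ *	static int example_get_dev_status(struct kbase_device *kbdev,
+ *			struct devfreq_dev_status *stat)
+ *	{
+ *		kbase_pm_get_dvfs_utilisation(kbdev,
+ *				&stat->total_time, &stat->busy_time);
+ *		kbase_pm_reset_dvfs_utilisation(kbdev);
+ *		return 0;
+ *	}
+ */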
+
+#ifdef CONFIG_MALI_MIDGARD_DVFS
+
+/* caller needs to hold kbdev->pm.backend.metrics.lock before calling this
+ * function
+ */
+int kbase_pm_get_dvfs_utilisation_old(struct kbase_device *kbdev,
+ int *util_gl_share,
+ int util_cl_share[2],
+ ktime_t now)
+{
+ int utilisation;
+ int busy;
+
+ kbase_pm_get_dvfs_utilisation_calc(kbdev, now);
+
+ if (kbdev->pm.backend.metrics.time_idle +
+ kbdev->pm.backend.metrics.time_busy == 0) {
+ /* No data - so we return NOP */
+ utilisation = -1;
+ if (util_gl_share)
+ *util_gl_share = -1;
+ if (util_cl_share) {
+ util_cl_share[0] = -1;
+ util_cl_share[1] = -1;
+ }
+ goto out;
+ }
+
+ utilisation = (100 * kbdev->pm.backend.metrics.time_busy) /
+ (kbdev->pm.backend.metrics.time_idle +
+ kbdev->pm.backend.metrics.time_busy);
+
+ busy = kbdev->pm.backend.metrics.busy_gl +
+ kbdev->pm.backend.metrics.busy_cl[0] +
+ kbdev->pm.backend.metrics.busy_cl[1];
+
+ if (busy != 0) {
+ if (util_gl_share)
+ *util_gl_share =
+ (100 * kbdev->pm.backend.metrics.busy_gl) /
+ busy;
+ if (util_cl_share) {
+ util_cl_share[0] =
+ (100 * kbdev->pm.backend.metrics.busy_cl[0]) /
+ busy;
+ util_cl_share[1] =
+ (100 * kbdev->pm.backend.metrics.busy_cl[1]) /
+ busy;
+ }
+ } else {
+ if (util_gl_share)
+ *util_gl_share = -1;
+ if (util_cl_share) {
+ util_cl_share[0] = -1;
+ util_cl_share[1] = -1;
+ }
+ }
+
+out:
+ return utilisation;
+}
+
+void kbase_pm_get_dvfs_action(struct kbase_device *kbdev)
+{
+ unsigned long flags;
+ int utilisation, util_gl_share;
+ int util_cl_share[2];
+ ktime_t now;
+
+ KBASE_DEBUG_ASSERT(kbdev != NULL);
+
+ spin_lock_irqsave(&kbdev->pm.backend.metrics.lock, flags);
+
+ now = ktime_get();
+
+ utilisation = kbase_pm_get_dvfs_utilisation_old(kbdev, &util_gl_share,
+ util_cl_share, now);
+
+ if (utilisation < 0 || util_gl_share < 0 || util_cl_share[0] < 0 ||
+ util_cl_share[1] < 0) {
+ utilisation = 0;
+ util_gl_share = 0;
+ util_cl_share[0] = 0;
+ util_cl_share[1] = 0;
+ goto out;
+ }
+
+out:
+#ifdef CONFIG_MALI_MIDGARD_DVFS
+ kbase_platform_dvfs_event(kbdev, utilisation, util_gl_share,
+ util_cl_share);
+#endif /*CONFIG_MALI_MIDGARD_DVFS */
+
+ kbase_pm_reset_dvfs_utilisation_unlocked(kbdev, now);
+
+ spin_unlock_irqrestore(&kbdev->pm.backend.metrics.lock, flags);
+}
+
+bool kbase_pm_metrics_is_active(struct kbase_device *kbdev)
+{
+ bool isactive;
+ unsigned long flags;
+
+ KBASE_DEBUG_ASSERT(kbdev != NULL);
+
+ spin_lock_irqsave(&kbdev->pm.backend.metrics.lock, flags);
+ isactive = kbdev->pm.backend.metrics.timer_active;
+ spin_unlock_irqrestore(&kbdev->pm.backend.metrics.lock, flags);
+
+ return isactive;
+}
+KBASE_EXPORT_TEST_API(kbase_pm_metrics_is_active);
+
+#endif /* CONFIG_MALI_MIDGARD_DVFS */
+
+/**
+ * kbase_pm_metrics_active_calc - Update PM active counts based on currently
+ * running atoms
+ * @kbdev: Device pointer
+ *
+ * The caller must hold kbdev->pm.backend.metrics.lock
+ */
+static void kbase_pm_metrics_active_calc(struct kbase_device *kbdev)
+{
+ int js;
+
+ lockdep_assert_held(&kbdev->pm.backend.metrics.lock);
+
+ kbdev->pm.backend.metrics.active_gl_ctx[0] = 0;
+ kbdev->pm.backend.metrics.active_gl_ctx[1] = 0;
+ kbdev->pm.backend.metrics.active_cl_ctx[0] = 0;
+ kbdev->pm.backend.metrics.active_cl_ctx[1] = 0;
+ kbdev->pm.backend.metrics.gpu_active = false;
+
+ for (js = 0; js < BASE_JM_MAX_NR_SLOTS; js++) {
+ struct kbase_jd_atom *katom = kbase_gpu_inspect(kbdev, js, 0);
+
+ /* Head atom may have just completed, so if it isn't running
+ * then try the next atom */
+ if (katom && katom->gpu_rb_state != KBASE_ATOM_GPU_RB_SUBMITTED)
+ katom = kbase_gpu_inspect(kbdev, js, 1);
+
+ if (katom && katom->gpu_rb_state ==
+ KBASE_ATOM_GPU_RB_SUBMITTED) {
+ if (katom->core_req & BASE_JD_REQ_ONLY_COMPUTE) {
+ int device_nr = (katom->core_req &
+ BASE_JD_REQ_SPECIFIC_COHERENT_GROUP)
+ ? katom->device_nr : 0;
+ if (!WARN_ON(device_nr >= 2))
+ kbdev->pm.backend.metrics.
+ active_cl_ctx[device_nr] = 1;
+ } else {
+ /* Slot 2 should not be running non-compute
+ * atoms */
+ if (!WARN_ON(js >= 2))
+ kbdev->pm.backend.metrics.
+ active_gl_ctx[js] = 1;
+ }
+ kbdev->pm.backend.metrics.gpu_active = true;
+ }
+ }
+}
+
+/* called when job is submitted to or removed from a GPU slot */
+void kbase_pm_metrics_update(struct kbase_device *kbdev, ktime_t *timestamp)
+{
+ unsigned long flags;
+ ktime_t now;
+
+ lockdep_assert_held(&kbdev->hwaccess_lock);
+
+ spin_lock_irqsave(&kbdev->pm.backend.metrics.lock, flags);
+
+ if (!timestamp) {
+ now = ktime_get();
+ timestamp = &now;
+ }
+
+ /* Track how long CL and/or GL jobs have been busy for */
+ kbase_pm_get_dvfs_utilisation_calc(kbdev, *timestamp);
+
+ kbase_pm_metrics_active_calc(kbdev);
+
+ spin_unlock_irqrestore(&kbdev->pm.backend.metrics.lock, flags);
+}
diff --git a/midgard-0.r2p0/backend/gpu/mali_kbase_pm_policy.c b/midgard-0.r2p0/backend/gpu/mali_kbase_pm_policy.c
new file mode 100755
index 0000000..92457e8
--- /dev/null
+++ b/midgard-0.r2p0/backend/gpu/mali_kbase_pm_policy.c
@@ -0,0 +1,969 @@
+/*
+ *
+ * (C) COPYRIGHT 2010-2016 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained
+ * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
+ * Boston, MA 02110-1301, USA.
+ *
+ */
+
+
+
+/*
+ * Power policy API implementations
+ */
+
+#include <mali_kbase.h>
+#include <mali_midg_regmap.h>
+#include <mali_kbase_pm.h>
+#include <mali_kbase_config_defaults.h>
+#include <backend/gpu/mali_kbase_pm_internal.h>
+
+static const struct kbase_pm_policy *const policy_list[] = {
+#ifdef CONFIG_MALI_NO_MALI
+ &kbase_pm_always_on_policy_ops,
+ &kbase_pm_demand_policy_ops,
+ &kbase_pm_coarse_demand_policy_ops,
+#if !MALI_CUSTOMER_RELEASE
+ &kbase_pm_demand_always_powered_policy_ops,
+ &kbase_pm_fast_start_policy_ops,
+#endif
+#else /* CONFIG_MALI_NO_MALI */
+ &kbase_pm_demand_policy_ops,
+ &kbase_pm_always_on_policy_ops,
+ &kbase_pm_coarse_demand_policy_ops,
+#if !MALI_CUSTOMER_RELEASE
+ &kbase_pm_demand_always_powered_policy_ops,
+ &kbase_pm_fast_start_policy_ops,
+#endif
+#endif /* CONFIG_MALI_NO_MALI */
+};
+
+/* The number of policies available in the system.
+ * This is derived from the number of entries in the policy_list array above.
+ */
+#define POLICY_COUNT (sizeof(policy_list)/sizeof(*policy_list))
+
+
+/* Function IDs for looking up Timeline Trace codes in
+ * kbase_pm_change_state_trace_code */
+enum kbase_pm_func_id {
+ KBASE_PM_FUNC_ID_REQUEST_CORES_START,
+ KBASE_PM_FUNC_ID_REQUEST_CORES_END,
+ KBASE_PM_FUNC_ID_RELEASE_CORES_START,
+ KBASE_PM_FUNC_ID_RELEASE_CORES_END,
+ /* Note: kbase_pm_unrequest_cores() is on the slow path, and we neither
+ * expect to hit it nor tend to hit it very much anyway. We can detect
+ * whether we need more instrumentation by a difference between
+ * PM_CHECKTRANS events and PM_SEND/HANDLE_EVENT. */
+
+ /* Must be the last */
+ KBASE_PM_FUNC_ID_COUNT
+};
+
+
+/* State changes during request/unrequest/release-ing cores */
+enum {
+ KBASE_PM_CHANGE_STATE_SHADER = (1u << 0),
+ KBASE_PM_CHANGE_STATE_TILER = (1u << 1),
+
+ /* These two must be last */
+ KBASE_PM_CHANGE_STATE_MASK = (KBASE_PM_CHANGE_STATE_TILER |
+ KBASE_PM_CHANGE_STATE_SHADER),
+ KBASE_PM_CHANGE_STATE_COUNT = KBASE_PM_CHANGE_STATE_MASK + 1
+};
+typedef u32 kbase_pm_change_state;
+
+
+#ifdef CONFIG_MALI_TRACE_TIMELINE
+/* Timeline Trace code lookups for each function */
+static u32 kbase_pm_change_state_trace_code[KBASE_PM_FUNC_ID_COUNT]
+ [KBASE_PM_CHANGE_STATE_COUNT] = {
+ /* kbase_pm_request_cores */
+ [KBASE_PM_FUNC_ID_REQUEST_CORES_START][0] = 0,
+ [KBASE_PM_FUNC_ID_REQUEST_CORES_START][KBASE_PM_CHANGE_STATE_SHADER] =
+ SW_FLOW_PM_CHECKTRANS_PM_REQUEST_CORES_SHADER_START,
+ [KBASE_PM_FUNC_ID_REQUEST_CORES_START][KBASE_PM_CHANGE_STATE_TILER] =
+ SW_FLOW_PM_CHECKTRANS_PM_REQUEST_CORES_TILER_START,
+ [KBASE_PM_FUNC_ID_REQUEST_CORES_START][KBASE_PM_CHANGE_STATE_SHADER |
+ KBASE_PM_CHANGE_STATE_TILER] =
+ SW_FLOW_PM_CHECKTRANS_PM_REQUEST_CORES_SHADER_TILER_START,
+
+ [KBASE_PM_FUNC_ID_REQUEST_CORES_END][0] = 0,
+ [KBASE_PM_FUNC_ID_REQUEST_CORES_END][KBASE_PM_CHANGE_STATE_SHADER] =
+ SW_FLOW_PM_CHECKTRANS_PM_REQUEST_CORES_SHADER_END,
+ [KBASE_PM_FUNC_ID_REQUEST_CORES_END][KBASE_PM_CHANGE_STATE_TILER] =
+ SW_FLOW_PM_CHECKTRANS_PM_REQUEST_CORES_TILER_END,
+ [KBASE_PM_FUNC_ID_REQUEST_CORES_END][KBASE_PM_CHANGE_STATE_SHADER |
+ KBASE_PM_CHANGE_STATE_TILER] =
+ SW_FLOW_PM_CHECKTRANS_PM_REQUEST_CORES_SHADER_TILER_END,
+
+ /* kbase_pm_release_cores */
+ [KBASE_PM_FUNC_ID_RELEASE_CORES_START][0] = 0,
+ [KBASE_PM_FUNC_ID_RELEASE_CORES_START][KBASE_PM_CHANGE_STATE_SHADER] =
+ SW_FLOW_PM_CHECKTRANS_PM_RELEASE_CORES_SHADER_START,
+ [KBASE_PM_FUNC_ID_RELEASE_CORES_START][KBASE_PM_CHANGE_STATE_TILER] =
+ SW_FLOW_PM_CHECKTRANS_PM_RELEASE_CORES_TILER_START,
+ [KBASE_PM_FUNC_ID_RELEASE_CORES_START][KBASE_PM_CHANGE_STATE_SHADER |
+ KBASE_PM_CHANGE_STATE_TILER] =
+ SW_FLOW_PM_CHECKTRANS_PM_RELEASE_CORES_SHADER_TILER_START,
+
+ [KBASE_PM_FUNC_ID_RELEASE_CORES_END][0] = 0,
+ [KBASE_PM_FUNC_ID_RELEASE_CORES_END][KBASE_PM_CHANGE_STATE_SHADER] =
+ SW_FLOW_PM_CHECKTRANS_PM_RELEASE_CORES_SHADER_END,
+ [KBASE_PM_FUNC_ID_RELEASE_CORES_END][KBASE_PM_CHANGE_STATE_TILER] =
+ SW_FLOW_PM_CHECKTRANS_PM_RELEASE_CORES_TILER_END,
+ [KBASE_PM_FUNC_ID_RELEASE_CORES_END][KBASE_PM_CHANGE_STATE_SHADER |
+ KBASE_PM_CHANGE_STATE_TILER] =
+ SW_FLOW_PM_CHECKTRANS_PM_RELEASE_CORES_SHADER_TILER_END
+};
+
+static inline void kbase_timeline_pm_cores_func(struct kbase_device *kbdev,
+ enum kbase_pm_func_id func_id,
+ kbase_pm_change_state state)
+{
+ int trace_code;
+
+ KBASE_DEBUG_ASSERT(func_id >= 0 && func_id < KBASE_PM_FUNC_ID_COUNT);
+ KBASE_DEBUG_ASSERT(state != 0 && (state & KBASE_PM_CHANGE_STATE_MASK) ==
+ state);
+
+ trace_code = kbase_pm_change_state_trace_code[func_id][state];
+ KBASE_TIMELINE_PM_CHECKTRANS(kbdev, trace_code);
+}
+
+#else /* CONFIG_MALI_TRACE_TIMELINE */
+static inline void kbase_timeline_pm_cores_func(struct kbase_device *kbdev,
+ enum kbase_pm_func_id func_id, kbase_pm_change_state state)
+{
+}
+
+#endif /* CONFIG_MALI_TRACE_TIMELINE */
+
+/**
+ * kbasep_pm_do_poweroff_cores - Process a poweroff request and power down any
+ * requested shader cores
+ * @kbdev: Device pointer
+ */
+static void kbasep_pm_do_poweroff_cores(struct kbase_device *kbdev)
+{
+ u64 prev_shader_state = kbdev->pm.backend.desired_shader_state;
+ u64 prev_tiler_state = kbdev->pm.backend.desired_tiler_state;
+
+ lockdep_assert_held(&kbdev->hwaccess_lock);
+
+ kbdev->pm.backend.desired_shader_state &=
+ ~kbdev->pm.backend.shader_poweroff_pending;
+ kbdev->pm.backend.desired_tiler_state &=
+ ~kbdev->pm.backend.tiler_poweroff_pending;
+
+ kbdev->pm.backend.shader_poweroff_pending = 0;
+ kbdev->pm.backend.tiler_poweroff_pending = 0;
+
+ if (prev_shader_state != kbdev->pm.backend.desired_shader_state ||
+ prev_tiler_state !=
+ kbdev->pm.backend.desired_tiler_state ||
+ kbdev->pm.backend.ca_in_transition) {
+ bool cores_are_available;
+
+ KBASE_TIMELINE_PM_CHECKTRANS(kbdev,
+ SW_FLOW_PM_CHECKTRANS_PM_RELEASE_CORES_DEFERRED_START);
+ cores_are_available = kbase_pm_check_transitions_nolock(kbdev);
+ KBASE_TIMELINE_PM_CHECKTRANS(kbdev,
+ SW_FLOW_PM_CHECKTRANS_PM_RELEASE_CORES_DEFERRED_END);
+
+ /* Don't need 'cores_are_available',
+ * because we don't return anything */
+ CSTD_UNUSED(cores_are_available);
+ }
+}
+
+static enum hrtimer_restart
+kbasep_pm_do_gpu_poweroff_callback(struct hrtimer *timer)
+{
+ struct kbase_device *kbdev;
+ unsigned long flags;
+
+ kbdev = container_of(timer, struct kbase_device,
+ pm.backend.gpu_poweroff_timer);
+
+ spin_lock_irqsave(&kbdev->hwaccess_lock, flags);
+
+ /* It is safe for this call to do nothing if the work item is already
+ * queued. The worker function will read the must up-to-date state of
+ * kbdev->pm.backend.gpu_poweroff_pending under lock.
+ *
+ * If a state change occurs while the worker function is processing,
+ * this call will succeed as a work item can be requeued once it has
+ * started processing.
+ */
+ if (kbdev->pm.backend.gpu_poweroff_pending)
+ queue_work(kbdev->pm.backend.gpu_poweroff_wq,
+ &kbdev->pm.backend.gpu_poweroff_work);
+
+ if (kbdev->pm.backend.shader_poweroff_pending ||
+ kbdev->pm.backend.tiler_poweroff_pending) {
+ kbdev->pm.backend.shader_poweroff_pending_time--;
+
+ KBASE_DEBUG_ASSERT(
+ kbdev->pm.backend.shader_poweroff_pending_time
+ >= 0);
+
+ if (!kbdev->pm.backend.shader_poweroff_pending_time)
+ kbasep_pm_do_poweroff_cores(kbdev);
+ }
+
+ if (kbdev->pm.backend.poweroff_timer_needed) {
+ spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);
+
+ hrtimer_add_expires(timer, kbdev->pm.gpu_poweroff_time);
+
+ return HRTIMER_RESTART;
+ }
+
+ kbdev->pm.backend.poweroff_timer_running = false;
+ spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);
+
+ return HRTIMER_NORESTART;
+}
+
+static void kbasep_pm_do_gpu_poweroff_wq(struct work_struct *data)
+{
+ unsigned long flags;
+ struct kbase_device *kbdev;
+ bool do_poweroff = false;
+
+ kbdev = container_of(data, struct kbase_device,
+ pm.backend.gpu_poweroff_work);
+
+ mutex_lock(&kbdev->pm.lock);
+
+ if (kbdev->pm.backend.gpu_poweroff_pending == 0) {
+ mutex_unlock(&kbdev->pm.lock);
+ return;
+ }
+
+ kbdev->pm.backend.gpu_poweroff_pending--;
+
+ if (kbdev->pm.backend.gpu_poweroff_pending > 0) {
+ mutex_unlock(&kbdev->pm.lock);
+ return;
+ }
+
+ KBASE_DEBUG_ASSERT(kbdev->pm.backend.gpu_poweroff_pending == 0);
+
+ spin_lock_irqsave(&kbdev->hwaccess_lock, flags);
+
+ /* Only power off the GPU if a request is still pending */
+ if (!kbdev->pm.backend.pm_current_policy->get_core_active(kbdev))
+ do_poweroff = true;
+
+ spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);
+
+ if (do_poweroff) {
+ kbdev->pm.backend.poweroff_timer_needed = false;
+ hrtimer_cancel(&kbdev->pm.backend.gpu_poweroff_timer);
+ kbdev->pm.backend.poweroff_timer_running = false;
+
+ /* Power off the GPU */
+ kbase_pm_do_poweroff(kbdev, false);
+ }
+
+ mutex_unlock(&kbdev->pm.lock);
+}
+
+int kbase_pm_policy_init(struct kbase_device *kbdev)
+{
+ struct workqueue_struct *wq;
+
+ wq = alloc_workqueue("kbase_pm_do_poweroff",
+ WQ_HIGHPRI | WQ_UNBOUND, 1);
+ if (!wq)
+ return -ENOMEM;
+
+ kbdev->pm.backend.gpu_poweroff_wq = wq;
+ INIT_WORK(&kbdev->pm.backend.gpu_poweroff_work,
+ kbasep_pm_do_gpu_poweroff_wq);
+ hrtimer_init(&kbdev->pm.backend.gpu_poweroff_timer,
+ CLOCK_MONOTONIC, HRTIMER_MODE_REL);
+ kbdev->pm.backend.gpu_poweroff_timer.function =
+ kbasep_pm_do_gpu_poweroff_callback;
+ kbdev->pm.backend.pm_current_policy = policy_list[0];
+ kbdev->pm.backend.pm_current_policy->init(kbdev);
+ kbdev->pm.gpu_poweroff_time =
+ HR_TIMER_DELAY_NSEC(DEFAULT_PM_GPU_POWEROFF_TICK_NS);
+ kbdev->pm.poweroff_shader_ticks = DEFAULT_PM_POWEROFF_TICK_SHADER;
+ kbdev->pm.poweroff_gpu_ticks = DEFAULT_PM_POWEROFF_TICK_GPU;
+
+ return 0;
+}
+
+void kbase_pm_policy_term(struct kbase_device *kbdev)
+{
+ kbdev->pm.backend.pm_current_policy->term(kbdev);
+ destroy_workqueue(kbdev->pm.backend.gpu_poweroff_wq);
+}
+
+void kbase_pm_cancel_deferred_poweroff(struct kbase_device *kbdev)
+{
+ unsigned long flags;
+
+ lockdep_assert_held(&kbdev->pm.lock);
+
+ kbdev->pm.backend.poweroff_timer_needed = false;
+ hrtimer_cancel(&kbdev->pm.backend.gpu_poweroff_timer);
+ spin_lock_irqsave(&kbdev->hwaccess_lock, flags);
+ kbdev->pm.backend.poweroff_timer_running = false;
+
+ /* If wq is already running but is held off by pm.lock, make sure it has
+ * no effect */
+ kbdev->pm.backend.gpu_poweroff_pending = 0;
+
+ kbdev->pm.backend.shader_poweroff_pending = 0;
+ kbdev->pm.backend.tiler_poweroff_pending = 0;
+ kbdev->pm.backend.shader_poweroff_pending_time = 0;
+
+ spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);
+}
+
+void kbase_pm_update_active(struct kbase_device *kbdev)
+{
+ struct kbase_pm_device_data *pm = &kbdev->pm;
+ struct kbase_pm_backend_data *backend = &pm->backend;
+ unsigned long flags;
+ bool active;
+
+ lockdep_assert_held(&pm->lock);
+
+ /* pm_current_policy will never be NULL while pm.lock is held */
+ KBASE_DEBUG_ASSERT(backend->pm_current_policy);
+
+ spin_lock_irqsave(&kbdev->hwaccess_lock, flags);
+
+ active = backend->pm_current_policy->get_core_active(kbdev);
+
+ if (active) {
+ if (backend->gpu_poweroff_pending) {
+ /* Cancel any pending power off request */
+ backend->gpu_poweroff_pending = 0;
+
+ /* If a request was pending then the GPU was still
+ * powered, so no need to continue */
+ if (!kbdev->poweroff_pending) {
+ spin_unlock_irqrestore(&kbdev->hwaccess_lock,
+ flags);
+ return;
+ }
+ }
+
+ if (!backend->poweroff_timer_running && !backend->gpu_powered &&
+ (pm->poweroff_gpu_ticks ||
+ pm->poweroff_shader_ticks)) {
+ backend->poweroff_timer_needed = true;
+ backend->poweroff_timer_running = true;
+ hrtimer_start(&backend->gpu_poweroff_timer,
+ pm->gpu_poweroff_time,
+ HRTIMER_MODE_REL);
+ }
+
+ /* Power on the GPU and any cores requested by the policy */
+ if (pm->backend.poweroff_wait_in_progress) {
+ pm->backend.poweron_required = true;
+ spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);
+ } else {
+ spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);
+ kbase_pm_do_poweron(kbdev, false);
+ }
+ } else {
+ /* It is an error for the power policy to power off the GPU
+ * when there are contexts active */
+ KBASE_DEBUG_ASSERT(pm->active_count == 0);
+
+ if (backend->shader_poweroff_pending ||
+ backend->tiler_poweroff_pending) {
+ backend->shader_poweroff_pending = 0;
+ backend->tiler_poweroff_pending = 0;
+ backend->shader_poweroff_pending_time = 0;
+ }
+
+ /* Request power off */
+ if (pm->backend.gpu_powered) {
+ if (pm->poweroff_gpu_ticks) {
+ backend->gpu_poweroff_pending =
+ pm->poweroff_gpu_ticks;
+ backend->poweroff_timer_needed = true;
+ if (!backend->poweroff_timer_running) {
+ /* Start timer if not running (eg if
+ * power policy has been changed from
+ * always_on to something else). This
+ * will ensure the GPU is actually
+ * powered off */
+ backend->poweroff_timer_running
+ = true;
+ hrtimer_start(
+ &backend->gpu_poweroff_timer,
+ pm->gpu_poweroff_time,
+ HRTIMER_MODE_REL);
+ }
+ spin_unlock_irqrestore(&kbdev->hwaccess_lock,
+ flags);
+ } else {
+ spin_unlock_irqrestore(&kbdev->hwaccess_lock,
+ flags);
+
+ /* Power off the GPU immediately */
+ kbase_pm_do_poweroff(kbdev, false);
+ }
+ } else {
+ spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);
+ }
+ }
+}
+
+void kbase_pm_update_cores_state_nolock(struct kbase_device *kbdev)
+{
+ u64 desired_bitmap;
+ u64 desired_tiler_bitmap;
+ bool cores_are_available;
+ bool do_poweroff = false;
+
+ lockdep_assert_held(&kbdev->hwaccess_lock);
+
+ if (kbdev->pm.backend.pm_current_policy == NULL)
+ return;
+ if (kbdev->pm.backend.poweroff_wait_in_progress)
+ return;
+
+ if (kbdev->protected_mode_transition && !kbdev->shader_needed_bitmap &&
+ !kbdev->shader_inuse_bitmap && !kbdev->tiler_needed_cnt
+ && !kbdev->tiler_inuse_cnt) {
+ /* We are trying to change in/out of protected mode - force all
+ * cores off so that the L2 powers down */
+ desired_bitmap = 0;
+ desired_tiler_bitmap = 0;
+ } else {
+ desired_bitmap =
+ kbdev->pm.backend.pm_current_policy->get_core_mask(kbdev);
+ desired_bitmap &= kbase_pm_ca_get_core_mask(kbdev);
+
+ if (kbdev->tiler_needed_cnt > 0 || kbdev->tiler_inuse_cnt > 0)
+ desired_tiler_bitmap = 1;
+ else
+ desired_tiler_bitmap = 0;
+
+ if (!kbase_hw_has_feature(kbdev, BASE_HW_FEATURE_XAFFINITY)) {
+ /* Unless XAFFINITY is supported, enable core 0 if tiler
+ * required, regardless of core availability */
+ if (kbdev->tiler_needed_cnt > 0 ||
+ kbdev->tiler_inuse_cnt > 0)
+ desired_bitmap |= 1;
+ }
+ }
+
+ if (kbdev->pm.backend.desired_shader_state != desired_bitmap)
+ KBASE_TRACE_ADD(kbdev, PM_CORES_CHANGE_DESIRED, NULL, NULL, 0u,
+ (u32)desired_bitmap);
+ /* Are any cores being powered on? */
+ if (~kbdev->pm.backend.desired_shader_state & desired_bitmap ||
+ ~kbdev->pm.backend.desired_tiler_state & desired_tiler_bitmap ||
+ kbdev->pm.backend.ca_in_transition) {
+ /* Check if we are powering off any cores before updating shader
+ * state */
+ if (kbdev->pm.backend.desired_shader_state & ~desired_bitmap ||
+ kbdev->pm.backend.desired_tiler_state &
+ ~desired_tiler_bitmap) {
+ /* Start timer to power off cores */
+ kbdev->pm.backend.shader_poweroff_pending |=
+ (kbdev->pm.backend.desired_shader_state &
+ ~desired_bitmap);
+ kbdev->pm.backend.tiler_poweroff_pending |=
+ (kbdev->pm.backend.desired_tiler_state &
+ ~desired_tiler_bitmap);
+
+ if (kbdev->pm.poweroff_shader_ticks &&
+ !kbdev->protected_mode_transition)
+ kbdev->pm.backend.shader_poweroff_pending_time =
+ kbdev->pm.poweroff_shader_ticks;
+ else
+ do_poweroff = true;
+ }
+
+ kbdev->pm.backend.desired_shader_state = desired_bitmap;
+ kbdev->pm.backend.desired_tiler_state = desired_tiler_bitmap;
+
+ /* If any cores are being powered on, transition immediately */
+ cores_are_available = kbase_pm_check_transitions_nolock(kbdev);
+ } else if (kbdev->pm.backend.desired_shader_state & ~desired_bitmap ||
+ kbdev->pm.backend.desired_tiler_state &
+ ~desired_tiler_bitmap) {
+ /* Start timer to power off cores */
+ kbdev->pm.backend.shader_poweroff_pending |=
+ (kbdev->pm.backend.desired_shader_state &
+ ~desired_bitmap);
+ kbdev->pm.backend.tiler_poweroff_pending |=
+ (kbdev->pm.backend.desired_tiler_state &
+ ~desired_tiler_bitmap);
+ if (kbdev->pm.poweroff_shader_ticks &&
+ !kbdev->protected_mode_transition)
+ kbdev->pm.backend.shader_poweroff_pending_time =
+ kbdev->pm.poweroff_shader_ticks;
+ else
+ kbasep_pm_do_poweroff_cores(kbdev);
+ } else if (kbdev->pm.active_count == 0 && desired_bitmap != 0 &&
+ desired_tiler_bitmap != 0 &&
+ kbdev->pm.backend.poweroff_timer_needed) {
+ /* If power policy is keeping cores on despite there being no
+ * active contexts then disable poweroff timer as it isn't
+ * required.
+ * Only reset poweroff_timer_needed if we're not in the middle
+ * of the power off callback */
+ kbdev->pm.backend.poweroff_timer_needed = false;
+ }
+
+ /* Ensure timer does not power off wanted cores and make sure to power
+ * off unwanted cores */
+ if (kbdev->pm.backend.shader_poweroff_pending ||
+ kbdev->pm.backend.tiler_poweroff_pending) {
+ kbdev->pm.backend.shader_poweroff_pending &=
+ ~(kbdev->pm.backend.desired_shader_state &
+ desired_bitmap);
+ kbdev->pm.backend.tiler_poweroff_pending &=
+ ~(kbdev->pm.backend.desired_tiler_state &
+ desired_tiler_bitmap);
+
+ if (!kbdev->pm.backend.shader_poweroff_pending &&
+ !kbdev->pm.backend.tiler_poweroff_pending)
+ kbdev->pm.backend.shader_poweroff_pending_time = 0;
+ }
+
+ /* Shader poweroff is deferred to the end of the function, to eliminate
+ * issues caused by the core availability policy recursing into this
+ * function */
+ if (do_poweroff)
+ kbasep_pm_do_poweroff_cores(kbdev);
+
+ /* Don't need 'cores_are_available', because we don't return anything */
+ CSTD_UNUSED(cores_are_available);
+}
+
+void kbase_pm_update_cores_state(struct kbase_device *kbdev)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&kbdev->hwaccess_lock, flags);
+
+ kbase_pm_update_cores_state_nolock(kbdev);
+
+ spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);
+}
+
+int kbase_pm_list_policies(const struct kbase_pm_policy * const **list)
+{
+ if (!list)
+ return POLICY_COUNT;
+
+ *list = policy_list;
+
+ return POLICY_COUNT;
+}
+
+KBASE_EXPORT_TEST_API(kbase_pm_list_policies);
+
+const struct kbase_pm_policy *kbase_pm_get_policy(struct kbase_device *kbdev)
+{
+ KBASE_DEBUG_ASSERT(kbdev != NULL);
+
+ return kbdev->pm.backend.pm_current_policy;
+}
+
+KBASE_EXPORT_TEST_API(kbase_pm_get_policy);
+
+void kbase_pm_set_policy(struct kbase_device *kbdev,
+ const struct kbase_pm_policy *new_policy)
+{
+ struct kbasep_js_device_data *js_devdata = &kbdev->js_data;
+ const struct kbase_pm_policy *old_policy;
+ unsigned long flags;
+
+ KBASE_DEBUG_ASSERT(kbdev != NULL);
+ KBASE_DEBUG_ASSERT(new_policy != NULL);
+
+ KBASE_TRACE_ADD(kbdev, PM_SET_POLICY, NULL, NULL, 0u, new_policy->id);
+
+ /* During a policy change we pretend the GPU is active */
+ /* A suspend won't happen here, because we're in a syscall from a
+ * userspace thread */
+ kbase_pm_context_active(kbdev);
+
+ mutex_lock(&js_devdata->runpool_mutex);
+ mutex_lock(&kbdev->pm.lock);
+
+ /* Remove the policy to prevent IRQ handlers from working on it */
+ spin_lock_irqsave(&kbdev->hwaccess_lock, flags);
+ old_policy = kbdev->pm.backend.pm_current_policy;
+ kbdev->pm.backend.pm_current_policy = NULL;
+ spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);
+
+ KBASE_TRACE_ADD(kbdev, PM_CURRENT_POLICY_TERM, NULL, NULL, 0u,
+ old_policy->id);
+ if (old_policy->term)
+ old_policy->term(kbdev);
+
+ KBASE_TRACE_ADD(kbdev, PM_CURRENT_POLICY_INIT, NULL, NULL, 0u,
+ new_policy->id);
+ if (new_policy->init)
+ new_policy->init(kbdev);
+
+ spin_lock_irqsave(&kbdev->hwaccess_lock, flags);
+ kbdev->pm.backend.pm_current_policy = new_policy;
+ spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);
+
+ /* If any core power state changes were previously attempted, but
+ * couldn't be made because the policy was changing (current_policy was
+ * NULL), then re-try them here. */
+ kbase_pm_update_active(kbdev);
+ kbase_pm_update_cores_state(kbdev);
+
+ mutex_unlock(&kbdev->pm.lock);
+ mutex_unlock(&js_devdata->runpool_mutex);
+
+ /* Now the policy change is finished, we release our fake context active
+ * reference */
+ kbase_pm_context_idle(kbdev);
+}
+
+KBASE_EXPORT_TEST_API(kbase_pm_set_policy);
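+
+/*
+ * Illustrative sketch (not part of this patch): selecting a policy by id
+ * using the two exported helpers above. The wrapper and the way the target
+ * id is obtained are hypothetical; only kbase_pm_list_policies(),
+ * kbase_pm_set_policy() and the policy ->id field are taken from this file.
+ *
+ *	static void example_switch_policy(struct kbase_device *kbdev, u32 id)
+ *	{
+ *		const struct kbase_pm_policy *const *list;
+ *		int i, count;
+ *
+ *		count = kbase_pm_list_policies(&list);
+ *		for (i = 0; i < count; i++) {
+ *			if (list[i]->id == id) {
+ *				kbase_pm_set_policy(kbdev, list[i]);
+ *				break;
+ *			}
+ *		}
+ *	}
+ */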
+
+/* Check whether a state change has finished, and trace it as completed */
+static void
+kbase_pm_trace_check_and_finish_state_change(struct kbase_device *kbdev)
+{
+ if ((kbdev->shader_available_bitmap &
+ kbdev->pm.backend.desired_shader_state)
+ == kbdev->pm.backend.desired_shader_state &&
+ (kbdev->tiler_available_bitmap &
+ kbdev->pm.backend.desired_tiler_state)
+ == kbdev->pm.backend.desired_tiler_state)
+ kbase_timeline_pm_check_handle_event(kbdev,
+ KBASE_TIMELINE_PM_EVENT_GPU_STATE_CHANGED);
+}
+
+void kbase_pm_request_cores(struct kbase_device *kbdev,
+ bool tiler_required, u64 shader_cores)
+{
+ u64 cores;
+
+ kbase_pm_change_state change_gpu_state = 0u;
+
+ KBASE_DEBUG_ASSERT(kbdev != NULL);
+
+ lockdep_assert_held(&kbdev->hwaccess_lock);
+
+ cores = shader_cores;
+ while (cores) {
+ int bitnum = fls64(cores) - 1;
+ u64 bit = 1ULL << bitnum;
+
+ /* It should be almost impossible for this to overflow. It would
+ * require 2^32 atoms to request a particular core, which would
+ * require 2^24 contexts to submit. This would require an amount
+ * of memory that is impossible on a 32-bit system and extremely
+ * unlikely on a 64-bit system. */
+ int cnt = ++kbdev->shader_needed_cnt[bitnum];
+
+ if (1 == cnt) {
+ kbdev->shader_needed_bitmap |= bit;
+ change_gpu_state |= KBASE_PM_CHANGE_STATE_SHADER;
+ }
+
+ cores &= ~bit;
+ }
+
+ if (tiler_required) {
+ int cnt = ++kbdev->tiler_needed_cnt;
+
+ if (1 == cnt)
+ change_gpu_state |= KBASE_PM_CHANGE_STATE_TILER;
+
+ KBASE_DEBUG_ASSERT(kbdev->tiler_needed_cnt != 0);
+ }
+
+ if (change_gpu_state) {
+ KBASE_TRACE_ADD(kbdev, PM_REQUEST_CHANGE_SHADER_NEEDED, NULL,
+ NULL, 0u, (u32) kbdev->shader_needed_bitmap);
+
+ kbase_timeline_pm_cores_func(kbdev,
+ KBASE_PM_FUNC_ID_REQUEST_CORES_START,
+ change_gpu_state);
+ kbase_pm_update_cores_state_nolock(kbdev);
+ kbase_timeline_pm_cores_func(kbdev,
+ KBASE_PM_FUNC_ID_REQUEST_CORES_END,
+ change_gpu_state);
+ }
+}
+
+KBASE_EXPORT_TEST_API(kbase_pm_request_cores);
+
+void kbase_pm_unrequest_cores(struct kbase_device *kbdev,
+ bool tiler_required, u64 shader_cores)
+{
+ kbase_pm_change_state change_gpu_state = 0u;
+
+ KBASE_DEBUG_ASSERT(kbdev != NULL);
+
+ lockdep_assert_held(&kbdev->hwaccess_lock);
+
+ while (shader_cores) {
+ int bitnum = fls64(shader_cores) - 1;
+ u64 bit = 1ULL << bitnum;
+ int cnt;
+
+ KBASE_DEBUG_ASSERT(kbdev->shader_needed_cnt[bitnum] > 0);
+
+ cnt = --kbdev->shader_needed_cnt[bitnum];
+
+ if (0 == cnt) {
+ kbdev->shader_needed_bitmap &= ~bit;
+
+ change_gpu_state |= KBASE_PM_CHANGE_STATE_SHADER;
+ }
+
+ shader_cores &= ~bit;
+ }
+
+ if (tiler_required) {
+ int cnt;
+
+ KBASE_DEBUG_ASSERT(kbdev->tiler_needed_cnt > 0);
+
+ cnt = --kbdev->tiler_needed_cnt;
+
+ if (0 == cnt)
+ change_gpu_state |= KBASE_PM_CHANGE_STATE_TILER;
+ }
+
+ if (change_gpu_state) {
+ KBASE_TRACE_ADD(kbdev, PM_UNREQUEST_CHANGE_SHADER_NEEDED, NULL,
+ NULL, 0u, (u32) kbdev->shader_needed_bitmap);
+
+ kbase_pm_update_cores_state_nolock(kbdev);
+
+ /* Trace that any state change effectively completes immediately
+ * - no-one will wait on the state change */
+ kbase_pm_trace_check_and_finish_state_change(kbdev);
+ }
+}
+
+KBASE_EXPORT_TEST_API(kbase_pm_unrequest_cores);
+
+enum kbase_pm_cores_ready
+kbase_pm_register_inuse_cores(struct kbase_device *kbdev,
+ bool tiler_required, u64 shader_cores)
+{
+ u64 prev_shader_needed; /* Just for tracing */
+ u64 prev_shader_inuse; /* Just for tracing */
+
+ lockdep_assert_held(&kbdev->hwaccess_lock);
+
+ prev_shader_needed = kbdev->shader_needed_bitmap;
+ prev_shader_inuse = kbdev->shader_inuse_bitmap;
+
+ /* If desired_shader_state does not contain the requested cores, then
+ * power management is not attempting to power those cores (most
+ * likely due to core availability policy) and a new job affinity must
+ * be chosen */
+ if ((kbdev->pm.backend.desired_shader_state & shader_cores) !=
+ shader_cores) {
+ return (kbdev->pm.backend.poweroff_wait_in_progress ||
+ kbdev->pm.backend.pm_current_policy == NULL) ?
+ KBASE_CORES_NOT_READY : KBASE_NEW_AFFINITY;
+ }
+
+ if ((kbdev->shader_available_bitmap & shader_cores) != shader_cores ||
+ (tiler_required && !kbdev->tiler_available_bitmap)) {
+ /* Trace ongoing core transition */
+ kbase_timeline_pm_l2_transition_start(kbdev);
+ return KBASE_CORES_NOT_READY;
+ }
+
+	/* If we started to trace a state change, then trace it as being
+ * finished by now, at the very latest */
+ kbase_pm_trace_check_and_finish_state_change(kbdev);
+ /* Trace core transition done */
+ kbase_timeline_pm_l2_transition_done(kbdev);
+
+ while (shader_cores) {
+ int bitnum = fls64(shader_cores) - 1;
+ u64 bit = 1ULL << bitnum;
+ int cnt;
+
+ KBASE_DEBUG_ASSERT(kbdev->shader_needed_cnt[bitnum] > 0);
+
+ cnt = --kbdev->shader_needed_cnt[bitnum];
+
+ if (0 == cnt)
+ kbdev->shader_needed_bitmap &= ~bit;
+
+ /* shader_inuse_cnt should not overflow because there can only
+ * be a very limited number of jobs on the h/w at one time */
+
+ kbdev->shader_inuse_cnt[bitnum]++;
+ kbdev->shader_inuse_bitmap |= bit;
+
+ shader_cores &= ~bit;
+ }
+
+ if (tiler_required) {
+ KBASE_DEBUG_ASSERT(kbdev->tiler_needed_cnt > 0);
+
+ --kbdev->tiler_needed_cnt;
+
+ kbdev->tiler_inuse_cnt++;
+
+ KBASE_DEBUG_ASSERT(kbdev->tiler_inuse_cnt != 0);
+ }
+
+ if (prev_shader_needed != kbdev->shader_needed_bitmap)
+ KBASE_TRACE_ADD(kbdev, PM_REGISTER_CHANGE_SHADER_NEEDED, NULL,
+ NULL, 0u, (u32) kbdev->shader_needed_bitmap);
+
+ if (prev_shader_inuse != kbdev->shader_inuse_bitmap)
+ KBASE_TRACE_ADD(kbdev, PM_REGISTER_CHANGE_SHADER_INUSE, NULL,
+ NULL, 0u, (u32) kbdev->shader_inuse_bitmap);
+
+ return KBASE_CORES_READY;
+}
+
+KBASE_EXPORT_TEST_API(kbase_pm_register_inuse_cores);
+
+void kbase_pm_release_cores(struct kbase_device *kbdev,
+ bool tiler_required, u64 shader_cores)
+{
+ kbase_pm_change_state change_gpu_state = 0u;
+
+ KBASE_DEBUG_ASSERT(kbdev != NULL);
+
+ lockdep_assert_held(&kbdev->hwaccess_lock);
+
+ while (shader_cores) {
+ int bitnum = fls64(shader_cores) - 1;
+ u64 bit = 1ULL << bitnum;
+ int cnt;
+
+ KBASE_DEBUG_ASSERT(kbdev->shader_inuse_cnt[bitnum] > 0);
+
+ cnt = --kbdev->shader_inuse_cnt[bitnum];
+
+ if (0 == cnt) {
+ kbdev->shader_inuse_bitmap &= ~bit;
+ change_gpu_state |= KBASE_PM_CHANGE_STATE_SHADER;
+ }
+
+ shader_cores &= ~bit;
+ }
+
+ if (tiler_required) {
+ int cnt;
+
+ KBASE_DEBUG_ASSERT(kbdev->tiler_inuse_cnt > 0);
+
+ cnt = --kbdev->tiler_inuse_cnt;
+
+ if (0 == cnt)
+ change_gpu_state |= KBASE_PM_CHANGE_STATE_TILER;
+ }
+
+ if (change_gpu_state) {
+ KBASE_TRACE_ADD(kbdev, PM_RELEASE_CHANGE_SHADER_INUSE, NULL,
+ NULL, 0u, (u32) kbdev->shader_inuse_bitmap);
+
+ kbase_timeline_pm_cores_func(kbdev,
+ KBASE_PM_FUNC_ID_RELEASE_CORES_START,
+ change_gpu_state);
+ kbase_pm_update_cores_state_nolock(kbdev);
+ kbase_timeline_pm_cores_func(kbdev,
+ KBASE_PM_FUNC_ID_RELEASE_CORES_END,
+ change_gpu_state);
+
+ /* Trace that any state change completed immediately */
+ kbase_pm_trace_check_and_finish_state_change(kbdev);
+ }
+}
+
+KBASE_EXPORT_TEST_API(kbase_pm_release_cores);
+
+void kbase_pm_request_cores_sync(struct kbase_device *kbdev,
+ bool tiler_required,
+ u64 shader_cores)
+{
+ unsigned long flags;
+
+ kbase_pm_wait_for_poweroff_complete(kbdev);
+
+ spin_lock_irqsave(&kbdev->hwaccess_lock, flags);
+ kbase_pm_request_cores(kbdev, tiler_required, shader_cores);
+ spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);
+
+ kbase_pm_check_transitions_sync(kbdev);
+}
+
+KBASE_EXPORT_TEST_API(kbase_pm_request_cores_sync);
+
+void kbase_pm_request_l2_caches(struct kbase_device *kbdev)
+{
+ unsigned long flags;
+ u32 prior_l2_users_count;
+
+ spin_lock_irqsave(&kbdev->hwaccess_lock, flags);
+
+ prior_l2_users_count = kbdev->l2_users_count++;
+
+ KBASE_DEBUG_ASSERT(kbdev->l2_users_count != 0);
+
+ /* if the GPU is reset while the l2 is on, l2 will be off but
+ * prior_l2_users_count will be > 0. l2_available_bitmap will have been
+ * set to 0 though by kbase_pm_init_hw */
+ if (!prior_l2_users_count || !kbdev->l2_available_bitmap)
+ kbase_pm_check_transitions_nolock(kbdev);
+
+ spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);
+ wait_event(kbdev->pm.backend.l2_powered_wait,
+ kbdev->pm.backend.l2_powered == 1);
+
+ /* Trace that any state change completed immediately */
+ kbase_pm_trace_check_and_finish_state_change(kbdev);
+}
+
+KBASE_EXPORT_TEST_API(kbase_pm_request_l2_caches);
+
+void kbase_pm_request_l2_caches_l2_is_on(struct kbase_device *kbdev)
+{
+ lockdep_assert_held(&kbdev->hwaccess_lock);
+
+ kbdev->l2_users_count++;
+}
+
+KBASE_EXPORT_TEST_API(kbase_pm_request_l2_caches_l2_is_on);
+
+void kbase_pm_release_l2_caches(struct kbase_device *kbdev)
+{
+ lockdep_assert_held(&kbdev->hwaccess_lock);
+
+ KBASE_DEBUG_ASSERT(kbdev->l2_users_count > 0);
+
+ --kbdev->l2_users_count;
+
+ if (!kbdev->l2_users_count) {
+ kbase_pm_check_transitions_nolock(kbdev);
+ /* Trace that any state change completed immediately */
+ kbase_pm_trace_check_and_finish_state_change(kbdev);
+ }
+}
+
+KBASE_EXPORT_TEST_API(kbase_pm_release_l2_caches);
diff --git a/midgard-0.r2p0/backend/gpu/mali_kbase_pm_policy.h b/midgard-0.r2p0/backend/gpu/mali_kbase_pm_policy.h
new file mode 100755
index 0000000..611a90e
--- /dev/null
+++ b/midgard-0.r2p0/backend/gpu/mali_kbase_pm_policy.h
@@ -0,0 +1,227 @@
+/*
+ *
+ * (C) COPYRIGHT 2010-2015 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained
+ * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
+ * Boston, MA 02110-1301, USA.
+ *
+ */
+
+
+
+/*
+ * Power policy API definitions
+ */
+
+#ifndef _KBASE_PM_POLICY_H_
+#define _KBASE_PM_POLICY_H_
+
+/**
+ * kbase_pm_policy_init - Initialize power policy framework
+ *
+ * @kbdev: The kbase device structure for the device (must be a valid pointer)
+ *
+ * Must be called before calling any other policy function
+ *
+ * Return: 0 if the power policy framework was successfully
+ * initialized, -errno otherwise.
+ */
+int kbase_pm_policy_init(struct kbase_device *kbdev);
+
+/**
+ * kbase_pm_policy_term - Terminate power policy framework
+ *
+ * @kbdev: The kbase device structure for the device (must be a valid pointer)
+ */
+void kbase_pm_policy_term(struct kbase_device *kbdev);
+
+/**
+ * kbase_pm_update_active - Update the active power state of the GPU
+ *
+ * @kbdev: The kbase device structure for the device (must be a valid pointer)
+ *
+ * Calls into the current power policy
+ */
+void kbase_pm_update_active(struct kbase_device *kbdev);
+
+/**
+ * kbase_pm_update_cores - Update the desired core state of the GPU
+ *
+ * @kbdev: The kbase device structure for the device (must be a valid pointer)
+ *
+ * Calls into the current power policy
+ */
+void kbase_pm_update_cores(struct kbase_device *kbdev);
+
+
+enum kbase_pm_cores_ready {
+ KBASE_CORES_NOT_READY = 0,
+ KBASE_NEW_AFFINITY = 1,
+ KBASE_CORES_READY = 2
+};
+
+
+/**
+ * kbase_pm_request_cores_sync - Synchronous variant of kbase_pm_request_cores()
+ *
+ * @kbdev: The kbase device structure for the device
+ * @tiler_required: true if the tiler is required, false otherwise
+ * @shader_cores: A bitmask of shader cores which are necessary for the job
+ *
+ * When this function returns, the @shader_cores will be in the READY state.
+ *
+ * This is a safe variant of kbase_pm_check_transitions_sync(): it handles the
+ * work of ensuring the requested cores will remain powered until a matching
+ * call to kbase_pm_unrequest_cores()/kbase_pm_release_cores() (as appropriate)
+ * is made.
+ */
+void kbase_pm_request_cores_sync(struct kbase_device *kbdev,
+ bool tiler_required, u64 shader_cores);
+
+/**
+ * kbase_pm_request_cores - Mark one or more cores as being required
+ * for jobs to be submitted
+ *
+ * @kbdev: The kbase device structure for the device
+ * @tiler_required: true if the tiler is required, false otherwise
+ * @shader_cores: A bitmask of shader cores which are necessary for the job
+ *
+ * This function is called by the job scheduler to mark one or more cores as
+ * being required to submit jobs that are ready to run.
+ *
+ * The cores requested are reference counted and a subsequent call to
+ * kbase_pm_register_inuse_cores() or kbase_pm_unrequest_cores() should be
+ * made to dereference the cores as being 'needed'.
+ *
+ * The active power policy will meet or exceed the requirements of the
+ * requested cores in the system. Any core transitions needed will begin
+ * immediately, but they might not complete (and the cores might not be
+ * available) until a Power Management IRQ.
+ */
+void kbase_pm_request_cores(struct kbase_device *kbdev,
+ bool tiler_required, u64 shader_cores);
+
+/**
+ * kbase_pm_unrequest_cores - Unmark one or more cores as being required for
+ * jobs to be submitted.
+ *
+ * @kbdev: The kbase device structure for the device
+ * @tiler_required: true if the tiler is required, false otherwise
+ * @shader_cores: A bitmask of shader cores (as given to
+ * kbase_pm_request_cores() )
+ *
+ * This function undoes the effect of kbase_pm_request_cores(). It should be
+ * used when a job is not going to be submitted to the hardware (e.g. the job is
+ * cancelled before it is enqueued).
+ *
+ * The active power policy will meet or exceed the requirements of the
+ * requested cores in the system. Any core transitions needed will begin
+ * immediately, but they might not complete until a Power Management IRQ.
+ *
+ * The policy may use this as an indication that it can power down cores.
+ */
+void kbase_pm_unrequest_cores(struct kbase_device *kbdev,
+ bool tiler_required, u64 shader_cores);
+
+/**
+ * kbase_pm_register_inuse_cores - Register a set of cores as in use by a job
+ *
+ * @kbdev: The kbase device structure for the device
+ * @tiler_required: true if the tiler is required, false otherwise
+ * @shader_cores: A bitmask of shader cores (as given to
+ * kbase_pm_request_cores() )
+ *
+ * This function should be called after kbase_pm_request_cores() when the job
+ * is about to be submitted to the hardware. It will check that the necessary
+ * cores are available and if so update the 'needed' and 'inuse' bitmasks to
+ * reflect that the job is now committed to being run.
+ *
+ * If the necessary cores are not currently available then the function will
+ * return %KBASE_CORES_NOT_READY and have no effect.
+ *
+ * Return: %KBASE_CORES_NOT_READY if the cores are not immediately ready,
+ *
+ * %KBASE_NEW_AFFINITY if the affinity requested is not allowed,
+ *
+ * %KBASE_CORES_READY if the cores requested are already available
+ */
+enum kbase_pm_cores_ready kbase_pm_register_inuse_cores(
+ struct kbase_device *kbdev,
+ bool tiler_required,
+ u64 shader_cores);
+
+/**
+ * kbase_pm_release_cores - Release cores after a job has run
+ *
+ * @kbdev: The kbase device structure for the device
+ * @tiler_required: true if the tiler is required, false otherwise
+ * @shader_cores: A bitmask of shader cores (as given to
+ * kbase_pm_register_inuse_cores() )
+ *
+ * This function should be called when a job has finished running on the
+ * hardware. A call to kbase_pm_register_inuse_cores() must have previously
+ * occurred. The reference counts of the specified cores will be decremented
+ * which may cause the bitmask of 'inuse' cores to be reduced. The power policy
+ * may then turn off any cores which are no longer 'inuse'.
+ */
+void kbase_pm_release_cores(struct kbase_device *kbdev,
+ bool tiler_required, u64 shader_cores);
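+
+/*
+ * Illustrative lifecycle sketch (not part of this patch): the expected
+ * request / register-inuse / release flow for a job that needs the tiler and
+ * the shader cores in 'affinity'. Locking and error handling are reduced to
+ * comments; the real job scheduler spreads these steps across its submission
+ * and completion paths.
+ *
+ *	kbase_pm_request_cores(kbdev, true, affinity);
+ *
+ *	if (kbase_pm_register_inuse_cores(kbdev, true, affinity) ==
+ *			KBASE_CORES_READY) {
+ *		... submit the job; once it completes ...
+ *		kbase_pm_release_cores(kbdev, true, affinity);
+ *	} else {
+ *		... cores not ready or affinity rejected: retry later, or
+ *		drop the request with kbase_pm_unrequest_cores() ...
+ *	}
+ */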
+
+/**
+ * kbase_pm_request_l2_caches - Request l2 caches
+ *
+ * @kbdev: The kbase device structure for the device (must be a valid pointer)
+ *
+ * Request the use of the l2 caches for all core groups: power them up, wait
+ * for the power-up to complete, and prevent the power manager from powering
+ * the l2 caches back down.
+ *
+ * This tells the power management that the caches should be powered up, and
+ * they should remain powered, irrespective of the usage of shader cores. This
+ * does not return until the l2 caches are powered up.
+ *
+ * The caller must call kbase_pm_release_l2_caches() when they are finished
+ * to allow normal power management of the l2 caches to resume.
+ *
+ * This should only be used when power management is active.
+ */
+void kbase_pm_request_l2_caches(struct kbase_device *kbdev);
+
+/**
+ * kbase_pm_request_l2_caches_l2_is_on - Request l2 caches but don't power on
+ *
+ * @kbdev: The kbase device structure for the device (must be a valid pointer)
+ *
+ * Increment the count of l2 users but do not attempt to power on the l2
+ *
+ * It is the caller's responsibility to ensure that the l2 is already powered up
+ * and to eventually call kbase_pm_release_l2_caches()
+ */
+void kbase_pm_request_l2_caches_l2_is_on(struct kbase_device *kbdev);
+
+/**
+ * kbase_pm_release_l2_caches - Release l2 caches
+ *
+ * @kbdev: The kbase device structure for the device (must be a valid pointer)
+ *
+ * Release the use of l2 caches for all core groups and allow the power manager
+ * to power them down when necessary.
+ *
+ * This tells the power management that the caches can be powered down if
+ * necessary, with respect to the usage of shader cores.
+ *
+ * The caller must have called kbase_pm_request_l2_caches() prior to a call
+ * to this.
+ *
+ * This should only be used when power management is active.
+ */
+void kbase_pm_release_l2_caches(struct kbase_device *kbdev);
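+
+/*
+ * Illustrative sketch (not part of this patch): typical pairing around work
+ * that needs the l2 caches to stay powered. The locking shown follows the
+ * implementations in mali_kbase_pm_policy.c: the request takes hwaccess_lock
+ * itself and may sleep, while the release expects the caller to already hold
+ * hwaccess_lock.
+ *
+ *	kbase_pm_request_l2_caches(kbdev);
+ *	... do the work that requires the l2 to remain powered ...
+ *	spin_lock_irqsave(&kbdev->hwaccess_lock, flags);
+ *	kbase_pm_release_l2_caches(kbdev);
+ *	spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);
+ */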
+
+#endif /* _KBASE_PM_POLICY_H_ */
diff --git a/midgard-0.r2p0/backend/gpu/mali_kbase_power_model_simple.c b/midgard-0.r2p0/backend/gpu/mali_kbase_power_model_simple.c
new file mode 100755
index 0000000..d965033
--- /dev/null
+++ b/midgard-0.r2p0/backend/gpu/mali_kbase_power_model_simple.c
@@ -0,0 +1,169 @@
+/*
+ *
+ * (C) COPYRIGHT 2011-2016 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained
+ * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
+ * Boston, MA 02110-1301, USA.
+ *
+ */
+
+
+
+#include <linux/devfreq_cooling.h>
+#include <linux/thermal.h>
+#include <linux/of.h>
+#include <mali_kbase.h>
+#include <mali_kbase_defs.h>
+#include <backend/gpu/mali_kbase_power_model_simple.h>
+
+/*
+ * This model is primarily designed for the Juno platform. It may not be
+ * suitable for other platforms.
+ */
+
+#define FALLBACK_STATIC_TEMPERATURE 55000
+
+static u32 dynamic_coefficient;
+static u32 static_coefficient;
+static s32 ts[4];
+static struct thermal_zone_device *gpu_tz;
+
+static unsigned long model_static_power(unsigned long voltage)
+{
+#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 3, 0)
+ unsigned long temperature;
+#else
+ int temperature;
+#endif
+ unsigned long temp;
+ unsigned long temp_squared, temp_cubed, temp_scaling_factor;
+ const unsigned long voltage_cubed = (voltage * voltage * voltage) >> 10;
+
+ if (gpu_tz) {
+ int ret;
+
+ ret = gpu_tz->ops->get_temp(gpu_tz, &temperature);
+ if (ret) {
+ pr_warn_ratelimited("Error reading temperature for gpu thermal zone: %d\n",
+ ret);
+ temperature = FALLBACK_STATIC_TEMPERATURE;
+ }
+ } else {
+ temperature = FALLBACK_STATIC_TEMPERATURE;
+ }
+
+ /* Calculate the temperature scaling factor. To be applied to the
+ * voltage scaled power.
+ */
+ temp = temperature / 1000;
+ temp_squared = temp * temp;
+ temp_cubed = temp_squared * temp;
+ temp_scaling_factor =
+ (ts[3] * temp_cubed)
+ + (ts[2] * temp_squared)
+ + (ts[1] * temp)
+ + ts[0];
+
+ return (((static_coefficient * voltage_cubed) >> 20)
+ * temp_scaling_factor)
+ / 1000000;
+}
+
+static unsigned long model_dynamic_power(unsigned long freq,
+ unsigned long voltage)
+{
+ /* The inputs: freq (f) is in Hz, and voltage (v) in mV.
+ * The coefficient (c) is in mW/(MHz mV mV).
+ *
+ * This function calculates the dynamic power after this formula:
+ * Pdyn (mW) = c (mW/(MHz*mV*mV)) * v (mV) * v (mV) * f (MHz)
+ */
+ const unsigned long v2 = (voltage * voltage) / 1000; /* m*(V*V) */
+ const unsigned long f_mhz = freq / 1000000; /* MHz */
+
+ return (dynamic_coefficient * v2 * f_mhz) / 1000000; /* mW */
+}
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 4, 0)
+struct devfreq_cooling_ops power_model_simple_ops = {
+#else
+struct devfreq_cooling_power power_model_simple_ops = {
+#endif
+ .get_static_power = model_static_power,
+ .get_dynamic_power = model_dynamic_power,
+};
+
+int kbase_power_model_simple_init(struct kbase_device *kbdev)
+{
+ struct device_node *power_model_node;
+ const char *tz_name;
+ u32 static_power, dynamic_power;
+ u32 voltage, voltage_squared, voltage_cubed, frequency;
+
+ power_model_node = of_get_child_by_name(kbdev->dev->of_node,
+ "power_model");
+ if (!power_model_node) {
+ dev_err(kbdev->dev, "could not find power_model node\n");
+ return -ENODEV;
+ }
+ if (!of_device_is_compatible(power_model_node,
+ "arm,mali-simple-power-model")) {
+ dev_err(kbdev->dev, "power_model incompatible with simple power model\n");
+ return -ENODEV;
+ }
+
+ if (of_property_read_string(power_model_node, "thermal-zone",
+ &tz_name)) {
+ dev_err(kbdev->dev, "ts in power_model not available\n");
+ return -EINVAL;
+ }
+
+ gpu_tz = thermal_zone_get_zone_by_name(tz_name);
+ if (IS_ERR(gpu_tz)) {
+ pr_warn_ratelimited("Error getting gpu thermal zone (%ld), not yet ready?\n",
+ PTR_ERR(gpu_tz));
+ gpu_tz = NULL;
+
+ return -EPROBE_DEFER;
+ }
+
+ if (of_property_read_u32(power_model_node, "static-power",
+ &static_power)) {
+ dev_err(kbdev->dev, "static-power in power_model not available\n");
+ return -EINVAL;
+ }
+ if (of_property_read_u32(power_model_node, "dynamic-power",
+ &dynamic_power)) {
+ dev_err(kbdev->dev, "dynamic-power in power_model not available\n");
+ return -EINVAL;
+ }
+ if (of_property_read_u32(power_model_node, "voltage",
+ &voltage)) {
+ dev_err(kbdev->dev, "voltage in power_model not available\n");
+ return -EINVAL;
+ }
+ if (of_property_read_u32(power_model_node, "frequency",
+ &frequency)) {
+ dev_err(kbdev->dev, "frequency in power_model not available\n");
+ return -EINVAL;
+ }
+ voltage_squared = (voltage * voltage) / 1000;
+ voltage_cubed = voltage * voltage * voltage;
+ static_coefficient = (static_power << 20) / (voltage_cubed >> 10);
+ dynamic_coefficient = (((dynamic_power * 1000) / voltage_squared)
+ * 1000) / frequency;
+
+ if (of_property_read_u32_array(power_model_node, "ts", (u32 *)ts, 4)) {
+ dev_err(kbdev->dev, "ts in power_model not available\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
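The coefficient derivation and the two power formulas above can be checked in isolation. Below is a minimal user-space sketch that mirrors the same fixed-point arithmetic; the device-tree style values (static_power, dynamic_power, voltage, frequency, ts) are illustrative placeholders rather than figures for any real platform, and frequency is taken to be in MHz, since that is the only unit for which the coefficient derivation round-trips against model_dynamic_power().

#include <stdio.h>
#include <stdint.h>

/* Illustrative placeholder values in the units the driver expects:
 * power in mW at the reference operating point, voltage in mV,
 * frequency in MHz, ts[] a cubic polynomial scaled so that it
 * evaluates to roughly 1000000 at typical temperatures. */
static const uint32_t static_power  = 500;
static const uint32_t dynamic_power = 1500;
static const uint32_t voltage       = 800;	/* mV  */
static const uint32_t frequency     = 500;	/* MHz */
static const int32_t  ts[4]         = { 954600, 7830, -360, 4 };

static uint32_t static_coefficient;
static uint32_t dynamic_coefficient;

/* Same arithmetic as model_static_power(), with the temperature passed in. */
static unsigned long static_power_mw(unsigned long v_mv, long temp_mdeg)
{
	const unsigned long voltage_cubed = (v_mv * v_mv * v_mv) >> 10;
	unsigned long temp = temp_mdeg / 1000;
	unsigned long temp_squared = temp * temp;
	unsigned long temp_cubed = temp_squared * temp;
	unsigned long temp_scaling_factor = (ts[3] * temp_cubed)
			+ (ts[2] * temp_squared) + (ts[1] * temp) + ts[0];

	return (((static_coefficient * voltage_cubed) >> 20)
			* temp_scaling_factor) / 1000000;
}

/* Same arithmetic as model_dynamic_power(). */
static unsigned long dynamic_power_mw(unsigned long freq_hz, unsigned long v_mv)
{
	const unsigned long v2 = (v_mv * v_mv) / 1000;
	const unsigned long f_mhz = freq_hz / 1000000;

	return (dynamic_coefficient * v2 * f_mhz) / 1000000;	/* mW */
}

int main(void)
{
	uint32_t voltage_squared = (voltage * voltage) / 1000;
	uint32_t voltage_cubed = voltage * voltage * voltage;

	/* Same derivation as kbase_power_model_simple_init(); widened to
	 * 64 bits here only so other placeholder values cannot overflow. */
	static_coefficient = (uint32_t)(((uint64_t)static_power << 20)
			/ (voltage_cubed >> 10));
	dynamic_coefficient = (((dynamic_power * 1000) / voltage_squared)
			* 1000) / frequency;

	printf("static:  ~%lu mW at 55 C and %u mV\n",
			static_power_mw(voltage, 55000), voltage);
	printf("dynamic: ~%lu mW at %u MHz and %u mV\n",
			dynamic_power_mw((unsigned long)frequency * 1000000,
					 voltage),
			frequency, voltage);
	return 0;
}

With these placeholder numbers the output lands close to the reference figures (roughly 480 mW static at 55 C and 1499 mW dynamic at 500 MHz), which is what the round-trip derivation is designed to achieve.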
diff --git a/midgard-0.r2p0/backend/gpu/mali_kbase_power_model_simple.h b/midgard-0.r2p0/backend/gpu/mali_kbase_power_model_simple.h
new file mode 100755
index 0000000..9b5e69a
--- /dev/null
+++ b/midgard-0.r2p0/backend/gpu/mali_kbase_power_model_simple.h
@@ -0,0 +1,47 @@
+/*
+ *
+ * (C) COPYRIGHT 2014-2016 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained
+ * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
+ * Boston, MA 02110-1301, USA.
+ *
+ */
+
+
+
+#ifndef _BASE_POWER_MODEL_SIMPLE_H_
+#define _BASE_POWER_MODEL_SIMPLE_H_
+
+/**
+ * kbase_power_model_simple_init - Initialise the simple power model
+ * @kbdev: Device pointer
+ *
+ * The simple power model estimates power based on current voltage, temperature,
+ * and coefficients read from device tree. It does not take utilization into
+ * account.
+ *
+ * The power model requires coefficients from the power_model node in device
+ * tree. The absence of this node will prevent the model from functioning, but
+ * should not prevent the rest of the driver from running.
+ *
+ * Return: 0 on success
+ *         -ENODEV if the power_model node is missing or not compatible
+ * -EPROBE_DEFER if the thermal zone specified in device tree is not
+ * currently available
+ * Any other negative value on failure
+ */
+int kbase_power_model_simple_init(struct kbase_device *kbdev);
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 4, 0)
+extern struct devfreq_cooling_ops power_model_simple_ops;
+#else
+extern struct devfreq_cooling_power power_model_simple_ops;
+#endif
+
+#endif /* _BASE_POWER_MODEL_SIMPLE_H_ */
diff --git a/midgard-0.r2p0/backend/gpu/mali_kbase_time.c b/midgard-0.r2p0/backend/gpu/mali_kbase_time.c
new file mode 100755
index 0000000..d992989
--- /dev/null
+++ b/midgard-0.r2p0/backend/gpu/mali_kbase_time.c
@@ -0,0 +1,103 @@
+/*
+ *
+ * (C) COPYRIGHT 2014-2016 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained
+ * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
+ * Boston, MA 02110-1301, USA.
+ *
+ */
+
+
+
+#include <mali_kbase.h>
+#include <mali_kbase_hwaccess_time.h>
+#include <backend/gpu/mali_kbase_device_internal.h>
+#include <backend/gpu/mali_kbase_pm_internal.h>
+
+void kbase_backend_get_gpu_time(struct kbase_device *kbdev, u64 *cycle_counter,
+ u64 *system_time, struct timespec *ts)
+{
+ u32 hi1, hi2;
+
+ kbase_pm_request_gpu_cycle_counter(kbdev);
+
+ /* Read hi, lo, hi to ensure that overflow from lo to hi is handled
+ * correctly */
+ do {
+ hi1 = kbase_reg_read(kbdev, GPU_CONTROL_REG(CYCLE_COUNT_HI),
+ NULL);
+ *cycle_counter = kbase_reg_read(kbdev,
+ GPU_CONTROL_REG(CYCLE_COUNT_LO), NULL);
+ hi2 = kbase_reg_read(kbdev, GPU_CONTROL_REG(CYCLE_COUNT_HI),
+ NULL);
+ *cycle_counter |= (((u64) hi1) << 32);
+ } while (hi1 != hi2);
+
+ /* Read hi, lo, hi to ensure that overflow from lo to hi is handled
+ * correctly */
+ do {
+ hi1 = kbase_reg_read(kbdev, GPU_CONTROL_REG(TIMESTAMP_HI),
+ NULL);
+ *system_time = kbase_reg_read(kbdev,
+ GPU_CONTROL_REG(TIMESTAMP_LO), NULL);
+ hi2 = kbase_reg_read(kbdev, GPU_CONTROL_REG(TIMESTAMP_HI),
+ NULL);
+ *system_time |= (((u64) hi1) << 32);
+ } while (hi1 != hi2);
+
+ /* Record the CPU's idea of current time */
+ getrawmonotonic(ts);
+
+ kbase_pm_release_gpu_cycle_counter(kbdev);
+}
+
+/**
+ * kbase_wait_write_flush - Wait for GPU write flush
+ * @kctx: Context pointer
+ *
+ * Wait 1000 GPU clock cycles. This delay is known to give the GPU time to flush
+ * its write buffer.
+ *
+ * Only in use for BASE_HW_ISSUE_6367
+ *
+ * Note: if a GPU reset occurs then the counters are reset to zero and the
+ * delay may not be as expected.
+ */
+#ifndef CONFIG_MALI_NO_MALI
+void kbase_wait_write_flush(struct kbase_context *kctx)
+{
+ u32 base_count = 0;
+
+ /*
+ * The caller must be holding onto the kctx or the call is from
+ * userspace.
+ */
+ kbase_pm_context_active(kctx->kbdev);
+ kbase_pm_request_gpu_cycle_counter(kctx->kbdev);
+
+ while (true) {
+ u32 new_count;
+
+ new_count = kbase_reg_read(kctx->kbdev,
+ GPU_CONTROL_REG(CYCLE_COUNT_LO), NULL);
+ /* First time around, just store the count. */
+ if (base_count == 0) {
+ base_count = new_count;
+ continue;
+ }
+
+ /* No need to handle wrapping, unsigned maths works for this. */
+ if ((new_count - base_count) > 1000)
+ break;
+ }
+
+ kbase_pm_release_gpu_cycle_counter(kctx->kbdev);
+ kbase_pm_context_idle(kctx->kbdev);
+}
+#endif /* CONFIG_MALI_NO_MALI */
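The hi/lo/hi loops above are the standard way to sample a 64-bit counter exposed as two 32-bit registers that cannot be read atomically: if the high word changes while the low word is being read, the read is retried. The sketch below is a stand-alone illustration of the same pattern against a simulated counter (reg_lo, reg_hi and tick() are invented stand-ins, not driver APIs); it also shows the unsigned wrap-around subtraction that kbase_wait_write_flush() relies on.

#include <stdio.h>
#include <stdint.h>

/* Simulated 32-bit halves of a free-running 64-bit cycle counter. */
static uint32_t reg_lo, reg_hi;

/* Advance the simulated counter by a number of cycles. */
static void tick(uint64_t cycles)
{
	uint64_t t = (((uint64_t)reg_hi << 32) | reg_lo) + cycles;

	reg_lo = (uint32_t)t;
	reg_hi = (uint32_t)(t >> 32);
}

/* Read hi, lo, hi and retry, so a carry from lo into hi is never missed. */
static uint64_t read_counter(void)
{
	uint32_t hi1, hi2;
	uint64_t value;

	do {
		hi1 = reg_hi;
		value = reg_lo;
		hi2 = reg_hi;
		value |= ((uint64_t)hi1 << 32);
	} while (hi1 != hi2);

	return value;
}

int main(void)
{
	uint32_t base_count, new_count;

	tick(0xFFFFFFF0ULL);			/* park just below a carry */
	printf("before: %llu\n", (unsigned long long)read_counter());
	tick(0x20);				/* cross the 32-bit boundary */
	printf("after:  %llu\n", (unsigned long long)read_counter());

	/* Unsigned subtraction gives the elapsed count even across a wrap of
	 * the low register, which is why kbase_wait_write_flush() needs no
	 * explicit wrap handling. */
	base_count = 0xFFFFFFF8u;
	new_count = 0x10u;
	printf("elapsed across wrap: %u cycles\n", new_count - base_count);
	return 0;
}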
diff --git a/midgard-0.r2p0/backend/gpu/mali_kbase_time.h b/midgard-0.r2p0/backend/gpu/mali_kbase_time.h
new file mode 100755
index 0000000..35088ab
--- /dev/null
+++ b/midgard-0.r2p0/backend/gpu/mali_kbase_time.h
@@ -0,0 +1,52 @@
+/*
+ *
+ * (C) COPYRIGHT 2014-2015 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained
+ * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
+ * Boston, MA 02110-1301, USA.
+ *
+ */
+
+
+
+#ifndef _KBASE_BACKEND_TIME_H_
+#define _KBASE_BACKEND_TIME_H_
+
+/**
+ * kbase_backend_get_gpu_time() - Get current GPU time
+ * @kbdev: Device pointer
+ * @cycle_counter: Pointer to u64 to store cycle counter in
+ * @system_time: Pointer to u64 to store system time in
+ * @ts: Pointer to struct timespec to store current monotonic
+ * time in
+ */
+void kbase_backend_get_gpu_time(struct kbase_device *kbdev, u64 *cycle_counter,
+ u64 *system_time, struct timespec *ts);
+
+/**
+ * kbase_wait_write_flush() - Wait for GPU write flush
+ * @kctx: Context pointer
+ *
+ * Wait 1000 GPU clock cycles. This delay is known to give the GPU time to flush
+ * its write buffer.
+ *
+ * If a GPU reset occurs then the counters are reset to zero and the delay may
+ * not be as expected.
+ *
+ * This function is only in use for BASE_HW_ISSUE_6367
+ */
+#ifdef CONFIG_MALI_NO_MALI
+static inline void kbase_wait_write_flush(struct kbase_context *kctx)
+{
+}
+#else
+void kbase_wait_write_flush(struct kbase_context *kctx);
+#endif
+
+#endif /* _KBASE_BACKEND_TIME_H_ */
diff --git a/midgard-0.r2p0/dkms.conf b/midgard-0.r2p0/dkms.conf
new file mode 100644
index 0000000..446d95d
--- /dev/null
+++ b/midgard-0.r2p0/dkms.conf
@@ -0,0 +1,6 @@
+PACKAGE_VERSION="0.r2p0"
+PACKAGE_NAME="midgard"
+BUILT_MODULE_NAME[0]="mali_kbase"
+MAKE[0]="make EXTRA_CFLAGS='-DCONFIG_MALI_MIDGARD=m -DCONFIG_MALI_BACKEND=gpu -DCONFIG_MALI_PLATFORM_DEVICETREE=y' CONFIG_MALI_MIDGARD=m CONFIG_MALI_BACKEND=gpu CONFIG_MALI_PLATFORM_DEVICETREE=y"
+DEST_MODULE_LOCATION[0]="/extra/"
+AUTOINSTALL="yes" \ No newline at end of file
diff --git a/midgard-0.r2p0/docs/Doxyfile b/midgard-0.r2p0/docs/Doxyfile
new file mode 100755
index 0000000..35ff2f1
--- /dev/null
+++ b/midgard-0.r2p0/docs/Doxyfile
@@ -0,0 +1,126 @@
+#
+# (C) COPYRIGHT 2011-2013, 2015 ARM Limited. All rights reserved.
+#
+# This program is free software and is provided to you under the terms of the
+# GNU General Public License version 2 as published by the Free Software
+# Foundation, and any use by you of this program is subject to the terms
+# of such GNU licence.
+#
+# A copy of the licence is included with the program, and can also be obtained
+# from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
+# Boston, MA 02110-1301, USA.
+#
+#
+
+
+
+##############################################################################
+
+# This file contains per-module Doxygen configuration. Please do not add
+# extra settings to this file without consulting all stakeholders, as they
+# may override project-wide settings.
+#
+# Additionally, when defining aliases, macros, sections, etc., use the module
+# name as a prefix, e.g. gles_my_alias.
+
+##############################################################################
+
+@INCLUDE = ../../bldsys/Doxyfile_common
+
+# The INPUT tag can be used to specify the files and/or directories that contain
+# documented source files. You may enter file names like "myfile.cpp" or
+# directories like "/usr/src/myproject". Separate the files or directories
+# with spaces.
+
+INPUT += ../../kernel/drivers/gpu/arm/midgard/
+
+##############################################################################
+# Everything below here is optional, and in most cases not required
+##############################################################################
+
+# This tag can be used to specify a number of aliases that act
+# as commands in the documentation. An alias has the form "name=value".
+# For example adding "sideeffect=\par Side Effects:\n" will allow you to
+# put the command \sideeffect (or @sideeffect) in the documentation, which
+# will result in a user-defined paragraph with heading "Side Effects:".
+# You can put \n's in the value part of an alias to insert newlines.
+
+ALIASES +=
+
+# The ENABLED_SECTIONS tag can be used to enable conditional
+# documentation sections, marked by \if sectionname ... \endif.
+
+ENABLED_SECTIONS +=
+
+# If the value of the INPUT tag contains directories, you can use the
+# FILE_PATTERNS tag to specify one or more wildcard patterns (like *.cpp
+# and *.h) to filter out the source-files in the directories. If left
+# blank the following patterns are tested:
+# *.c *.cc *.cxx *.cpp *.c++ *.java *.ii *.ixx *.ipp *.i++ *.inl *.h *.hh *.hxx
+# *.hpp *.h++ *.idl *.odl *.cs *.php *.php3 *.inc *.m *.mm *.py *.f90
+
+FILE_PATTERNS +=
+
+# The EXCLUDE tag can be used to specify files and/or directories that should be
+# excluded from the INPUT source files. This way you can easily exclude a
+# subdirectory from a directory tree whose root is specified with the INPUT tag.
+EXCLUDE += ../../kernel/drivers/gpu/arm/midgard/platform ../../kernel/drivers/gpu/arm/midgard/platform_dummy ../../kernel/drivers/gpu/arm/midgard/scripts ../../kernel/drivers/gpu/arm/midgard/tests ../../kernel/drivers/gpu/arm/midgard/Makefile ../../kernel/drivers/gpu/arm/midgard/Makefile.kbase ../../kernel/drivers/gpu/arm/midgard/Kbuild ../../kernel/drivers/gpu/arm/midgard/Kconfig ../../kernel/drivers/gpu/arm/midgard/sconscript ../../kernel/drivers/gpu/arm/midgard/docs ../../kernel/drivers/gpu/arm/midgard/pm_test_script.sh ../../kernel/drivers/gpu/arm/midgard/mali_uk.h
+
+
+# If the value of the INPUT tag contains directories, you can use the
+# EXCLUDE_PATTERNS tag to specify one or more wildcard patterns to exclude
+# certain files from those directories. Note that the wildcards are matched
+# against the file with absolute path, so to exclude all test directories
+# for example use the pattern */test/*
+
+EXCLUDE_PATTERNS +=
+
+# The EXCLUDE_SYMBOLS tag can be used to specify one or more symbol names
+# (namespaces, classes, functions, etc.) that should be excluded from the
+# output. The symbol name can be a fully qualified name, a word, or if the
+# wildcard * is used, a substring. Examples: ANamespace, AClass,
+# AClass::ANamespace, ANamespace::*Test
+
+EXCLUDE_SYMBOLS +=
+
+# The EXAMPLE_PATH tag can be used to specify one or more files or
+# directories that contain example code fragments that are included (see
+# the \include command).
+
+EXAMPLE_PATH +=
+
+# The IMAGE_PATH tag can be used to specify one or more files or
+# directories that contain images that are included in the documentation (see
+# the \image command).
+
+IMAGE_PATH +=
+
+# The INCLUDE_PATH tag can be used to specify one or more directories that
+# contain include files that are not input files but should be processed by
+# the preprocessor.
+
+INCLUDE_PATH +=
+
+# The PREDEFINED tag can be used to specify one or more macro names that
+# are defined before the preprocessor is started (similar to the -D option of
+# gcc). The argument of the tag is a list of macros of the form: name
+# or name=definition (no spaces). If the definition and the = are
+# omitted =1 is assumed. To prevent a macro definition from being
+# undefined via #undef or recursively expanded use the := operator
+# instead of the = operator.
+
+PREDEFINED +=
+
+# If the MACRO_EXPANSION and EXPAND_ONLY_PREDEF tags are set to YES then
+# this tag can be used to specify a list of macro names that should be expanded.
+# The macro definition that is found in the sources will be used.
+# Use the PREDEFINED tag if you want to use a different macro definition.
+
+EXPAND_AS_DEFINED +=
+
+# The DOTFILE_DIRS tag can be used to specify one or more directories that
+# contain dot files that are included in the documentation (see the
+# \dotfile command).
+
+DOTFILE_DIRS += ../../kernel/drivers/gpu/arm/midgard/docs
+
diff --git a/midgard-0.r2p0/docs/policy_operation_diagram.dot b/midgard-0.r2p0/docs/policy_operation_diagram.dot
new file mode 100755
index 0000000..7ae05c2
--- /dev/null
+++ b/midgard-0.r2p0/docs/policy_operation_diagram.dot
@@ -0,0 +1,112 @@
+/*
+ *
+ * (C) COPYRIGHT 2010 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained
+ * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
+ * Boston, MA 02110-1301, USA.
+ *
+ */
+
+
+
+
+
+digraph policy_objects_diagram {
+ rankdir=LR;
+ size="12,8";
+ compound=true;
+
+ node [ shape = box ];
+
+ subgraph cluster_policy_queues {
+ low_queue [ shape=record label = "LowP | {<ql>ctx_lo | ... | <qm>ctx_i | ... | <qr>ctx_hi}" ];
+ queues_middle_sep [ label="" shape=plaintext width=0 height=0 ];
+
+ rt_queue [ shape=record label = "RT | {<ql>ctx_lo | ... | <qm>ctx_j | ... | <qr>ctx_hi}" ];
+
+ label = "Policy's Queue(s)";
+ }
+
+ call_enqueue [ shape=plaintext label="enqueue_ctx()" ];
+
+ {
+ rank=same;
+ ordering=out;
+ call_dequeue [ shape=plaintext label="dequeue_head_ctx()\n+ runpool_add_ctx()" ];
+ call_ctxfinish [ shape=plaintext label="runpool_remove_ctx()" ];
+
+ call_ctxdone [ shape=plaintext label="don't requeue;\n/* ctx has no more jobs */" ];
+ }
+
+ subgraph cluster_runpool {
+
+ as0 [ width=2 height = 0.25 label="AS0: Job_1, ..., Job_n" ];
+ as1 [ width=2 height = 0.25 label="AS1: Job_1, ..., Job_m" ];
+ as2 [ width=2 height = 0.25 label="AS2: Job_1, ..., Job_p" ];
+ as3 [ width=2 height = 0.25 label="AS3: Job_1, ..., Job_q" ];
+
+ label = "Policy's Run Pool";
+ }
+
+ {
+ rank=same;
+ call_jdequeue [ shape=plaintext label="dequeue_job()" ];
+ sstop_dotfixup [ shape=plaintext label="" width=0 height=0 ];
+ }
+
+ {
+ rank=same;
+ ordering=out;
+ sstop [ shape=ellipse label="SS-Timer expires" ]
+ jobslots [ shape=record label="Jobslots: | <0>js[0] | <1>js[1] | <2>js[2]" ];
+
+ irq [ label="IRQ" shape=ellipse ];
+
+ job_finish [ shape=plaintext label="don't requeue;\n/* job done */" ];
+ }
+
+ hstop [ shape=ellipse label="HS-Timer expires" ]
+
+ /*
+ * Edges
+ */
+
+ call_enqueue -> queues_middle_sep [ lhead=cluster_policy_queues ];
+
+ low_queue:qr -> call_dequeue:w;
+ rt_queue:qr -> call_dequeue:w;
+
+ call_dequeue -> as1 [lhead=cluster_runpool];
+
+ as1->call_jdequeue [ltail=cluster_runpool];
+ call_jdequeue->jobslots:0;
+ call_jdequeue->sstop_dotfixup [ arrowhead=none];
+ sstop_dotfixup->sstop [label="Spawn SS-Timer"];
+ sstop->jobslots [label="SoftStop"];
+ sstop->hstop [label="Spawn HS-Timer"];
+ hstop->jobslots:ne [label="HardStop"];
+
+
+ as3->call_ctxfinish:ne [ ltail=cluster_runpool ];
+ call_ctxfinish:sw->rt_queue:qm [ lhead=cluster_policy_queues label="enqueue_ctx()\n/* ctx still has jobs */" ];
+
+ call_ctxfinish->call_ctxdone [constraint=false];
+
+ call_ctxdone->call_enqueue [weight=0.1 labeldistance=20.0 labelangle=0.0 taillabel="Job submitted to the ctx" style=dotted constraint=false];
+
+
+ {
+ jobslots->irq [constraint=false];
+
+ irq->job_finish [constraint=false];
+ }
+
+ irq->as2 [lhead=cluster_runpool label="requeue_job()\n/* timeslice expired */" ];
+
+}
diff --git a/midgard-0.r2p0/docs/policy_overview.dot b/midgard-0.r2p0/docs/policy_overview.dot
new file mode 100755
index 0000000..159b993
--- /dev/null
+++ b/midgard-0.r2p0/docs/policy_overview.dot
@@ -0,0 +1,63 @@
+/*
+ *
+ * (C) COPYRIGHT 2010 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained
+ * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
+ * Boston, MA 02110-1301, USA.
+ *
+ */
+
+
+
+
+
+digraph policy_objects_diagram {
+ rankdir=LR
+ size="6,6"
+ compound=true;
+
+ node [ shape = box ];
+
+ call_enqueue [ shape=plaintext label="enqueue ctx" ];
+
+
+ policy_queue [ label="Policy's Queue" ];
+
+ {
+ rank=same;
+ runpool [ label="Policy's Run Pool" ];
+
+ ctx_finish [ label="ctx finished" ];
+ }
+
+ {
+ rank=same;
+ jobslots [ shape=record label="Jobslots: | <0>js[0] | <1>js[1] | <2>js[2]" ];
+
+ job_finish [ label="Job finished" ];
+ }
+
+
+
+ /*
+ * Edges
+ */
+
+ call_enqueue -> policy_queue;
+
+ policy_queue->runpool [label="dequeue ctx" weight=0.1];
+ runpool->policy_queue [label="requeue ctx" weight=0.1];
+
+ runpool->ctx_finish [ style=dotted ];
+
+ runpool->jobslots [label="dequeue job" weight=0.1];
+ jobslots->runpool [label="requeue job" weight=0.1];
+
+ jobslots->job_finish [ style=dotted ];
+}
diff --git a/midgard-0.r2p0/mali_base_hwconfig_features.h b/midgard-0.r2p0/mali_base_hwconfig_features.h
new file mode 100755
index 0000000..8b07cbc
--- /dev/null
+++ b/midgard-0.r2p0/mali_base_hwconfig_features.h
@@ -0,0 +1,223 @@
+/*
+ *
+ * (C) COPYRIGHT 2015-2016 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained
+ * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
+ * Boston, MA 02110-1301, USA.
+ *
+ */
+
+
+
+/* AUTOMATICALLY GENERATED FILE. If you want to amend the issues/features,
+ * please update base/tools/hwconfig_generator/hwc_{issues,features}.py
+ * For more information see base/tools/hwconfig_generator/README
+ */
+
+#ifndef _BASE_HWCONFIG_FEATURES_H_
+#define _BASE_HWCONFIG_FEATURES_H_
+
+enum base_hw_feature {
+ BASE_HW_FEATURE_JOBCHAIN_DISAMBIGUATION,
+ BASE_HW_FEATURE_PWRON_DURING_PWROFF_TRANS,
+ BASE_HW_FEATURE_33BIT_VA,
+ BASE_HW_FEATURE_XAFFINITY,
+ BASE_HW_FEATURE_OUT_OF_ORDER_EXEC,
+ BASE_HW_FEATURE_MRT,
+ BASE_HW_FEATURE_BRNDOUT_CC,
+ BASE_HW_FEATURE_INTERPIPE_REG_ALIASING,
+ BASE_HW_FEATURE_LD_ST_TILEBUFFER,
+ BASE_HW_FEATURE_MSAA_16X,
+ BASE_HW_FEATURE_32_BIT_UNIFORM_ADDRESS,
+ BASE_HW_FEATURE_ATTR_AUTO_TYPE_INFERRAL,
+ BASE_HW_FEATURE_OPTIMIZED_COVERAGE_MASK,
+ BASE_HW_FEATURE_T7XX_PAIRING_RULES,
+ BASE_HW_FEATURE_LD_ST_LEA_TEX,
+ BASE_HW_FEATURE_LINEAR_FILTER_FLOAT,
+ BASE_HW_FEATURE_WORKGROUP_ROUND_MULTIPLE_OF_4,
+ BASE_HW_FEATURE_IMAGES_IN_FRAGMENT_SHADERS,
+ BASE_HW_FEATURE_TEST4_DATUM_MODE,
+ BASE_HW_FEATURE_NEXT_INSTRUCTION_TYPE,
+ BASE_HW_FEATURE_BRNDOUT_KILL,
+ BASE_HW_FEATURE_WARPING,
+ BASE_HW_FEATURE_V4,
+ BASE_HW_FEATURE_FLUSH_REDUCTION,
+ BASE_HW_FEATURE_PROTECTED_MODE,
+ BASE_HW_FEATURE_COHERENCY_REG,
+ BASE_HW_FEATURE_PROTECTED_DEBUG_MODE,
+ BASE_HW_FEATURE_END
+};
+
+static const enum base_hw_feature base_hw_features_generic[] = {
+ BASE_HW_FEATURE_END
+};
+
+static const enum base_hw_feature base_hw_features_t60x[] = {
+ BASE_HW_FEATURE_LD_ST_LEA_TEX,
+ BASE_HW_FEATURE_LINEAR_FILTER_FLOAT,
+ BASE_HW_FEATURE_V4,
+ BASE_HW_FEATURE_END
+};
+
+static const enum base_hw_feature base_hw_features_t62x[] = {
+ BASE_HW_FEATURE_LD_ST_LEA_TEX,
+ BASE_HW_FEATURE_LINEAR_FILTER_FLOAT,
+ BASE_HW_FEATURE_ATTR_AUTO_TYPE_INFERRAL,
+ BASE_HW_FEATURE_V4,
+ BASE_HW_FEATURE_END
+};
+
+static const enum base_hw_feature base_hw_features_t72x[] = {
+ BASE_HW_FEATURE_33BIT_VA,
+ BASE_HW_FEATURE_32_BIT_UNIFORM_ADDRESS,
+ BASE_HW_FEATURE_ATTR_AUTO_TYPE_INFERRAL,
+ BASE_HW_FEATURE_INTERPIPE_REG_ALIASING,
+ BASE_HW_FEATURE_OPTIMIZED_COVERAGE_MASK,
+ BASE_HW_FEATURE_T7XX_PAIRING_RULES,
+ BASE_HW_FEATURE_WORKGROUP_ROUND_MULTIPLE_OF_4,
+ BASE_HW_FEATURE_WARPING,
+ BASE_HW_FEATURE_V4,
+ BASE_HW_FEATURE_END
+};
+
+static const enum base_hw_feature base_hw_features_t76x[] = {
+ BASE_HW_FEATURE_JOBCHAIN_DISAMBIGUATION,
+ BASE_HW_FEATURE_PWRON_DURING_PWROFF_TRANS,
+ BASE_HW_FEATURE_XAFFINITY,
+ BASE_HW_FEATURE_32_BIT_UNIFORM_ADDRESS,
+ BASE_HW_FEATURE_ATTR_AUTO_TYPE_INFERRAL,
+ BASE_HW_FEATURE_BRNDOUT_CC,
+ BASE_HW_FEATURE_LD_ST_LEA_TEX,
+ BASE_HW_FEATURE_LD_ST_TILEBUFFER,
+ BASE_HW_FEATURE_LINEAR_FILTER_FLOAT,
+ BASE_HW_FEATURE_MRT,
+ BASE_HW_FEATURE_MSAA_16X,
+ BASE_HW_FEATURE_OUT_OF_ORDER_EXEC,
+ BASE_HW_FEATURE_T7XX_PAIRING_RULES,
+ BASE_HW_FEATURE_TEST4_DATUM_MODE,
+ BASE_HW_FEATURE_END
+};
+
+static const enum base_hw_feature base_hw_features_tFxx[] = {
+ BASE_HW_FEATURE_JOBCHAIN_DISAMBIGUATION,
+ BASE_HW_FEATURE_PWRON_DURING_PWROFF_TRANS,
+ BASE_HW_FEATURE_XAFFINITY,
+ BASE_HW_FEATURE_32_BIT_UNIFORM_ADDRESS,
+ BASE_HW_FEATURE_ATTR_AUTO_TYPE_INFERRAL,
+ BASE_HW_FEATURE_BRNDOUT_CC,
+ BASE_HW_FEATURE_BRNDOUT_KILL,
+ BASE_HW_FEATURE_LD_ST_LEA_TEX,
+ BASE_HW_FEATURE_LD_ST_TILEBUFFER,
+ BASE_HW_FEATURE_LINEAR_FILTER_FLOAT,
+ BASE_HW_FEATURE_MRT,
+ BASE_HW_FEATURE_MSAA_16X,
+ BASE_HW_FEATURE_NEXT_INSTRUCTION_TYPE,
+ BASE_HW_FEATURE_OUT_OF_ORDER_EXEC,
+ BASE_HW_FEATURE_T7XX_PAIRING_RULES,
+ BASE_HW_FEATURE_TEST4_DATUM_MODE,
+ BASE_HW_FEATURE_END
+};
+
+static const enum base_hw_feature base_hw_features_t83x[] = {
+ BASE_HW_FEATURE_33BIT_VA,
+ BASE_HW_FEATURE_JOBCHAIN_DISAMBIGUATION,
+ BASE_HW_FEATURE_PWRON_DURING_PWROFF_TRANS,
+ BASE_HW_FEATURE_XAFFINITY,
+ BASE_HW_FEATURE_WARPING,
+ BASE_HW_FEATURE_INTERPIPE_REG_ALIASING,
+ BASE_HW_FEATURE_32_BIT_UNIFORM_ADDRESS,
+ BASE_HW_FEATURE_ATTR_AUTO_TYPE_INFERRAL,
+ BASE_HW_FEATURE_BRNDOUT_CC,
+ BASE_HW_FEATURE_BRNDOUT_KILL,
+ BASE_HW_FEATURE_LD_ST_LEA_TEX,
+ BASE_HW_FEATURE_LD_ST_TILEBUFFER,
+ BASE_HW_FEATURE_LINEAR_FILTER_FLOAT,
+ BASE_HW_FEATURE_MRT,
+ BASE_HW_FEATURE_NEXT_INSTRUCTION_TYPE,
+ BASE_HW_FEATURE_OUT_OF_ORDER_EXEC,
+ BASE_HW_FEATURE_T7XX_PAIRING_RULES,
+ BASE_HW_FEATURE_TEST4_DATUM_MODE,
+ BASE_HW_FEATURE_END
+};
+
+static const enum base_hw_feature base_hw_features_t82x[] = {
+ BASE_HW_FEATURE_33BIT_VA,
+ BASE_HW_FEATURE_JOBCHAIN_DISAMBIGUATION,
+ BASE_HW_FEATURE_PWRON_DURING_PWROFF_TRANS,
+ BASE_HW_FEATURE_XAFFINITY,
+ BASE_HW_FEATURE_WARPING,
+ BASE_HW_FEATURE_INTERPIPE_REG_ALIASING,
+ BASE_HW_FEATURE_32_BIT_UNIFORM_ADDRESS,
+ BASE_HW_FEATURE_ATTR_AUTO_TYPE_INFERRAL,
+ BASE_HW_FEATURE_BRNDOUT_CC,
+ BASE_HW_FEATURE_BRNDOUT_KILL,
+ BASE_HW_FEATURE_LD_ST_LEA_TEX,
+ BASE_HW_FEATURE_LD_ST_TILEBUFFER,
+ BASE_HW_FEATURE_LINEAR_FILTER_FLOAT,
+ BASE_HW_FEATURE_MRT,
+ BASE_HW_FEATURE_NEXT_INSTRUCTION_TYPE,
+ BASE_HW_FEATURE_OUT_OF_ORDER_EXEC,
+ BASE_HW_FEATURE_T7XX_PAIRING_RULES,
+ BASE_HW_FEATURE_TEST4_DATUM_MODE,
+ BASE_HW_FEATURE_END
+};
+
+static const enum base_hw_feature base_hw_features_tMIx[] = {
+ BASE_HW_FEATURE_JOBCHAIN_DISAMBIGUATION,
+ BASE_HW_FEATURE_PWRON_DURING_PWROFF_TRANS,
+ BASE_HW_FEATURE_XAFFINITY,
+ BASE_HW_FEATURE_WARPING,
+ BASE_HW_FEATURE_INTERPIPE_REG_ALIASING,
+ BASE_HW_FEATURE_32_BIT_UNIFORM_ADDRESS,
+ BASE_HW_FEATURE_ATTR_AUTO_TYPE_INFERRAL,
+ BASE_HW_FEATURE_BRNDOUT_CC,
+ BASE_HW_FEATURE_BRNDOUT_KILL,
+ BASE_HW_FEATURE_LD_ST_LEA_TEX,
+ BASE_HW_FEATURE_LD_ST_TILEBUFFER,
+ BASE_HW_FEATURE_LINEAR_FILTER_FLOAT,
+ BASE_HW_FEATURE_MRT,
+ BASE_HW_FEATURE_MSAA_16X,
+ BASE_HW_FEATURE_NEXT_INSTRUCTION_TYPE,
+ BASE_HW_FEATURE_OUT_OF_ORDER_EXEC,
+ BASE_HW_FEATURE_T7XX_PAIRING_RULES,
+ BASE_HW_FEATURE_TEST4_DATUM_MODE,
+ BASE_HW_FEATURE_FLUSH_REDUCTION,
+ BASE_HW_FEATURE_PROTECTED_MODE,
+ BASE_HW_FEATURE_COHERENCY_REG,
+ BASE_HW_FEATURE_END
+};
+
+static const enum base_hw_feature base_hw_features_tHEx[] = {
+ BASE_HW_FEATURE_JOBCHAIN_DISAMBIGUATION,
+ BASE_HW_FEATURE_PWRON_DURING_PWROFF_TRANS,
+ BASE_HW_FEATURE_XAFFINITY,
+ BASE_HW_FEATURE_WARPING,
+ BASE_HW_FEATURE_INTERPIPE_REG_ALIASING,
+ BASE_HW_FEATURE_32_BIT_UNIFORM_ADDRESS,
+ BASE_HW_FEATURE_ATTR_AUTO_TYPE_INFERRAL,
+ BASE_HW_FEATURE_BRNDOUT_CC,
+ BASE_HW_FEATURE_BRNDOUT_KILL,
+ BASE_HW_FEATURE_LD_ST_LEA_TEX,
+ BASE_HW_FEATURE_LD_ST_TILEBUFFER,
+ BASE_HW_FEATURE_LINEAR_FILTER_FLOAT,
+ BASE_HW_FEATURE_MRT,
+ BASE_HW_FEATURE_MSAA_16X,
+ BASE_HW_FEATURE_NEXT_INSTRUCTION_TYPE,
+ BASE_HW_FEATURE_OUT_OF_ORDER_EXEC,
+ BASE_HW_FEATURE_T7XX_PAIRING_RULES,
+ BASE_HW_FEATURE_TEST4_DATUM_MODE,
+ BASE_HW_FEATURE_FLUSH_REDUCTION,
+ BASE_HW_FEATURE_PROTECTED_MODE,
+ BASE_HW_FEATURE_PROTECTED_DEBUG_MODE,
+ BASE_HW_FEATURE_COHERENCY_REG,
+ BASE_HW_FEATURE_END
+};
+
+
+#endif /* _BASE_HWCONFIG_FEATURES_H_ */
diff --git a/midgard-0.r2p0/mali_base_hwconfig_issues.h b/midgard-0.r2p0/mali_base_hwconfig_issues.h
new file mode 100755
index 0000000..4d95b4f
--- /dev/null
+++ b/midgard-0.r2p0/mali_base_hwconfig_issues.h
@@ -0,0 +1,1014 @@
+/*
+ *
+ * (C) COPYRIGHT 2015-2016 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained
+ * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
+ * Boston, MA 02110-1301, USA.
+ *
+ */
+
+
+
+/* AUTOMATICALLY GENERATED FILE. If you want to amend the issues/features,
+ * please update base/tools/hwconfig_generator/hwc_{issues,features}.py
+ * For more information see base/tools/hwconfig_generator/README
+ */
+
+#ifndef _BASE_HWCONFIG_ISSUES_H_
+#define _BASE_HWCONFIG_ISSUES_H_
+
+enum base_hw_issue {
+ BASE_HW_ISSUE_5736,
+ BASE_HW_ISSUE_6367,
+ BASE_HW_ISSUE_6398,
+ BASE_HW_ISSUE_6402,
+ BASE_HW_ISSUE_6787,
+ BASE_HW_ISSUE_7027,
+ BASE_HW_ISSUE_7144,
+ BASE_HW_ISSUE_7304,
+ BASE_HW_ISSUE_8073,
+ BASE_HW_ISSUE_8186,
+ BASE_HW_ISSUE_8215,
+ BASE_HW_ISSUE_8245,
+ BASE_HW_ISSUE_8250,
+ BASE_HW_ISSUE_8260,
+ BASE_HW_ISSUE_8280,
+ BASE_HW_ISSUE_8316,
+ BASE_HW_ISSUE_8381,
+ BASE_HW_ISSUE_8394,
+ BASE_HW_ISSUE_8401,
+ BASE_HW_ISSUE_8408,
+ BASE_HW_ISSUE_8443,
+ BASE_HW_ISSUE_8456,
+ BASE_HW_ISSUE_8564,
+ BASE_HW_ISSUE_8634,
+ BASE_HW_ISSUE_8778,
+ BASE_HW_ISSUE_8791,
+ BASE_HW_ISSUE_8833,
+ BASE_HW_ISSUE_8879,
+ BASE_HW_ISSUE_8896,
+ BASE_HW_ISSUE_8975,
+ BASE_HW_ISSUE_8986,
+ BASE_HW_ISSUE_8987,
+ BASE_HW_ISSUE_9010,
+ BASE_HW_ISSUE_9418,
+ BASE_HW_ISSUE_9423,
+ BASE_HW_ISSUE_9435,
+ BASE_HW_ISSUE_9510,
+ BASE_HW_ISSUE_9566,
+ BASE_HW_ISSUE_9630,
+ BASE_HW_ISSUE_10127,
+ BASE_HW_ISSUE_10327,
+ BASE_HW_ISSUE_10410,
+ BASE_HW_ISSUE_10471,
+ BASE_HW_ISSUE_10472,
+ BASE_HW_ISSUE_10487,
+ BASE_HW_ISSUE_10607,
+ BASE_HW_ISSUE_10632,
+ BASE_HW_ISSUE_10649,
+ BASE_HW_ISSUE_10676,
+ BASE_HW_ISSUE_10682,
+ BASE_HW_ISSUE_10684,
+ BASE_HW_ISSUE_10797,
+ BASE_HW_ISSUE_10817,
+ BASE_HW_ISSUE_10821,
+ BASE_HW_ISSUE_10883,
+ BASE_HW_ISSUE_10931,
+ BASE_HW_ISSUE_10946,
+ BASE_HW_ISSUE_10959,
+ BASE_HW_ISSUE_10969,
+ BASE_HW_ISSUE_10984,
+ BASE_HW_ISSUE_10995,
+ BASE_HW_ISSUE_11012,
+ BASE_HW_ISSUE_11020,
+ BASE_HW_ISSUE_11024,
+ BASE_HW_ISSUE_11035,
+ BASE_HW_ISSUE_11042,
+ BASE_HW_ISSUE_11051,
+ BASE_HW_ISSUE_11054,
+ BASE_HW_ISSUE_T76X_26,
+ BASE_HW_ISSUE_T76X_1909,
+ BASE_HW_ISSUE_T76X_1963,
+ BASE_HW_ISSUE_T76X_3086,
+ BASE_HW_ISSUE_T76X_3542,
+ BASE_HW_ISSUE_T76X_3556,
+ BASE_HW_ISSUE_T76X_3700,
+ BASE_HW_ISSUE_T76X_3793,
+ BASE_HW_ISSUE_T76X_3953,
+ BASE_HW_ISSUE_T76X_3960,
+ BASE_HW_ISSUE_T76X_3964,
+ BASE_HW_ISSUE_T76X_3966,
+ BASE_HW_ISSUE_T76X_3979,
+ BASE_HW_ISSUE_T76X_3982,
+ BASE_HW_ISSUE_TMIX_7891,
+ BASE_HW_ISSUE_TMIX_7940,
+ BASE_HW_ISSUE_TMIX_8042,
+ BASE_HW_ISSUE_TMIX_8133,
+ BASE_HW_ISSUE_TMIX_8138,
+ BASE_HW_ISSUE_TMIX_8206,
+ BASE_HW_ISSUE_TMIX_8343,
+ BASE_HW_ISSUE_TMIX_8463,
+ BASE_HW_ISSUE_TMIX_8456,
+ GPUCORE_1619,
+ BASE_HW_ISSUE_END
+};
+
+static const enum base_hw_issue base_hw_issues_generic[] = {
+ BASE_HW_ISSUE_END
+};
+
+static const enum base_hw_issue base_hw_issues_t60x_r0p0_15dev0[] = {
+ BASE_HW_ISSUE_6367,
+ BASE_HW_ISSUE_6398,
+ BASE_HW_ISSUE_6402,
+ BASE_HW_ISSUE_6787,
+ BASE_HW_ISSUE_7027,
+ BASE_HW_ISSUE_7144,
+ BASE_HW_ISSUE_7304,
+ BASE_HW_ISSUE_8073,
+ BASE_HW_ISSUE_8186,
+ BASE_HW_ISSUE_8215,
+ BASE_HW_ISSUE_8245,
+ BASE_HW_ISSUE_8250,
+ BASE_HW_ISSUE_8260,
+ BASE_HW_ISSUE_8280,
+ BASE_HW_ISSUE_8316,
+ BASE_HW_ISSUE_8381,
+ BASE_HW_ISSUE_8394,
+ BASE_HW_ISSUE_8401,
+ BASE_HW_ISSUE_8408,
+ BASE_HW_ISSUE_8443,
+ BASE_HW_ISSUE_8456,
+ BASE_HW_ISSUE_8564,
+ BASE_HW_ISSUE_8634,
+ BASE_HW_ISSUE_8778,
+ BASE_HW_ISSUE_8791,
+ BASE_HW_ISSUE_8833,
+ BASE_HW_ISSUE_8896,
+ BASE_HW_ISSUE_8975,
+ BASE_HW_ISSUE_8986,
+ BASE_HW_ISSUE_8987,
+ BASE_HW_ISSUE_9010,
+ BASE_HW_ISSUE_9418,
+ BASE_HW_ISSUE_9423,
+ BASE_HW_ISSUE_9435,
+ BASE_HW_ISSUE_9510,
+ BASE_HW_ISSUE_9566,
+ BASE_HW_ISSUE_9630,
+ BASE_HW_ISSUE_10410,
+ BASE_HW_ISSUE_10471,
+ BASE_HW_ISSUE_10472,
+ BASE_HW_ISSUE_10487,
+ BASE_HW_ISSUE_10607,
+ BASE_HW_ISSUE_10632,
+ BASE_HW_ISSUE_10649,
+ BASE_HW_ISSUE_10676,
+ BASE_HW_ISSUE_10682,
+ BASE_HW_ISSUE_10684,
+ BASE_HW_ISSUE_10883,
+ BASE_HW_ISSUE_10931,
+ BASE_HW_ISSUE_10946,
+ BASE_HW_ISSUE_10969,
+ BASE_HW_ISSUE_10984,
+ BASE_HW_ISSUE_10995,
+ BASE_HW_ISSUE_11012,
+ BASE_HW_ISSUE_11020,
+ BASE_HW_ISSUE_11035,
+ BASE_HW_ISSUE_11051,
+ BASE_HW_ISSUE_11054,
+ BASE_HW_ISSUE_T76X_1909,
+ BASE_HW_ISSUE_T76X_3964,
+ GPUCORE_1619,
+ BASE_HW_ISSUE_END
+};
+
+static const enum base_hw_issue base_hw_issues_t60x_r0p0_eac[] = {
+ BASE_HW_ISSUE_6367,
+ BASE_HW_ISSUE_6402,
+ BASE_HW_ISSUE_6787,
+ BASE_HW_ISSUE_7027,
+ BASE_HW_ISSUE_7304,
+ BASE_HW_ISSUE_8408,
+ BASE_HW_ISSUE_8564,
+ BASE_HW_ISSUE_8778,
+ BASE_HW_ISSUE_8975,
+ BASE_HW_ISSUE_9010,
+ BASE_HW_ISSUE_9418,
+ BASE_HW_ISSUE_9423,
+ BASE_HW_ISSUE_9435,
+ BASE_HW_ISSUE_9510,
+ BASE_HW_ISSUE_10410,
+ BASE_HW_ISSUE_10471,
+ BASE_HW_ISSUE_10472,
+ BASE_HW_ISSUE_10487,
+ BASE_HW_ISSUE_10607,
+ BASE_HW_ISSUE_10632,
+ BASE_HW_ISSUE_10649,
+ BASE_HW_ISSUE_10676,
+ BASE_HW_ISSUE_10682,
+ BASE_HW_ISSUE_10684,
+ BASE_HW_ISSUE_10883,
+ BASE_HW_ISSUE_10931,
+ BASE_HW_ISSUE_10946,
+ BASE_HW_ISSUE_10969,
+ BASE_HW_ISSUE_11012,
+ BASE_HW_ISSUE_11020,
+ BASE_HW_ISSUE_11035,
+ BASE_HW_ISSUE_11051,
+ BASE_HW_ISSUE_11054,
+ BASE_HW_ISSUE_T76X_1909,
+ BASE_HW_ISSUE_T76X_3964,
+ BASE_HW_ISSUE_END
+};
+
+static const enum base_hw_issue base_hw_issues_t60x_r0p1[] = {
+ BASE_HW_ISSUE_6367,
+ BASE_HW_ISSUE_6402,
+ BASE_HW_ISSUE_6787,
+ BASE_HW_ISSUE_7027,
+ BASE_HW_ISSUE_7304,
+ BASE_HW_ISSUE_8408,
+ BASE_HW_ISSUE_8564,
+ BASE_HW_ISSUE_8778,
+ BASE_HW_ISSUE_8975,
+ BASE_HW_ISSUE_9010,
+ BASE_HW_ISSUE_9435,
+ BASE_HW_ISSUE_9510,
+ BASE_HW_ISSUE_10410,
+ BASE_HW_ISSUE_10471,
+ BASE_HW_ISSUE_10472,
+ BASE_HW_ISSUE_10487,
+ BASE_HW_ISSUE_10607,
+ BASE_HW_ISSUE_10632,
+ BASE_HW_ISSUE_10649,
+ BASE_HW_ISSUE_10676,
+ BASE_HW_ISSUE_10682,
+ BASE_HW_ISSUE_10684,
+ BASE_HW_ISSUE_10883,
+ BASE_HW_ISSUE_10931,
+ BASE_HW_ISSUE_10946,
+ BASE_HW_ISSUE_11012,
+ BASE_HW_ISSUE_11020,
+ BASE_HW_ISSUE_11035,
+ BASE_HW_ISSUE_11051,
+ BASE_HW_ISSUE_11054,
+ BASE_HW_ISSUE_T76X_1909,
+ BASE_HW_ISSUE_T76X_1963,
+ BASE_HW_ISSUE_T76X_3964,
+ BASE_HW_ISSUE_END
+};
+
+static const enum base_hw_issue base_hw_issues_t62x_r0p1[] = {
+ BASE_HW_ISSUE_6402,
+ BASE_HW_ISSUE_9435,
+ BASE_HW_ISSUE_10127,
+ BASE_HW_ISSUE_10327,
+ BASE_HW_ISSUE_10410,
+ BASE_HW_ISSUE_10471,
+ BASE_HW_ISSUE_10472,
+ BASE_HW_ISSUE_10487,
+ BASE_HW_ISSUE_10607,
+ BASE_HW_ISSUE_10632,
+ BASE_HW_ISSUE_10649,
+ BASE_HW_ISSUE_10676,
+ BASE_HW_ISSUE_10682,
+ BASE_HW_ISSUE_10684,
+ BASE_HW_ISSUE_10817,
+ BASE_HW_ISSUE_10821,
+ BASE_HW_ISSUE_10883,
+ BASE_HW_ISSUE_10931,
+ BASE_HW_ISSUE_10946,
+ BASE_HW_ISSUE_10959,
+ BASE_HW_ISSUE_11012,
+ BASE_HW_ISSUE_11020,
+ BASE_HW_ISSUE_11024,
+ BASE_HW_ISSUE_11035,
+ BASE_HW_ISSUE_11042,
+ BASE_HW_ISSUE_11051,
+ BASE_HW_ISSUE_11054,
+ BASE_HW_ISSUE_T76X_1909,
+ BASE_HW_ISSUE_T76X_1963,
+ BASE_HW_ISSUE_END
+};
+
+static const enum base_hw_issue base_hw_issues_t62x_r1p0[] = {
+ BASE_HW_ISSUE_6402,
+ BASE_HW_ISSUE_9435,
+ BASE_HW_ISSUE_10471,
+ BASE_HW_ISSUE_10472,
+ BASE_HW_ISSUE_10649,
+ BASE_HW_ISSUE_10684,
+ BASE_HW_ISSUE_10821,
+ BASE_HW_ISSUE_10883,
+ BASE_HW_ISSUE_10931,
+ BASE_HW_ISSUE_10946,
+ BASE_HW_ISSUE_10959,
+ BASE_HW_ISSUE_11012,
+ BASE_HW_ISSUE_11020,
+ BASE_HW_ISSUE_11024,
+ BASE_HW_ISSUE_11042,
+ BASE_HW_ISSUE_11051,
+ BASE_HW_ISSUE_11054,
+ BASE_HW_ISSUE_T76X_1909,
+ BASE_HW_ISSUE_T76X_1963,
+ BASE_HW_ISSUE_T76X_3964,
+ BASE_HW_ISSUE_END
+};
+
+static const enum base_hw_issue base_hw_issues_t62x_r1p1[] = {
+ BASE_HW_ISSUE_6402,
+ BASE_HW_ISSUE_9435,
+ BASE_HW_ISSUE_10471,
+ BASE_HW_ISSUE_10472,
+ BASE_HW_ISSUE_10649,
+ BASE_HW_ISSUE_10684,
+ BASE_HW_ISSUE_10821,
+ BASE_HW_ISSUE_10883,
+ BASE_HW_ISSUE_10931,
+ BASE_HW_ISSUE_10946,
+ BASE_HW_ISSUE_10959,
+ BASE_HW_ISSUE_11012,
+ BASE_HW_ISSUE_11042,
+ BASE_HW_ISSUE_11051,
+ BASE_HW_ISSUE_11054,
+ BASE_HW_ISSUE_T76X_1909,
+ BASE_HW_ISSUE_T76X_1963,
+ BASE_HW_ISSUE_END
+};
+
+static const enum base_hw_issue base_hw_issues_t76x_r0p0[] = {
+ BASE_HW_ISSUE_9435,
+ BASE_HW_ISSUE_10821,
+ BASE_HW_ISSUE_10883,
+ BASE_HW_ISSUE_10946,
+ BASE_HW_ISSUE_11020,
+ BASE_HW_ISSUE_11024,
+ BASE_HW_ISSUE_11042,
+ BASE_HW_ISSUE_11051,
+ BASE_HW_ISSUE_11054,
+ BASE_HW_ISSUE_T76X_26,
+ BASE_HW_ISSUE_T76X_1909,
+ BASE_HW_ISSUE_T76X_1963,
+ BASE_HW_ISSUE_T76X_3086,
+ BASE_HW_ISSUE_T76X_3542,
+ BASE_HW_ISSUE_T76X_3556,
+ BASE_HW_ISSUE_T76X_3700,
+ BASE_HW_ISSUE_T76X_3793,
+ BASE_HW_ISSUE_T76X_3953,
+ BASE_HW_ISSUE_T76X_3960,
+ BASE_HW_ISSUE_T76X_3964,
+ BASE_HW_ISSUE_T76X_3966,
+ BASE_HW_ISSUE_T76X_3979,
+ BASE_HW_ISSUE_T76X_3982,
+ BASE_HW_ISSUE_TMIX_7891,
+ BASE_HW_ISSUE_END
+};
+
+static const enum base_hw_issue base_hw_issues_t76x_r0p1[] = {
+ BASE_HW_ISSUE_9435,
+ BASE_HW_ISSUE_10821,
+ BASE_HW_ISSUE_10883,
+ BASE_HW_ISSUE_10946,
+ BASE_HW_ISSUE_11020,
+ BASE_HW_ISSUE_11024,
+ BASE_HW_ISSUE_11042,
+ BASE_HW_ISSUE_11051,
+ BASE_HW_ISSUE_11054,
+ BASE_HW_ISSUE_T76X_26,
+ BASE_HW_ISSUE_T76X_1909,
+ BASE_HW_ISSUE_T76X_1963,
+ BASE_HW_ISSUE_T76X_3086,
+ BASE_HW_ISSUE_T76X_3542,
+ BASE_HW_ISSUE_T76X_3556,
+ BASE_HW_ISSUE_T76X_3700,
+ BASE_HW_ISSUE_T76X_3793,
+ BASE_HW_ISSUE_T76X_3953,
+ BASE_HW_ISSUE_T76X_3960,
+ BASE_HW_ISSUE_T76X_3964,
+ BASE_HW_ISSUE_T76X_3966,
+ BASE_HW_ISSUE_T76X_3979,
+ BASE_HW_ISSUE_T76X_3982,
+ BASE_HW_ISSUE_TMIX_7891,
+ BASE_HW_ISSUE_END
+};
+
+static const enum base_hw_issue base_hw_issues_t76x_r0p1_50rel0[] = {
+ BASE_HW_ISSUE_9435,
+ BASE_HW_ISSUE_10821,
+ BASE_HW_ISSUE_10883,
+ BASE_HW_ISSUE_10946,
+ BASE_HW_ISSUE_11042,
+ BASE_HW_ISSUE_11051,
+ BASE_HW_ISSUE_11054,
+ BASE_HW_ISSUE_T76X_26,
+ BASE_HW_ISSUE_T76X_1909,
+ BASE_HW_ISSUE_T76X_1963,
+ BASE_HW_ISSUE_T76X_3086,
+ BASE_HW_ISSUE_T76X_3542,
+ BASE_HW_ISSUE_T76X_3556,
+ BASE_HW_ISSUE_T76X_3700,
+ BASE_HW_ISSUE_T76X_3793,
+ BASE_HW_ISSUE_T76X_3953,
+ BASE_HW_ISSUE_T76X_3960,
+ BASE_HW_ISSUE_T76X_3964,
+ BASE_HW_ISSUE_T76X_3966,
+ BASE_HW_ISSUE_T76X_3979,
+ BASE_HW_ISSUE_T76X_3982,
+ BASE_HW_ISSUE_TMIX_7891,
+ BASE_HW_ISSUE_END
+};
+
+static const enum base_hw_issue base_hw_issues_t76x_r0p2[] = {
+ BASE_HW_ISSUE_9435,
+ BASE_HW_ISSUE_10821,
+ BASE_HW_ISSUE_10883,
+ BASE_HW_ISSUE_10946,
+ BASE_HW_ISSUE_11020,
+ BASE_HW_ISSUE_11024,
+ BASE_HW_ISSUE_11042,
+ BASE_HW_ISSUE_11051,
+ BASE_HW_ISSUE_11054,
+ BASE_HW_ISSUE_T76X_26,
+ BASE_HW_ISSUE_T76X_1909,
+ BASE_HW_ISSUE_T76X_1963,
+ BASE_HW_ISSUE_T76X_3086,
+ BASE_HW_ISSUE_T76X_3542,
+ BASE_HW_ISSUE_T76X_3556,
+ BASE_HW_ISSUE_T76X_3700,
+ BASE_HW_ISSUE_T76X_3793,
+ BASE_HW_ISSUE_T76X_3953,
+ BASE_HW_ISSUE_T76X_3960,
+ BASE_HW_ISSUE_T76X_3964,
+ BASE_HW_ISSUE_T76X_3966,
+ BASE_HW_ISSUE_T76X_3979,
+ BASE_HW_ISSUE_T76X_3982,
+ BASE_HW_ISSUE_TMIX_7891,
+ BASE_HW_ISSUE_END
+};
+
+static const enum base_hw_issue base_hw_issues_t76x_r0p3[] = {
+ BASE_HW_ISSUE_9435,
+ BASE_HW_ISSUE_10821,
+ BASE_HW_ISSUE_10883,
+ BASE_HW_ISSUE_10946,
+ BASE_HW_ISSUE_11042,
+ BASE_HW_ISSUE_11051,
+ BASE_HW_ISSUE_11054,
+ BASE_HW_ISSUE_T76X_26,
+ BASE_HW_ISSUE_T76X_1909,
+ BASE_HW_ISSUE_T76X_1963,
+ BASE_HW_ISSUE_T76X_3086,
+ BASE_HW_ISSUE_T76X_3542,
+ BASE_HW_ISSUE_T76X_3556,
+ BASE_HW_ISSUE_T76X_3700,
+ BASE_HW_ISSUE_T76X_3793,
+ BASE_HW_ISSUE_T76X_3953,
+ BASE_HW_ISSUE_T76X_3960,
+ BASE_HW_ISSUE_T76X_3964,
+ BASE_HW_ISSUE_T76X_3966,
+ BASE_HW_ISSUE_T76X_3979,
+ BASE_HW_ISSUE_T76X_3982,
+ BASE_HW_ISSUE_TMIX_7891,
+ BASE_HW_ISSUE_END
+};
+
+static const enum base_hw_issue base_hw_issues_t76x_r1p0[] = {
+ BASE_HW_ISSUE_9435,
+ BASE_HW_ISSUE_10821,
+ BASE_HW_ISSUE_10883,
+ BASE_HW_ISSUE_10946,
+ BASE_HW_ISSUE_11042,
+ BASE_HW_ISSUE_11051,
+ BASE_HW_ISSUE_11054,
+ BASE_HW_ISSUE_T76X_1909,
+ BASE_HW_ISSUE_T76X_1963,
+ BASE_HW_ISSUE_T76X_3086,
+ BASE_HW_ISSUE_T76X_3700,
+ BASE_HW_ISSUE_T76X_3793,
+ BASE_HW_ISSUE_T76X_3953,
+ BASE_HW_ISSUE_T76X_3960,
+ BASE_HW_ISSUE_T76X_3964,
+ BASE_HW_ISSUE_T76X_3966,
+ BASE_HW_ISSUE_T76X_3979,
+ BASE_HW_ISSUE_T76X_3982,
+ BASE_HW_ISSUE_TMIX_7891,
+ BASE_HW_ISSUE_END
+};
+
+static const enum base_hw_issue base_hw_issues_t72x_r0p0[] = {
+ BASE_HW_ISSUE_6402,
+ BASE_HW_ISSUE_9435,
+ BASE_HW_ISSUE_10471,
+ BASE_HW_ISSUE_10649,
+ BASE_HW_ISSUE_10684,
+ BASE_HW_ISSUE_10797,
+ BASE_HW_ISSUE_10821,
+ BASE_HW_ISSUE_10883,
+ BASE_HW_ISSUE_10946,
+ BASE_HW_ISSUE_11042,
+ BASE_HW_ISSUE_11051,
+ BASE_HW_ISSUE_11054,
+ BASE_HW_ISSUE_T76X_1909,
+ BASE_HW_ISSUE_T76X_1963,
+ BASE_HW_ISSUE_T76X_3964,
+ BASE_HW_ISSUE_END
+};
+
+static const enum base_hw_issue base_hw_issues_t72x_r1p0[] = {
+ BASE_HW_ISSUE_6402,
+ BASE_HW_ISSUE_9435,
+ BASE_HW_ISSUE_10471,
+ BASE_HW_ISSUE_10649,
+ BASE_HW_ISSUE_10684,
+ BASE_HW_ISSUE_10797,
+ BASE_HW_ISSUE_10821,
+ BASE_HW_ISSUE_10883,
+ BASE_HW_ISSUE_10946,
+ BASE_HW_ISSUE_11042,
+ BASE_HW_ISSUE_11051,
+ BASE_HW_ISSUE_11054,
+ BASE_HW_ISSUE_T76X_1909,
+ BASE_HW_ISSUE_T76X_1963,
+ BASE_HW_ISSUE_T76X_3964,
+ BASE_HW_ISSUE_END
+};
+
+static const enum base_hw_issue base_hw_issues_t72x_r1p1[] = {
+ BASE_HW_ISSUE_6402,
+ BASE_HW_ISSUE_9435,
+ BASE_HW_ISSUE_10471,
+ BASE_HW_ISSUE_10649,
+ BASE_HW_ISSUE_10684,
+ BASE_HW_ISSUE_10797,
+ BASE_HW_ISSUE_10821,
+ BASE_HW_ISSUE_10883,
+ BASE_HW_ISSUE_10946,
+ BASE_HW_ISSUE_11042,
+ BASE_HW_ISSUE_11051,
+ BASE_HW_ISSUE_11054,
+ BASE_HW_ISSUE_T76X_1909,
+ BASE_HW_ISSUE_T76X_1963,
+ BASE_HW_ISSUE_T76X_3964,
+ BASE_HW_ISSUE_END
+};
+
+static const enum base_hw_issue base_hw_issues_model_t72x[] = {
+ BASE_HW_ISSUE_5736,
+ BASE_HW_ISSUE_6402,
+ BASE_HW_ISSUE_9435,
+ BASE_HW_ISSUE_10471,
+ BASE_HW_ISSUE_10649,
+ BASE_HW_ISSUE_10797,
+ BASE_HW_ISSUE_11042,
+ BASE_HW_ISSUE_11051,
+ BASE_HW_ISSUE_T76X_1909,
+ BASE_HW_ISSUE_T76X_1963,
+ BASE_HW_ISSUE_T76X_3964,
+ GPUCORE_1619,
+ BASE_HW_ISSUE_END
+};
+
+static const enum base_hw_issue base_hw_issues_model_t76x[] = {
+ BASE_HW_ISSUE_5736,
+ BASE_HW_ISSUE_9435,
+ BASE_HW_ISSUE_11020,
+ BASE_HW_ISSUE_11024,
+ BASE_HW_ISSUE_11042,
+ BASE_HW_ISSUE_11051,
+ BASE_HW_ISSUE_T76X_1909,
+ BASE_HW_ISSUE_T76X_1963,
+ BASE_HW_ISSUE_T76X_3086,
+ BASE_HW_ISSUE_T76X_3700,
+ BASE_HW_ISSUE_T76X_3793,
+ BASE_HW_ISSUE_T76X_3964,
+ BASE_HW_ISSUE_T76X_3979,
+ BASE_HW_ISSUE_T76X_3982,
+ BASE_HW_ISSUE_TMIX_7891,
+ GPUCORE_1619,
+ BASE_HW_ISSUE_END
+};
+
+static const enum base_hw_issue base_hw_issues_model_t60x[] = {
+ BASE_HW_ISSUE_5736,
+ BASE_HW_ISSUE_6402,
+ BASE_HW_ISSUE_8778,
+ BASE_HW_ISSUE_9435,
+ BASE_HW_ISSUE_10472,
+ BASE_HW_ISSUE_10649,
+ BASE_HW_ISSUE_10931,
+ BASE_HW_ISSUE_11012,
+ BASE_HW_ISSUE_11020,
+ BASE_HW_ISSUE_11024,
+ BASE_HW_ISSUE_11051,
+ BASE_HW_ISSUE_T76X_1909,
+ BASE_HW_ISSUE_T76X_1963,
+ BASE_HW_ISSUE_T76X_3964,
+ GPUCORE_1619,
+ BASE_HW_ISSUE_END
+};
+
+static const enum base_hw_issue base_hw_issues_model_t62x[] = {
+ BASE_HW_ISSUE_5736,
+ BASE_HW_ISSUE_6402,
+ BASE_HW_ISSUE_9435,
+ BASE_HW_ISSUE_10472,
+ BASE_HW_ISSUE_10649,
+ BASE_HW_ISSUE_10931,
+ BASE_HW_ISSUE_11012,
+ BASE_HW_ISSUE_11020,
+ BASE_HW_ISSUE_11024,
+ BASE_HW_ISSUE_11042,
+ BASE_HW_ISSUE_11051,
+ BASE_HW_ISSUE_T76X_1909,
+ BASE_HW_ISSUE_T76X_1963,
+ BASE_HW_ISSUE_T76X_3964,
+ GPUCORE_1619,
+ BASE_HW_ISSUE_END
+};
+
+static const enum base_hw_issue base_hw_issues_tFRx_r0p1[] = {
+ BASE_HW_ISSUE_9435,
+ BASE_HW_ISSUE_10821,
+ BASE_HW_ISSUE_10883,
+ BASE_HW_ISSUE_10946,
+ BASE_HW_ISSUE_11051,
+ BASE_HW_ISSUE_11054,
+ BASE_HW_ISSUE_T76X_1909,
+ BASE_HW_ISSUE_T76X_1963,
+ BASE_HW_ISSUE_T76X_3086,
+ BASE_HW_ISSUE_T76X_3700,
+ BASE_HW_ISSUE_T76X_3793,
+ BASE_HW_ISSUE_T76X_3953,
+ BASE_HW_ISSUE_T76X_3960,
+ BASE_HW_ISSUE_T76X_3964,
+ BASE_HW_ISSUE_T76X_3966,
+ BASE_HW_ISSUE_T76X_3979,
+ BASE_HW_ISSUE_T76X_3982,
+ BASE_HW_ISSUE_TMIX_7891,
+ BASE_HW_ISSUE_END
+};
+
+static const enum base_hw_issue base_hw_issues_tFRx_r0p2[] = {
+ BASE_HW_ISSUE_9435,
+ BASE_HW_ISSUE_10821,
+ BASE_HW_ISSUE_10883,
+ BASE_HW_ISSUE_10946,
+ BASE_HW_ISSUE_11051,
+ BASE_HW_ISSUE_11054,
+ BASE_HW_ISSUE_T76X_1909,
+ BASE_HW_ISSUE_T76X_1963,
+ BASE_HW_ISSUE_T76X_3086,
+ BASE_HW_ISSUE_T76X_3700,
+ BASE_HW_ISSUE_T76X_3793,
+ BASE_HW_ISSUE_T76X_3953,
+ BASE_HW_ISSUE_T76X_3964,
+ BASE_HW_ISSUE_T76X_3966,
+ BASE_HW_ISSUE_T76X_3979,
+ BASE_HW_ISSUE_T76X_3982,
+ BASE_HW_ISSUE_TMIX_7891,
+ BASE_HW_ISSUE_END
+};
+
+static const enum base_hw_issue base_hw_issues_tFRx_r1p0[] = {
+ BASE_HW_ISSUE_9435,
+ BASE_HW_ISSUE_10821,
+ BASE_HW_ISSUE_10883,
+ BASE_HW_ISSUE_10946,
+ BASE_HW_ISSUE_11051,
+ BASE_HW_ISSUE_11054,
+ BASE_HW_ISSUE_T76X_1963,
+ BASE_HW_ISSUE_T76X_3086,
+ BASE_HW_ISSUE_T76X_3700,
+ BASE_HW_ISSUE_T76X_3793,
+ BASE_HW_ISSUE_T76X_3953,
+ BASE_HW_ISSUE_T76X_3966,
+ BASE_HW_ISSUE_T76X_3979,
+ BASE_HW_ISSUE_T76X_3982,
+ BASE_HW_ISSUE_TMIX_7891,
+ BASE_HW_ISSUE_END
+};
+
+static const enum base_hw_issue base_hw_issues_tFRx_r2p0[] = {
+ BASE_HW_ISSUE_9435,
+ BASE_HW_ISSUE_10821,
+ BASE_HW_ISSUE_10883,
+ BASE_HW_ISSUE_10946,
+ BASE_HW_ISSUE_11051,
+ BASE_HW_ISSUE_11054,
+ BASE_HW_ISSUE_T76X_1963,
+ BASE_HW_ISSUE_T76X_3086,
+ BASE_HW_ISSUE_T76X_3700,
+ BASE_HW_ISSUE_T76X_3793,
+ BASE_HW_ISSUE_T76X_3953,
+ BASE_HW_ISSUE_T76X_3966,
+ BASE_HW_ISSUE_T76X_3979,
+ BASE_HW_ISSUE_T76X_3982,
+ BASE_HW_ISSUE_TMIX_7891,
+ BASE_HW_ISSUE_END
+};
+
+static const enum base_hw_issue base_hw_issues_model_tFRx[] = {
+ BASE_HW_ISSUE_5736,
+ BASE_HW_ISSUE_9435,
+ BASE_HW_ISSUE_11051,
+ BASE_HW_ISSUE_T76X_1963,
+ BASE_HW_ISSUE_T76X_3086,
+ BASE_HW_ISSUE_T76X_3700,
+ BASE_HW_ISSUE_T76X_3793,
+ BASE_HW_ISSUE_T76X_3964,
+ BASE_HW_ISSUE_T76X_3979,
+ BASE_HW_ISSUE_T76X_3982,
+ BASE_HW_ISSUE_TMIX_7891,
+ GPUCORE_1619,
+ BASE_HW_ISSUE_END
+};
+
+static const enum base_hw_issue base_hw_issues_t86x_r0p2[] = {
+ BASE_HW_ISSUE_9435,
+ BASE_HW_ISSUE_10821,
+ BASE_HW_ISSUE_10883,
+ BASE_HW_ISSUE_10946,
+ BASE_HW_ISSUE_11051,
+ BASE_HW_ISSUE_11054,
+ BASE_HW_ISSUE_T76X_1909,
+ BASE_HW_ISSUE_T76X_1963,
+ BASE_HW_ISSUE_T76X_3086,
+ BASE_HW_ISSUE_T76X_3700,
+ BASE_HW_ISSUE_T76X_3793,
+ BASE_HW_ISSUE_T76X_3953,
+ BASE_HW_ISSUE_T76X_3964,
+ BASE_HW_ISSUE_T76X_3966,
+ BASE_HW_ISSUE_T76X_3979,
+ BASE_HW_ISSUE_T76X_3982,
+ BASE_HW_ISSUE_TMIX_7891,
+ BASE_HW_ISSUE_END
+};
+
+static const enum base_hw_issue base_hw_issues_t86x_r1p0[] = {
+ BASE_HW_ISSUE_9435,
+ BASE_HW_ISSUE_10821,
+ BASE_HW_ISSUE_10883,
+ BASE_HW_ISSUE_10946,
+ BASE_HW_ISSUE_11051,
+ BASE_HW_ISSUE_11054,
+ BASE_HW_ISSUE_T76X_1963,
+ BASE_HW_ISSUE_T76X_3086,
+ BASE_HW_ISSUE_T76X_3700,
+ BASE_HW_ISSUE_T76X_3793,
+ BASE_HW_ISSUE_T76X_3953,
+ BASE_HW_ISSUE_T76X_3966,
+ BASE_HW_ISSUE_T76X_3979,
+ BASE_HW_ISSUE_T76X_3982,
+ BASE_HW_ISSUE_TMIX_7891,
+ BASE_HW_ISSUE_END
+};
+
+static const enum base_hw_issue base_hw_issues_t86x_r2p0[] = {
+ BASE_HW_ISSUE_9435,
+ BASE_HW_ISSUE_10821,
+ BASE_HW_ISSUE_10883,
+ BASE_HW_ISSUE_10946,
+ BASE_HW_ISSUE_11051,
+ BASE_HW_ISSUE_11054,
+ BASE_HW_ISSUE_T76X_1963,
+ BASE_HW_ISSUE_T76X_3086,
+ BASE_HW_ISSUE_T76X_3700,
+ BASE_HW_ISSUE_T76X_3793,
+ BASE_HW_ISSUE_T76X_3953,
+ BASE_HW_ISSUE_T76X_3966,
+ BASE_HW_ISSUE_T76X_3979,
+ BASE_HW_ISSUE_T76X_3982,
+ BASE_HW_ISSUE_TMIX_7891,
+ BASE_HW_ISSUE_END
+};
+
+static const enum base_hw_issue base_hw_issues_model_t86x[] = {
+ BASE_HW_ISSUE_5736,
+ BASE_HW_ISSUE_9435,
+ BASE_HW_ISSUE_11051,
+ BASE_HW_ISSUE_T76X_1963,
+ BASE_HW_ISSUE_T76X_3086,
+ BASE_HW_ISSUE_T76X_3700,
+ BASE_HW_ISSUE_T76X_3793,
+ BASE_HW_ISSUE_T76X_3979,
+ BASE_HW_ISSUE_TMIX_7891,
+ BASE_HW_ISSUE_T76X_3982,
+ GPUCORE_1619,
+ BASE_HW_ISSUE_END
+};
+
+static const enum base_hw_issue base_hw_issues_t83x_r0p1[] = {
+ BASE_HW_ISSUE_9435,
+ BASE_HW_ISSUE_10821,
+ BASE_HW_ISSUE_10883,
+ BASE_HW_ISSUE_10946,
+ BASE_HW_ISSUE_11051,
+ BASE_HW_ISSUE_11054,
+ BASE_HW_ISSUE_T76X_1909,
+ BASE_HW_ISSUE_T76X_1963,
+ BASE_HW_ISSUE_T76X_3086,
+ BASE_HW_ISSUE_T76X_3700,
+ BASE_HW_ISSUE_T76X_3793,
+ BASE_HW_ISSUE_T76X_3953,
+ BASE_HW_ISSUE_T76X_3960,
+ BASE_HW_ISSUE_T76X_3979,
+ BASE_HW_ISSUE_T76X_3982,
+ BASE_HW_ISSUE_TMIX_7891,
+ BASE_HW_ISSUE_END
+};
+
+static const enum base_hw_issue base_hw_issues_t83x_r1p0[] = {
+ BASE_HW_ISSUE_9435,
+ BASE_HW_ISSUE_10821,
+ BASE_HW_ISSUE_10883,
+ BASE_HW_ISSUE_10946,
+ BASE_HW_ISSUE_11051,
+ BASE_HW_ISSUE_11054,
+ BASE_HW_ISSUE_T76X_1963,
+ BASE_HW_ISSUE_T76X_3086,
+ BASE_HW_ISSUE_T76X_3700,
+ BASE_HW_ISSUE_T76X_3793,
+ BASE_HW_ISSUE_T76X_3953,
+ BASE_HW_ISSUE_T76X_3960,
+ BASE_HW_ISSUE_T76X_3979,
+ BASE_HW_ISSUE_T76X_3982,
+ BASE_HW_ISSUE_TMIX_7891,
+ BASE_HW_ISSUE_END
+};
+
+static const enum base_hw_issue base_hw_issues_model_t83x[] = {
+ BASE_HW_ISSUE_5736,
+ BASE_HW_ISSUE_9435,
+ BASE_HW_ISSUE_11051,
+ BASE_HW_ISSUE_T76X_1963,
+ BASE_HW_ISSUE_T76X_3086,
+ BASE_HW_ISSUE_T76X_3700,
+ BASE_HW_ISSUE_T76X_3793,
+ BASE_HW_ISSUE_T76X_3964,
+ BASE_HW_ISSUE_T76X_3979,
+ BASE_HW_ISSUE_T76X_3982,
+ BASE_HW_ISSUE_TMIX_7891,
+ GPUCORE_1619,
+ BASE_HW_ISSUE_END
+};
+
+static const enum base_hw_issue base_hw_issues_t82x_r0p0[] = {
+ BASE_HW_ISSUE_9435,
+ BASE_HW_ISSUE_10821,
+ BASE_HW_ISSUE_10883,
+ BASE_HW_ISSUE_10946,
+ BASE_HW_ISSUE_11051,
+ BASE_HW_ISSUE_11054,
+ BASE_HW_ISSUE_T76X_1909,
+ BASE_HW_ISSUE_T76X_1963,
+ BASE_HW_ISSUE_T76X_3086,
+ BASE_HW_ISSUE_T76X_3700,
+ BASE_HW_ISSUE_T76X_3793,
+ BASE_HW_ISSUE_T76X_3953,
+ BASE_HW_ISSUE_T76X_3960,
+ BASE_HW_ISSUE_T76X_3964,
+ BASE_HW_ISSUE_T76X_3979,
+ BASE_HW_ISSUE_T76X_3982,
+ BASE_HW_ISSUE_TMIX_7891,
+ BASE_HW_ISSUE_END
+};
+
+static const enum base_hw_issue base_hw_issues_t82x_r0p1[] = {
+ BASE_HW_ISSUE_9435,
+ BASE_HW_ISSUE_10821,
+ BASE_HW_ISSUE_10883,
+ BASE_HW_ISSUE_10946,
+ BASE_HW_ISSUE_11051,
+ BASE_HW_ISSUE_11054,
+ BASE_HW_ISSUE_T76X_1909,
+ BASE_HW_ISSUE_T76X_1963,
+ BASE_HW_ISSUE_T76X_3086,
+ BASE_HW_ISSUE_T76X_3700,
+ BASE_HW_ISSUE_T76X_3793,
+ BASE_HW_ISSUE_T76X_3953,
+ BASE_HW_ISSUE_T76X_3960,
+ BASE_HW_ISSUE_T76X_3979,
+ BASE_HW_ISSUE_T76X_3982,
+ BASE_HW_ISSUE_TMIX_7891,
+ BASE_HW_ISSUE_END
+};
+
+static const enum base_hw_issue base_hw_issues_t82x_r1p0[] = {
+ BASE_HW_ISSUE_9435,
+ BASE_HW_ISSUE_10821,
+ BASE_HW_ISSUE_10883,
+ BASE_HW_ISSUE_10946,
+ BASE_HW_ISSUE_11051,
+ BASE_HW_ISSUE_11054,
+ BASE_HW_ISSUE_T76X_1963,
+ BASE_HW_ISSUE_T76X_3086,
+ BASE_HW_ISSUE_T76X_3700,
+ BASE_HW_ISSUE_T76X_3793,
+ BASE_HW_ISSUE_T76X_3953,
+ BASE_HW_ISSUE_T76X_3960,
+ BASE_HW_ISSUE_T76X_3979,
+ BASE_HW_ISSUE_T76X_3982,
+ BASE_HW_ISSUE_TMIX_7891,
+ BASE_HW_ISSUE_END
+};
+
+static const enum base_hw_issue base_hw_issues_model_t82x[] = {
+ BASE_HW_ISSUE_5736,
+ BASE_HW_ISSUE_9435,
+ BASE_HW_ISSUE_11051,
+ BASE_HW_ISSUE_T76X_1963,
+ BASE_HW_ISSUE_T76X_3086,
+ BASE_HW_ISSUE_T76X_3700,
+ BASE_HW_ISSUE_T76X_3793,
+ BASE_HW_ISSUE_T76X_3979,
+ BASE_HW_ISSUE_T76X_3982,
+ BASE_HW_ISSUE_TMIX_7891,
+ GPUCORE_1619,
+ BASE_HW_ISSUE_END
+};
+
+static const enum base_hw_issue base_hw_issues_tMIx_r0p0_05dev0[] = {
+ BASE_HW_ISSUE_9435,
+ BASE_HW_ISSUE_10682,
+ BASE_HW_ISSUE_10821,
+ BASE_HW_ISSUE_11054,
+ BASE_HW_ISSUE_T76X_3700,
+ BASE_HW_ISSUE_T76X_3953,
+ BASE_HW_ISSUE_T76X_3982,
+ BASE_HW_ISSUE_TMIX_7891,
+ BASE_HW_ISSUE_TMIX_8042,
+ BASE_HW_ISSUE_TMIX_8133,
+ BASE_HW_ISSUE_TMIX_8138,
+ BASE_HW_ISSUE_TMIX_8343,
+ BASE_HW_ISSUE_TMIX_8463,
+ BASE_HW_ISSUE_TMIX_8456,
+ BASE_HW_ISSUE_END
+};
+
+static const enum base_hw_issue base_hw_issues_tMIx_r0p0[] = {
+ BASE_HW_ISSUE_9435,
+ BASE_HW_ISSUE_10682,
+ BASE_HW_ISSUE_10821,
+ BASE_HW_ISSUE_11054,
+ BASE_HW_ISSUE_T76X_3700,
+ BASE_HW_ISSUE_T76X_3982,
+ BASE_HW_ISSUE_TMIX_7891,
+ BASE_HW_ISSUE_TMIX_7940,
+ BASE_HW_ISSUE_TMIX_8042,
+ BASE_HW_ISSUE_TMIX_8133,
+ BASE_HW_ISSUE_TMIX_8138,
+ BASE_HW_ISSUE_TMIX_8206,
+ BASE_HW_ISSUE_TMIX_8343,
+ BASE_HW_ISSUE_TMIX_8463,
+ BASE_HW_ISSUE_TMIX_8456,
+ BASE_HW_ISSUE_END
+};
+
+static const enum base_hw_issue base_hw_issues_model_tMIx[] = {
+ BASE_HW_ISSUE_5736,
+ BASE_HW_ISSUE_9435,
+ BASE_HW_ISSUE_T76X_3700,
+ BASE_HW_ISSUE_T76X_3982,
+ BASE_HW_ISSUE_TMIX_7891,
+ BASE_HW_ISSUE_TMIX_7940,
+ BASE_HW_ISSUE_TMIX_8042,
+ BASE_HW_ISSUE_TMIX_8133,
+ BASE_HW_ISSUE_TMIX_8138,
+ BASE_HW_ISSUE_TMIX_8206,
+ BASE_HW_ISSUE_TMIX_8343,
+ BASE_HW_ISSUE_TMIX_8456,
+ GPUCORE_1619,
+ BASE_HW_ISSUE_END
+};
+
+static const enum base_hw_issue base_hw_issues_tHEx_r0p0[] = {
+ BASE_HW_ISSUE_9435,
+ BASE_HW_ISSUE_10682,
+ BASE_HW_ISSUE_10821,
+ BASE_HW_ISSUE_T76X_3700,
+ BASE_HW_ISSUE_TMIX_7891,
+ BASE_HW_ISSUE_TMIX_8042,
+ BASE_HW_ISSUE_TMIX_8133,
+ BASE_HW_ISSUE_END
+};
+
+static const enum base_hw_issue base_hw_issues_model_tHEx[] = {
+ BASE_HW_ISSUE_5736,
+ BASE_HW_ISSUE_9435,
+ BASE_HW_ISSUE_T76X_3700,
+ BASE_HW_ISSUE_TMIX_7891,
+ BASE_HW_ISSUE_TMIX_8042,
+ BASE_HW_ISSUE_TMIX_8133,
+ GPUCORE_1619,
+ BASE_HW_ISSUE_END
+};
+
+
+
+#endif /* _BASE_HWCONFIG_ISSUES_H_ */
diff --git a/midgard-0.r2p0/mali_base_kernel.h b/midgard-0.r2p0/mali_base_kernel.h
new file mode 100755
index 0000000..bcb05e4
--- /dev/null
+++ b/midgard-0.r2p0/mali_base_kernel.h
@@ -0,0 +1,1819 @@
+/*
+ *
+ * (C) COPYRIGHT 2010-2016 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained
+ * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
+ * Boston, MA 02110-1301, USA.
+ *
+ */
+
+
+
+
+
+/**
+ * @file
+ * Base structures shared with the kernel.
+ */
+
+#ifndef _BASE_KERNEL_H_
+#define _BASE_KERNEL_H_
+
+#ifndef __user
+#define __user
+#endif
+
+/* Support UK6 IOCTLS */
+#define BASE_LEGACY_UK6_SUPPORT 1
+
+/* Support UK7 IOCTLS */
+/* NB: To support UK6 we also need to support UK7 */
+#define BASE_LEGACY_UK7_SUPPORT 1
+
+/* Support UK8 IOCTLS */
+#define BASE_LEGACY_UK8_SUPPORT 1
+
+/* Support UK9 IOCTLS */
+#define BASE_LEGACY_UK9_SUPPORT 1
+
+/* Support UK10_2 IOCTLS */
+#define BASE_LEGACY_UK10_2_SUPPORT 1
+
+/* Support UK10_4 IOCTLS */
+#define BASE_LEGACY_UK10_4_SUPPORT 1
+
+typedef struct base_mem_handle {
+ struct {
+ u64 handle;
+ } basep;
+} base_mem_handle;
+
+#include "mali_base_mem_priv.h"
+#include "mali_kbase_profiling_gator_api.h"
+#include "mali_midg_coherency.h"
+#include "mali_kbase_gpu_id.h"
+
+/*
+ * Dependency stuff, keep it private for now. May want to expose it if
+ * we decide to make the number of semaphores a configurable
+ * option.
+ */
+#define BASE_JD_ATOM_COUNT 256
+
+#define BASEP_JD_SEM_PER_WORD_LOG2 5
+#define BASEP_JD_SEM_PER_WORD (1 << BASEP_JD_SEM_PER_WORD_LOG2)
+#define BASEP_JD_SEM_WORD_NR(x) ((x) >> BASEP_JD_SEM_PER_WORD_LOG2)
+#define BASEP_JD_SEM_MASK_IN_WORD(x) (1 << ((x) & (BASEP_JD_SEM_PER_WORD - 1)))
+#define BASEP_JD_SEM_ARRAY_SIZE BASEP_JD_SEM_WORD_NR(BASE_JD_ATOM_COUNT)
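+/* For illustration: each word of the semaphore bitmap covers 32 atoms
+ * (BASEP_JD_SEM_PER_WORD_LOG2 == 5), so for atom number 70:
+ *   BASEP_JD_SEM_WORD_NR(70)      == 70 >> 5        == 2  (third word)
+ *   BASEP_JD_SEM_MASK_IN_WORD(70) == 1 << (70 & 31) == 1 << 6
+ * and BASEP_JD_SEM_ARRAY_SIZE == 256 >> 5 == 8 words in total.
+ */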
+
+/* Set/reset values for a software event */
+#define BASE_JD_SOFT_EVENT_SET ((unsigned char)1)
+#define BASE_JD_SOFT_EVENT_RESET ((unsigned char)0)
+
+#define BASE_GPU_NUM_TEXTURE_FEATURES_REGISTERS 3
+
+#define BASE_MAX_COHERENT_GROUPS 16
+
+#if defined CDBG_ASSERT
+#define LOCAL_ASSERT CDBG_ASSERT
+#elif defined KBASE_DEBUG_ASSERT
+#define LOCAL_ASSERT KBASE_DEBUG_ASSERT
+#else
+#error assert macro not defined!
+#endif
+
+#if defined PAGE_MASK
+#define LOCAL_PAGE_LSB ~PAGE_MASK
+#else
+#include <osu/mali_osu.h>
+
+#if defined OSU_CONFIG_CPU_PAGE_SIZE_LOG2
+#define LOCAL_PAGE_LSB ((1ul << OSU_CONFIG_CPU_PAGE_SIZE_LOG2) - 1)
+#else
+#error Failed to find page size
+#endif
+#endif
+
+/** 32/64-bit neutral way to represent pointers */
+typedef union kbase_pointer {
+ void __user *value; /**< client should store their pointers here */
+ u32 compat_value; /**< 64-bit kernels should fetch value here when handling 32-bit clients */
+ u64 sizer; /**< Force 64-bit storage for all clients regardless */
+} kbase_pointer;
+
+/**
+ * @addtogroup base_user_api User-side Base APIs
+ * @{
+ */
+
+/**
+ * @addtogroup base_user_api_memory User-side Base Memory APIs
+ * @{
+ */
+
+/**
+ * @brief Memory allocation, access/hint flags
+ *
+ * A combination of MEM_PROT/MEM_HINT flags must be passed to each allocator
+ * in order to determine the best cache policy. Some combinations are
+ * of course invalid (e.g. @c MEM_PROT_CPU_WR | @c MEM_HINT_CPU_RD,
+ * which would define a @a write-only region on the CPU side that is
+ * nevertheless heavily read by the CPU).
+ * Other flags are only meaningful to a particular allocator.
+ * More flags can be added to this list, as long as they don't clash
+ * (see ::BASE_MEM_FLAGS_NR_BITS for the number of the first free bit).
+ */
+typedef u32 base_mem_alloc_flags;
+
+/**
+ * @brief Memory allocation, access/hint flags
+ *
+ * See ::base_mem_alloc_flags.
+ *
+ */
+enum {
+/* IN */
+ BASE_MEM_PROT_CPU_RD = (1U << 0), /**< Read access CPU side */
+ BASE_MEM_PROT_CPU_WR = (1U << 1), /**< Write access CPU side */
+ BASE_MEM_PROT_GPU_RD = (1U << 2), /**< Read access GPU side */
+ BASE_MEM_PROT_GPU_WR = (1U << 3), /**< Write access GPU side */
+ BASE_MEM_PROT_GPU_EX = (1U << 4), /**< Execute allowed on the GPU
+ side */
+
+ /* BASE_MEM_HINT flags have been removed, but their values are reserved
+ * for backwards compatibility with older user-space drivers. The values
+ * can be re-used once support for r5p0 user-space drivers is removed,
+ * presumably in r7p0.
+ *
+ * RESERVED: (1U << 5)
+ * RESERVED: (1U << 6)
+ * RESERVED: (1U << 7)
+ * RESERVED: (1U << 8)
+ */
+
+ BASE_MEM_GROW_ON_GPF = (1U << 9), /**< Grow backing store on GPU
+ Page Fault */
+
+ BASE_MEM_COHERENT_SYSTEM = (1U << 10), /**< Page coherence Outer
+ shareable, if available */
+ BASE_MEM_COHERENT_LOCAL = (1U << 11), /**< Page coherence Inner
+ shareable */
+ BASE_MEM_CACHED_CPU = (1U << 12), /**< Should be cached on the
+ CPU */
+
+/* IN/OUT */
+ BASE_MEM_SAME_VA = (1U << 13), /**< Must have same VA on both the GPU
+ and the CPU */
+/* OUT */
+	BASE_MEM_NEED_MMAP = (1U << 14), /**< Must call mmap to acquire a GPU
+ address for the alloc */
+/* IN */
+ BASE_MEM_COHERENT_SYSTEM_REQUIRED = (1U << 15), /**< Page coherence
+ Outer shareable, required. */
+ BASE_MEM_SECURE = (1U << 16), /**< Secure memory */
+ BASE_MEM_DONT_NEED = (1U << 17), /**< Not needed physical
+ memory */
+ BASE_MEM_IMPORT_SHARED = (1U << 18), /**< Must use shared CPU/GPU zone
+ (SAME_VA zone) but doesn't
+ require the addresses to
+ be the same */
+};
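+
+/*
+ * Example (illustrative only) of combining the flags above for an
+ * allocation that the CPU writes, the GPU reads, and whose backing
+ * store grows on GPU page fault:
+ *
+ * @code
+ * base_mem_alloc_flags flags = BASE_MEM_PROT_CPU_WR |
+ *                              BASE_MEM_PROT_GPU_RD |
+ *                              BASE_MEM_GROW_ON_GPF;
+ * @endcode
+ */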
+
+/**
+ * @brief Number of bits used as flags for base memory management
+ *
+ * Must be kept in sync with the ::base_mem_alloc_flags flags
+ */
+#define BASE_MEM_FLAGS_NR_BITS 19
+
+/**
+ * A mask for all output bits, excluding IN/OUT bits.
+ */
+#define BASE_MEM_FLAGS_OUTPUT_MASK BASE_MEM_NEED_MMAP
+
+/**
+ * A mask for all input bits, including IN/OUT bits.
+ */
+#define BASE_MEM_FLAGS_INPUT_MASK \
+ (((1 << BASE_MEM_FLAGS_NR_BITS) - 1) & ~BASE_MEM_FLAGS_OUTPUT_MASK)
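+
+/*
+ * Illustrative check (not a driver API): an incoming request is only
+ * valid if it sets no bits outside the input mask.
+ *
+ * @code
+ * base_mem_alloc_flags in_flags = BASE_MEM_PROT_CPU_RD | BASE_MEM_SAME_VA;
+ * int valid = (in_flags & ~BASE_MEM_FLAGS_INPUT_MASK) == 0;
+ * @endcode
+ */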
+
+/**
+ * A mask for all the flags which are modifiable via the base_mem_set_flags
+ * interface.
+ */
+#define BASE_MEM_FLAGS_MODIFIABLE \
+ (BASE_MEM_DONT_NEED | BASE_MEM_COHERENT_SYSTEM | \
+ BASE_MEM_COHERENT_LOCAL)
+
+/**
+ * enum base_mem_import_type - Memory types supported by @a base_mem_import
+ *
+ * @BASE_MEM_IMPORT_TYPE_INVALID: Invalid type
+ * @BASE_MEM_IMPORT_TYPE_UMP: UMP import. Handle type is ump_secure_id.
+ * @BASE_MEM_IMPORT_TYPE_UMM: UMM import. Handle type is a file descriptor (int)
+ * @BASE_MEM_IMPORT_TYPE_USER_BUFFER: User buffer import. Handle is a
+ * base_mem_import_user_buffer
+ *
+ * Each type defines what the supported handle type is.
+ *
+ * If any new type is added here ARM must be contacted
+ * to allocate a numeric value for it.
+ * Do not just add a new type without synchronizing with ARM
+ * as future releases from ARM might include other new types
+ * which could clash with your custom types.
+ */
+typedef enum base_mem_import_type {
+ BASE_MEM_IMPORT_TYPE_INVALID = 0,
+ BASE_MEM_IMPORT_TYPE_UMP = 1,
+ BASE_MEM_IMPORT_TYPE_UMM = 2,
+ BASE_MEM_IMPORT_TYPE_USER_BUFFER = 3
+} base_mem_import_type;
+
+/**
+ * struct base_mem_import_user_buffer - Handle of an imported user buffer
+ *
+ * @ptr: kbase_pointer to imported user buffer
+ * @length: length of imported user buffer in bytes
+ *
+ * This structure is used to represent a handle of an imported user buffer.
+ */
+
+struct base_mem_import_user_buffer {
+ kbase_pointer ptr;
+ u64 length;
+};
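+
+/*
+ * Illustrative sketch of preparing a BASE_MEM_IMPORT_TYPE_USER_BUFFER
+ * handle ('buf' and the length are hypothetical client-side values):
+ *
+ * @code
+ * struct base_mem_import_user_buffer ub;
+ * void __user *buf = NULL;
+ *
+ * ub.ptr.sizer = 0;
+ * ub.ptr.value = buf;
+ * ub.length = 4096;     // length in bytes
+ * @endcode
+ */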
+
+/**
+ * @brief Invalid memory handle.
+ *
+ * Return value from functions returning @ref base_mem_handle on error.
+ *
+ * @warning @ref base_mem_handle_new_invalid must be used instead of this macro
+ * in C++ code or other situations where compound literals cannot be used.
+ */
+#define BASE_MEM_INVALID_HANDLE ((base_mem_handle) { {BASEP_MEM_INVALID_HANDLE} })
+
+/**
+ * @brief Special write-alloc memory handle.
+ *
+ * A special handle is used to represent a region where a special page is mapped
+ * with a write-alloc cache setup, typically used when the write result of the
+ * GPU isn't needed, but the GPU must write anyway.
+ *
+ * @warning @ref base_mem_handle_new_write_alloc must be used instead of this macro
+ * in C++ code or other situations where compound literals cannot be used.
+ */
+#define BASE_MEM_WRITE_ALLOC_PAGES_HANDLE ((base_mem_handle) { {BASEP_MEM_WRITE_ALLOC_PAGES_HANDLE} })
+
+#define BASEP_MEM_INVALID_HANDLE (0ull << 12)
+#define BASE_MEM_MMU_DUMP_HANDLE (1ull << 12)
+#define BASE_MEM_TRACE_BUFFER_HANDLE (2ull << 12)
+#define BASE_MEM_MAP_TRACKING_HANDLE (3ull << 12)
+#define BASEP_MEM_WRITE_ALLOC_PAGES_HANDLE (4ull << 12)
+/* handles up to (64 << PAGE_SHIFT) are reserved for future special handles */
+#define BASE_MEM_COOKIE_BASE (64ul << 12)
+#define BASE_MEM_FIRST_FREE_ADDRESS ((BITS_PER_LONG << 12) + \
+ BASE_MEM_COOKIE_BASE)
+
+/* Mask to detect 4GB boundary alignment */
+#define BASE_MEM_MASK_4GB 0xfffff000UL
+
+
+/* Bit mask of cookies used for memory allocation setup */
+#define KBASE_COOKIE_MASK ~1UL /* bit 0 is reserved */
+
+
+/**
+ * @brief Result codes of changing the size of the backing store allocated to a tmem region
+ */
+typedef enum base_backing_threshold_status {
+ BASE_BACKING_THRESHOLD_OK = 0, /**< Resize successful */
+ BASE_BACKING_THRESHOLD_ERROR_NOT_GROWABLE = -1, /**< Not a growable tmem object */
+ BASE_BACKING_THRESHOLD_ERROR_OOM = -2, /**< Increase failed due to an out-of-memory condition */
+ BASE_BACKING_THRESHOLD_ERROR_INVALID_ARGUMENTS = -4 /**< Invalid arguments (not tmem, illegal size request, etc.) */
+} base_backing_threshold_status;
+
+/**
+ * @addtogroup base_user_api_memory_defered User-side Base Deferred Memory Coherency APIs
+ * @{
+ */
+
+/**
+ * @brief a basic memory operation (sync-set).
+ *
+ * The content of this structure is private, and should only be used
+ * by the accessors.
+ */
+typedef struct base_syncset {
+ struct basep_syncset basep_sset;
+} base_syncset;
+
+/** @} end group base_user_api_memory_defered */
+
+/**
+ * Handle to represent an imported memory object.
+ * Simple opaque handle to imported memory; it can't be used
+ * with anything but base_external_resource_init to bind to an atom.
+ */
+typedef struct base_import_handle {
+ struct {
+ u64 handle;
+ } basep;
+} base_import_handle;
+
+/** @} end group base_user_api_memory */
+
+/**
+ * @addtogroup base_user_api_job_dispatch User-side Base Job Dispatcher APIs
+ * @{
+ */
+
+typedef int platform_fence_type;
+#define INVALID_PLATFORM_FENCE ((platform_fence_type)-1)
+
+/**
+ * Base stream handle.
+ *
+ * References an underlying base stream object.
+ */
+typedef struct base_stream {
+ struct {
+ int fd;
+ } basep;
+} base_stream;
+
+/**
+ * Base fence handle.
+ *
+ * References an underlying base fence object.
+ */
+typedef struct base_fence {
+ struct {
+ int fd;
+ int stream_fd;
+ } basep;
+} base_fence;
+
+/**
+ * @brief Per-job data
+ *
+ * This structure is used to store per-job data, and is completely unused
+ * by the Base driver. It can be used to store things such as a callback
+ * function pointer or data needed to handle job completion. It is guaranteed
+ * to be untouched by the Base driver.
+ */
+typedef struct base_jd_udata {
+ u64 blob[2]; /**< per-job data array */
+} base_jd_udata;
+
+/**
+ * @brief Memory aliasing info
+ *
+ * Describes a memory handle to be aliased.
+ * A subset of the handle can be chosen for aliasing, given an offset and a
+ * length.
+ * A special handle BASE_MEM_WRITE_ALLOC_PAGES_HANDLE is used to represent a
+ * region where a special page is mapped with a write-alloc cache setup,
+ * typically used when the write result of the GPU isn't needed, but the GPU
+ * must write anyway.
+ *
+ * Offset and length are specified in pages.
+ * Offset must be within the size of the handle.
+ * Offset+length must not overrun the size of the handle.
+ *
+ * @handle Handle to alias, can be BASE_MEM_WRITE_ALLOC_PAGES_HANDLE
+ * @offset Offset within the handle to start aliasing from, in pages.
+ * Not used with BASE_MEM_WRITE_ALLOC_PAGES_HANDLE.
+ * @length Length to alias, in pages. For BASE_MEM_WRITE_ALLOC_PAGES_HANDLE
+ * specifies the number of times the special page is needed.
+ */
+struct base_mem_aliasing_info {
+ base_mem_handle handle;
+ u64 offset;
+ u64 length;
+};
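+
+/*
+ * Illustrative sketch (not a driver API): request the special
+ * write-alloc page eight times. With a regular handle, offset and
+ * length would instead select a page range within the handle.
+ *
+ * @code
+ * struct base_mem_aliasing_info ai;
+ *
+ * ai.handle = BASE_MEM_WRITE_ALLOC_PAGES_HANDLE;
+ * ai.offset = 0;        // not used with the write-alloc handle
+ * ai.length = 8;        // the special page is needed eight times
+ * @endcode
+ */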
+
+/**
+ * struct base_jit_alloc_info - Structure which describes a JIT allocation
+ * request.
+ * @gpu_alloc_addr: The GPU virtual address to write the JIT
+ * allocated GPU virtual address to.
+ * @va_pages: The minimum number of virtual pages required.
+ * @commit_pages: The minimum number of physical pages which
+ * should back the allocation.
+ * @extent: Granularity of physical pages to grow the
+ * allocation by during a fault.
+ * @id: Unique ID provided by the caller, this is used
+ * to pair allocation and free requests.
+ * Zero is not a valid value.
+ */
+struct base_jit_alloc_info {
+ u64 gpu_alloc_addr;
+ u64 va_pages;
+ u64 commit_pages;
+ u64 extent;
+ u8 id;
+};
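+
+/*
+ * Illustrative sketch of filling a JIT allocation request (all values
+ * hypothetical, and 'result_gpu_va' is a placeholder for the GPU VA
+ * that receives the allocated address):
+ *
+ * @code
+ * struct base_jit_alloc_info jit;
+ * u64 result_gpu_va = 0;
+ *
+ * jit.gpu_alloc_addr = result_gpu_va;
+ * jit.va_pages = 16;    // at least 16 virtual pages
+ * jit.commit_pages = 4; // at least 4 physical pages backing it
+ * jit.extent = 4;       // grow by 4 pages on fault
+ * jit.id = 1;           // zero is not a valid id
+ * @endcode
+ */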
+
+/**
+ * @brief Job dependency type.
+ *
+ * A flags field will be inserted into the atom structure to specify whether a dependency is a data or
+ * ordering dependency (by putting it before/after 'core_req' in the structure it should be possible to
+ * add it without changing the structure size).
+ * When the flag is set for a particular dependency to signal that it is an ordering-only dependency,
+ * errors will not be propagated.
+ */
+typedef u8 base_jd_dep_type;
+
+
+#define BASE_JD_DEP_TYPE_INVALID (0) /**< Invalid dependency */
+#define BASE_JD_DEP_TYPE_DATA (1U << 0) /**< Data dependency */
+#define BASE_JD_DEP_TYPE_ORDER (1U << 1) /**< Order dependency */
+
+/**
+ * @brief Job chain hardware requirements.
+ *
+ * A job chain must specify which GPU features it needs so that the
+ * driver can schedule the job correctly. Failing to specify the
+ * correct settings can (and will) cause an early job termination. Multiple
+ * values can be ORed together to specify multiple requirements.
+ * A special case is ::BASE_JD_REQ_DEP, which is used to express complex
+ * dependencies, and doesn't execute anything on the hardware.
+ */
+typedef u32 base_jd_core_req;
+
+/* Requirements that come from the HW */
+
+/**
+ * No requirement, dependency only
+ */
+#define BASE_JD_REQ_DEP ((base_jd_core_req)0)
+
+/**
+ * Requires fragment shaders
+ */
+#define BASE_JD_REQ_FS ((base_jd_core_req)1 << 0)
+
+/**
+ * Requires compute shaders
+ * This covers any of the following Midgard Job types:
+ * - Vertex Shader Job
+ * - Geometry Shader Job
+ * - An actual Compute Shader Job
+ *
+ * Compare this with @ref BASE_JD_REQ_ONLY_COMPUTE, which specifies that the
+ * job is specifically just the "Compute Shader" job type, and not the "Vertex
+ * Shader" nor the "Geometry Shader" job type.
+ */
+#define BASE_JD_REQ_CS ((base_jd_core_req)1 << 1)
+#define BASE_JD_REQ_T ((base_jd_core_req)1 << 2) /**< Requires tiling */
+#define BASE_JD_REQ_CF ((base_jd_core_req)1 << 3) /**< Requires cache flushes */
+#define BASE_JD_REQ_V ((base_jd_core_req)1 << 4) /**< Requires value writeback */
+
+/* SW-only requirements - the HW does not expose these as part of the job slot capabilities */
+
+/* Requires fragment job with AFBC encoding */
+#define BASE_JD_REQ_FS_AFBC ((base_jd_core_req)1 << 13)
+
+/**
+ * SW-only requirement: coalesce completion events.
+ * If this bit is set then completion of this atom will not cause an event to
+ * be sent to userspace, whether successful or not; completion events will be
+ * deferred until an atom completes which does not have this bit set.
+ *
+ * This bit may not be used in combination with BASE_JD_REQ_EXTERNAL_RESOURCES.
+ */
+#define BASE_JD_REQ_EVENT_COALESCE ((base_jd_core_req)1 << 5)
+
+/**
+ * SW Only requirement: the job chain requires a coherent core group. We don't
+ * mind which coherent core group is used.
+ */
+#define BASE_JD_REQ_COHERENT_GROUP ((base_jd_core_req)1 << 6)
+
+/**
+ * SW Only requirement: The performance counters should be enabled only when
+ * they are needed, to reduce power consumption.
+ */
+
+#define BASE_JD_REQ_PERMON ((base_jd_core_req)1 << 7)
+
+/**
+ * SW Only requirement: External resources are referenced by this atom.
+ * When external resources are referenced, no syncsets can be bundled with the atom;
+ * they should instead be part of NULL jobs inserted into the dependency tree.
+ * The first pre_dep object must be configured for the external resources to use;
+ * the second pre_dep object can be used to create other dependencies.
+ *
+ * This bit may not be used in combination with BASE_JD_REQ_EVENT_COALESCE.
+ */
+#define BASE_JD_REQ_EXTERNAL_RESOURCES ((base_jd_core_req)1 << 8)
+
+/**
+ * SW Only requirement: Software defined job. Jobs with this bit set will not be submitted
+ * to the hardware but will cause some action to happen within the driver
+ */
+#define BASE_JD_REQ_SOFT_JOB ((base_jd_core_req)1 << 9)
+
+#define BASE_JD_REQ_SOFT_DUMP_CPU_GPU_TIME (BASE_JD_REQ_SOFT_JOB | 0x1)
+#define BASE_JD_REQ_SOFT_FENCE_TRIGGER (BASE_JD_REQ_SOFT_JOB | 0x2)
+#define BASE_JD_REQ_SOFT_FENCE_WAIT (BASE_JD_REQ_SOFT_JOB | 0x3)
+
+/**
+ * SW Only requirement : Replay job.
+ *
+ * If the preceding job fails, the replay job will cause the jobs specified in
+ * the list of base_jd_replay_payload pointed to by the jc pointer to be
+ * replayed.
+ *
+ * A replay job will only cause jobs to be replayed up to BASEP_JD_REPLAY_LIMIT
+ * times. If a job fails more than BASEP_JD_REPLAY_LIMIT times then the replay
+ * job is failed, as well as any following dependencies.
+ *
+ * The replayed jobs will require a number of atom IDs. If there are not enough
+ * free atom IDs then the replay job will fail.
+ *
+ * If the preceding job does not fail, then the replay job is returned as
+ * completed.
+ *
+ * The replayed jobs will never be returned to userspace. The preceding failed
+ * job will be returned to userspace as failed; the status of this job should
+ * be ignored. Completion should be determined by the status of the replay soft
+ * job.
+ *
+ * In order for the jobs to be replayed, the job headers will have to be
+ * modified. The Status field will be reset to NOT_STARTED. If the Job Type
+ * field indicates a Vertex Shader Job then it will be changed to Null Job.
+ *
+ * The replayed jobs have the following assumptions :
+ *
+ * - No external resources. Any required external resources will be held by the
+ * replay atom.
+ * - Pre-dependencies are created based on job order.
+ * - Atom numbers are automatically assigned.
+ * - device_nr is set to 0. This is not relevant as
+ * BASE_JD_REQ_SPECIFIC_COHERENT_GROUP should not be set.
+ * - Priority is inherited from the replay job.
+ */
+#define BASE_JD_REQ_SOFT_REPLAY (BASE_JD_REQ_SOFT_JOB | 0x4)
+/**
+ * SW only requirement: event wait/trigger job.
+ *
+ * - BASE_JD_REQ_SOFT_EVENT_WAIT: this job will block until the event is set.
+ * - BASE_JD_REQ_SOFT_EVENT_SET: this job sets the event, thus unblocks the
+ * other waiting jobs. It completes immediately.
+ * - BASE_JD_REQ_SOFT_EVENT_RESET: this job resets the event, making it
+ * possible for other jobs to wait upon. It completes immediately.
+ */
+#define BASE_JD_REQ_SOFT_EVENT_WAIT (BASE_JD_REQ_SOFT_JOB | 0x5)
+#define BASE_JD_REQ_SOFT_EVENT_SET (BASE_JD_REQ_SOFT_JOB | 0x6)
+#define BASE_JD_REQ_SOFT_EVENT_RESET (BASE_JD_REQ_SOFT_JOB | 0x7)
+
+#define BASE_JD_REQ_SOFT_DEBUG_COPY (BASE_JD_REQ_SOFT_JOB | 0x8)
+
+/**
+ * SW only requirement: Just In Time allocation
+ *
+ * This job requests a JIT allocation based on the request in the
+ * @base_jit_alloc_info structure which is passed via the jc element of
+ * the atom.
+ *
+ * It should be noted that the id entry in @base_jit_alloc_info must not
+ * be reused until it has been released via @BASE_JD_REQ_SOFT_JIT_FREE.
+ *
+ * Should this soft job fail, it is expected that a @BASE_JD_REQ_SOFT_JIT_FREE
+ * soft job is still made to free the JIT allocation.
+ *
+ * The job will complete immediately.
+ */
+#define BASE_JD_REQ_SOFT_JIT_ALLOC (BASE_JD_REQ_SOFT_JOB | 0x9)
+/**
+ * SW only requirement: Just In Time free
+ *
+ * This job requests a JIT allocation created by @BASE_JD_REQ_SOFT_JIT_ALLOC
+ * to be freed. The ID of the JIT allocation is passed via the jc element of
+ * the atom.
+ *
+ * The job will complete immediately.
+ */
+#define BASE_JD_REQ_SOFT_JIT_FREE (BASE_JD_REQ_SOFT_JOB | 0xa)
+
+/**
+ * SW only requirement: Map external resource
+ *
+ * This job requests that external resource(s) are mapped once the dependencies
+ * of the job have been satisfied. The list of external resources is
+ * passed via the jc element of the atom which is a pointer to a
+ * @base_external_resource_list.
+ */
+#define BASE_JD_REQ_SOFT_EXT_RES_MAP (BASE_JD_REQ_SOFT_JOB | 0xb)
+/**
+ * SW only requirement: Unmap external resource
+ *
+ * This job requests that external resource(s) are unmapped once the dependencies
+ * of the job have been satisfied. The list of external resources is
+ * passed via the jc element of the atom which is a pointer to a
+ * @base_external_resource_list.
+ */
+#define BASE_JD_REQ_SOFT_EXT_RES_UNMAP (BASE_JD_REQ_SOFT_JOB | 0xc)
+
+/**
+ * HW Requirement: Requires Compute shaders (but not Vertex or Geometry Shaders)
+ *
+ * This indicates that the Job Chain contains Midgard Jobs of the 'Compute Shaders' type.
+ *
+ * In contrast to @ref BASE_JD_REQ_CS, this does \b not indicate that the Job
+ * Chain contains 'Geometry Shader' or 'Vertex Shader' jobs.
+ */
+#define BASE_JD_REQ_ONLY_COMPUTE ((base_jd_core_req)1 << 10)
+
+/**
+ * HW Requirement: Use the base_jd_atom::device_nr field to specify a
+ * particular core group
+ *
+ * If both @ref BASE_JD_REQ_COHERENT_GROUP and this flag are set, this flag takes priority
+ *
+ * This is only guaranteed to work for @ref BASE_JD_REQ_ONLY_COMPUTE atoms.
+ *
+ * If the core availability policy is keeping the required core group turned off, then
+ * the job will fail with a @ref BASE_JD_EVENT_PM_EVENT error code.
+ */
+#define BASE_JD_REQ_SPECIFIC_COHERENT_GROUP ((base_jd_core_req)1 << 11)
+
+/**
+ * SW Flag: If this bit is set then the successful completion of this atom
+ * will not cause an event to be sent to userspace
+ */
+#define BASE_JD_REQ_EVENT_ONLY_ON_FAILURE ((base_jd_core_req)1 << 12)
+
+/**
+ * SW Flag: If this bit is set then completion of this atom will not cause an
+ * event to be sent to userspace, whether successful or not.
+ */
+#define BASEP_JD_REQ_EVENT_NEVER ((base_jd_core_req)1 << 14)
+
+/**
+ * SW Flag: Skip GPU cache clean and invalidation before starting a GPU job.
+ *
+ * If this bit is set then the GPU's cache will not be cleaned and invalidated
+ * until a GPU job starts which does not have this bit set or a job completes
+ * which does not have the @ref BASE_JD_REQ_SKIP_CACHE_END bit set. Do not use if
+ * the CPU may have written to memory addressed by the job since the last job
+ * without this bit set was submitted.
+ */
+#define BASE_JD_REQ_SKIP_CACHE_START ((base_jd_core_req)1 << 15)
+
+/**
+ * SW Flag: Skip GPU cache clean and invalidation after a GPU job completes.
+ *
+ * If this bit is set then the GPU's cache will not be cleaned and invalidated
+ * until a GPU job completes which does not have this bit set or a job starts
+ * which does not have the @ref BASE_JD_REQ_SKIP_CACHE_START bit set. Do not use if
+ * the CPU may read from or partially overwrite memory addressed by the job
+ * before the next job without this bit set completes.
+ */
+#define BASE_JD_REQ_SKIP_CACHE_END ((base_jd_core_req)1 << 16)
+
+/**
+ * These requirement bits are currently unused in base_jd_core_req
+ */
+#define BASEP_JD_REQ_RESERVED \
+ (~(BASE_JD_REQ_ATOM_TYPE | BASE_JD_REQ_EXTERNAL_RESOURCES | \
+ BASE_JD_REQ_EVENT_ONLY_ON_FAILURE | BASEP_JD_REQ_EVENT_NEVER | \
+ BASE_JD_REQ_EVENT_COALESCE | \
+ BASE_JD_REQ_COHERENT_GROUP | BASE_JD_REQ_SPECIFIC_COHERENT_GROUP | \
+ BASE_JD_REQ_FS_AFBC | BASE_JD_REQ_PERMON | \
+ BASE_JD_REQ_SKIP_CACHE_START | BASE_JD_REQ_SKIP_CACHE_END))
+
+/**
+ * Mask of all bits in base_jd_core_req that control the type of the atom.
+ *
+ * This allows dependency only atoms to have flags set
+ */
+#define BASE_JD_REQ_ATOM_TYPE \
+ (BASE_JD_REQ_FS | BASE_JD_REQ_CS | BASE_JD_REQ_T | BASE_JD_REQ_CF | \
+ BASE_JD_REQ_V | BASE_JD_REQ_SOFT_JOB | BASE_JD_REQ_ONLY_COMPUTE)
+
+/**
+ * Mask of all bits in base_jd_core_req that control the type of a soft job.
+ */
+#define BASE_JD_REQ_SOFT_JOB_TYPE (BASE_JD_REQ_SOFT_JOB | 0x1f)
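+
+/*
+ * Illustrative use of the masks above (not a driver API): test whether
+ * a requirement word names a soft job, and if so which one.
+ *
+ * @code
+ * base_jd_core_req req = BASE_JD_REQ_SOFT_FENCE_WAIT;
+ *
+ * if ((req & BASE_JD_REQ_SOFT_JOB) &&
+ *     (req & BASE_JD_REQ_SOFT_JOB_TYPE) == BASE_JD_REQ_SOFT_FENCE_WAIT) {
+ *         // this is the fence-wait soft job
+ * }
+ * @endcode
+ */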
+
+/**
+ * @brief States to model the state machine processed by kbasep_js_job_check_ref_cores(), which
+ * handles retaining cores for power management and affinity management.
+ *
+ * The state @ref KBASE_ATOM_COREREF_STATE_RECHECK_AFFINITY prevents an attack
+ * where lots of atoms could be submitted before powerup, and each has an
+ * affinity chosen that causes other atoms to have an affinity
+ * violation. Whilst the affinity was not causing violations at the time it
+ * was chosen, it could cause violations thereafter. For example, 1000 jobs
+ * could have had their affinity chosen during the powerup time, so any of
+ * those 1000 jobs could cause an affinity violation later on.
+ *
+ * The attack would otherwise occur because other atoms/contexts have to wait for:
+ * -# the currently running atoms (which are causing the violation) to
+ * finish
+ * -# and, the atoms that had their affinity chosen during powerup to
+ *    finish. These are run preferentially because they don't cause a
+ * violation, but instead continue to cause the violation in others.
+ * -# or, the attacker is scheduled out (which might not happen for just 2
+ * contexts)
+ *
+ * By re-choosing the affinity (which is designed to avoid violations at the
+ * time it's chosen), we break condition (2) of the wait, which minimizes the
+ * problem to just waiting for current jobs to finish (which can be bounded if
+ * the Job Scheduling Policy has a timer).
+ */
+enum kbase_atom_coreref_state {
+ /** Starting state: No affinity chosen, and cores must be requested. kbase_jd_atom::affinity==0 */
+ KBASE_ATOM_COREREF_STATE_NO_CORES_REQUESTED,
+ /** Cores requested, but waiting for them to be powered. Requested cores given by kbase_jd_atom::affinity */
+ KBASE_ATOM_COREREF_STATE_WAITING_FOR_REQUESTED_CORES,
+ /** Cores given by kbase_jd_atom::affinity are powered, but affinity might be out-of-date, so must recheck */
+ KBASE_ATOM_COREREF_STATE_RECHECK_AFFINITY,
+ /** Cores given by kbase_jd_atom::affinity are powered, and affinity is up-to-date, but must check for violations */
+ KBASE_ATOM_COREREF_STATE_CHECK_AFFINITY_VIOLATIONS,
+ /** Cores are powered, kbase_jd_atom::affinity up-to-date, no affinity violations: atom can be submitted to HW */
+ KBASE_ATOM_COREREF_STATE_READY
+};
+
+/*
+ * Base Atom priority
+ *
+ * Only certain priority levels are actually implemented, as specified by the
+ * BASE_JD_PRIO_<...> definitions below. It is undefined to use a priority
+ * level that is not one of those defined below.
+ *
+ * Priority levels only affect scheduling between atoms of the same type within
+ * a base context, and only after the atoms have had dependencies resolved.
+ * Fragment atoms do not affect non-fragment atoms with lower priorities, and
+ * vice versa. For example, a low priority atom that has had its
+ * dependencies resolved might run before a higher priority atom that has not
+ * had its dependencies resolved.
+ *
+ * The scheduling between base contexts/processes and between atoms from
+ * different base contexts/processes is unaffected by atom priority.
+ *
+ * The atoms are scheduled as follows with respect to their priorities:
+ * - Let atoms 'X' and 'Y' be for the same job slot who have dependencies
+ * resolved, and atom 'X' has a higher priority than atom 'Y'
+ * - If atom 'Y' is currently running on the HW, then it is interrupted to
+ * allow atom 'X' to run soon after
+ * - If instead neither atom 'Y' nor atom 'X' are running, then when choosing
+ * the next atom to run, atom 'X' will always be chosen instead of atom 'Y'
+ * - Any two atoms that have the same priority could run in any order with
+ * respect to each other. That is, there is no ordering constraint between
+ * atoms of the same priority.
+ */
+typedef u8 base_jd_prio;
+
+/* Medium atom priority. This is a priority higher than BASE_JD_PRIO_LOW */
+#define BASE_JD_PRIO_MEDIUM ((base_jd_prio)0)
+/* High atom priority. This is a priority higher than BASE_JD_PRIO_MEDIUM and
+ * BASE_JD_PRIO_LOW */
+#define BASE_JD_PRIO_HIGH ((base_jd_prio)1)
+/* Low atom priority. */
+#define BASE_JD_PRIO_LOW ((base_jd_prio)2)
+
+/* Count of the number of priority levels. This itself is not a valid
+ * base_jd_prio setting */
+#define BASE_JD_NR_PRIO_LEVELS 3
+
+enum kbase_jd_atom_state {
+ /** Atom is not used */
+ KBASE_JD_ATOM_STATE_UNUSED,
+ /** Atom is queued in JD */
+ KBASE_JD_ATOM_STATE_QUEUED,
+ /** Atom has been given to JS (is runnable/running) */
+ KBASE_JD_ATOM_STATE_IN_JS,
+ /** Atom has been completed, but not yet handed back to job dispatcher
+ * for dependency resolution */
+ KBASE_JD_ATOM_STATE_HW_COMPLETED,
+ /** Atom has been completed, but not yet handed back to userspace */
+ KBASE_JD_ATOM_STATE_COMPLETED
+};
+
+typedef u8 base_atom_id; /**< Type big enough to store an atom number in */
+
+struct base_dependency {
+ base_atom_id atom_id; /**< An atom number */
+ base_jd_dep_type dependency_type; /**< Dependency type */
+};
+
+/* This structure has changed since UK 10.2, for which base_jd_core_req was a u16 value.
+ * In order to keep the size of the structure the same, the padding field has been adjusted
+ * accordingly and a core_req field of u32 type (which UK 10.3 defines base_jd_core_req to be)
+ * has been added at the end of the structure. The place in the structure previously occupied by
+ * the u16 core_req is kept but renamed to compat_core_req, so it can still be used in the ioctl
+ * call for job submission as long as UK 10.2 legacy is supported. Once this support ends, the
+ * field can be left for possible future use. */
+typedef struct base_jd_atom_v2 {
+ u64 jc; /**< job-chain GPU address */
+ struct base_jd_udata udata; /**< user data */
+ kbase_pointer extres_list; /**< list of external resources */
+ u16 nr_extres; /**< nr of external resources */
+ u16 compat_core_req; /**< core requirements which correspond to the legacy support for UK 10.2 */
+	struct base_dependency pre_dep[2]; /**< pre-dependencies; use the SETTER function to assign this field,
+	 in order to reduce the possibility of improper assignment of a dependency field */
+ base_atom_id atom_number; /**< unique number to identify the atom */
+ base_jd_prio prio; /**< Atom priority. Refer to @ref base_jd_prio for more details */
+ u8 device_nr; /**< coregroup when BASE_JD_REQ_SPECIFIC_COHERENT_GROUP specified */
+ u8 padding[1];
+ base_jd_core_req core_req; /**< core requirements */
+} base_jd_atom_v2;
+
+#ifdef BASE_LEGACY_UK6_SUPPORT
+struct base_jd_atom_v2_uk6 {
+ u64 jc; /**< job-chain GPU address */
+ struct base_jd_udata udata; /**< user data */
+ kbase_pointer extres_list; /**< list of external resources */
+ u16 nr_extres; /**< nr of external resources */
+ u16 core_req; /**< core requirements */
+ base_atom_id pre_dep[2]; /**< pre-dependencies */
+ base_atom_id atom_number; /**< unique number to identify the atom */
+ base_jd_prio prio; /**< priority - smaller is higher priority */
+ u8 device_nr; /**< coregroup when BASE_JD_REQ_SPECIFIC_COHERENT_GROUP specified */
+ u8 padding[7];
+};
+#endif /* BASE_LEGACY_UK6_SUPPORT */
+
+typedef enum base_external_resource_access {
+ BASE_EXT_RES_ACCESS_SHARED,
+ BASE_EXT_RES_ACCESS_EXCLUSIVE
+} base_external_resource_access;
+
+typedef struct base_external_resource {
+ u64 ext_resource;
+} base_external_resource;
+
+
+/**
+ * The maximum number of external resources which can be mapped/unmapped
+ * in a single request.
+ */
+#define BASE_EXT_RES_COUNT_MAX 10
+
+/**
+ * struct base_external_resource_list - Structure which describes a list of
+ * external resources.
+ * @count: The number of resources.
+ * @ext_res: Array of external resources which is
+ * sized at allocation time.
+ */
+struct base_external_resource_list {
+ u64 count;
+ struct base_external_resource ext_res[1];
+};
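+
+/*
+ * Illustrative allocation-size calculation for a list of 'n' external
+ * resources (the variable names are hypothetical); ext_res[] is sized
+ * at allocation time as described above.
+ *
+ * @code
+ * u64 n = 4;
+ * size_t bytes = sizeof(struct base_external_resource_list) +
+ *                (n - 1) * sizeof(struct base_external_resource);
+ * @endcode
+ */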
+
+struct base_jd_debug_copy_buffer {
+ u64 address;
+ u64 size;
+ struct base_external_resource extres;
+};
+
+/**
+ * @brief Setter for a dependency structure
+ *
+ * @param[in] dep The kbase jd atom dependency to be initialized.
+ * @param id The atom_id to be assigned.
+ * @param dep_type The dep_type to be assigned.
+ *
+ */
+static inline void base_jd_atom_dep_set(struct base_dependency *dep,
+ base_atom_id id, base_jd_dep_type dep_type)
+{
+ LOCAL_ASSERT(dep != NULL);
+
+ /*
+	 * make sure we don't set disallowed combinations
+ * of atom_id/dependency_type.
+ */
+ LOCAL_ASSERT((id == 0 && dep_type == BASE_JD_DEP_TYPE_INVALID) ||
+ (id > 0 && dep_type != BASE_JD_DEP_TYPE_INVALID));
+
+ dep->atom_id = id;
+ dep->dependency_type = dep_type;
+}
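+
+/*
+ * Usage sketch (illustrative only): record that an atom must wait for
+ * atom 3 as a data dependency, leaving the second dependency slot
+ * unused.
+ *
+ * @code
+ * struct base_jd_atom_v2 atom;
+ *
+ * base_jd_atom_dep_set(&atom.pre_dep[0], 3, BASE_JD_DEP_TYPE_DATA);
+ * base_jd_atom_dep_set(&atom.pre_dep[1], 0, BASE_JD_DEP_TYPE_INVALID);
+ * @endcode
+ */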
+
+/**
+ * @brief Make a copy of a dependency structure
+ *
+ * @param[in,out] dep The kbase jd atom dependency to be written.
+ * @param[in] from The dependency to make a copy from.
+ *
+ */
+static inline void base_jd_atom_dep_copy(struct base_dependency *dep,
+ const struct base_dependency *from)
+{
+ LOCAL_ASSERT(dep != NULL);
+
+ base_jd_atom_dep_set(dep, from->atom_id, from->dependency_type);
+}
+
+/**
+ * @brief Soft-atom fence trigger setup.
+ *
+ * Sets up an atom to be a SW-only atom signaling a fence
+ * when it reaches the run state.
+ *
+ * Using the existing base dependency system the fence can
+ * be set to trigger when a GPU job has finished.
+ *
+ * The base fence object must not be terminated until the atom
+ * has been submitted to @a base_jd_submit_bag and @a base_jd_submit_bag has returned.
+ *
+ * @a fence must be a valid fence set up with @a base_fence_init.
+ * Calling this function with an uninitialized fence results in undefined behavior.
+ *
+ * @param[out] atom A pre-allocated atom to configure as a fence trigger SW atom
+ * @param[in] fence The base fence object to trigger.
+ */
+static inline void base_jd_fence_trigger_setup_v2(struct base_jd_atom_v2 *atom, struct base_fence *fence)
+{
+ LOCAL_ASSERT(atom);
+ LOCAL_ASSERT(fence);
+ LOCAL_ASSERT(fence->basep.fd == INVALID_PLATFORM_FENCE);
+ LOCAL_ASSERT(fence->basep.stream_fd >= 0);
+ atom->jc = (uintptr_t) fence;
+ atom->core_req = BASE_JD_REQ_SOFT_FENCE_TRIGGER;
+}
+
+/**
+ * @brief Soft-atom fence wait setup.
+ *
+ * Sets up an atom to be a SW-only atom waiting on a fence.
+ * When the fence becomes triggered the atom becomes runnable
+ * and completes immediately.
+ *
+ * Using the existing base dependency system the fence can
+ * be set to block a GPU job until it has been triggered.
+ *
+ * The base fence object must not be terminated until the atom
+ * has been submitted to @a base_jd_submit_bag and @a base_jd_submit_bag has returned.
+ *
+ * @a fence must be a valid fence set up with @a base_fence_init or @a base_fence_import.
+ * Calling this function with an uninitialized fence results in undefined behavior.
+ *
+ * @param[out] atom A pre-allocated atom to configure as a fence wait SW atom
+ * @param[in] fence The base fence object to wait on
+ */
+static inline void base_jd_fence_wait_setup_v2(struct base_jd_atom_v2 *atom, struct base_fence *fence)
+{
+ LOCAL_ASSERT(atom);
+ LOCAL_ASSERT(fence);
+ LOCAL_ASSERT(fence->basep.fd >= 0);
+ atom->jc = (uintptr_t) fence;
+ atom->core_req = BASE_JD_REQ_SOFT_FENCE_WAIT;
+}
+
+/**
+ * @brief External resource info initialization.
+ *
+ * Sets up an external resource object to reference
+ * a memory allocation and the type of access requested.
+ *
+ * @param[in] res The resource object to initialize
+ * @param handle The handle to the imported memory object, must be
+ * obtained by calling @ref base_mem_as_import_handle().
+ * @param access The type of access requested
+ */
+static inline void base_external_resource_init(struct base_external_resource *res, struct base_import_handle handle, base_external_resource_access access)
+{
+ u64 address;
+
+ address = handle.basep.handle;
+
+ LOCAL_ASSERT(res != NULL);
+ LOCAL_ASSERT(0 == (address & LOCAL_PAGE_LSB));
+ LOCAL_ASSERT(access == BASE_EXT_RES_ACCESS_SHARED || access == BASE_EXT_RES_ACCESS_EXCLUSIVE);
+
+ res->ext_resource = address | (access & LOCAL_PAGE_LSB);
+}
+
+/**
+ * @brief Job chain event code bits
+ * Defines the bits used to create ::base_jd_event_code
+ */
+enum {
+ BASE_JD_SW_EVENT_KERNEL = (1u << 15), /**< Kernel side event */
+ BASE_JD_SW_EVENT = (1u << 14), /**< SW defined event */
+	BASE_JD_SW_EVENT_SUCCESS = (1u << 13), /**< Event indicates success (SW events only) */
+ BASE_JD_SW_EVENT_JOB = (0u << 11), /**< Job related event */
+ BASE_JD_SW_EVENT_BAG = (1u << 11), /**< Bag related event */
+ BASE_JD_SW_EVENT_INFO = (2u << 11), /**< Misc/info event */
+ BASE_JD_SW_EVENT_RESERVED = (3u << 11), /**< Reserved event type */
+ BASE_JD_SW_EVENT_TYPE_MASK = (3u << 11) /**< Mask to extract the type from an event code */
+};
+
+/**
+ * @brief Job chain event codes
+ *
+ * HW and low-level SW events are represented by event codes.
+ * The status of jobs which succeeded are also represented by
+ * an event code (see ::BASE_JD_EVENT_DONE).
+ * Events are usually reported as part of a ::base_jd_event.
+ *
+ * The event codes are encoded in the following way:
+ * @li 10:0 - subtype
+ * @li 12:11 - type
+ * @li 13 - SW success (only valid if the SW bit is set)
+ * @li 14 - SW event (HW event if not set)
+ * @li 15 - Kernel event (should never be seen in userspace)
+ *
+ * Events are split up into ranges as follows:
+ * - BASE_JD_EVENT_RANGE_\<description\>_START
+ * - BASE_JD_EVENT_RANGE_\<description\>_END
+ *
+ * \a code is in \<description\>'s range when:
+ * - <tt>BASE_JD_EVENT_RANGE_\<description\>_START <= code < BASE_JD_EVENT_RANGE_\<description\>_END </tt>
+ *
+ * Ranges can be asserted for adjacency by testing that the END of the previous
+ * is equal to the START of the next. This is useful for optimizing some tests
+ * for range.
+ *
+ * A limitation is that the last member of this enum must explicitly be handled
+ * (with an assert-unreachable statement) in switch statements that use
+ * variables of this type. Otherwise, the compiler warns that we have not
+ * handled that enum value.
+ */
+typedef enum base_jd_event_code {
+ /* HW defined exceptions */
+
+ /** Start of HW Non-fault status codes
+ *
+ * @note Obscurely, BASE_JD_EVENT_TERMINATED indicates a real fault,
+ * because the job was hard-stopped
+ */
+ BASE_JD_EVENT_RANGE_HW_NONFAULT_START = 0,
+
+ /* non-fatal exceptions */
+ BASE_JD_EVENT_NOT_STARTED = 0x00, /**< Can't be seen by userspace, treated as 'previous job done' */
+ BASE_JD_EVENT_DONE = 0x01,
+ BASE_JD_EVENT_STOPPED = 0x03, /**< Can't be seen by userspace, becomes TERMINATED, DONE or JOB_CANCELLED */
+ BASE_JD_EVENT_TERMINATED = 0x04, /**< This is actually a fault status code - the job was hard stopped */
+ BASE_JD_EVENT_ACTIVE = 0x08, /**< Can't be seen by userspace, jobs only returned on complete/fail/cancel */
+
+ /** End of HW Non-fault status codes
+ *
+ * @note Obscurely, BASE_JD_EVENT_TERMINATED indicates a real fault,
+ * because the job was hard-stopped
+ */
+ BASE_JD_EVENT_RANGE_HW_NONFAULT_END = 0x40,
+
+ /** Start of HW fault and SW Error status codes */
+ BASE_JD_EVENT_RANGE_HW_FAULT_OR_SW_ERROR_START = 0x40,
+
+ /* job exceptions */
+ BASE_JD_EVENT_JOB_CONFIG_FAULT = 0x40,
+ BASE_JD_EVENT_JOB_POWER_FAULT = 0x41,
+ BASE_JD_EVENT_JOB_READ_FAULT = 0x42,
+ BASE_JD_EVENT_JOB_WRITE_FAULT = 0x43,
+ BASE_JD_EVENT_JOB_AFFINITY_FAULT = 0x44,
+ BASE_JD_EVENT_JOB_BUS_FAULT = 0x48,
+ BASE_JD_EVENT_INSTR_INVALID_PC = 0x50,
+ BASE_JD_EVENT_INSTR_INVALID_ENC = 0x51,
+ BASE_JD_EVENT_INSTR_TYPE_MISMATCH = 0x52,
+ BASE_JD_EVENT_INSTR_OPERAND_FAULT = 0x53,
+ BASE_JD_EVENT_INSTR_TLS_FAULT = 0x54,
+ BASE_JD_EVENT_INSTR_BARRIER_FAULT = 0x55,
+ BASE_JD_EVENT_INSTR_ALIGN_FAULT = 0x56,
+ BASE_JD_EVENT_DATA_INVALID_FAULT = 0x58,
+ BASE_JD_EVENT_TILE_RANGE_FAULT = 0x59,
+ BASE_JD_EVENT_STATE_FAULT = 0x5A,
+ BASE_JD_EVENT_OUT_OF_MEMORY = 0x60,
+ BASE_JD_EVENT_UNKNOWN = 0x7F,
+
+ /* GPU exceptions */
+ BASE_JD_EVENT_DELAYED_BUS_FAULT = 0x80,
+ BASE_JD_EVENT_SHAREABILITY_FAULT = 0x88,
+
+ /* MMU exceptions */
+ BASE_JD_EVENT_TRANSLATION_FAULT_LEVEL1 = 0xC1,
+ BASE_JD_EVENT_TRANSLATION_FAULT_LEVEL2 = 0xC2,
+ BASE_JD_EVENT_TRANSLATION_FAULT_LEVEL3 = 0xC3,
+ BASE_JD_EVENT_TRANSLATION_FAULT_LEVEL4 = 0xC4,
+ BASE_JD_EVENT_PERMISSION_FAULT = 0xC8,
+ BASE_JD_EVENT_TRANSTAB_BUS_FAULT_LEVEL1 = 0xD1,
+ BASE_JD_EVENT_TRANSTAB_BUS_FAULT_LEVEL2 = 0xD2,
+ BASE_JD_EVENT_TRANSTAB_BUS_FAULT_LEVEL3 = 0xD3,
+ BASE_JD_EVENT_TRANSTAB_BUS_FAULT_LEVEL4 = 0xD4,
+ BASE_JD_EVENT_ACCESS_FLAG = 0xD8,
+
+ /* SW defined exceptions */
+ BASE_JD_EVENT_MEM_GROWTH_FAILED = BASE_JD_SW_EVENT | BASE_JD_SW_EVENT_JOB | 0x000,
+ BASE_JD_EVENT_TIMED_OUT = BASE_JD_SW_EVENT | BASE_JD_SW_EVENT_JOB | 0x001,
+ BASE_JD_EVENT_JOB_CANCELLED = BASE_JD_SW_EVENT | BASE_JD_SW_EVENT_JOB | 0x002,
+ BASE_JD_EVENT_JOB_INVALID = BASE_JD_SW_EVENT | BASE_JD_SW_EVENT_JOB | 0x003,
+ BASE_JD_EVENT_PM_EVENT = BASE_JD_SW_EVENT | BASE_JD_SW_EVENT_JOB | 0x004,
+ BASE_JD_EVENT_FORCE_REPLAY = BASE_JD_SW_EVENT | BASE_JD_SW_EVENT_JOB | 0x005,
+
+ BASE_JD_EVENT_BAG_INVALID = BASE_JD_SW_EVENT | BASE_JD_SW_EVENT_BAG | 0x003,
+
+ /** End of HW fault and SW Error status codes */
+ BASE_JD_EVENT_RANGE_HW_FAULT_OR_SW_ERROR_END = BASE_JD_SW_EVENT | BASE_JD_SW_EVENT_RESERVED | 0x3FF,
+
+ /** Start of SW Success status codes */
+ BASE_JD_EVENT_RANGE_SW_SUCCESS_START = BASE_JD_SW_EVENT | BASE_JD_SW_EVENT_SUCCESS | 0x000,
+
+ BASE_JD_EVENT_PROGRESS_REPORT = BASE_JD_SW_EVENT | BASE_JD_SW_EVENT_SUCCESS | BASE_JD_SW_EVENT_JOB | 0x000,
+ BASE_JD_EVENT_BAG_DONE = BASE_JD_SW_EVENT | BASE_JD_SW_EVENT_SUCCESS | BASE_JD_SW_EVENT_BAG | 0x000,
+ BASE_JD_EVENT_DRV_TERMINATED = BASE_JD_SW_EVENT | BASE_JD_SW_EVENT_SUCCESS | BASE_JD_SW_EVENT_INFO | 0x000,
+
+ /** End of SW Success status codes */
+ BASE_JD_EVENT_RANGE_SW_SUCCESS_END = BASE_JD_SW_EVENT | BASE_JD_SW_EVENT_SUCCESS | BASE_JD_SW_EVENT_RESERVED | 0x3FF,
+
+ /** Start of Kernel-only status codes. Such codes are never returned to user-space */
+ BASE_JD_EVENT_RANGE_KERNEL_ONLY_START = BASE_JD_SW_EVENT | BASE_JD_SW_EVENT_KERNEL | 0x000,
+ BASE_JD_EVENT_REMOVED_FROM_NEXT = BASE_JD_SW_EVENT | BASE_JD_SW_EVENT_KERNEL | BASE_JD_SW_EVENT_JOB | 0x000,
+
+ /** End of Kernel-only status codes. */
+ BASE_JD_EVENT_RANGE_KERNEL_ONLY_END = BASE_JD_SW_EVENT | BASE_JD_SW_EVENT_KERNEL | BASE_JD_SW_EVENT_RESERVED | 0x3FF
+} base_jd_event_code;
+
+/**
+ * @brief Event reporting structure
+ *
+ * This structure is used by the kernel driver to report information
+ * about GPU events. These can either be HW-specific events or low-level
+ * SW events, such as job-chain completion.
+ *
+ * The event code contains an event type field which can be extracted
+ * by ANDing with ::BASE_JD_SW_EVENT_TYPE_MASK.
+ *
+ * Based on the event type base_jd_event::data holds:
+ * @li ::BASE_JD_SW_EVENT_JOB : the offset in the ring-buffer for the completed
+ * job-chain
+ * @li ::BASE_JD_SW_EVENT_BAG : The address of the ::base_jd_bag that has
+ * been completed (i.e. all contained job-chains have been completed).
+ * @li ::BASE_JD_SW_EVENT_INFO : base_jd_event::data not used
+ */
+typedef struct base_jd_event_v2 {
+ base_jd_event_code event_code; /**< event code */
+ base_atom_id atom_number; /**< the atom number that has completed */
+ struct base_jd_udata udata; /**< user data */
+} base_jd_event_v2;
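+
+/*
+ * Illustrative decoding of a reported event (not a driver API): check
+ * whether it is a SW event and, if so, extract its type with
+ * ::BASE_JD_SW_EVENT_TYPE_MASK.
+ *
+ * @code
+ * struct base_jd_event_v2 ev;
+ *
+ * ev.event_code = BASE_JD_EVENT_BAG_DONE;
+ * if (ev.event_code & BASE_JD_SW_EVENT) {
+ *         u32 type = ev.event_code & BASE_JD_SW_EVENT_TYPE_MASK;
+ *         // type == BASE_JD_SW_EVENT_BAG for this code
+ * }
+ * @endcode
+ */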
+
+/**
+ * Padding required to ensure that the @ref struct base_dump_cpu_gpu_counters structure fills
+ * a full cache line.
+ */
+
+#define BASE_CPU_GPU_CACHE_LINE_PADDING (36)
+
+
+/**
+ * @brief Structure for BASE_JD_REQ_SOFT_DUMP_CPU_GPU_COUNTERS jobs.
+ *
+ * This structure is stored into the memory pointed to by the @c jc field of @ref base_jd_atom.
+ *
+ * This structure must be padded to ensure that it will occupy whole cache lines. This is to avoid
+ * cases where access to pages containing the structure is shared between cached and un-cached
+ * memory regions, which would cause memory corruption. Here we set the structure size to 64 bytes,
+ * which is the cache line size for ARM A15 processors.
+ */
+
+typedef struct base_dump_cpu_gpu_counters {
+ u64 system_time;
+ u64 cycle_counter;
+ u64 sec;
+ u32 usec;
+ u8 padding[BASE_CPU_GPU_CACHE_LINE_PADDING];
+} base_dump_cpu_gpu_counters;
+
+
+
+/** @} end group base_user_api_job_dispatch */
+
+#define GPU_MAX_JOB_SLOTS 16
+
+/**
+ * @page page_base_user_api_gpuprops User-side Base GPU Property Query API
+ *
+ * The User-side Base GPU Property Query API encapsulates two
+ * sub-modules:
+ *
+ * - @ref base_user_api_gpuprops_dyn "Dynamic GPU Properties"
+ * - @ref base_plat_config_gpuprops "Base Platform Config GPU Properties"
+ *
+ * There is a related third module outside of Base, which is owned by the MIDG
+ * module:
+ * - @ref gpu_props_static "Midgard Compile-time GPU Properties"
+ *
+ * Base only deals with properties that vary between different Midgard
+ * implementations - the Dynamic GPU properties and the Platform Config
+ * properties.
+ *
+ * For properties that are constant for the Midgard Architecture, refer to the
+ * MIDG module. However, we will discuss their relevance here <b>just to
+ * provide background information.</b>
+ *
+ * @section sec_base_user_api_gpuprops_about About the GPU Properties in Base and MIDG modules
+ *
+ * The compile-time properties (Platform Config, Midgard Compile-time
+ * properties) are exposed as pre-processor macros.
+ *
+ * Complementing the compile-time properties are the Dynamic GPU
+ * Properties, which act as a conduit for the Midgard Configuration
+ * Discovery.
+ *
+ * In general, the dynamic properties are present to verify that the platform
+ * has been configured correctly with the right set of Platform Config
+ * Compile-time Properties.
+ *
+ * As a consistent guide across the entire DDK, the choice for dynamic or
+ * compile-time should consider the following, in order:
+ * -# Can the code be written so that it doesn't need to know the
+ * implementation limits at all?
+ * -# If you need the limits, get the information from the Dynamic Property
+ * lookup. This should be done once as you fetch the context, and then cached
+ * as part of the context data structure, so it's cheap to access.
+ * -# If there's a clear and arguable inefficiency in using Dynamic Properties,
+ * then use a Compile-Time Property (Platform Config, or Midgard Compile-time
+ * property). Examples of where this might be sensible follow:
+ * - Part of a critical inner-loop
+ * - Frequent re-use throughout the driver, causing significant extra load
+ * instructions or control flow that would be worthwhile optimizing out.
+ *
+ * We cannot provide an exhaustive set of examples, nor can we provide a
+ * rule for every possible situation. Use common sense, and think about: what
+ * the rest of the driver will be doing; how the compiler might represent the
+ * value if it is a compile-time constant; whether an OEM shipping multiple
+ * devices would benefit much more from a single DDK binary, instead of
+ * insignificant micro-optimizations.
+ *
+ * @section sec_base_user_api_gpuprops_dyn Dynamic GPU Properties
+ *
+ * Dynamic GPU properties are presented in two sets:
+ * -# the commonly used properties in @ref base_gpu_props, which have been
+ * unpacked from GPU register bitfields.
+ * -# The full set of raw, unprocessed properties in @ref gpu_raw_gpu_props
+ * (also a member of @ref base_gpu_props). All of these are presented in
+ * the packed form, as presented by the GPU registers themselves.
+ *
+ * @usecase The raw properties in @ref gpu_raw_gpu_props are necessary to
+ * allow a user of the Mali Tools (e.g. PAT) to determine "Why is this device
+ * behaving differently?". In this case, all information about the
+ * configuration is potentially useful, but it <b>does not need to be processed
+ * by the driver</b>. Instead, the raw registers can be processed by the Mali
+ * Tools software on the host PC.
+ *
+ * The properties returned extend the Midgard Configuration Discovery
+ * registers. For example, GPU clock speed is not specified in the Midgard
+ * Architecture, but is <b>necessary for OpenCL's clGetDeviceInfo() function</b>.
+ *
+ * The GPU properties are obtained by a call to
+ * _mali_base_get_gpu_props(). This simply returns a pointer to a const
+ * base_gpu_props structure. It is constant for the life of a base
+ * context. Multiple calls to _mali_base_get_gpu_props() to a base context
+ * return the same pointer to a constant structure. This avoids cache pollution
+ * of the common data.
+ *
+ * This pointer must not be freed, because it does not point to the start of a
+ * region allocated by the memory allocator; instead, just close the @ref
+ * base_context.
+ *
+ *
+ * @section sec_base_user_api_gpuprops_config Platform Config Compile-time Properties
+ *
+ * The Platform Config File sets up gpu properties that are specific to a
+ * certain platform. Properties that are 'Implementation Defined' in the
+ * Midgard Architecture spec are placed here.
+ *
+ * @note Reference configurations are provided for Midgard Implementations, such as
+ * the Mali-T600 family. The customer need not repeat this information, and can select one of
+ * these reference configurations. For example, VA_BITS, PA_BITS and the
+ * maximum number of samples per pixel might vary between Midgard Implementations, but
+ * \b not for platforms using the Mali-T604. This information is placed in
+ * the reference configuration files.
+ *
+ * The System Integrator creates the following structure:
+ * - platform_XYZ
+ * - platform_XYZ/plat
+ * - platform_XYZ/plat/plat_config.h
+ *
+ * They then edit plat_config.h, using the example plat_config.h files as a
+ * guide.
+ *
+ * At the very least, the customer must set @ref CONFIG_GPU_CORE_TYPE, and will
+ * receive a helpful \#error message if they do not do this correctly. This
+ * selects the Reference Configuration for the Midgard Implementation. The rationale
+ * behind this decision (against asking the customer to write \#include
+ * <gpus/mali_t600.h> in their plat_config.h) is as follows:
+ * - This mechanism 'looks' like a regular config file (such as Linux's
+ * .config)
+ * - It is difficult to get wrong in a way that will produce strange build
+ * errors:
+ * - They need not know where the mali_t600.h, other_midg_gpu.h etc. files are stored - and
+ * so they won't accidentally pick another file with 'mali_t600' in its name
+ * - When the build doesn't work, the System Integrator may think the DDK
+ * doesn't work, and attempt to fix it themselves:
+ * - For the @ref CONFIG_GPU_CORE_TYPE mechanism, the only way to get past the
+ * error is to set @ref CONFIG_GPU_CORE_TYPE, and this is what the \#error tells
+ * you.
+ * - For a \#include mechanism, checks must still be made elsewhere, which the
+ * System Integrator may try working around by setting \#defines (such as
+ * VA_BITS) themselves in their plat_config.h. In the worst case, they may
+ * set the prevention-mechanism \#define of
+ * "A_CORRECT_MIDGARD_CORE_WAS_CHOSEN".
+ * - In this case, they would believe they are on the right track, because
+ * the build progresses with their fix, but with errors elsewhere.
+ *
+ * However, there is nothing to prevent the customer using \#include to organize
+ * their own configurations files hierarchically.
+ *
+ * The mechanism for the header file processing is as follows:
+ *
+ * @dot
+ digraph plat_config_mechanism {
+ rankdir=BT
+ size="6,6"
+
+ "mali_base.h";
+ "gpu/mali_gpu.h";
+
+ node [ shape=box ];
+ {
+ rank = same; ordering = out;
+
+ "gpu/mali_gpu_props.h";
+ "base/midg_gpus/mali_t600.h";
+ "base/midg_gpus/other_midg_gpu.h";
+ }
+ { rank = same; "plat/plat_config.h"; }
+ {
+ rank = same;
+ "gpu/mali_gpu.h" [ shape=box ];
+ gpu_chooser [ label="" style="invisible" width=0 height=0 fixedsize=true ];
+ select_gpu [ label="Mali-T600 | Other\n(select_gpu.h)" shape=polygon,sides=4,distortion=0.25 width=3.3 height=0.99 fixedsize=true ] ;
+ }
+ node [ shape=box ];
+ { rank = same; "plat/plat_config.h"; }
+ { rank = same; "mali_base.h"; }
+
+ "mali_base.h" -> "gpu/mali_gpu.h" -> "gpu/mali_gpu_props.h";
+ "mali_base.h" -> "plat/plat_config.h" ;
+ "mali_base.h" -> select_gpu ;
+
+ "plat/plat_config.h" -> gpu_chooser [style="dotted,bold" dir=none weight=4] ;
+ gpu_chooser -> select_gpu [style="dotted,bold"] ;
+
+ select_gpu -> "base/midg_gpus/mali_t600.h" ;
+ select_gpu -> "base/midg_gpus/other_midg_gpu.h" ;
+ }
+ @enddot
+ *
+ *
+ * @section sec_base_user_api_gpuprops_kernel Kernel Operation
+ *
+ * During Base Context Create time, user-side makes a single kernel call:
+ * - A call to fill user memory with GPU information structures
+ *
+ * The kernel side will fill the entire provided @ref base_gpu_props
+ * structure with processed data, because this information is required on both the
+ * user and kernel side; it does not make sense to decode it twice.
+ *
+ * Coherency groups must be derived from the bitmasks, but this can be done
+ * kernel side, and just once at kernel startup: Coherency groups must already
+ * be known kernel-side, to support chains that specify a 'Only Coherent Group'
+ * SW requirement, or 'Only Coherent Group with Tiler' SW requirement.
+ *
+ * @section sec_base_user_api_gpuprops_cocalc Coherency Group calculation
+ * Creation of the coherent group data is done at device-driver startup, and so
+ * is one-time. This will most likely involve a loop with CLZ, shifting, and
+ * bit clearing on the L2_PRESENT mask, depending on whether the
+ * system is L2 Coherent. The number of shader cores is done by a
+ * population count, since faulty cores may be disabled during production,
+ * producing a non-contiguous mask.
+ *
+ * The memory requirements for this algorithm can be determined either by a u64
+ * population count on the L2_PRESENT mask (a LUT helper is already
+ * required for the above), or by the simple assumption that there can be no more than
+ * 16 coherent groups, since core groups are typically 4 cores.
+ */
+
+/**
+ * @addtogroup base_user_api_gpuprops User-side Base GPU Property Query APIs
+ * @{
+ */
+
+/**
+ * @addtogroup base_user_api_gpuprops_dyn Dynamic HW Properties
+ * @{
+ */
+
+#define BASE_GPU_NUM_TEXTURE_FEATURES_REGISTERS 3
+
+#define BASE_MAX_COHERENT_GROUPS 16
+
+struct mali_base_gpu_core_props {
+ /**
+ * Product specific value.
+ */
+ u32 product_id;
+
+ /**
+ * Status of the GPU release.
+ * No defined values, but starts at 0 and increases by one for each release
+ * status (alpha, beta, EAC, etc.).
+ * 4 bit values (0-15).
+ */
+ u16 version_status;
+
+ /**
+ * Minor release number of the GPU. "P" part of an "RnPn" release number.
+ * 8 bit values (0-255).
+ */
+ u16 minor_revision;
+
+ /**
+ * Major release number of the GPU. "R" part of an "RnPn" release number.
+ * 4 bit values (0-15).
+ */
+ u16 major_revision;
+
+ u16 padding;
+
+ /**
+ * @usecase GPU clock speed is not specified in the Midgard Architecture, but is
+ * <b>necessary for OpenCL's clGetDeviceInfo() function</b>.
+ */
+ u32 gpu_speed_mhz;
+
+ /**
+ * @usecase GPU clock max/min speed is required for computing best/worst case
+ * in tasks such as job scheduling and irq_throttling. (It is not specified in the
+ * Midgard Architecture).
+ */
+ u32 gpu_freq_khz_max;
+ u32 gpu_freq_khz_min;
+
+ /**
+ * Size of the shader program counter, in bits.
+ */
+ u32 log2_program_counter_size;
+
+ /**
+ * TEXTURE_FEATURES_x registers, as exposed by the GPU. This is a
+ * bitpattern where a set bit indicates that the format is supported.
+ *
+ * Before using a texture format, it is recommended that the corresponding
+ * bit be checked.
+ */
+ u32 texture_features[BASE_GPU_NUM_TEXTURE_FEATURES_REGISTERS];
+
+ /**
+ * Theoretical maximum memory available to the GPU. It is unlikely that a
+ * client will be able to allocate all of this memory for their own
+ * purposes, but this at least provides an upper bound on the memory
+ * available to the GPU.
+ *
+ * This is required for OpenCL's clGetDeviceInfo() call when
+ * CL_DEVICE_GLOBAL_MEM_SIZE is requested, for OpenCL GPU devices. The
+ * client will not be expecting to allocate anywhere near this value.
+ */
+ u64 gpu_available_memory_size;
+};
+
+/**
+ *
+ * More information is possible - but associativity and bus width are not
+ * required by upper-level APIs.
+ */
+struct mali_base_gpu_l2_cache_props {
+ u8 log2_line_size;
+ u8 log2_cache_size;
+ u8 num_l2_slices; /* Number of L2C slices. 1 or higher */
+ u8 padding[5];
+};
+
+struct mali_base_gpu_tiler_props {
+ u32 bin_size_bytes; /* Max is 4*2^15 */
+ u32 max_active_levels; /* Max is 2^15 */
+};
+
+/**
+ * GPU threading system details.
+ */
+struct mali_base_gpu_thread_props {
+ u32 max_threads; /* Max. number of threads per core */
+ u32 max_workgroup_size; /* Max. number of threads per workgroup */
+ u32 max_barrier_size; /* Max. number of threads that can synchronize on a simple barrier */
+ u16 max_registers; /* Total size [1..65535] of the register file available per core. */
+ u8 max_task_queue; /* Max. tasks [1..255] which may be sent to a core before it becomes blocked. */
+ u8 max_thread_group_split; /* Max. allowed value [1..15] of the Thread Group Split field. */
+ u8 impl_tech; /* 0 = Not specified, 1 = Silicon, 2 = FPGA, 3 = SW Model/Emulation */
+ u8 padding[7];
+};
+
+/**
+ * @brief descriptor for a coherent group
+ *
+ * \c core_mask exposes all cores in that coherent group, and \c num_cores
+ * provides a cached population-count for that mask.
+ *
+ * @note Whilst all cores are exposed in the mask, not all may be available to
+ * the application, depending on the Kernel Power policy.
+ *
+ * @note If u64s must be 8-byte aligned, then this structure has 32 bits of wastage.
+ */
+struct mali_base_gpu_coherent_group {
+ u64 core_mask; /**< Core restriction mask required for the group */
+ u16 num_cores; /**< Number of cores in the group */
+ u16 padding[3];
+};
+
+/**
+ * @brief Coherency group information
+ *
+ * Note that the sizes of the members could be reduced. However, the \c group
+ * member might be 8-byte aligned to ensure the u64 core_mask is 8-byte
+ * aligned, thus leading to wastage if the other members sizes were reduced.
+ *
+ * The groups are sorted by core mask. The core masks are non-repeating and do
+ * not intersect.
+ */
+struct mali_base_gpu_coherent_group_info {
+ u32 num_groups;
+
+ /**
+ * Number of core groups (coherent or not) in the GPU. Equivalent to the number of L2 Caches.
+ *
+ * The GPU Counter dumping writes 2048 bytes per core group, regardless of
+ * whether the core groups are coherent or not. Hence this member is needed
+ * to calculate how much memory is required for dumping.
+ *
+ * @note Do not use it to work out how many valid elements are in the
+ * group[] member. Use num_groups instead.
+ */
+ u32 num_core_groups;
+
+ /**
+ * Coherency features of the memory, accessed by @ref gpu_mem_features
+ * methods
+ */
+ u32 coherency;
+
+ u32 padding;
+
+ /**
+ * Descriptors of coherent groups
+ */
+ struct mali_base_gpu_coherent_group group[BASE_MAX_COHERENT_GROUPS];
+};
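+
+/*
+ * Illustrative calculation (not a driver API): as described above, the
+ * counter dump size follows from num_core_groups at 2048 bytes per core
+ * group ('info' would be obtained from the GPU properties).
+ *
+ * @code
+ * struct mali_base_gpu_coherent_group_info info = { .num_core_groups = 2 };
+ * size_t dump_bytes = (size_t)info.num_core_groups * 2048;
+ * @endcode
+ */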
+
+/**
+ * A complete description of the GPU's Hardware Configuration Discovery
+ * registers.
+ *
+ * The information is presented inefficiently for access. For frequent access,
+ * the values should be better expressed in an unpacked form in the
+ * base_gpu_props structure.
+ *
+ * @usecase The raw properties in @ref gpu_raw_gpu_props are necessary to
+ * allow a user of the Mali Tools (e.g. PAT) to determine "Why is this device
+ * behaving differently?". In this case, all information about the
+ * configuration is potentially useful, but it <b>does not need to be processed
+ * by the driver</b>. Instead, the raw registers can be processed by the Mali
+ * Tools software on the host PC.
+ *
+ */
+struct gpu_raw_gpu_props {
+ u64 shader_present;
+ u64 tiler_present;
+ u64 l2_present;
+ u64 unused_1; /* keep for backward compatibility */
+
+ u32 l2_features;
+ u32 suspend_size; /* API 8.2+ */
+ u32 mem_features;
+ u32 mmu_features;
+
+ u32 as_present;
+
+ u32 js_present;
+ u32 js_features[GPU_MAX_JOB_SLOTS];
+ u32 tiler_features;
+ u32 texture_features[3];
+
+ u32 gpu_id;
+
+ u32 thread_max_threads;
+ u32 thread_max_workgroup_size;
+ u32 thread_max_barrier_size;
+ u32 thread_features;
+
+ /*
+ * Note: This is the _selected_ coherency mode rather than the
+ * available modes as exposed in the coherency_features register.
+ */
+ u32 coherency_mode;
+};
+
+/**
+ * Return structure for _mali_base_get_gpu_props().
+ *
+ * NOTE: the raw_props member in this datastructure contains the register
+ * values from which the value of the other members are derived. The derived
+ * members exist to allow for efficient access and/or shielding the details
+ * of the layout of the registers.
+ *
+ */
+typedef struct mali_base_gpu_props {
+ struct mali_base_gpu_core_props core_props;
+ struct mali_base_gpu_l2_cache_props l2_props;
+ u64 unused_1; /* keep for backwards compatibility */
+ struct mali_base_gpu_tiler_props tiler_props;
+ struct mali_base_gpu_thread_props thread_props;
+
+ /** This member is large, likely to be 128 bytes */
+ struct gpu_raw_gpu_props raw_props;
+
+ /** This must be last member of the structure */
+ struct mali_base_gpu_coherent_group_info coherency_info;
+} base_gpu_props;
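+
+/*
+ * Usage sketch (illustrative only): query the properties once, as
+ * described in the page comment above, and test a texture format bit;
+ * the register index and bit number used here are hypothetical.
+ *
+ * @code
+ * const base_gpu_props *props = _mali_base_get_gpu_props();
+ * int supported = (props->core_props.texture_features[0] >> 7) & 1;
+ * @endcode
+ */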
+
+/** @} end group base_user_api_gpuprops_dyn */
+
+/** @} end group base_user_api_gpuprops */
+
+/**
+ * @addtogroup base_user_api_core User-side Base core APIs
+ * @{
+ */
+
+/**
+ * \enum base_context_create_flags
+ *
+ * Flags to pass to ::base_context_init.
+ * Flags can be ORed together to enable multiple things.
+ *
+ * These share the same space as BASEP_CONTEXT_FLAG_*, and so must
+ * not collide with them.
+ */
+enum base_context_create_flags {
+ /** No flags set */
+ BASE_CONTEXT_CREATE_FLAG_NONE = 0,
+
+ /** Base context is embedded in a cctx object (flag used for CINSTR software counter macros) */
+ BASE_CONTEXT_CCTX_EMBEDDED = (1u << 0),
+
+ /** Base context is a 'System Monitor' context for Hardware counters.
+ *
+ * One important side effect of this is that job submission is disabled. */
+ BASE_CONTEXT_SYSTEM_MONITOR_SUBMIT_DISABLED = (1u << 1)
+};
+
+/**
+ * Bitpattern describing the ::base_context_create_flags that can be passed to base_context_init()
+ */
+#define BASE_CONTEXT_CREATE_ALLOWED_FLAGS \
+ (((u32)BASE_CONTEXT_CCTX_EMBEDDED) | \
+ ((u32)BASE_CONTEXT_SYSTEM_MONITOR_SUBMIT_DISABLED))
+
+/**
+ * Bitpattern describing the ::base_context_create_flags that can be passed to the kernel
+ */
+#define BASE_CONTEXT_CREATE_KERNEL_FLAGS \
+ ((u32)BASE_CONTEXT_SYSTEM_MONITOR_SUBMIT_DISABLED)
+
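+/*
+ * Illustrative sketch (editorial addition): rejecting unsupported context
+ * creation flags using the masks defined above. 'flags' stands for a
+ * hypothetical value received from user space.
+ *
+ *	if (flags & ~BASE_CONTEXT_CREATE_ALLOWED_FLAGS)
+ *		return -EINVAL;
+ */
+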
+/*
+ * Private flags used on the base context
+ *
+ * These start at bit 31, and run down to zero.
+ *
+ * They share the same space as @ref base_context_create_flags, and so must
+ * not collide with them.
+ */
+/** Private flag tracking whether job descriptor dumping is disabled */
+#define BASEP_CONTEXT_FLAG_JOB_DUMP_DISABLED ((u32)(1 << 31))
+
+/** @} end group base_user_api_core */
+
+/** @} end group base_user_api */
+
+/**
+ * @addtogroup base_plat_config_gpuprops Base Platform Config GPU Properties
+ * @{
+ *
+ * C Pre-processor macros relating to the Platform Config are exposed
+ * here.
+ *
+ * These include:
+ * - GPU Properties that are constant on a particular Midgard Family
+ * Implementation e.g. Maximum samples per pixel on Mali-T600.
+ * - General platform config for the GPU, such as the GPU major and minor
+ *   revision.
+ */
+
+/** @} end group base_plat_config_gpuprops */
+
+/**
+ * @addtogroup base_api Base APIs
+ * @{
+ */
+
+/**
+ * @brief The payload for a replay job. This must be in GPU memory.
+ */
+typedef struct base_jd_replay_payload {
+ /**
+ * Pointer to the first entry in the base_jd_replay_jc list. These
+ * will be replayed in @b reverse order (so that extra ones can be added
+ * to the head in future soft jobs without affecting this soft job)
+ */
+ u64 tiler_jc_list;
+
+ /**
+ * Pointer to the fragment job chain.
+ */
+ u64 fragment_jc;
+
+ /**
+ * Pointer to the tiler heap free FBD field to be modified.
+ */
+ u64 tiler_heap_free;
+
+ /**
+ * Hierarchy mask for the replayed fragment jobs. May be zero.
+ */
+ u16 fragment_hierarchy_mask;
+
+ /**
+ * Hierarchy mask for the replayed tiler jobs. May be zero.
+ */
+ u16 tiler_hierarchy_mask;
+
+ /**
+ * Default weight to be used for hierarchy levels not in the original
+ * mask.
+ */
+ u32 hierarchy_default_weight;
+
+ /**
+ * Core requirements for the tiler job chain
+ */
+ base_jd_core_req tiler_core_req;
+
+ /**
+ * Core requirements for the fragment job chain
+ */
+ base_jd_core_req fragment_core_req;
+} base_jd_replay_payload;
+
+#ifdef BASE_LEGACY_UK10_2_SUPPORT
+typedef struct base_jd_replay_payload_uk10_2 {
+ u64 tiler_jc_list;
+ u64 fragment_jc;
+ u64 tiler_heap_free;
+ u16 fragment_hierarchy_mask;
+ u16 tiler_hierarchy_mask;
+ u32 hierarchy_default_weight;
+ u16 tiler_core_req;
+ u16 fragment_core_req;
+ u8 padding[4];
+} base_jd_replay_payload_uk10_2;
+#endif /* BASE_LEGACY_UK10_2_SUPPORT */
+
+/**
+ * @brief An entry in the linked list of job chains to be replayed. This must
+ * be in GPU memory.
+ */
+typedef struct base_jd_replay_jc {
+ /**
+ * Pointer to next entry in the list. A setting of NULL indicates the
+ * end of the list.
+ */
+ u64 next;
+
+ /**
+ * Pointer to the job chain.
+ */
+ u64 jc;
+
+} base_jd_replay_jc;
+
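+/*
+ * Illustrative sketch (editorial addition): adding a new entry at the head of
+ * the tiler_jc_list of a replay payload. Because the list is replayed in
+ * reverse order (see base_jd_replay_payload above), prepending does not change
+ * the order in which the existing chains are replayed. 'payload' and
+ * 'new_entry' stand for hypothetical CPU mappings of objects that live in GPU
+ * memory, and 'new_entry_gpu_va'/'job_chain_gpu_va' for their GPU addresses.
+ *
+ *	new_entry->next = payload->tiler_jc_list;
+ *	new_entry->jc = job_chain_gpu_va;
+ *	payload->tiler_jc_list = new_entry_gpu_va;
+ */
+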
+/* Maximum number of jobs allowed in a fragment chain in the payload of a
+ * replay job */
+#define BASE_JD_REPLAY_F_CHAIN_JOB_LIMIT 256
+
+/** @} end group base_api */
+
+typedef struct base_profiling_controls {
+ u32 profiling_controls[FBDUMP_CONTROL_MAX];
+} base_profiling_controls;
+
+/* Enable additional tracepoints for latency measurements (TL_ATOM_READY,
+ * TL_ATOM_DONE, TL_ATOM_PRIO_CHANGE, TL_ATOM_EVENT_POST) */
+#define BASE_TLSTREAM_ENABLE_LATENCY_TRACEPOINTS (1 << 0)
+
+#define BASE_TLSTREAM_FLAGS_MASK (BASE_TLSTREAM_ENABLE_LATENCY_TRACEPOINTS)
+
+#endif /* _BASE_KERNEL_H_ */
diff --git a/midgard-0.r2p0/mali_base_kernel_sync.h b/midgard-0.r2p0/mali_base_kernel_sync.h
new file mode 100755
index 0000000..a24791f
--- /dev/null
+++ b/midgard-0.r2p0/mali_base_kernel_sync.h
@@ -0,0 +1,47 @@
+/*
+ *
+ * (C) COPYRIGHT 2012-2013 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained
+ * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
+ * Boston, MA 02110-1301, USA.
+ *
+ */
+
+
+
+
+
+/**
+ * @file
+ * Base cross-process sync API.
+ */
+
+#ifndef _BASE_KERNEL_SYNC_H_
+#define _BASE_KERNEL_SYNC_H_
+
+#include <linux/ioctl.h>
+
+#define STREAM_IOC_MAGIC '~'
+
+/* Fence insert.
+ *
+ * Inserts a fence on the stream operated on.
+ * The fence can be waited on via a base fence wait soft-job
+ * or triggered via a base fence trigger soft-job.
+ *
+ * Fences must be cleaned up with close when no longer needed.
+ *
+ * No input/output arguments.
+ * Returns
+ * >=0 fd
+ * <0 error code
+ */
+#define STREAM_IOC_FENCE_INSERT _IO(STREAM_IOC_MAGIC, 0)
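+
+/*
+ * Illustrative sketch (editorial addition): user-side use of the ioctl above.
+ * 'stream_fd' stands for a hypothetical, already open stream file descriptor.
+ *
+ *	int fence_fd = ioctl(stream_fd, STREAM_IOC_FENCE_INSERT);
+ *
+ *	if (fence_fd < 0)
+ *		... handle the error ...
+ *	else
+ *		... use fence_fd, then close() it when no longer needed ...
+ */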
+
+#endif /* _BASE_KERNEL_SYNC_H_ */
diff --git a/midgard-0.r2p0/mali_base_mem_priv.h b/midgard-0.r2p0/mali_base_mem_priv.h
new file mode 100755
index 0000000..4a98a72
--- /dev/null
+++ b/midgard-0.r2p0/mali_base_mem_priv.h
@@ -0,0 +1,52 @@
+/*
+ *
+ * (C) COPYRIGHT 2010-2014 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained
+ * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
+ * Boston, MA 02110-1301, USA.
+ *
+ */
+
+
+
+
+
+#ifndef _BASE_MEM_PRIV_H_
+#define _BASE_MEM_PRIV_H_
+
+#define BASE_SYNCSET_OP_MSYNC (1U << 0)
+#define BASE_SYNCSET_OP_CSYNC (1U << 1)
+
+/*
+ * This structure describes a basic memory coherency operation.
+ * It can either be:
+ * @li a sync from CPU to Memory:
+ * - type = ::BASE_SYNCSET_OP_MSYNC
+ * - mem_handle = a handle to the memory object on which the operation
+ * is taking place
+ * - user_addr = the address of the range to be synced
+ * - size = the amount of data to be synced, in bytes
+ * - offset is ignored.
+ * @li a sync from Memory to CPU:
+ * - type = ::BASE_SYNCSET_OP_CSYNC
+ * - mem_handle = a handle to the memory object on which the operation
+ * is taking place
+ * - user_addr = the address of the range to be synced
+ * - size = the amount of data to be synced, in bytes.
+ * - offset is ignored.
+ */
+struct basep_syncset {
+ base_mem_handle mem_handle;
+ u64 user_addr;
+ u64 size;
+ u8 type;
+ u8 padding[7];
+};
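+
+/*
+ * Illustrative sketch (editorial addition): describing a CPU-to-memory sync
+ * over a whole buffer, following the field usage documented above. 'handle',
+ * 'cpu_addr' and 'buf_size' stand for hypothetical values describing an
+ * existing mapping.
+ *
+ *	struct basep_syncset sset = {
+ *		.mem_handle = handle,
+ *		.user_addr = (u64)(uintptr_t)cpu_addr,
+ *		.size = buf_size,
+ *		.type = BASE_SYNCSET_OP_MSYNC,
+ *	};
+ */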
+
+#endif
diff --git a/midgard-0.r2p0/mali_base_vendor_specific_func.h b/midgard-0.r2p0/mali_base_vendor_specific_func.h
new file mode 100755
index 0000000..be454a2
--- /dev/null
+++ b/midgard-0.r2p0/mali_base_vendor_specific_func.h
@@ -0,0 +1,24 @@
+/*
+ *
+ * (C) COPYRIGHT 2010, 2012-2013, 2015 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained
+ * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
+ * Boston, MA 02110-1301, USA.
+ *
+ */
+
+
+
+
+#ifndef _BASE_VENDOR_SPEC_FUNC_H_
+#define _BASE_VENDOR_SPEC_FUNC_H_
+
+int kbase_get_vendor_specific_cpu_clock_speed(u32 * const);
+
+#endif /*_BASE_VENDOR_SPEC_FUNC_H_*/
diff --git a/midgard-0.r2p0/mali_kbase.h b/midgard-0.r2p0/mali_kbase.h
new file mode 100755
index 0000000..443d4b1
--- /dev/null
+++ b/midgard-0.r2p0/mali_kbase.h
@@ -0,0 +1,607 @@
+/*
+ *
+ * (C) COPYRIGHT 2010-2016 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained
+ * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
+ * Boston, MA 02110-1301, USA.
+ *
+ */
+
+
+
+
+
+#ifndef _KBASE_H_
+#define _KBASE_H_
+
+#include <mali_malisw.h>
+
+#include <mali_kbase_debug.h>
+
+#include <asm/page.h>
+
+#include <linux/atomic.h>
+#include <linux/highmem.h>
+#include <linux/hrtimer.h>
+#include <linux/ktime.h>
+#include <linux/list.h>
+#include <linux/mm_types.h>
+#include <linux/mutex.h>
+#include <linux/rwsem.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/vmalloc.h>
+#include <linux/wait.h>
+#include <linux/workqueue.h>
+
+#include "mali_base_kernel.h"
+#include <mali_kbase_uku.h>
+#include <mali_kbase_linux.h>
+
+/*
+ * Include mali_kbase_defs.h first as this provides types needed by other local
+ * header files.
+ */
+#include "mali_kbase_defs.h"
+
+#include "mali_kbase_context.h"
+#include "mali_kbase_strings.h"
+#include "mali_kbase_mem_lowlevel.h"
+#include "mali_kbase_trace_timeline.h"
+#include "mali_kbase_js.h"
+#include "mali_kbase_mem.h"
+#include "mali_kbase_utility.h"
+#include "mali_kbase_gpu_memory_debugfs.h"
+#include "mali_kbase_mem_profile_debugfs.h"
+#include "mali_kbase_debug_job_fault.h"
+#include "mali_kbase_jd_debugfs.h"
+#include "mali_kbase_gpuprops.h"
+#include "mali_kbase_jm.h"
+#include "mali_kbase_vinstr.h"
+#include "mali_kbase_ipa.h"
+#ifdef CONFIG_GPU_TRACEPOINTS
+#include <trace/events/gpu.h>
+#endif
+/**
+ * @page page_base_kernel_main Kernel-side Base (KBase) APIs
+ *
+ * The Kernel-side Base (KBase) APIs are divided up as follows:
+ * - @subpage page_kbase_js_policy
+ */
+
+/**
+ * @defgroup base_kbase_api Kernel-side Base (KBase) APIs
+ */
+
+struct kbase_device *kbase_device_alloc(void);
+/*
+ * note: the configuration attributes member of kbdev needs to have
+ * been set up before calling kbase_device_init()
+ */
+
+/*
+ * API to acquire the device list semaphore and return a pointer
+ * to the device list head
+ */
+const struct list_head *kbase_dev_list_get(void);
+/* API to release the device list semaphore */
+void kbase_dev_list_put(const struct list_head *dev_list);
+
+int kbase_device_init(struct kbase_device * const kbdev);
+void kbase_device_term(struct kbase_device *kbdev);
+void kbase_device_free(struct kbase_device *kbdev);
+int kbase_device_has_feature(struct kbase_device *kbdev, u32 feature);
+
+/* Needed for gator integration and for reporting vsync information */
+struct kbase_device *kbase_find_device(int minor);
+void kbase_release_device(struct kbase_device *kbdev);
+
+void kbase_set_profiling_control(struct kbase_device *kbdev, u32 control, u32 value);
+
+u32 kbase_get_profiling_control(struct kbase_device *kbdev, u32 control);
+
+struct kbase_context *
+kbase_create_context(struct kbase_device *kbdev, bool is_compat);
+void kbase_destroy_context(struct kbase_context *kctx);
+
+int kbase_jd_init(struct kbase_context *kctx);
+void kbase_jd_exit(struct kbase_context *kctx);
+#ifdef BASE_LEGACY_UK6_SUPPORT
+int kbase_jd_submit(struct kbase_context *kctx,
+ const struct kbase_uk_job_submit *submit_data,
+ int uk6_atom);
+#else
+int kbase_jd_submit(struct kbase_context *kctx,
+ const struct kbase_uk_job_submit *submit_data);
+#endif
+
+/**
+ * kbase_jd_done_worker - Handle a job completion
+ * @data: a &struct work_struct
+ *
+ * This function requeues the job from the runpool (if it was soft-stopped or
+ * removed from NEXT registers).
+ *
+ * Removes it from the system if it finished/failed/was cancelled.
+ *
+ * Resolves dependencies to add dependent jobs to the context, potentially
+ * starting them if necessary (which may add more references to the context)
+ *
+ * Releases the reference to the context from the no-longer-running job.
+ *
+ * Handles retrying submission outside of IRQ context if it failed from within
+ * IRQ context.
+ */
+void kbase_jd_done_worker(struct work_struct *data);
+
+void kbase_jd_done(struct kbase_jd_atom *katom, int slot_nr, ktime_t *end_timestamp,
+ kbasep_js_atom_done_code done_code);
+void kbase_jd_cancel(struct kbase_device *kbdev, struct kbase_jd_atom *katom);
+void kbase_jd_zap_context(struct kbase_context *kctx);
+bool jd_done_nolock(struct kbase_jd_atom *katom,
+ struct list_head *completed_jobs_ctx);
+void kbase_jd_free_external_resources(struct kbase_jd_atom *katom);
+bool jd_submit_atom(struct kbase_context *kctx,
+ const struct base_jd_atom_v2 *user_atom,
+ struct kbase_jd_atom *katom);
+void kbase_jd_dep_clear_locked(struct kbase_jd_atom *katom);
+
+void kbase_job_done(struct kbase_device *kbdev, u32 done);
+
+void kbase_gpu_cacheclean(struct kbase_device *kbdev,
+ struct kbase_jd_atom *katom);
+/**
+ * kbase_job_slot_ctx_priority_check_locked(): - Check for lower priority atoms
+ * and soft stop them
+ * @kctx: Pointer to context to check.
+ * @katom: Pointer to priority atom.
+ *
+ * Atoms from @kctx on the same job slot as @katom, which have lower priority
+ * than @katom will be soft stopped and put back in the queue, so that atoms
+ * with higher priority can run.
+ *
+ * The hwaccess_lock must be held when calling this function.
+ */
+void kbase_job_slot_ctx_priority_check_locked(struct kbase_context *kctx,
+ struct kbase_jd_atom *katom);
+
+void kbase_job_slot_softstop(struct kbase_device *kbdev, int js,
+ struct kbase_jd_atom *target_katom);
+void kbase_job_slot_softstop_swflags(struct kbase_device *kbdev, int js,
+ struct kbase_jd_atom *target_katom, u32 sw_flags);
+void kbase_job_slot_hardstop(struct kbase_context *kctx, int js,
+ struct kbase_jd_atom *target_katom);
+void kbase_job_check_enter_disjoint(struct kbase_device *kbdev, u32 action,
+ base_jd_core_req core_reqs, struct kbase_jd_atom *target_katom);
+void kbase_job_check_leave_disjoint(struct kbase_device *kbdev,
+ struct kbase_jd_atom *target_katom);
+
+void kbase_event_post(struct kbase_context *ctx, struct kbase_jd_atom *event);
+int kbase_event_dequeue(struct kbase_context *ctx, struct base_jd_event_v2 *uevent);
+int kbase_event_pending(struct kbase_context *ctx);
+int kbase_event_init(struct kbase_context *kctx);
+void kbase_event_close(struct kbase_context *kctx);
+void kbase_event_cleanup(struct kbase_context *kctx);
+void kbase_event_wakeup(struct kbase_context *kctx);
+
+int kbase_process_soft_job(struct kbase_jd_atom *katom);
+int kbase_prepare_soft_job(struct kbase_jd_atom *katom);
+void kbase_finish_soft_job(struct kbase_jd_atom *katom);
+void kbase_cancel_soft_job(struct kbase_jd_atom *katom);
+void kbase_resume_suspended_soft_jobs(struct kbase_device *kbdev);
+void kbasep_add_waiting_soft_job(struct kbase_jd_atom *katom);
+void kbasep_remove_waiting_soft_job(struct kbase_jd_atom *katom);
+int kbase_soft_event_update(struct kbase_context *kctx,
+ u64 event,
+ unsigned char new_status);
+
+bool kbase_replay_process(struct kbase_jd_atom *katom);
+
+void kbasep_soft_job_timeout_worker(unsigned long data);
+void kbasep_complete_triggered_soft_events(struct kbase_context *kctx, u64 evt);
+
+/* API used internally for register access. Contains validation and tracing */
+void kbase_device_trace_register_access(struct kbase_context *kctx, enum kbase_reg_access_type type, u16 reg_offset, u32 reg_value);
+int kbase_device_trace_buffer_install(
+ struct kbase_context *kctx, u32 *tb, size_t size);
+void kbase_device_trace_buffer_uninstall(struct kbase_context *kctx);
+
+/* API to be ported per OS; only needs to do the raw register access */
+void kbase_os_reg_write(struct kbase_device *kbdev, u16 offset, u32 value);
+u32 kbase_os_reg_read(struct kbase_device *kbdev, u16 offset);
+
+void kbasep_as_do_poke(struct work_struct *work);
+
+/** Returns the name associated with a Mali exception code
+ *
+ * This function is called from the interrupt handler when a GPU fault occurs.
+ * It reports the details of the fault using KBASE_DEBUG_PRINT_WARN.
+ *
+ * @param[in] kbdev The kbase device that the GPU fault occurred from.
+ * @param[in] exception_code exception code
+ * @return name associated with the exception code
+ */
+const char *kbase_exception_name(struct kbase_device *kbdev,
+ u32 exception_code);
+
+/**
+ * Check whether a system suspend is in progress, or the system has already been suspended
+ *
+ * The caller should ensure that either kbdev->pm.active_count_lock is held, or
+ * a dmb was executed recently (to ensure the value is most
+ * up-to-date). However, without a lock the value could change afterwards.
+ *
+ * @return false if a suspend is not in progress
+ * @return !=false otherwise
+ */
+static inline bool kbase_pm_is_suspending(struct kbase_device *kbdev)
+{
+ return kbdev->pm.suspending;
+}
+
+/**
+ * Return the atom's ID, as was originally supplied by userspace in
+ * base_jd_atom_v2::atom_number
+ */
+static inline int kbase_jd_atom_id(struct kbase_context *kctx, struct kbase_jd_atom *katom)
+{
+ int result;
+
+ KBASE_DEBUG_ASSERT(kctx);
+ KBASE_DEBUG_ASSERT(katom);
+ KBASE_DEBUG_ASSERT(katom->kctx == kctx);
+
+ result = katom - &kctx->jctx.atoms[0];
+ KBASE_DEBUG_ASSERT(result >= 0 && result <= BASE_JD_ATOM_COUNT);
+ return result;
+}
+
+/**
+ * kbase_jd_atom_from_id - Return the atom structure for the given atom ID
+ * @kctx: Context pointer
+ * @id: ID of atom to retrieve
+ *
+ * Return: Pointer to struct kbase_jd_atom associated with the supplied ID
+ */
+static inline struct kbase_jd_atom *kbase_jd_atom_from_id(
+ struct kbase_context *kctx, int id)
+{
+ return &kctx->jctx.atoms[id];
+}
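+
+/*
+ * Illustrative sketch (editorial addition): the two helpers above are inverses
+ * of each other for atoms belonging to a context. 'kctx' and 'katom' stand for
+ * hypothetical valid pointers.
+ *
+ *	int id = kbase_jd_atom_id(kctx, katom);
+ *
+ *	WARN_ON(kbase_jd_atom_from_id(kctx, id) != katom);
+ */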
+
+/**
+ * Initialize the disjoint state
+ *
+ * The disjoint event count and state are both set to zero.
+ *
+ * Disjoint functions usage:
+ *
+ * The disjoint event count should be incremented whenever a disjoint event occurs.
+ *
+ * There are several cases which are regarded as disjoint behavior. Rather than just incrementing
+ * the counter during disjoint events, we also increment the counter when jobs may be affected
+ * by what the GPU is currently doing. To facilitate this we have the concept of disjoint state.
+ *
+ * Disjoint state is entered during GPU reset and for the entire time that an atom is replaying
+ * (as part of the replay workaround). Increasing the disjoint state also increases the count of
+ * disjoint events.
+ *
+ * The disjoint state is then used to increase the count of disjoint events during job submission
+ * and job completion. Any atom submitted or completed while the disjoint state is greater than
+ * zero is regarded as a disjoint event.
+ *
+ * The disjoint event counter is also incremented immediately whenever a job is soft stopped
+ * and during context creation.
+ *
+ * @param kbdev The kbase device
+ */
+void kbase_disjoint_init(struct kbase_device *kbdev);
+
+/**
+ * Increase the count of disjoint events
+ * called when a disjoint event has happened
+ *
+ * @param kbdev The kbase device
+ */
+void kbase_disjoint_event(struct kbase_device *kbdev);
+
+/**
+ * Increase the count of disjoint events only if the GPU is in a disjoint state
+ *
+ * This should be called when something happens which could be disjoint if the GPU
+ * is in a disjoint state. The state refcount keeps track of this.
+ *
+ * @param kbdev The kbase device
+ */
+void kbase_disjoint_event_potential(struct kbase_device *kbdev);
+
+/**
+ * Returns the count of disjoint events
+ *
+ * @param kbdev The kbase device
+ * @return the count of disjoint events
+ */
+u32 kbase_disjoint_event_get(struct kbase_device *kbdev);
+
+/**
+ * Increment the refcount state indicating that the GPU is in a disjoint state.
+ *
+ * Also increments the disjoint event count (calls @ref kbase_disjoint_event).
+ * Eventually, after the disjoint state has completed,
+ * @ref kbase_disjoint_state_down should be called.
+ *
+ * @param kbdev The kbase device
+ */
+void kbase_disjoint_state_up(struct kbase_device *kbdev);
+
+/**
+ * Decrement the refcount state
+ *
+ * Also increments the disjoint event count (calls @ref kbase_disjoint_event)
+ *
+ * Called after @ref kbase_disjoint_state_up once the disjoint state is over
+ *
+ * @param kbdev The kbase device
+ */
+void kbase_disjoint_state_down(struct kbase_device *kbdev);
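+
+/*
+ * Illustrative sketch (editorial addition): bracketing a period of disjoint
+ * behaviour (such as a GPU reset, as described above) so that atoms submitted
+ * or completed in between are counted as disjoint events.
+ *
+ *	kbase_disjoint_state_up(kbdev);
+ *	... perform the disruptive operation ...
+ *	kbase_disjoint_state_down(kbdev);
+ */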
+
+/**
+ * If a job is soft stopped and the number of contexts is >= this value
+ * it is reported as a disjoint event
+ */
+#define KBASE_DISJOINT_STATE_INTERLEAVED_CONTEXT_COUNT_THRESHOLD 2
+
+#if !defined(UINT64_MAX)
+ #define UINT64_MAX ((uint64_t)0xFFFFFFFFFFFFFFFFULL)
+#endif
+
+#if KBASE_TRACE_ENABLE
+void kbasep_trace_debugfs_init(struct kbase_device *kbdev);
+
+#ifndef CONFIG_MALI_SYSTEM_TRACE
+/** Add trace values about a job-slot
+ *
+ * @note Any functions called through this macro will still be evaluated in
+ * Release builds (CONFIG_MALI_DEBUG not defined). Therefore, when KBASE_TRACE_ENABLE == 0 any
+ * functions called to get the parameters supplied to this macro must:
+ * - be static or static inline
+ * - must just return 0 and have no other statements present in the body.
+ */
+#define KBASE_TRACE_ADD_SLOT(kbdev, code, ctx, katom, gpu_addr, jobslot) \
+ kbasep_trace_add(kbdev, KBASE_TRACE_CODE(code), ctx, katom, gpu_addr, \
+ KBASE_TRACE_FLAG_JOBSLOT, 0, jobslot, 0)
+
+/** Add trace values about a job-slot, with info
+ *
+ * @note Any functions called through this macro will still be evaluated in
+ * Release builds (CONFIG_MALI_DEBUG not defined). Therefore, when KBASE_TRACE_ENABLE == 0 any
+ * functions called to get the parameters supplied to this macro must:
+ * - be static or static inline
+ * - must just return 0 and have no other statements present in the body.
+ */
+#define KBASE_TRACE_ADD_SLOT_INFO(kbdev, code, ctx, katom, gpu_addr, jobslot, info_val) \
+ kbasep_trace_add(kbdev, KBASE_TRACE_CODE(code), ctx, katom, gpu_addr, \
+ KBASE_TRACE_FLAG_JOBSLOT, 0, jobslot, info_val)
+
+/** Add trace values about a ctx refcount
+ *
+ * @note Any functions called through this macro will still be evaluated in
+ * Release builds (CONFIG_MALI_DEBUG not defined). Therefore, when KBASE_TRACE_ENABLE == 0 any
+ * functions called to get the parameters supplied to this macro must:
+ * - be static or static inline
+ * - must just return 0 and have no other statements present in the body.
+ */
+#define KBASE_TRACE_ADD_REFCOUNT(kbdev, code, ctx, katom, gpu_addr, refcount) \
+ kbasep_trace_add(kbdev, KBASE_TRACE_CODE(code), ctx, katom, gpu_addr, \
+ KBASE_TRACE_FLAG_REFCOUNT, refcount, 0, 0)
+/** Add trace values about a ctx refcount, and info
+ *
+ * @note Any functions called through this macro will still be evaluated in
+ * Release builds (CONFIG_MALI_DEBUG not defined). Therefore, when KBASE_TRACE_ENABLE == 0 any
+ * functions called to get the parameters supplied to this macro must:
+ * - be static or static inline
+ * - must just return 0 and have no other statements present in the body.
+ */
+#define KBASE_TRACE_ADD_REFCOUNT_INFO(kbdev, code, ctx, katom, gpu_addr, refcount, info_val) \
+ kbasep_trace_add(kbdev, KBASE_TRACE_CODE(code), ctx, katom, gpu_addr, \
+ KBASE_TRACE_FLAG_REFCOUNT, refcount, 0, info_val)
+
+/** Add trace values (no slot or refcount)
+ *
+ * @note Any functions called through this macro will still be evaluated in
+ * Release builds (CONFIG_MALI_DEBUG not defined). Therefore, when KBASE_TRACE_ENABLE == 0 any
+ * functions called to get the parameters supplied to this macro must:
+ * - be static or static inline
+ * - must just return 0 and have no other statements present in the body.
+ */
+#define KBASE_TRACE_ADD(kbdev, code, ctx, katom, gpu_addr, info_val) \
+ kbasep_trace_add(kbdev, KBASE_TRACE_CODE(code), ctx, katom, gpu_addr, \
+ 0, 0, 0, info_val)
+
+/** Clear the trace */
+#define KBASE_TRACE_CLEAR(kbdev) \
+ kbasep_trace_clear(kbdev)
+
+/** Dump the slot trace */
+#define KBASE_TRACE_DUMP(kbdev) \
+ kbasep_trace_dump(kbdev)
+
+/** PRIVATE - do not use directly. Use KBASE_TRACE_ADD() instead */
+void kbasep_trace_add(struct kbase_device *kbdev, enum kbase_trace_code code, void *ctx, struct kbase_jd_atom *katom, u64 gpu_addr, u8 flags, int refcount, int jobslot, unsigned long info_val);
+/** PRIVATE - do not use directly. Use KBASE_TRACE_CLEAR() instead */
+void kbasep_trace_clear(struct kbase_device *kbdev);
+#else /* #ifndef CONFIG_MALI_SYSTEM_TRACE */
+/* Dispatch kbase trace events as system trace events */
+#include <mali_linux_kbase_trace.h>
+#define KBASE_TRACE_ADD_SLOT(kbdev, code, ctx, katom, gpu_addr, jobslot)\
+ trace_mali_##code(jobslot, 0)
+
+#define KBASE_TRACE_ADD_SLOT_INFO(kbdev, code, ctx, katom, gpu_addr, jobslot, info_val)\
+ trace_mali_##code(jobslot, info_val)
+
+#define KBASE_TRACE_ADD_REFCOUNT(kbdev, code, ctx, katom, gpu_addr, refcount)\
+ trace_mali_##code(refcount, 0)
+
+#define KBASE_TRACE_ADD_REFCOUNT_INFO(kbdev, code, ctx, katom, gpu_addr, refcount, info_val)\
+ trace_mali_##code(refcount, info_val)
+
+#define KBASE_TRACE_ADD(kbdev, code, ctx, katom, gpu_addr, info_val)\
+ trace_mali_##code(gpu_addr, info_val)
+
+#define KBASE_TRACE_CLEAR(kbdev)\
+ do {\
+ CSTD_UNUSED(kbdev);\
+ CSTD_NOP(0);\
+ } while (0)
+#define KBASE_TRACE_DUMP(kbdev)\
+ do {\
+ CSTD_UNUSED(kbdev);\
+ CSTD_NOP(0);\
+ } while (0)
+
+#endif /* #ifndef CONFIG_MALI_SYSTEM_TRACE */
+#else
+#define KBASE_TRACE_ADD_SLOT(kbdev, code, ctx, katom, gpu_addr, jobslot)\
+ do {\
+ CSTD_UNUSED(kbdev);\
+ CSTD_NOP(code);\
+ CSTD_UNUSED(ctx);\
+ CSTD_UNUSED(katom);\
+ CSTD_UNUSED(gpu_addr);\
+ CSTD_UNUSED(jobslot);\
+ } while (0)
+
+#define KBASE_TRACE_ADD_SLOT_INFO(kbdev, code, ctx, katom, gpu_addr, jobslot, info_val)\
+ do {\
+ CSTD_UNUSED(kbdev);\
+ CSTD_NOP(code);\
+ CSTD_UNUSED(ctx);\
+ CSTD_UNUSED(katom);\
+ CSTD_UNUSED(gpu_addr);\
+ CSTD_UNUSED(jobslot);\
+ CSTD_UNUSED(info_val);\
+ CSTD_NOP(0);\
+ } while (0)
+
+#define KBASE_TRACE_ADD_REFCOUNT(kbdev, code, ctx, katom, gpu_addr, refcount)\
+ do {\
+ CSTD_UNUSED(kbdev);\
+ CSTD_NOP(code);\
+ CSTD_UNUSED(ctx);\
+ CSTD_UNUSED(katom);\
+ CSTD_UNUSED(gpu_addr);\
+ CSTD_UNUSED(refcount);\
+ CSTD_NOP(0);\
+ } while (0)
+
+#define KBASE_TRACE_ADD_REFCOUNT_INFO(kbdev, code, ctx, katom, gpu_addr, refcount, info_val)\
+ do {\
+ CSTD_UNUSED(kbdev);\
+ CSTD_NOP(code);\
+ CSTD_UNUSED(ctx);\
+ CSTD_UNUSED(katom);\
+ CSTD_UNUSED(gpu_addr);\
+ CSTD_UNUSED(info_val);\
+ CSTD_NOP(0);\
+ } while (0)
+
+#define KBASE_TRACE_ADD(kbdev, code, subcode, ctx, katom, val)\
+ do {\
+ CSTD_UNUSED(kbdev);\
+ CSTD_NOP(code);\
+ CSTD_UNUSED(subcode);\
+ CSTD_UNUSED(ctx);\
+ CSTD_UNUSED(katom);\
+ CSTD_UNUSED(val);\
+ CSTD_NOP(0);\
+ } while (0)
+
+#define KBASE_TRACE_CLEAR(kbdev)\
+ do {\
+ CSTD_UNUSED(kbdev);\
+ CSTD_NOP(0);\
+ } while (0)
+#define KBASE_TRACE_DUMP(kbdev)\
+ do {\
+ CSTD_UNUSED(kbdev);\
+ CSTD_NOP(0);\
+ } while (0)
+#endif /* KBASE_TRACE_ENABLE */
+/** PRIVATE - do not use directly. Use KBASE_TRACE_DUMP() instead */
+void kbasep_trace_dump(struct kbase_device *kbdev);
+
+#ifdef CONFIG_MALI_DEBUG
+/**
+ * kbase_set_driver_inactive - Force driver to go inactive
+ * @kbdev: Device pointer
+ * @inactive: true if driver should go inactive, false otherwise
+ *
+ * Forcing the driver inactive will cause all future IOCTLs to wait until the
+ * driver is made active again. This is intended solely for the use of tests
+ * which require that no jobs are running while the test executes.
+ */
+void kbase_set_driver_inactive(struct kbase_device *kbdev, bool inactive);
+#endif /* CONFIG_MALI_DEBUG */
+
+
+#if defined(CONFIG_DEBUG_FS) && !defined(CONFIG_MALI_NO_MALI)
+
+/* kbase_io_history_init - initialize data struct for register access history
+ *
+ * @h The register history to initialize
+ * @n The number of register accesses that the buffer could hold
+ *
+ * @return 0 if successfully initialized, failure otherwise
+ */
+int kbase_io_history_init(struct kbase_io_history *h, u16 n);
+
+/* kbase_io_history_term - uninit all resources for the register access history
+ *
+ * @h The register history to terminate
+ */
+void kbase_io_history_term(struct kbase_io_history *h);
+
+/* kbase_io_history_dump - print the register history to the kernel ring buffer
+ *
+ * @kbdev Pointer to kbase_device containing the register history to dump
+ */
+void kbase_io_history_dump(struct kbase_device *kbdev);
+
+/**
+ * kbase_io_history_resize - resize the register access history buffer.
+ *
+ * @h: Pointer to a valid register history to resize
+ * @new_size: Number of accesses the buffer could hold
+ *
+ * A successful resize will clear all recent register accesses.
+ * If resizing fails for any reason (e.g., could not allocate memory, invalid
+ * buffer size) then the original buffer will be kept intact.
+ *
+ * @return 0 if the buffer was resized, failure otherwise
+ */
+int kbase_io_history_resize(struct kbase_io_history *h, u16 new_size);
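+
+/*
+ * Illustrative sketch (editorial addition): typical lifetime of a register
+ * access history buffer using the functions above. 'h' and 'kbdev' stand for
+ * hypothetical pointers; 256 entries is an arbitrary example size.
+ *
+ *	if (kbase_io_history_init(h, 256))
+ *		... handle the failure ...
+ *
+ *	... on an error of interest ...
+ *	kbase_io_history_dump(kbdev);
+ *
+ *	... on shutdown ...
+ *	kbase_io_history_term(h);
+ */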
+
+#else /* CONFIG_DEBUG_FS */
+
+#define kbase_io_history_init(...) ((int)0)
+
+#define kbase_io_history_term CSTD_NOP
+
+#define kbase_io_history_dump CSTD_NOP
+
+#define kbase_io_history_resize CSTD_NOP
+
+#endif /* CONFIG_DEBUG_FS */
+
+
+#endif
+
+
+
diff --git a/midgard-0.r2p0/mali_kbase_10969_workaround.c b/midgard-0.r2p0/mali_kbase_10969_workaround.c
new file mode 100755
index 0000000..933aa28
--- /dev/null
+++ b/midgard-0.r2p0/mali_kbase_10969_workaround.c
@@ -0,0 +1,209 @@
+/*
+ *
+ * (C) COPYRIGHT 2013-2015 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained
+ * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
+ * Boston, MA 02110-1301, USA.
+ *
+ */
+
+
+#include <linux/dma-mapping.h>
+#include <mali_kbase.h>
+#include <mali_kbase_10969_workaround.h>
+
+/* This function is used to solve an HW issue with single iterator GPUs.
+ * If a fragment job is soft-stopped on the edge of its bounding box, it can happen that the
+ * restart index is out of bounds and the rerun causes a tile range fault. If this happens,
+ * we try to clamp the restart index to a correct value and rerun the job.
+ */
+/* Mask of X and Y coordinates for the coordinate words in the descriptors */
+#define X_COORDINATE_MASK 0x00000FFF
+#define Y_COORDINATE_MASK 0x0FFF0000
+/* Max number of words needed from the fragment shader job descriptor */
+#define JOB_HEADER_SIZE_IN_WORDS 10
+#define JOB_HEADER_SIZE (JOB_HEADER_SIZE_IN_WORDS*sizeof(u32))
+
+/* Word 0: Status Word */
+#define JOB_DESC_STATUS_WORD 0
+/* Word 1: Restart Index */
+#define JOB_DESC_RESTART_INDEX_WORD 1
+/* Word 2: Fault address low word */
+#define JOB_DESC_FAULT_ADDR_LOW_WORD 2
+/* Word 8: Minimum Tile Coordinates */
+#define FRAG_JOB_DESC_MIN_TILE_COORD_WORD 8
+/* Word 9: Maximum Tile Coordinates */
+#define FRAG_JOB_DESC_MAX_TILE_COORD_WORD 9
+
+int kbasep_10969_workaround_clamp_coordinates(struct kbase_jd_atom *katom)
+{
+ struct device *dev = katom->kctx->kbdev->dev;
+ u32 clamped = 0;
+ struct kbase_va_region *region;
+ phys_addr_t *page_array;
+ u64 page_index;
+ u32 offset = katom->jc & (~PAGE_MASK);
+ u32 *page_1 = NULL;
+ u32 *page_2 = NULL;
+ u32 job_header[JOB_HEADER_SIZE_IN_WORDS];
+ void *dst = job_header;
+ u32 minX, minY, maxX, maxY;
+ u32 restartX, restartY;
+ struct page *p;
+ u32 copy_size;
+
+ dev_warn(dev, "Called TILE_RANGE_FAULT workaround clamping function.\n");
+ if (!(katom->core_req & BASE_JD_REQ_FS))
+ return 0;
+
+ kbase_gpu_vm_lock(katom->kctx);
+ region = kbase_region_tracker_find_region_enclosing_address(katom->kctx,
+ katom->jc);
+ if (!region || (region->flags & KBASE_REG_FREE))
+ goto out_unlock;
+
+ page_array = kbase_get_cpu_phy_pages(region);
+ if (!page_array)
+ goto out_unlock;
+
+ page_index = (katom->jc >> PAGE_SHIFT) - region->start_pfn;
+
+ p = pfn_to_page(PFN_DOWN(page_array[page_index]));
+
+	/* we need the first 10 words of the fragment shader job descriptor.
+	 * We need to check that the offset + 10 words is less than the page
+	 * size, otherwise we need to load the next page.
+	 * copy_size will be smaller than JOB_HEADER_SIZE if the descriptor
+	 * spans a page boundary.
+	 */
+ copy_size = MIN(PAGE_SIZE - offset, JOB_HEADER_SIZE);
+
+ page_1 = kmap_atomic(p);
+
+ /* page_1 is a u32 pointer, offset is expressed in bytes */
+ page_1 += offset>>2;
+
+ kbase_sync_single_for_cpu(katom->kctx->kbdev,
+ kbase_dma_addr(p) + offset,
+ copy_size, DMA_BIDIRECTIONAL);
+
+ memcpy(dst, page_1, copy_size);
+
+	/* The data needed overflows the page boundary, so we
+	 * need to map the subsequent page */
+ if (copy_size < JOB_HEADER_SIZE) {
+ p = pfn_to_page(PFN_DOWN(page_array[page_index + 1]));
+ page_2 = kmap_atomic(p);
+
+ kbase_sync_single_for_cpu(katom->kctx->kbdev,
+ kbase_dma_addr(p),
+ JOB_HEADER_SIZE - copy_size, DMA_BIDIRECTIONAL);
+
+ memcpy(dst + copy_size, page_2, JOB_HEADER_SIZE - copy_size);
+ }
+
+ /* We managed to correctly map one or two pages (in case of overflow) */
+ /* Get Bounding Box data and restart index from fault address low word */
+ minX = job_header[FRAG_JOB_DESC_MIN_TILE_COORD_WORD] & X_COORDINATE_MASK;
+ minY = job_header[FRAG_JOB_DESC_MIN_TILE_COORD_WORD] & Y_COORDINATE_MASK;
+ maxX = job_header[FRAG_JOB_DESC_MAX_TILE_COORD_WORD] & X_COORDINATE_MASK;
+ maxY = job_header[FRAG_JOB_DESC_MAX_TILE_COORD_WORD] & Y_COORDINATE_MASK;
+ restartX = job_header[JOB_DESC_FAULT_ADDR_LOW_WORD] & X_COORDINATE_MASK;
+ restartY = job_header[JOB_DESC_FAULT_ADDR_LOW_WORD] & Y_COORDINATE_MASK;
+
+ dev_warn(dev, "Before Clamping:\n"
+ "Jobstatus: %08x\n"
+ "restartIdx: %08x\n"
+ "Fault_addr_low: %08x\n"
+ "minCoordsX: %08x minCoordsY: %08x\n"
+ "maxCoordsX: %08x maxCoordsY: %08x\n",
+ job_header[JOB_DESC_STATUS_WORD],
+ job_header[JOB_DESC_RESTART_INDEX_WORD],
+ job_header[JOB_DESC_FAULT_ADDR_LOW_WORD],
+ minX, minY,
+ maxX, maxY);
+
+ /* Set the restart index to the one which generated the fault*/
+ job_header[JOB_DESC_RESTART_INDEX_WORD] =
+ job_header[JOB_DESC_FAULT_ADDR_LOW_WORD];
+
+ if (restartX < minX) {
+ job_header[JOB_DESC_RESTART_INDEX_WORD] = (minX) | restartY;
+ dev_warn(dev,
+ "Clamping restart X index to minimum. %08x clamped to %08x\n",
+ restartX, minX);
+ clamped = 1;
+ }
+ if (restartY < minY) {
+ job_header[JOB_DESC_RESTART_INDEX_WORD] = (minY) | restartX;
+ dev_warn(dev,
+ "Clamping restart Y index to minimum. %08x clamped to %08x\n",
+ restartY, minY);
+ clamped = 1;
+ }
+ if (restartX > maxX) {
+ job_header[JOB_DESC_RESTART_INDEX_WORD] = (maxX) | restartY;
+ dev_warn(dev,
+ "Clamping restart X index to maximum. %08x clamped to %08x\n",
+ restartX, maxX);
+ clamped = 1;
+ }
+ if (restartY > maxY) {
+ job_header[JOB_DESC_RESTART_INDEX_WORD] = (maxY) | restartX;
+ dev_warn(dev,
+ "Clamping restart Y index to maximum. %08x clamped to %08x\n",
+ restartY, maxY);
+ clamped = 1;
+ }
+
+ if (clamped) {
+ /* Reset the fault address low word
+ * and set the job status to STOPPED */
+ job_header[JOB_DESC_FAULT_ADDR_LOW_WORD] = 0x0;
+ job_header[JOB_DESC_STATUS_WORD] = BASE_JD_EVENT_STOPPED;
+ dev_warn(dev, "After Clamping:\n"
+ "Jobstatus: %08x\n"
+ "restartIdx: %08x\n"
+ "Fault_addr_low: %08x\n"
+ "minCoordsX: %08x minCoordsY: %08x\n"
+ "maxCoordsX: %08x maxCoordsY: %08x\n",
+ job_header[JOB_DESC_STATUS_WORD],
+ job_header[JOB_DESC_RESTART_INDEX_WORD],
+ job_header[JOB_DESC_FAULT_ADDR_LOW_WORD],
+ minX, minY,
+ maxX, maxY);
+
+ /* Flush CPU cache to update memory for future GPU reads*/
+ memcpy(page_1, dst, copy_size);
+ p = pfn_to_page(PFN_DOWN(page_array[page_index]));
+
+ kbase_sync_single_for_device(katom->kctx->kbdev,
+ kbase_dma_addr(p) + offset,
+ copy_size, DMA_TO_DEVICE);
+
+ if (copy_size < JOB_HEADER_SIZE) {
+ memcpy(page_2, dst + copy_size,
+ JOB_HEADER_SIZE - copy_size);
+ p = pfn_to_page(PFN_DOWN(page_array[page_index + 1]));
+
+ kbase_sync_single_for_device(katom->kctx->kbdev,
+ kbase_dma_addr(p),
+ JOB_HEADER_SIZE - copy_size,
+ DMA_TO_DEVICE);
+ }
+ }
+ if (copy_size < JOB_HEADER_SIZE)
+ kunmap_atomic(page_2);
+
+ kunmap_atomic(page_1);
+
+out_unlock:
+ kbase_gpu_vm_unlock(katom->kctx);
+ return clamped;
+}
diff --git a/midgard-0.r2p0/mali_kbase_10969_workaround.h b/midgard-0.r2p0/mali_kbase_10969_workaround.h
new file mode 100755
index 0000000..099a298
--- /dev/null
+++ b/midgard-0.r2p0/mali_kbase_10969_workaround.h
@@ -0,0 +1,23 @@
+/*
+ *
+ * (C) COPYRIGHT 2013-2014 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained
+ * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
+ * Boston, MA 02110-1301, USA.
+ *
+ */
+
+
+
+#ifndef _KBASE_10969_WORKAROUND_
+#define _KBASE_10969_WORKAROUND_
+
+int kbasep_10969_workaround_clamp_coordinates(struct kbase_jd_atom *katom);
+
+#endif /* _KBASE_10969_WORKAROUND_ */
diff --git a/midgard-0.r2p0/mali_kbase_as_fault_debugfs.c b/midgard-0.r2p0/mali_kbase_as_fault_debugfs.c
new file mode 100755
index 0000000..f910fe9
--- /dev/null
+++ b/midgard-0.r2p0/mali_kbase_as_fault_debugfs.c
@@ -0,0 +1,102 @@
+/*
+ *
+ * (C) COPYRIGHT 2016 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained
+ * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
+ * Boston, MA 02110-1301, USA.
+ *
+ */
+
+
+
+#include <linux/debugfs.h>
+
+#include <mali_kbase.h>
+#include <mali_kbase_as_fault_debugfs.h>
+
+#ifdef CONFIG_DEBUG_FS
+#ifdef CONFIG_MALI_DEBUG
+
+static int kbase_as_fault_read(struct seq_file *sfile, void *data)
+{
+ uintptr_t as_no = (uintptr_t) sfile->private;
+
+ struct list_head *entry;
+ const struct list_head *kbdev_list;
+ struct kbase_device *kbdev = NULL;
+
+ kbdev_list = kbase_dev_list_get();
+
+ list_for_each(entry, kbdev_list) {
+ kbdev = list_entry(entry, struct kbase_device, entry);
+
+		if (kbdev->debugfs_as_read_bitmap & (1ULL << as_no)) {
+			/* don't show this one again until another fault occurs */
+ kbdev->debugfs_as_read_bitmap &= ~(1ULL << as_no);
+
+ /* output the last page fault addr */
+ seq_printf(sfile, "%llu\n", (u64) kbdev->as[as_no].fault_addr);
+ }
+
+ }
+
+ kbase_dev_list_put(kbdev_list);
+
+ return 0;
+}
+
+static int kbase_as_fault_debugfs_open(struct inode *in, struct file *file)
+{
+	return single_open(file, kbase_as_fault_read, in->i_private);
+}
+
+static const struct file_operations as_fault_fops = {
+ .open = kbase_as_fault_debugfs_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+#endif /* CONFIG_MALI_DEBUG */
+#endif /* CONFIG_DEBUG_FS */
+
+/*
+ * Initialize debugfs entry for each address space
+ */
+void kbase_as_fault_debugfs_init(struct kbase_device *kbdev)
+{
+#ifdef CONFIG_DEBUG_FS
+#ifdef CONFIG_MALI_DEBUG
+ uint i;
+ char as_name[64];
+ struct dentry *debugfs_directory;
+
+ kbdev->debugfs_as_read_bitmap = 0ULL;
+
+ KBASE_DEBUG_ASSERT(kbdev->nr_hw_address_spaces);
+ KBASE_DEBUG_ASSERT(sizeof(kbdev->as[0].fault_addr) == sizeof(u64));
+
+ debugfs_directory = debugfs_create_dir("address_spaces",
+ kbdev->mali_debugfs_directory);
+
+	if (debugfs_directory) {
+		for (i = 0; i < kbdev->nr_hw_address_spaces; i++) {
+			snprintf(as_name, ARRAY_SIZE(as_name), "as%u", i);
+			debugfs_create_file(as_name, S_IRUGO,
+					debugfs_directory,
+					(void *)((uintptr_t)i),
+					&as_fault_fops);
+		}
+	} else {
+		dev_warn(kbdev->dev,
+			"unable to create address_spaces debugfs directory");
+	}
+
+#endif /* CONFIG_MALI_DEBUG */
+#endif /* CONFIG_DEBUG_FS */
+ return;
+}
diff --git a/midgard-0.r2p0/mali_kbase_as_fault_debugfs.h b/midgard-0.r2p0/mali_kbase_as_fault_debugfs.h
new file mode 100755
index 0000000..3ed2248
--- /dev/null
+++ b/midgard-0.r2p0/mali_kbase_as_fault_debugfs.h
@@ -0,0 +1,45 @@
+/*
+ *
+ * (C) COPYRIGHT 2016 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained
+ * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
+ * Boston, MA 02110-1301, USA.
+ *
+ */
+
+
+
+#ifndef _KBASE_AS_FAULT_DEBUG_FS_H
+#define _KBASE_AS_FAULT_DEBUG_FS_H
+
+/**
+ * kbase_as_fault_debugfs_init() - Add debugfs files for reporting page faults
+ *
+ * @kbdev: Pointer to kbase_device
+ */
+void kbase_as_fault_debugfs_init(struct kbase_device *kbdev);
+
+/**
+ * kbase_as_fault_debugfs_new() - make the last fault available on debugfs
+ *
+ * @kbdev: Pointer to kbase_device
+ * @as_no: The address space the fault occurred on
+ */
+static inline void
+kbase_as_fault_debugfs_new(struct kbase_device *kbdev, int as_no)
+{
+#ifdef CONFIG_DEBUG_FS
+#ifdef CONFIG_MALI_DEBUG
+ kbdev->debugfs_as_read_bitmap |= (1ULL << as_no);
+#endif /* CONFIG_MALI_DEBUG */
+#endif /* CONFIG_DEBUG_FS */
+ return;
+}
+
+#endif /*_KBASE_AS_FAULT_DEBUG_FS_H*/
diff --git a/midgard-0.r2p0/mali_kbase_cache_policy.c b/midgard-0.r2p0/mali_kbase_cache_policy.c
new file mode 100755
index 0000000..c67b3e9
--- /dev/null
+++ b/midgard-0.r2p0/mali_kbase_cache_policy.c
@@ -0,0 +1,64 @@
+/*
+ *
+ * (C) COPYRIGHT 2012-2016 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained
+ * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
+ * Boston, MA 02110-1301, USA.
+ *
+ */
+
+
+
+
+
+/*
+ * Cache Policy API.
+ */
+
+#include "mali_kbase_cache_policy.h"
+
+/*
+ * The output flags should be a combination of the following values:
+ * KBASE_REG_CPU_CACHED: CPU cache should be enabled.
+ */
+u32 kbase_cache_enabled(u32 flags, u32 nr_pages)
+{
+ u32 cache_flags = 0;
+
+ CSTD_UNUSED(nr_pages);
+
+ if (flags & BASE_MEM_CACHED_CPU)
+ cache_flags |= KBASE_REG_CPU_CACHED;
+
+ return cache_flags;
+}
+
+
+void kbase_sync_single_for_device(struct kbase_device *kbdev, dma_addr_t handle,
+ size_t size, enum dma_data_direction dir)
+{
+/* Check if kernel is using coherency with GPU */
+#ifdef CONFIG_MALI_COH_KERN
+ if (kbdev->system_coherency == COHERENCY_ACE)
+ return;
+#endif /* CONFIG_MALI_COH_KERN */
+ dma_sync_single_for_device(kbdev->dev, handle, size, dir);
+}
+
+
+void kbase_sync_single_for_cpu(struct kbase_device *kbdev, dma_addr_t handle,
+ size_t size, enum dma_data_direction dir)
+{
+/* Check if kernel is using coherency with GPU */
+#ifdef CONFIG_MALI_COH_KERN
+ if (kbdev->system_coherency == COHERENCY_ACE)
+ return;
+#endif /* CONFIG_MALI_COH_KERN */
+ dma_sync_single_for_cpu(kbdev->dev, handle, size, dir);
+}
diff --git a/midgard-0.r2p0/mali_kbase_cache_policy.h b/midgard-0.r2p0/mali_kbase_cache_policy.h
new file mode 100755
index 0000000..0c18bdb
--- /dev/null
+++ b/midgard-0.r2p0/mali_kbase_cache_policy.h
@@ -0,0 +1,45 @@
+/*
+ *
+ * (C) COPYRIGHT 2012-2013, 2015 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained
+ * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
+ * Boston, MA 02110-1301, USA.
+ *
+ */
+
+
+
+
+
+/*
+ * Cache Policy API.
+ */
+
+#ifndef _KBASE_CACHE_POLICY_H_
+#define _KBASE_CACHE_POLICY_H_
+
+#include "mali_kbase.h"
+#include "mali_base_kernel.h"
+
+/**
+ * kbase_cache_enabled - Choose the cache policy for a specific region
+ * @flags: flags describing attributes of the region
+ * @nr_pages: total number of pages (backed or not) for the region
+ *
+ * Tells whether the CPU and GPU caches should be enabled or not for a specific
+ * region.
+ * This function can be modified to customize the cache policy depending on the
+ * flags and size of the region.
+ *
+ * Return: a combination of %KBASE_REG_CPU_CACHED and %KBASE_REG_GPU_CACHED
+ * depending on the cache policy
+ */
+u32 kbase_cache_enabled(u32 flags, u32 nr_pages);
+
+#endif /* _KBASE_CACHE_POLICY_H_ */
diff --git a/midgard-0.r2p0/mali_kbase_config.c b/midgard-0.r2p0/mali_kbase_config.c
new file mode 100755
index 0000000..fb615ae
--- /dev/null
+++ b/midgard-0.r2p0/mali_kbase_config.c
@@ -0,0 +1,51 @@
+/*
+ *
+ * (C) COPYRIGHT 2011-2015 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained
+ * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
+ * Boston, MA 02110-1301, USA.
+ *
+ */
+
+
+
+
+
+#include <mali_kbase.h>
+#include <mali_kbase_defs.h>
+#include <mali_kbase_config_defaults.h>
+
+int kbasep_platform_device_init(struct kbase_device *kbdev)
+{
+ struct kbase_platform_funcs_conf *platform_funcs_p;
+
+ platform_funcs_p = (struct kbase_platform_funcs_conf *)PLATFORM_FUNCS;
+ if (platform_funcs_p && platform_funcs_p->platform_init_func)
+ return platform_funcs_p->platform_init_func(kbdev);
+
+ return 0;
+}
+
+void kbasep_platform_device_term(struct kbase_device *kbdev)
+{
+ struct kbase_platform_funcs_conf *platform_funcs_p;
+
+ platform_funcs_p = (struct kbase_platform_funcs_conf *)PLATFORM_FUNCS;
+ if (platform_funcs_p && platform_funcs_p->platform_term_func)
+ platform_funcs_p->platform_term_func(kbdev);
+}
+
+int kbase_cpuprops_get_default_clock_speed(u32 * const clock_speed)
+{
+ KBASE_DEBUG_ASSERT(NULL != clock_speed);
+
+ *clock_speed = 100;
+ return 0;
+}
+
diff --git a/midgard-0.r2p0/mali_kbase_config.h b/midgard-0.r2p0/mali_kbase_config.h
new file mode 100755
index 0000000..356d52b
--- /dev/null
+++ b/midgard-0.r2p0/mali_kbase_config.h
@@ -0,0 +1,345 @@
+/*
+ *
+ * (C) COPYRIGHT 2010-2016 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained
+ * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
+ * Boston, MA 02110-1301, USA.
+ *
+ */
+
+
+
+
+
+/**
+ * @file mali_kbase_config.h
+ * Configuration API and Attributes for KBase
+ */
+
+#ifndef _KBASE_CONFIG_H_
+#define _KBASE_CONFIG_H_
+
+#include <asm/page.h>
+
+#include <mali_malisw.h>
+#include <mali_kbase_backend_config.h>
+
+/**
+ * @addtogroup base_api
+ * @{
+ */
+
+/**
+ * @addtogroup base_kbase_api
+ * @{
+ */
+
+/**
+ * @addtogroup kbase_config Configuration API and Attributes
+ * @{
+ */
+
+#include <linux/rbtree.h>
+
+/* Forward declaration of struct kbase_device */
+struct kbase_device;
+
+/**
+ * kbase_platform_funcs_conf - Specifies platform init/term function pointers
+ *
+ * Specifies the function pointers for platform specific initialization and
+ * termination. By default no functions are required. No additional platform
+ * specific control is necessary.
+ */
+struct kbase_platform_funcs_conf {
+ /**
+ * platform_init_func - platform specific init function pointer
+ * @kbdev - kbase_device pointer
+ *
+ * Returns 0 on success, negative error code otherwise.
+ *
+ * Function pointer for platform specific initialization or NULL if no
+	 * initialization function is required. At the point this is called the
+	 * GPU is not active and its power and clocks are in an unknown
+	 * (platform specific) state, as kbase doesn't yet have control of power
+	 * and clocks.
+ *
+ * The platform specific private pointer kbase_device::platform_context
+ * can be accessed (and possibly initialized) in here.
+ */
+ int (*platform_init_func)(struct kbase_device *kbdev);
+ /**
+ * platform_term_func - platform specific termination function pointer
+ * @kbdev - kbase_device pointer
+ *
+ * Function pointer for platform specific termination or NULL if no
+	 * termination function is required. At the point this is called the GPU
+	 * will be idle but still powered and clocked.
+ *
+ * The platform specific private pointer kbase_device::platform_context
+ * can be accessed (and possibly terminated) in here.
+ */
+ void (*platform_term_func)(struct kbase_device *kbdev);
+};
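+
+/*
+ * Illustrative sketch (editorial addition): a minimal platform functions
+ * structure. The function names are hypothetical; such a structure would
+ * typically be referenced through the PLATFORM_FUNCS configuration attribute.
+ *
+ *	static int example_platform_init(struct kbase_device *kbdev)
+ *	{
+ *		return 0;
+ *	}
+ *
+ *	static void example_platform_term(struct kbase_device *kbdev)
+ *	{
+ *	}
+ *
+ *	struct kbase_platform_funcs_conf platform_funcs = {
+ *		.platform_init_func = example_platform_init,
+ *		.platform_term_func = example_platform_term,
+ *	};
+ */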
+
+/*
+ * @brief Specifies the callbacks for power management
+ *
+ * By default no callbacks will be made and the GPU must not be powered off.
+ */
+struct kbase_pm_callback_conf {
+ /** Callback for when the GPU is idle and the power to it can be switched off.
+ *
+ * The system integrator can decide whether to either do nothing, just switch off
+ * the clocks to the GPU, or to completely power down the GPU.
+ * The platform specific private pointer kbase_device::platform_context can be accessed and modified in here. It is the
+	 * platform \em callbacks' responsibility to initialize and terminate this pointer if used (see @ref kbase_platform_funcs_conf).
+ */
+ void (*power_off_callback)(struct kbase_device *kbdev);
+
+ /** Callback for when the GPU is about to become active and power must be supplied.
+ *
+ * This function must not return until the GPU is powered and clocked sufficiently for register access to
+ * succeed. The return value specifies whether the GPU was powered down since the call to power_off_callback.
+ * If the GPU state has been lost then this function must return 1, otherwise it should return 0.
+ * The platform specific private pointer kbase_device::platform_context can be accessed and modified in here. It is the
+	 * platform \em callbacks' responsibility to initialize and terminate this pointer if used (see @ref kbase_platform_funcs_conf).
+ *
+ * The return value of the first call to this function is ignored.
+ *
+ * @return 1 if the GPU state may have been lost, 0 otherwise.
+ */
+ int (*power_on_callback)(struct kbase_device *kbdev);
+
+ /** Callback for when the system is requesting a suspend and GPU power
+ * must be switched off.
+ *
+ * Note that if this callback is present, then this may be called
+ * without a preceding call to power_off_callback. Therefore this
+ * callback must be able to take any action that might otherwise happen
+ * in power_off_callback.
+ *
+ * The platform specific private pointer kbase_device::platform_context
+ * can be accessed and modified in here. It is the platform \em
+	 * callbacks' responsibility to initialize and terminate this pointer if
+ * used (see @ref kbase_platform_funcs_conf).
+ */
+ void (*power_suspend_callback)(struct kbase_device *kbdev);
+
+ /** Callback for when the system is resuming from a suspend and GPU
+ * power must be switched on.
+ *
+ * Note that if this callback is present, then this may be called
+ * without a following call to power_on_callback. Therefore this
+ * callback must be able to take any action that might otherwise happen
+ * in power_on_callback.
+ *
+ * The platform specific private pointer kbase_device::platform_context
+ * can be accessed and modified in here. It is the platform \em
+	 * callbacks' responsibility to initialize and terminate this pointer if
+ * used (see @ref kbase_platform_funcs_conf).
+ */
+ void (*power_resume_callback)(struct kbase_device *kbdev);
+
+ /** Callback for handling runtime power management initialization.
+ *
+ * The runtime power management callbacks @ref power_runtime_off_callback and @ref power_runtime_on_callback
+ * will become active from calls made to the OS from within this function.
+ * The runtime calls can be triggered by calls from @ref power_off_callback and @ref power_on_callback.
+ * Note: for linux the kernel must have CONFIG_PM_RUNTIME enabled to use this feature.
+ *
+ * @return 0 on success, else int error code.
+ */
+ int (*power_runtime_init_callback)(struct kbase_device *kbdev);
+
+ /** Callback for handling runtime power management termination.
+ *
+ * The runtime power management callbacks @ref power_runtime_off_callback and @ref power_runtime_on_callback
+ * should no longer be called by the OS on completion of this function.
+ * Note: for linux the kernel must have CONFIG_PM_RUNTIME enabled to use this feature.
+ */
+ void (*power_runtime_term_callback)(struct kbase_device *kbdev);
+
+ /** Callback for runtime power-off power management callback
+ *
+ * For linux this callback will be called by the kernel runtime_suspend callback.
+ * Note: for linux the kernel must have CONFIG_PM_RUNTIME enabled to use this feature.
+	 */
+ void (*power_runtime_off_callback)(struct kbase_device *kbdev);
+
+ /** Callback for runtime power-on power management callback
+ *
+ * For linux this callback will be called by the kernel runtime_resume callback.
+ * Note: for linux the kernel must have CONFIG_PM_RUNTIME enabled to use this feature.
+ */
+ int (*power_runtime_on_callback)(struct kbase_device *kbdev);
+
+ /*
+ * Optional callback for checking if GPU can be suspended when idle
+ *
+ * This callback will be called by the runtime power management core
+ * when the reference count goes to 0 to provide notification that the
+ * GPU now seems idle.
+ *
+ * If this callback finds that the GPU can't be powered off, or handles
+ * suspend by powering off directly or queueing up a power off, a
+ * non-zero value must be returned to prevent the runtime PM core from
+ * also triggering a suspend.
+ *
+ * Returning 0 will cause the runtime PM core to conduct a regular
+ * autosuspend.
+ *
+ * This callback is optional and if not provided regular autosuspend
+ * will be triggered.
+ *
+ * Note: The Linux kernel must have CONFIG_PM_RUNTIME enabled to use
+ * this feature.
+ *
+	 * Return 0 if GPU can be suspended, positive value if it cannot be
+	 * suspended by runtime PM, else OS error code
+ */
+ int (*power_runtime_idle_callback)(struct kbase_device *kbdev);
+};
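+
+/*
+ * Illustrative sketch (editorial addition): a power management callback
+ * structure providing only an off/on pair. The function names are
+ * hypothetical; a real integration would switch GPU power and clocks here.
+ * power_on_callback returns 1 to report that GPU state may have been lost
+ * (see its description above).
+ *
+ *	static void example_power_off(struct kbase_device *kbdev)
+ *	{
+ *	}
+ *
+ *	static int example_power_on(struct kbase_device *kbdev)
+ *	{
+ *		return 1;
+ *	}
+ *
+ *	struct kbase_pm_callback_conf pm_callbacks = {
+ *		.power_off_callback = example_power_off,
+ *		.power_on_callback = example_power_on,
+ *	};
+ */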
+
+/**
+ * kbase_cpuprops_get_default_clock_speed - default for CPU_SPEED_FUNC
+ * @clock_speed - see kbase_cpu_clk_speed_func for details on the parameters
+ *
+ * Returns 0 on success, negative error code otherwise.
+ *
+ * Default implementation of CPU_SPEED_FUNC. This function sets clock_speed
+ * to 100, so it will be an underestimate for any real system.
+ */
+int kbase_cpuprops_get_default_clock_speed(u32 * const clock_speed);
+
+/**
+ * kbase_cpu_clk_speed_func - Type of the function pointer for CPU_SPEED_FUNC
+ * @param clock_speed - pointer to store the current CPU clock speed in MHz
+ *
+ * Returns 0 on success, otherwise negative error code.
+ *
+ * This is mainly used to implement OpenCL's clGetDeviceInfo().
+ */
+typedef int (*kbase_cpu_clk_speed_func) (u32 *clock_speed);
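+
+/*
+ * Illustrative sketch (editorial addition): a platform specific function
+ * matching this typedef, suitable for use as CPU_SPEED_FUNC. The name and the
+ * 1000 MHz figure are made up for the example.
+ *
+ *	static int example_cpu_clock_speed(u32 *clock_speed)
+ *	{
+ *		*clock_speed = 1000;
+ *		return 0;
+ *	}
+ */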
+
+/**
+ * kbase_gpu_clk_speed_func - Type of the function pointer for GPU_SPEED_FUNC
+ * @param clock_speed - pointer to store the current GPU clock speed in MHz
+ *
+ * Returns 0 on success, otherwise negative error code.
+ * When an error is returned the caller assumes maximum GPU speed stored in
+ * gpu_freq_khz_max.
+ *
+ * If the system timer is not available then this function is required
+ * for the OpenCL queue profiling to return correct timing information.
+ *
+ */
+typedef int (*kbase_gpu_clk_speed_func) (u32 *clock_speed);
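+
+/*
+ * Illustrative sketch only (not part of this driver): a platform could
+ * provide CPU_SPEED_FUNC with a trivial implementation matching the
+ * kbase_cpu_clk_speed_func signature.  The function name and the 1000 MHz
+ * value are made-up example values.
+ *
+ *   static int example_cpu_clk_speed(u32 *clock_speed)
+ *   {
+ *           *clock_speed = 1000;
+ *           return 0;
+ *   }
+ */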
+
+#ifdef CONFIG_OF
+struct kbase_platform_config {
+};
+#else
+
+/*
+ * @brief Specifies start and end of I/O memory region.
+ */
+struct kbase_io_memory_region {
+ u64 start;
+ u64 end;
+};
+
+/*
+ * @brief Specifies I/O related resources like IRQs and memory region for I/O operations.
+ */
+struct kbase_io_resources {
+ u32 job_irq_number;
+ u32 mmu_irq_number;
+ u32 gpu_irq_number;
+ struct kbase_io_memory_region io_memory_region;
+};
+
+struct kbase_platform_config {
+ const struct kbase_io_resources *io_resources;
+};
+
+#endif /* CONFIG_OF */
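+
+/*
+ * Illustrative sketch only (not part of this driver): on a non-device-tree
+ * platform the I/O resources above could be filled in statically by the
+ * platform glue code.  The IRQ numbers and register window below are made-up
+ * example values.
+ *
+ *   static const struct kbase_io_resources example_io_resources = {
+ *           .job_irq_number = 68,
+ *           .mmu_irq_number = 69,
+ *           .gpu_irq_number = 70,
+ *           .io_memory_region = { .start = 0xfc010000, .end = 0xfc014000 },
+ *   };
+ *
+ *   static const struct kbase_platform_config example_platform_config = {
+ *           .io_resources = &example_io_resources,
+ *   };
+ */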
+
+/**
+ * @brief Gets the pointer to platform config.
+ *
+ * @return Pointer to the platform config
+ */
+struct kbase_platform_config *kbase_get_platform_config(void);
+
+/**
+ * kbasep_platform_device_init: - Platform specific call to initialize hardware
+ * @kbdev: kbase device pointer
+ *
+ * Function calls a platform defined routine if specified in the configuration
+ * attributes. The routine can initialize any hardware and context state that
+ * is required for the GPU block to function.
+ *
+ * Return: 0 if no errors have been found in the config.
+ * Negative error code otherwise.
+ */
+int kbasep_platform_device_init(struct kbase_device *kbdev);
+
+/**
+ * kbasep_platform_device_term - Platform specific call to terminate hardware
+ * @kbdev: Kbase device pointer
+ *
+ * Function calls a platform defined routine if specified in the configuration
+ * attributes. The routine can destroy any platform specific context state and
+ * shut down any hardware functionality that is outside of the Power Management
+ * callbacks.
+ *
+ */
+void kbasep_platform_device_term(struct kbase_device *kbdev);
+
+
+/**
+ * kbase_platform_early_init - Early initialisation of the platform code
+ *
+ * This function will be called when the module is loaded to perform any
+ * early initialisation required by the platform code, such as reading
+ * platform-specific device tree entries for the GPU.
+ *
+ * Return: 0 for success; any other value causes module initialisation to fail
+ */
+int kbase_platform_early_init(void);
+
+#ifndef CONFIG_OF
+#ifdef CONFIG_MALI_PLATFORM_FAKE
+/**
+ * kbase_platform_fake_register - Register a platform device for the GPU
+ *
+ * This can be used to register a platform device on systems where device tree
+ * is not enabled and the platform initialisation code in the kernel doesn't
+ * create the GPU device. Where possible device tree should be used instead.
+ *
+ * Return: 0 for success; any other value causes module initialisation to fail
+ */
+int kbase_platform_fake_register(void);
+
+/**
+ * kbase_platform_fake_unregister - Unregister a fake platform device
+ *
+ * Unregister the platform device created with kbase_platform_fake_register()
+ */
+void kbase_platform_fake_unregister(void);
+#endif
+#endif
+
+ /** @} *//* end group kbase_config */
+ /** @} *//* end group base_kbase_api */
+ /** @} *//* end group base_api */
+
+#endif /* _KBASE_CONFIG_H_ */
diff --git a/midgard-0.r2p0/mali_kbase_config_defaults.h b/midgard-0.r2p0/mali_kbase_config_defaults.h
new file mode 100755
index 0000000..0f11388
--- /dev/null
+++ b/midgard-0.r2p0/mali_kbase_config_defaults.h
@@ -0,0 +1,260 @@
+/*
+ *
+ * (C) COPYRIGHT 2013-2016 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained
+ * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
+ * Boston, MA 02110-1301, USA.
+ *
+ */
+
+
+
+/**
+ * @file mali_kbase_config_defaults.h
+ *
+ * Default values for configuration settings
+ *
+ */
+
+#ifndef _KBASE_CONFIG_DEFAULTS_H_
+#define _KBASE_CONFIG_DEFAULTS_H_
+
+/* Include mandatory definitions per platform */
+#include <mali_kbase_config_platform.h>
+
+/**
+ * Irq throttle. It is the minimum desired time in between two
+ * consecutive gpu interrupts (given in 'us'). The irq throttle
+ * gpu register will be configured after this, taking into
+ * account the configured max frequency.
+ *
+ * Attached value: number in micro seconds
+ */
+#define DEFAULT_IRQ_THROTTLE_TIME_US 20
+
+/**
+ * Default Job Scheduler initial runtime of a context for the CFS Policy,
+ * in time-slices.
+ *
+ * This value is relative to that of the least-run context, and defines
+ * where in the CFS queue a new context is added. A value of 1 means 'after
+ * the least-run context has used its timeslice'. Therefore, when all
+ * contexts consistently use the same amount of time, a value of 1 models a
+ * FIFO. A value of 0 would model a LIFO.
+ *
+ * The value is represented in "numbers of time slices". Multiply this
+ * value by that defined in @ref DEFAULT_JS_CTX_TIMESLICE_NS to get
+ * the time value for this in nanoseconds.
+ */
+#define DEFAULT_JS_CFS_CTX_RUNTIME_INIT_SLICES 1
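+
+/*
+ * Worked example (derived from the values in this file): with
+ * DEFAULT_JS_CTX_TIMESLICE_NS = 50000000 ns (50 ms), an initial runtime of
+ * 1 time-slice corresponds to 1 * 50 ms = 50 ms.
+ */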
+
+/**
+ * Default Job Scheduler minimum runtime value of a context for CFS, in
+ * time_slices relative to that of the least-run context.
+ *
+ * This is a measure of how much preferential treatment is given to a
+ * context that is not run very often.
+ *
+ * Specifically, this value defines how many timeslices such a context is
+ * (initially) allowed to use at once. Such contexts (e.g. 'interactive'
+ * processes) will appear near the front of the CFS queue, and can initially
+ * use more time than contexts that run continuously (e.g. 'batch'
+ * processes).
+ *
+ * This limit \b prevents a "stored-up timeslices" DoS attack, where a ctx
+ * not run for a long time attacks the system by using a very large initial
+ * number of timeslices when it finally does run.
+ *
+ * @note A value of zero allows not-run-often contexts to get scheduled in
+ * quickly, but to only use a single timeslice when they get scheduled in.
+ */
+#define DEFAULT_JS_CFS_CTX_RUNTIME_MIN_SLICES 2
+
+/**
+* Boolean indicating whether the driver is configured to be secure at
+* a potential loss of performance.
+*
+* This currently affects only r0p0-15dev0 HW and earlier.
+*
+* On r0p0-15dev0 HW and earlier, there are tradeoffs between security and
+* performance:
+*
+* - When this is set to true, the driver remains fully secure,
+* but potentially loses performance compared with setting this to
+* false.
+* - When set to false, the driver is open to certain security
+* attacks.
+*
+* From r0p0-00rel0 and onwards, there is no security loss by setting
+* this to false, and no performance loss by setting it to
+* true.
+*/
+#define DEFAULT_SECURE_BUT_LOSS_OF_PERFORMANCE false
+
+enum {
+ /**
+ * Use unrestricted Address ID width on the AXI bus.
+ */
+ KBASE_AID_32 = 0x0,
+
+ /**
+ * Restrict GPU to a half of maximum Address ID count.
+ * This will reduce performance, but reduce bus load due to GPU.
+ */
+ KBASE_AID_16 = 0x3,
+
+ /**
+ * Restrict GPU to a quarter of maximum Address ID count.
+ * This will reduce performance, but reduce bus load due to GPU.
+ */
+ KBASE_AID_8 = 0x2,
+
+ /**
+ * Restrict GPU to an eighth of maximum Address ID count.
+ * This will reduce performance, but reduce bus load due to GPU.
+ */
+ KBASE_AID_4 = 0x1
+};
+
+/**
+ * Default setting for read Address ID limiting on AXI bus.
+ *
+ * Attached value: u32 register value
+ * KBASE_AID_32 - use the full 32 IDs (5 ID bits)
+ * KBASE_AID_16 - use 16 IDs (4 ID bits)
+ * KBASE_AID_8 - use 8 IDs (3 ID bits)
+ * KBASE_AID_4 - use 4 IDs (2 ID bits)
+ * Default value: KBASE_AID_32 (no limit). Note hardware implementation
+ * may limit to a lower value.
+ */
+#define DEFAULT_ARID_LIMIT KBASE_AID_32
+
+/**
+ * Default setting for write Address ID limiting on AXI.
+ *
+ * Attached value: u32 register value
+ * KBASE_AID_32 - use the full 32 IDs (5 ID bits)
+ * KBASE_AID_16 - use 16 IDs (4 ID bits)
+ * KBASE_AID_8 - use 8 IDs (3 ID bits)
+ * KBASE_AID_4 - use 4 IDs (2 ID bits)
+ * Default value: KBASE_AID_32 (no limit). Note hardware implementation
+ * may limit to a lower value.
+ */
+#define DEFAULT_AWID_LIMIT KBASE_AID_32
+
+/**
+ * Default UMP device mapping. A UMP_DEVICE_<device>_SHIFT value which
+ * defines which UMP device this GPU should be mapped to.
+ */
+#define DEFAULT_UMP_GPU_DEVICE_SHIFT UMP_DEVICE_Z_SHIFT
+
+/*
+ * Default period for DVFS sampling
+ */
+#define DEFAULT_PM_DVFS_PERIOD 100 /* 100ms */
+
+/*
+ * Power Management poweroff tick granularity. This is in nanoseconds to
+ * allow HR timer support.
+ *
+ * On each scheduling tick, the power manager core may decide to:
+ * -# Power off one or more shader cores
+ * -# Power off the entire GPU
+ */
+#define DEFAULT_PM_GPU_POWEROFF_TICK_NS (400000) /* 400us */
+
+/*
+ * Power Manager number of ticks before shader cores are powered off
+ */
+#define DEFAULT_PM_POWEROFF_TICK_SHADER (2) /* 400-800us */
+
+/*
+ * Power Manager number of ticks before GPU is powered off
+ */
+#define DEFAULT_PM_POWEROFF_TICK_GPU (2) /* 400-800us */
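+
+/*
+ * Worked example (derived from the values above): with a poweroff tick of
+ * DEFAULT_PM_GPU_POWEROFF_TICK_NS = 400000 ns (400 us), a count of 2 ticks
+ * gives an upper bound of 2 * 400 us = 800 us; the 400 us lower bound in the
+ * annotations above presumably reflects that the first tick can arrive
+ * partway through its period.
+ */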
+
+/*
+ * Default scheduling tick granularity
+ */
+#define DEFAULT_JS_SCHEDULING_PERIOD_NS (100000000u) /* 100ms */
+
+/*
+ * Default minimum number of scheduling ticks before jobs are soft-stopped.
+ *
+ * This defines the time-slice for a job (which may be different from that of a
+ * context)
+ */
+#define DEFAULT_JS_SOFT_STOP_TICKS (1) /* 100ms-200ms */
+
+/*
+ * Default minimum number of scheduling ticks before CL jobs are soft-stopped.
+ */
+#define DEFAULT_JS_SOFT_STOP_TICKS_CL (1) /* 100ms-200ms */
+
+/*
+ * Default minimum number of scheduling ticks before jobs are hard-stopped
+ */
+#define DEFAULT_JS_HARD_STOP_TICKS_SS (100) /* 10s */
+#define DEFAULT_JS_HARD_STOP_TICKS_SS_8408 (300) /* 30s */
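+
+/*
+ * Worked example (derived from the values in this file): hard-stop ticks are
+ * counted in units of DEFAULT_JS_SCHEDULING_PERIOD_NS (100 ms), so 100 ticks
+ * corresponds to 100 * 100 ms = 10 s and 300 ticks to 30 s, matching the
+ * annotations above.
+ */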
+
+/*
+ * Default minimum number of scheduling ticks before CL jobs are hard-stopped.
+ */
+#define DEFAULT_JS_HARD_STOP_TICKS_CL (100) /* 10s */
+
+/*
+ * Default minimum number of scheduling ticks before jobs are hard-stopped
+ * during dumping
+ */
+#define DEFAULT_JS_HARD_STOP_TICKS_DUMPING (15000) /* 1500s */
+
+/*
+ * Default timeout for some software jobs, after which the software event wait
+ * jobs will be cancelled.
+ */
+#define DEFAULT_JS_SOFT_JOB_TIMEOUT ((u32)3000) /* 3s */
+
+/*
+ * Default minimum number of scheduling ticks before the GPU is reset to clear a
+ * "stuck" job
+ */
+#define DEFAULT_JS_RESET_TICKS_SS (105) /* 10.5s */
+#define DEFAULT_JS_RESET_TICKS_SS_8408 (450) /* 45s */
+
+/*
+ * Default minimum number of scheduling ticks before the GPU is reset to clear a
+ * "stuck" CL job.
+ */
+#define DEFAULT_JS_RESET_TICKS_CL (105) /* 10.5s */
+
+/*
+ * Default minimum number of scheduling ticks before the GPU is reset to clear a
+ * "stuck" job during dumping.
+ */
+#define DEFAULT_JS_RESET_TICKS_DUMPING (15020) /* 1502s */
+
+/*
+ * Default number of milliseconds given for other jobs on the GPU to be
+ * soft-stopped when the GPU needs to be reset.
+ */
+#define DEFAULT_RESET_TIMEOUT_MS (3000) /* 3s */
+
+/*
+ * Default timeslice that a context is scheduled in for, in nanoseconds.
+ *
+ * When a context has used up this amount of time across its jobs, it is
+ * scheduled out to let another run.
+ *
+ * @note the resolution is nanoseconds (ns) here, because that's the format
+ * often used by the OS.
+ */
+#define DEFAULT_JS_CTX_TIMESLICE_NS (50000000) /* 50ms */
+
+#endif /* _KBASE_CONFIG_DEFAULTS_H_ */
+
diff --git a/midgard-0.r2p0/mali_kbase_context.c b/midgard-0.r2p0/mali_kbase_context.c
new file mode 100755
index 0000000..55c5ef6
--- /dev/null
+++ b/midgard-0.r2p0/mali_kbase_context.c
@@ -0,0 +1,321 @@
+/*
+ *
+ * (C) COPYRIGHT 2010-2016 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained
+ * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
+ * Boston, MA 02110-1301, USA.
+ *
+ */
+
+
+
+
+
+/*
+ * Base kernel context APIs
+ */
+
+#include <mali_kbase.h>
+#include <mali_midg_regmap.h>
+#include <mali_kbase_mem_linux.h>
+
+/**
+ * kbase_create_context() - Create a kernel base context.
+ * @kbdev: Kbase device
+ * @is_compat: Force creation of a 32-bit context
+ *
+ * Allocate and init a kernel base context.
+ *
+ * Return: new kbase context
+ */
+struct kbase_context *
+kbase_create_context(struct kbase_device *kbdev, bool is_compat)
+{
+ struct kbase_context *kctx;
+ int err;
+
+ KBASE_DEBUG_ASSERT(kbdev != NULL);
+
+ /* zero-initialised, as a lot of code assumes it's zeroed out on create */
+ kctx = vzalloc(sizeof(*kctx));
+
+ if (!kctx)
+ goto out;
+
+ /* creating a context is considered a disjoint event */
+ kbase_disjoint_event(kbdev);
+
+ kctx->kbdev = kbdev;
+ kctx->as_nr = KBASEP_AS_NR_INVALID;
+ if (is_compat)
+ kbase_ctx_flag_set(kctx, KCTX_COMPAT);
+#ifdef CONFIG_MALI_TRACE_TIMELINE
+ kctx->timeline.owner_tgid = task_tgid_nr(current);
+#endif
+ atomic_set(&kctx->setup_complete, 0);
+ atomic_set(&kctx->setup_in_progress, 0);
+ spin_lock_init(&kctx->mm_update_lock);
+ kctx->process_mm = NULL;
+ atomic_set(&kctx->nonmapped_pages, 0);
+ kctx->slots_pullable = 0;
+ kctx->tgid = current->tgid;
+ kctx->pid = current->pid;
+
+ err = kbase_mem_pool_init(&kctx->mem_pool,
+ kbdev->mem_pool_max_size_default,
+ kctx->kbdev, &kbdev->mem_pool);
+ if (err)
+ goto free_kctx;
+
+ err = kbase_mem_evictable_init(kctx);
+ if (err)
+ goto free_pool;
+
+ atomic_set(&kctx->used_pages, 0);
+
+ err = kbase_jd_init(kctx);
+ if (err)
+ goto deinit_evictable;
+
+ err = kbasep_js_kctx_init(kctx);
+ if (err)
+ goto free_jd; /* safe to call kbasep_js_kctx_term in this case */
+
+ err = kbase_event_init(kctx);
+ if (err)
+ goto free_jd;
+
+ atomic_set(&kctx->drain_pending, 0);
+
+ mutex_init(&kctx->reg_lock);
+
+ INIT_LIST_HEAD(&kctx->waiting_soft_jobs);
+ spin_lock_init(&kctx->waiting_soft_jobs_lock);
+#ifdef CONFIG_KDS
+ INIT_LIST_HEAD(&kctx->waiting_kds_resource);
+#endif
+ err = kbase_dma_fence_init(kctx);
+ if (err)
+ goto free_event;
+
+ err = kbase_mmu_init(kctx);
+ if (err)
+ goto term_dma_fence;
+
+ do {
+ err = kbase_mem_pool_grow(&kctx->mem_pool,
+ MIDGARD_MMU_BOTTOMLEVEL);
+ if (err)
+ goto pgd_no_mem;
+ kctx->pgd = kbase_mmu_alloc_pgd(kctx);
+ } while (!kctx->pgd);
+
+ kctx->aliasing_sink_page = kbase_mem_alloc_page(kctx->kbdev);
+ if (!kctx->aliasing_sink_page)
+ goto no_sink_page;
+
+ init_waitqueue_head(&kctx->event_queue);
+
+ kctx->cookies = KBASE_COOKIE_MASK;
+
+ /* Make sure page 0 is not used... */
+ err = kbase_region_tracker_init(kctx);
+ if (err)
+ goto no_region_tracker;
+
+ err = kbase_sticky_resource_init(kctx);
+ if (err)
+ goto no_sticky;
+
+ err = kbase_jit_init(kctx);
+ if (err)
+ goto no_jit;
+#ifdef CONFIG_GPU_TRACEPOINTS
+ atomic_set(&kctx->jctx.work_id, 0);
+#endif
+#ifdef CONFIG_MALI_TRACE_TIMELINE
+ atomic_set(&kctx->timeline.jd_atoms_in_flight, 0);
+#endif
+
+ kctx->id = atomic_add_return(1, &(kbdev->ctx_num)) - 1;
+
+ mutex_init(&kctx->vinstr_cli_lock);
+
+ setup_timer(&kctx->soft_job_timeout,
+ kbasep_soft_job_timeout_worker,
+ (uintptr_t)kctx);
+
+ return kctx;
+
+no_jit:
+ kbase_gpu_vm_lock(kctx);
+ kbase_sticky_resource_term(kctx);
+ kbase_gpu_vm_unlock(kctx);
+no_sticky:
+ kbase_region_tracker_term(kctx);
+no_region_tracker:
+ kbase_mem_pool_free(&kctx->mem_pool, kctx->aliasing_sink_page, false);
+no_sink_page:
+ /* VM lock needed for the call to kbase_mmu_free_pgd */
+ kbase_gpu_vm_lock(kctx);
+ kbase_mmu_free_pgd(kctx);
+ kbase_gpu_vm_unlock(kctx);
+pgd_no_mem:
+ kbase_mmu_term(kctx);
+term_dma_fence:
+ kbase_dma_fence_term(kctx);
+free_event:
+ kbase_event_cleanup(kctx);
+free_jd:
+ /* Safe to call this one even if init didn't complete (assuming kctx was sufficiently zeroed) */
+ kbasep_js_kctx_term(kctx);
+ kbase_jd_exit(kctx);
+deinit_evictable:
+ kbase_mem_evictable_deinit(kctx);
+free_pool:
+ kbase_mem_pool_term(&kctx->mem_pool);
+free_kctx:
+ vfree(kctx);
+out:
+ return NULL;
+}
+KBASE_EXPORT_SYMBOL(kbase_create_context);
+
+static void kbase_reg_pending_dtor(struct kbase_va_region *reg)
+{
+ dev_dbg(reg->kctx->kbdev->dev, "Freeing pending unmapped region\n");
+ kbase_mem_phy_alloc_put(reg->cpu_alloc);
+ kbase_mem_phy_alloc_put(reg->gpu_alloc);
+ kfree(reg);
+}
+
+/**
+ * kbase_destroy_context - Destroy a kernel base context.
+ * @kctx: Context to destroy
+ *
+ * Calls kbase_destroy_os_context() to free OS specific structures.
+ * Will release all outstanding regions.
+ */
+void kbase_destroy_context(struct kbase_context *kctx)
+{
+ struct kbase_device *kbdev;
+ int pages;
+ unsigned long pending_regions_to_clean;
+
+ KBASE_DEBUG_ASSERT(NULL != kctx);
+
+ kbdev = kctx->kbdev;
+ KBASE_DEBUG_ASSERT(NULL != kbdev);
+
+ KBASE_TRACE_ADD(kbdev, CORE_CTX_DESTROY, kctx, NULL, 0u, 0u);
+
+ /* Ensure the core is powered up for the destroy process */
+ /* A suspend won't happen here, because we're in a syscall from a userspace
+ * thread. */
+ kbase_pm_context_active(kbdev);
+
+ kbase_jd_zap_context(kctx);
+ kbase_event_cleanup(kctx);
+
+ /*
+ * JIT must be terminated before the code below as it must be called
+ * without the region lock being held.
+ * The code above ensures no new JIT allocations can be made by
+ * the time we get to this point of context tear down.
+ */
+ kbase_jit_term(kctx);
+
+ kbase_gpu_vm_lock(kctx);
+
+ kbase_sticky_resource_term(kctx);
+
+ /* MMU is disabled as part of scheduling out the context */
+ kbase_mmu_free_pgd(kctx);
+
+ /* drop the aliasing sink page now that it can't be mapped anymore */
+ kbase_mem_pool_free(&kctx->mem_pool, kctx->aliasing_sink_page, false);
+
+ /* free pending region setups */
+ pending_regions_to_clean = (~kctx->cookies) & KBASE_COOKIE_MASK;
+ while (pending_regions_to_clean) {
+ unsigned int cookie = __ffs(pending_regions_to_clean);
+
+ BUG_ON(!kctx->pending_regions[cookie]);
+
+ kbase_reg_pending_dtor(kctx->pending_regions[cookie]);
+
+ kctx->pending_regions[cookie] = NULL;
+ pending_regions_to_clean &= ~(1UL << cookie);
+ }
+
+ kbase_region_tracker_term(kctx);
+ kbase_gpu_vm_unlock(kctx);
+
+ /* Safe to call this one even if init didn't complete (assuming kctx was sufficiently zeroed) */
+ kbasep_js_kctx_term(kctx);
+
+ kbase_jd_exit(kctx);
+
+ kbase_pm_context_idle(kbdev);
+
+ kbase_dma_fence_term(kctx);
+
+ kbase_mmu_term(kctx);
+
+ pages = atomic_read(&kctx->used_pages);
+ if (pages != 0)
+ dev_warn(kbdev->dev, "%s: %d pages in use!\n", __func__, pages);
+
+ kbase_mem_evictable_deinit(kctx);
+ kbase_mem_pool_term(&kctx->mem_pool);
+ WARN_ON(atomic_read(&kctx->nonmapped_pages) != 0);
+
+ vfree(kctx);
+}
+KBASE_EXPORT_SYMBOL(kbase_destroy_context);
+
+/**
+ * kbase_context_set_create_flags - Set creation flags on a context
+ * @kctx: Kbase context
+ * @flags: Flags to set
+ *
+ * Return: 0 on success
+ */
+int kbase_context_set_create_flags(struct kbase_context *kctx, u32 flags)
+{
+ int err = 0;
+ struct kbasep_js_kctx_info *js_kctx_info;
+ unsigned long irq_flags;
+
+ KBASE_DEBUG_ASSERT(NULL != kctx);
+
+ js_kctx_info = &kctx->jctx.sched_info;
+
+ /* Validate flags */
+ if (flags != (flags & BASE_CONTEXT_CREATE_KERNEL_FLAGS)) {
+ err = -EINVAL;
+ goto out;
+ }
+
+ mutex_lock(&js_kctx_info->ctx.jsctx_mutex);
+ spin_lock_irqsave(&kctx->kbdev->hwaccess_lock, irq_flags);
+
+ /* Translate the flags */
+ if ((flags & BASE_CONTEXT_SYSTEM_MONITOR_SUBMIT_DISABLED) == 0)
+ kbase_ctx_flag_clear(kctx, KCTX_SUBMIT_DISABLED);
+
+ /* Latch the initial attributes into the Job Scheduler */
+ kbasep_js_ctx_attr_set_initial_attrs(kctx->kbdev, kctx);
+
+ spin_unlock_irqrestore(&kctx->kbdev->hwaccess_lock, irq_flags);
+ mutex_unlock(&js_kctx_info->ctx.jsctx_mutex);
+ out:
+ return err;
+}
+KBASE_EXPORT_SYMBOL(kbase_context_set_create_flags);
diff --git a/midgard-0.r2p0/mali_kbase_context.h b/midgard-0.r2p0/mali_kbase_context.h
new file mode 100755
index 0000000..a3f5bb0
--- /dev/null
+++ b/midgard-0.r2p0/mali_kbase_context.h
@@ -0,0 +1,90 @@
+/*
+ *
+ * (C) COPYRIGHT 2011-2016 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained
+ * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
+ * Boston, MA 02110-1301, USA.
+ *
+ */
+
+
+
+#ifndef _KBASE_CONTEXT_H_
+#define _KBASE_CONTEXT_H_
+
+#include <linux/atomic.h>
+
+
+int kbase_context_set_create_flags(struct kbase_context *kctx, u32 flags);
+
+/**
+ * kbase_ctx_flag - Check if @flag is set on @kctx
+ * @kctx: Pointer to kbase context to check
+ * @flag: Flag to check
+ *
+ * Return: true if @flag is set on @kctx, false if not.
+ */
+static inline bool kbase_ctx_flag(struct kbase_context *kctx,
+ enum kbase_context_flags flag)
+{
+ return atomic_read(&kctx->flags) & flag;
+}
+
+/**
+ * kbase_ctx_flag_clear - Clear @flag on @kctx
+ * @kctx: Pointer to kbase context
+ * @flag: Flag to clear
+ *
+ * Clear the @flag on @kctx. This is done atomically, so other flags being
+ * cleared or set at the same time will be safe.
+ *
+ * Some flags have locking requirements, check the documentation for the
+ * respective flags.
+ */
+static inline void kbase_ctx_flag_clear(struct kbase_context *kctx,
+ enum kbase_context_flags flag)
+{
+#if KERNEL_VERSION(4, 3, 0) > LINUX_VERSION_CODE
+ /*
+ * Earlier kernel versions don't have atomic_andnot() or
+ * atomic_and(). atomic_clear_mask() was only available on some
+ * architectures and was removed for arm and arm64 in v3.13.
+ *
+ * Use a compare-exchange loop to clear the flag on pre-4.3 kernels,
+ * before atomic_andnot() became available.
+ */
+ int old, new;
+
+ do {
+ old = atomic_read(&kctx->flags);
+ new = old & ~flag;
+
+ } while (atomic_cmpxchg(&kctx->flags, old, new) != old);
+#else
+ atomic_andnot(flag, &kctx->flags);
+#endif
+}
+
+/**
+ * kbase_ctx_flag_set - Set @flag on @kctx
+ * @kctx: Pointer to kbase context
+ * @flag: Flag to set
+ *
+ * Set the @flag on @kctx. This is done atomically, so other flags being
+ * cleared or set at the same time will be safe.
+ *
+ * Some flags have locking requirements, check the documentation for the
+ * respective flags.
+ */
+static inline void kbase_ctx_flag_set(struct kbase_context *kctx,
+ enum kbase_context_flags flag)
+{
+ atomic_or(flag, &kctx->flags);
+}
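+
+/*
+ * Illustrative usage sketch (not part of this driver): the helpers above are
+ * typically combined as below, here with the KCTX_COMPAT flag used elsewhere
+ * in the driver.
+ *
+ *   kbase_ctx_flag_set(kctx, KCTX_COMPAT);
+ *   if (kbase_ctx_flag(kctx, KCTX_COMPAT))
+ *           kbase_ctx_flag_clear(kctx, KCTX_COMPAT);
+ */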
+#endif /* _KBASE_CONTEXT_H_ */
diff --git a/midgard-0.r2p0/mali_kbase_core_linux.c b/midgard-0.r2p0/mali_kbase_core_linux.c
new file mode 100755
index 0000000..f723613
--- /dev/null
+++ b/midgard-0.r2p0/mali_kbase_core_linux.c
@@ -0,0 +1,4062 @@
+/*
+ *
+ * (C) COPYRIGHT 2010-2016 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained
+ * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
+ * Boston, MA 02110-1301, USA.
+ *
+ */
+
+
+
+#include <mali_kbase.h>
+#include <mali_kbase_config_defaults.h>
+#include <mali_kbase_uku.h>
+#include <mali_midg_regmap.h>
+#include <mali_kbase_gator.h>
+#include <mali_kbase_mem_linux.h>
+#ifdef CONFIG_MALI_DEVFREQ
+#include <backend/gpu/mali_kbase_devfreq.h>
+#endif /* CONFIG_MALI_DEVFREQ */
+#ifdef CONFIG_MALI_NO_MALI
+#include "mali_kbase_model_linux.h"
+#endif /* CONFIG_MALI_NO_MALI */
+#include "mali_kbase_mem_profile_debugfs_buf_size.h"
+#include "mali_kbase_debug_mem_view.h"
+#include "mali_kbase_mem.h"
+#include "mali_kbase_mem_pool_debugfs.h"
+#if !MALI_CUSTOMER_RELEASE
+#include "mali_kbase_regs_dump_debugfs.h"
+#endif /* !MALI_CUSTOMER_RELEASE */
+#include "mali_kbase_regs_history_debugfs.h"
+#include <mali_kbase_hwaccess_backend.h>
+#include <mali_kbase_hwaccess_jm.h>
+#include <backend/gpu/mali_kbase_device_internal.h>
+
+#ifdef CONFIG_KDS
+#include <linux/kds.h>
+#include <linux/anon_inodes.h>
+#include <linux/syscalls.h>
+#endif /* CONFIG_KDS */
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/poll.h>
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/miscdevice.h>
+#include <linux/list.h>
+#include <linux/semaphore.h>
+#include <linux/fs.h>
+#include <linux/uaccess.h>
+#include <linux/interrupt.h>
+#include <linux/mm.h>
+#include <linux/compat.h> /* is_compat_task */
+#include <linux/mman.h>
+#include <linux/version.h>
+#ifdef CONFIG_MALI_PLATFORM_DEVICETREE
+#include <linux/pm_runtime.h>
+#endif /* CONFIG_MALI_PLATFORM_DEVICETREE */
+#include <mali_kbase_hw.h>
+#include <platform/mali_kbase_platform_common.h>
+#ifdef CONFIG_MALI_PLATFORM_FAKE
+#include <platform/mali_kbase_platform_fake.h>
+#endif /*CONFIG_MALI_PLATFORM_FAKE */
+#ifdef CONFIG_SYNC
+#include <mali_kbase_sync.h>
+#endif /* CONFIG_SYNC */
+#ifdef CONFIG_PM_DEVFREQ
+#include <linux/devfreq.h>
+#endif /* CONFIG_PM_DEVFREQ */
+#include <linux/clk.h>
+#include <linux/delay.h>
+
+#include <mali_kbase_config.h>
+
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 13, 0))
+#include <linux/pm_opp.h>
+#else
+#include <linux/opp.h>
+#endif
+
+#include <mali_kbase_tlstream.h>
+
+#include <mali_kbase_as_fault_debugfs.h>
+
+/* GPU IRQ Tags */
+#define JOB_IRQ_TAG 0
+#define MMU_IRQ_TAG 1
+#define GPU_IRQ_TAG 2
+
+#if MALI_UNIT_TEST
+static struct kbase_exported_test_data shared_kernel_test_data;
+EXPORT_SYMBOL(shared_kernel_test_data);
+#endif /* MALI_UNIT_TEST */
+
+static int kbase_dev_nr;
+
+static DEFINE_MUTEX(kbase_dev_list_lock);
+static LIST_HEAD(kbase_dev_list);
+
+#define KERNEL_SIDE_DDK_VERSION_STRING "K:" MALI_RELEASE_NAME "(GPL)"
+static inline void __compile_time_asserts(void)
+{
+ CSTD_COMPILE_TIME_ASSERT(sizeof(KERNEL_SIDE_DDK_VERSION_STRING) <= KBASE_GET_VERSION_BUFFER_SIZE);
+}
+
+static void kbase_create_timeline_objects(struct kbase_context *kctx)
+{
+ struct kbase_device *kbdev = kctx->kbdev;
+ unsigned int lpu_id;
+ unsigned int as_nr;
+ struct kbasep_kctx_list_element *element;
+
+ /* Create LPU objects. */
+ for (lpu_id = 0; lpu_id < kbdev->gpu_props.num_job_slots; lpu_id++) {
+ u32 *lpu =
+ &kbdev->gpu_props.props.raw_props.js_features[lpu_id];
+ kbase_tlstream_tl_summary_new_lpu(lpu, lpu_id, *lpu);
+ }
+
+ /* Create Address Space objects. */
+ for (as_nr = 0; as_nr < kbdev->nr_hw_address_spaces; as_nr++)
+ kbase_tlstream_tl_summary_new_as(&kbdev->as[as_nr], as_nr);
+
+ /* Create GPU object and make it retain all LPUs and address spaces. */
+ kbase_tlstream_tl_summary_new_gpu(
+ kbdev,
+ kbdev->gpu_props.props.raw_props.gpu_id,
+ kbdev->gpu_props.num_cores);
+
+ for (lpu_id = 0; lpu_id < kbdev->gpu_props.num_job_slots; lpu_id++) {
+ void *lpu =
+ &kbdev->gpu_props.props.raw_props.js_features[lpu_id];
+ kbase_tlstream_tl_summary_lifelink_lpu_gpu(lpu, kbdev);
+ }
+ for (as_nr = 0; as_nr < kbdev->nr_hw_address_spaces; as_nr++)
+ kbase_tlstream_tl_summary_lifelink_as_gpu(
+ &kbdev->as[as_nr],
+ kbdev);
+
+ /* Create object for each known context. */
+ mutex_lock(&kbdev->kctx_list_lock);
+ list_for_each_entry(element, &kbdev->kctx_list, link) {
+ kbase_tlstream_tl_summary_new_ctx(
+ element->kctx,
+ (u32)(element->kctx->id),
+ (u32)(element->kctx->tgid));
+ }
+ /* Before releasing the lock, reset body stream buffers.
+ * This will prevent context creation messages from being directed to both
+ * the summary and body streams. */
+ kbase_tlstream_reset_body_streams();
+ mutex_unlock(&kbdev->kctx_list_lock);
+ /* Static objects are placed into the summary packet that needs to be
+ * transmitted first. Flush all streams to make them available to
+ * user space. */
+ kbase_tlstream_flush_streams();
+}
+
+static void kbase_api_handshake(struct uku_version_check_args *version)
+{
+ switch (version->major) {
+#ifdef BASE_LEGACY_UK6_SUPPORT
+ case 6:
+ /* We are backwards compatible with version 6,
+ * so pretend to be the old version */
+ version->major = 6;
+ version->minor = 1;
+ break;
+#endif /* BASE_LEGACY_UK6_SUPPORT */
+#ifdef BASE_LEGACY_UK7_SUPPORT
+ case 7:
+ /* We are backwards compatible with version 7,
+ * so pretend to be the old version */
+ version->major = 7;
+ version->minor = 1;
+ break;
+#endif /* BASE_LEGACY_UK7_SUPPORT */
+#ifdef BASE_LEGACY_UK8_SUPPORT
+ case 8:
+ /* We are backwards compatible with version 8,
+ * so pretend to be the old version */
+ version->major = 8;
+ version->minor = 4;
+ break;
+#endif /* BASE_LEGACY_UK8_SUPPORT */
+#ifdef BASE_LEGACY_UK9_SUPPORT
+ case 9:
+ /* We are backwards compatible with version 9,
+ * so pretend to be the old version */
+ version->major = 9;
+ version->minor = 0;
+ break;
+#endif /* BASE_LEGACY_UK9_SUPPORT */
+ case BASE_UK_VERSION_MAJOR:
+ /* set minor to be the lowest common */
+ version->minor = min_t(int, BASE_UK_VERSION_MINOR,
+ (int)version->minor);
+ break;
+ default:
+ /* We return our actual version regardless if it
+ * matches the version returned by userspace -
+ * userspace can bail if it can't handle this
+ * version */
+ version->major = BASE_UK_VERSION_MAJOR;
+ version->minor = BASE_UK_VERSION_MINOR;
+ break;
+ }
+}
+
+/**
+ * enum mali_error - Mali error codes shared with userspace
+ *
+ * This is a subset of the common Mali errors that can be returned to userspace.
+ * Values of matching user and kernel space enumerators MUST be the same.
+ * MALI_ERROR_NONE is guaranteed to be 0.
+ */
+enum mali_error {
+ MALI_ERROR_NONE = 0,
+ MALI_ERROR_OUT_OF_GPU_MEMORY,
+ MALI_ERROR_OUT_OF_MEMORY,
+ MALI_ERROR_FUNCTION_FAILED,
+};
+
+enum {
+ inited_mem = (1u << 0),
+ inited_js = (1u << 1),
+ inited_pm_runtime_init = (1u << 2),
+#ifdef CONFIG_MALI_DEVFREQ
+ inited_devfreq = (1u << 3),
+#endif /* CONFIG_MALI_DEVFREQ */
+ inited_tlstream = (1u << 4),
+ inited_backend_early = (1u << 5),
+ inited_backend_late = (1u << 6),
+ inited_device = (1u << 7),
+ inited_vinstr = (1u << 8),
+#ifndef CONFIG_MALI_PRFCNT_SET_SECONDARY
+ inited_ipa = (1u << 9),
+#endif /* CONFIG_MALI_PRFCNT_SET_SECONDARY */
+ inited_job_fault = (1u << 10),
+ inited_misc_register = (1u << 11),
+ inited_get_device = (1u << 12),
+ inited_sysfs_group = (1u << 13),
+ inited_dev_list = (1u << 14),
+ inited_debugfs = (1u << 15),
+ inited_gpu_device = (1u << 16),
+ inited_registers_map = (1u << 17),
+ inited_io_history = (1u << 18),
+ inited_power_control = (1u << 19),
+ inited_buslogger = (1u << 20)
+};
+
+
+#ifdef CONFIG_MALI_DEBUG
+#define INACTIVE_WAIT_MS (5000)
+
+void kbase_set_driver_inactive(struct kbase_device *kbdev, bool inactive)
+{
+ kbdev->driver_inactive = inactive;
+ wake_up(&kbdev->driver_inactive_wait);
+
+ /* Wait for any running IOCTLs to complete */
+ if (inactive)
+ msleep(INACTIVE_WAIT_MS);
+}
+KBASE_EXPORT_TEST_API(kbase_set_driver_inactive);
+#endif /* CONFIG_MALI_DEBUG */
+
+static int kbase_dispatch(struct kbase_context *kctx, void * const args, u32 args_size)
+{
+ struct kbase_device *kbdev;
+ union uk_header *ukh = args;
+ u32 id;
+ int ret = 0;
+
+ KBASE_DEBUG_ASSERT(ukh != NULL);
+
+ kbdev = kctx->kbdev;
+ id = ukh->id;
+ ukh->ret = MALI_ERROR_NONE; /* Be optimistic */
+
+#ifdef CONFIG_MALI_DEBUG
+ wait_event(kbdev->driver_inactive_wait,
+ kbdev->driver_inactive == false);
+#endif /* CONFIG_MALI_DEBUG */
+
+ if (UKP_FUNC_ID_CHECK_VERSION == id) {
+ struct uku_version_check_args *version_check;
+
+ if (args_size != sizeof(struct uku_version_check_args)) {
+ ukh->ret = MALI_ERROR_FUNCTION_FAILED;
+ return 0;
+ }
+ version_check = (struct uku_version_check_args *)args;
+ kbase_api_handshake(version_check);
+ /* save the proposed version number for later use */
+ kctx->api_version = KBASE_API_VERSION(version_check->major,
+ version_check->minor);
+ ukh->ret = MALI_ERROR_NONE;
+ return 0;
+ }
+
+ /* block calls until version handshake */
+ if (kctx->api_version == 0)
+ return -EINVAL;
+
+ if (!atomic_read(&kctx->setup_complete)) {
+ struct kbase_uk_set_flags *kbase_set_flags;
+
+ /* setup pending, try to signal that we'll do the setup,
+ * if setup was already in progress, fail this call
+ */
+ if (atomic_cmpxchg(&kctx->setup_in_progress, 0, 1) != 0)
+ return -EINVAL;
+
+ /* if unexpected call, will stay stuck in setup mode
+ * (is it the only call we accept?)
+ */
+ if (id != KBASE_FUNC_SET_FLAGS)
+ return -EINVAL;
+
+ kbase_set_flags = (struct kbase_uk_set_flags *)args;
+
+ /* if not matching the expected call, stay in setup mode */
+ if (sizeof(*kbase_set_flags) != args_size)
+ goto bad_size;
+
+ /* if bad flags, will stay stuck in setup mode */
+ if (kbase_context_set_create_flags(kctx,
+ kbase_set_flags->create_flags) != 0)
+ ukh->ret = MALI_ERROR_FUNCTION_FAILED;
+
+ atomic_set(&kctx->setup_complete, 1);
+ return 0;
+ }
+
+ /* setup complete, perform normal operation */
+ switch (id) {
+ case KBASE_FUNC_MEM_JIT_INIT:
+ {
+ struct kbase_uk_mem_jit_init *jit_init = args;
+
+ if (sizeof(*jit_init) != args_size)
+ goto bad_size;
+
+ if (kbase_region_tracker_init_jit(kctx,
+ jit_init->va_pages))
+ ukh->ret = MALI_ERROR_FUNCTION_FAILED;
+ break;
+ }
+ case KBASE_FUNC_MEM_ALLOC:
+ {
+ struct kbase_uk_mem_alloc *mem = args;
+ struct kbase_va_region *reg;
+
+ if (sizeof(*mem) != args_size)
+ goto bad_size;
+
+#if defined(CONFIG_64BIT)
+ if (!kbase_ctx_flag(kctx, KCTX_COMPAT)) {
+ /* force SAME_VA if a 64-bit client */
+ mem->flags |= BASE_MEM_SAME_VA;
+ }
+#endif
+
+ reg = kbase_mem_alloc(kctx, mem->va_pages,
+ mem->commit_pages, mem->extent,
+ &mem->flags, &mem->gpu_va,
+ &mem->va_alignment);
+ if (!reg)
+ ukh->ret = MALI_ERROR_FUNCTION_FAILED;
+ break;
+ }
+ case KBASE_FUNC_MEM_IMPORT: {
+ struct kbase_uk_mem_import *mem_import = args;
+ void __user *phandle;
+
+ if (sizeof(*mem_import) != args_size)
+ goto bad_size;
+#ifdef CONFIG_COMPAT
+ if (kbase_ctx_flag(kctx, KCTX_COMPAT))
+ phandle = compat_ptr(mem_import->phandle.compat_value);
+ else
+#endif
+ phandle = mem_import->phandle.value;
+
+ if (mem_import->type == BASE_MEM_IMPORT_TYPE_INVALID) {
+ ukh->ret = MALI_ERROR_FUNCTION_FAILED;
+ break;
+ }
+
+ if (kbase_mem_import(kctx,
+ (enum base_mem_import_type)
+ mem_import->type,
+ phandle,
+ &mem_import->gpu_va,
+ &mem_import->va_pages,
+ &mem_import->flags)) {
+ mem_import->type = BASE_MEM_IMPORT_TYPE_INVALID;
+ ukh->ret = MALI_ERROR_FUNCTION_FAILED;
+ }
+ break;
+ }
+ case KBASE_FUNC_MEM_ALIAS: {
+ struct kbase_uk_mem_alias *alias = args;
+ struct base_mem_aliasing_info __user *user_ai;
+ struct base_mem_aliasing_info *ai;
+
+ if (sizeof(*alias) != args_size)
+ goto bad_size;
+
+ if (alias->nents > 2048) {
+ ukh->ret = MALI_ERROR_FUNCTION_FAILED;
+ break;
+ }
+ if (!alias->nents) {
+ ukh->ret = MALI_ERROR_FUNCTION_FAILED;
+ break;
+ }
+
+#ifdef CONFIG_COMPAT
+ if (kbase_ctx_flag(kctx, KCTX_COMPAT))
+ user_ai = compat_ptr(alias->ai.compat_value);
+ else
+#endif
+ user_ai = alias->ai.value;
+
+ ai = vmalloc(sizeof(*ai) * alias->nents);
+
+ if (!ai) {
+ ukh->ret = MALI_ERROR_OUT_OF_MEMORY;
+ break;
+ }
+
+ if (copy_from_user(ai, user_ai,
+ sizeof(*ai) * alias->nents)) {
+ ukh->ret = MALI_ERROR_FUNCTION_FAILED;
+ goto copy_failed;
+ }
+
+ alias->gpu_va = kbase_mem_alias(kctx, &alias->flags,
+ alias->stride,
+ alias->nents, ai,
+ &alias->va_pages);
+ if (!alias->gpu_va) {
+ ukh->ret = MALI_ERROR_FUNCTION_FAILED;
+ goto no_alias;
+ }
+no_alias:
+copy_failed:
+ vfree(ai);
+ break;
+ }
+ case KBASE_FUNC_MEM_COMMIT:
+ {
+ struct kbase_uk_mem_commit *commit = args;
+
+ if (sizeof(*commit) != args_size)
+ goto bad_size;
+
+ if (commit->gpu_addr & ~PAGE_MASK) {
+ dev_warn(kbdev->dev, "kbase_dispatch case KBASE_FUNC_MEM_COMMIT: commit->gpu_addr: passed parameter is invalid");
+ ukh->ret = MALI_ERROR_FUNCTION_FAILED;
+ break;
+ }
+
+ if (kbase_mem_commit(kctx, commit->gpu_addr,
+ commit->pages,
+ (base_backing_threshold_status *)
+ &commit->result_subcode) != 0)
+ ukh->ret = MALI_ERROR_FUNCTION_FAILED;
+
+ break;
+ }
+
+ case KBASE_FUNC_MEM_QUERY:
+ {
+ struct kbase_uk_mem_query *query = args;
+
+ if (sizeof(*query) != args_size)
+ goto bad_size;
+
+ if (query->gpu_addr & ~PAGE_MASK) {
+ dev_warn(kbdev->dev, "kbase_dispatch case KBASE_FUNC_MEM_QUERY: query->gpu_addr: passed parameter is invalid");
+ ukh->ret = MALI_ERROR_FUNCTION_FAILED;
+ break;
+ }
+ if (query->query != KBASE_MEM_QUERY_COMMIT_SIZE &&
+ query->query != KBASE_MEM_QUERY_VA_SIZE &&
+ query->query != KBASE_MEM_QUERY_FLAGS) {
+ dev_warn(kbdev->dev, "kbase_dispatch case KBASE_FUNC_MEM_QUERY: query->query = %lld unknown", (unsigned long long)query->query);
+ ukh->ret = MALI_ERROR_FUNCTION_FAILED;
+ break;
+ }
+
+ if (kbase_mem_query(kctx, query->gpu_addr,
+ query->query, &query->value) != 0)
+ ukh->ret = MALI_ERROR_FUNCTION_FAILED;
+ else
+ ukh->ret = MALI_ERROR_NONE;
+ break;
+ }
+
+ case KBASE_FUNC_MEM_FLAGS_CHANGE:
+ {
+ struct kbase_uk_mem_flags_change *fc = args;
+
+ if (sizeof(*fc) != args_size)
+ goto bad_size;
+
+ if ((fc->gpu_va & ~PAGE_MASK) && (fc->gpu_va >= PAGE_SIZE)) {
+ dev_warn(kbdev->dev, "kbase_dispatch case KBASE_FUNC_MEM_FLAGS_CHANGE: mem->gpu_va: passed parameter is invalid");
+ ukh->ret = MALI_ERROR_FUNCTION_FAILED;
+ break;
+ }
+
+ if (kbase_mem_flags_change(kctx, fc->gpu_va,
+ fc->flags, fc->mask) != 0)
+ ukh->ret = MALI_ERROR_FUNCTION_FAILED;
+
+ break;
+ }
+ case KBASE_FUNC_MEM_FREE:
+ {
+ struct kbase_uk_mem_free *mem = args;
+
+ if (sizeof(*mem) != args_size)
+ goto bad_size;
+
+ if ((mem->gpu_addr & ~PAGE_MASK) && (mem->gpu_addr >= PAGE_SIZE)) {
+ dev_warn(kbdev->dev, "kbase_dispatch case KBASE_FUNC_MEM_FREE: mem->gpu_addr: passed parameter is invalid");
+ ukh->ret = MALI_ERROR_FUNCTION_FAILED;
+ break;
+ }
+
+ if (kbase_mem_free(kctx, mem->gpu_addr) != 0)
+ ukh->ret = MALI_ERROR_FUNCTION_FAILED;
+ break;
+ }
+
+ case KBASE_FUNC_JOB_SUBMIT:
+ {
+ struct kbase_uk_job_submit *job = args;
+
+ if (sizeof(*job) != args_size)
+ goto bad_size;
+
+#ifdef BASE_LEGACY_UK6_SUPPORT
+ if (kbase_jd_submit(kctx, job, 0) != 0)
+#else
+ if (kbase_jd_submit(kctx, job) != 0)
+#endif /* BASE_LEGACY_UK6_SUPPORT */
+ ukh->ret = MALI_ERROR_FUNCTION_FAILED;
+ break;
+ }
+
+#ifdef BASE_LEGACY_UK6_SUPPORT
+ case KBASE_FUNC_JOB_SUBMIT_UK6:
+ {
+ struct kbase_uk_job_submit *job = args;
+
+ if (sizeof(*job) != args_size)
+ goto bad_size;
+
+ if (kbase_jd_submit(kctx, job, 1) != 0)
+ ukh->ret = MALI_ERROR_FUNCTION_FAILED;
+ break;
+ }
+#endif
+
+ case KBASE_FUNC_SYNC:
+ {
+ struct kbase_uk_sync_now *sn = args;
+
+ if (sizeof(*sn) != args_size)
+ goto bad_size;
+
+ if (sn->sset.basep_sset.mem_handle.basep.handle & ~PAGE_MASK) {
+ dev_warn(kbdev->dev, "kbase_dispatch case KBASE_FUNC_SYNC: sn->sset.basep_sset.mem_handle: passed parameter is invalid");
+ ukh->ret = MALI_ERROR_FUNCTION_FAILED;
+ break;
+ }
+
+#ifndef CONFIG_MALI_COH_USER
+ if (kbase_sync_now(kctx, &sn->sset) != 0)
+ ukh->ret = MALI_ERROR_FUNCTION_FAILED;
+#endif
+ break;
+ }
+
+ case KBASE_FUNC_DISJOINT_QUERY:
+ {
+ struct kbase_uk_disjoint_query *dquery = args;
+
+ if (sizeof(*dquery) != args_size)
+ goto bad_size;
+
+ /* Get the disjointness counter value. */
+ dquery->counter = kbase_disjoint_event_get(kctx->kbdev);
+ break;
+ }
+
+ case KBASE_FUNC_POST_TERM:
+ {
+ kbase_event_close(kctx);
+ break;
+ }
+
+ case KBASE_FUNC_HWCNT_SETUP:
+ {
+ struct kbase_uk_hwcnt_setup *setup = args;
+
+ if (sizeof(*setup) != args_size)
+ goto bad_size;
+
+ mutex_lock(&kctx->vinstr_cli_lock);
+ if (kbase_vinstr_legacy_hwc_setup(kbdev->vinstr_ctx,
+ &kctx->vinstr_cli, setup) != 0)
+ ukh->ret = MALI_ERROR_FUNCTION_FAILED;
+ mutex_unlock(&kctx->vinstr_cli_lock);
+ break;
+ }
+
+ case KBASE_FUNC_HWCNT_DUMP:
+ {
+ /* args ignored */
+ mutex_lock(&kctx->vinstr_cli_lock);
+ if (kbase_vinstr_hwc_dump(kctx->vinstr_cli,
+ BASE_HWCNT_READER_EVENT_MANUAL) != 0)
+ ukh->ret = MALI_ERROR_FUNCTION_FAILED;
+ mutex_unlock(&kctx->vinstr_cli_lock);
+ break;
+ }
+
+ case KBASE_FUNC_HWCNT_CLEAR:
+ {
+ /* args ignored */
+ mutex_lock(&kctx->vinstr_cli_lock);
+ if (kbase_vinstr_hwc_clear(kctx->vinstr_cli) != 0)
+ ukh->ret = MALI_ERROR_FUNCTION_FAILED;
+ mutex_unlock(&kctx->vinstr_cli_lock);
+ break;
+ }
+
+ case KBASE_FUNC_HWCNT_READER_SETUP:
+ {
+ struct kbase_uk_hwcnt_reader_setup *setup = args;
+
+ if (sizeof(*setup) != args_size)
+ goto bad_size;
+
+ mutex_lock(&kctx->vinstr_cli_lock);
+ if (kbase_vinstr_hwcnt_reader_setup(kbdev->vinstr_ctx,
+ setup) != 0)
+ ukh->ret = MALI_ERROR_FUNCTION_FAILED;
+ mutex_unlock(&kctx->vinstr_cli_lock);
+ break;
+ }
+
+ case KBASE_FUNC_GPU_PROPS_REG_DUMP:
+ {
+ struct kbase_uk_gpuprops *setup = args;
+
+ if (sizeof(*setup) != args_size)
+ goto bad_size;
+
+ if (kbase_gpuprops_uk_get_props(kctx, setup) != 0)
+ ukh->ret = MALI_ERROR_FUNCTION_FAILED;
+ break;
+ }
+ case KBASE_FUNC_FIND_CPU_OFFSET:
+ {
+ struct kbase_uk_find_cpu_offset *find = args;
+
+ if (sizeof(*find) != args_size)
+ goto bad_size;
+
+ if (find->gpu_addr & ~PAGE_MASK) {
+ dev_warn(kbdev->dev, "kbase_dispatch case KBASE_FUNC_FIND_CPU_OFFSET: find->gpu_addr: passed parameter is invalid");
+ goto out_bad;
+ }
+
+ if (find->size > SIZE_MAX || find->cpu_addr > ULONG_MAX) {
+ ukh->ret = MALI_ERROR_FUNCTION_FAILED;
+ } else {
+ int err;
+
+ err = kbasep_find_enclosing_cpu_mapping_offset(
+ kctx,
+ find->gpu_addr,
+ (uintptr_t) find->cpu_addr,
+ (size_t) find->size,
+ &find->offset);
+
+ if (err)
+ ukh->ret = MALI_ERROR_FUNCTION_FAILED;
+ }
+ break;
+ }
+ case KBASE_FUNC_GET_VERSION:
+ {
+ struct kbase_uk_get_ddk_version *get_version = (struct kbase_uk_get_ddk_version *)args;
+
+ if (sizeof(*get_version) != args_size)
+ goto bad_size;
+
+ /* version buffer size check is made in compile time assert */
+ memcpy(get_version->version_buffer, KERNEL_SIDE_DDK_VERSION_STRING, sizeof(KERNEL_SIDE_DDK_VERSION_STRING));
+ get_version->version_string_size = sizeof(KERNEL_SIDE_DDK_VERSION_STRING);
+ break;
+ }
+
+ case KBASE_FUNC_STREAM_CREATE:
+ {
+#ifdef CONFIG_SYNC
+ struct kbase_uk_stream_create *screate = (struct kbase_uk_stream_create *)args;
+
+ if (sizeof(*screate) != args_size)
+ goto bad_size;
+
+ if (strnlen(screate->name, sizeof(screate->name)) >= sizeof(screate->name)) {
+ /* not NULL terminated */
+ ukh->ret = MALI_ERROR_FUNCTION_FAILED;
+ break;
+ }
+
+ if (kbase_stream_create(screate->name, &screate->fd) != 0)
+ ukh->ret = MALI_ERROR_FUNCTION_FAILED;
+ else
+ ukh->ret = MALI_ERROR_NONE;
+#else /* CONFIG_SYNC */
+ ukh->ret = MALI_ERROR_FUNCTION_FAILED;
+#endif /* CONFIG_SYNC */
+ break;
+ }
+ case KBASE_FUNC_FENCE_VALIDATE:
+ {
+#ifdef CONFIG_SYNC
+ struct kbase_uk_fence_validate *fence_validate = (struct kbase_uk_fence_validate *)args;
+
+ if (sizeof(*fence_validate) != args_size)
+ goto bad_size;
+
+ if (kbase_fence_validate(fence_validate->fd) != 0)
+ ukh->ret = MALI_ERROR_FUNCTION_FAILED;
+ else
+ ukh->ret = MALI_ERROR_NONE;
+#endif /* CONFIG_SYNC */
+ break;
+ }
+
+ case KBASE_FUNC_SET_TEST_DATA:
+ {
+#if MALI_UNIT_TEST
+ struct kbase_uk_set_test_data *set_data = args;
+
+ shared_kernel_test_data = set_data->test_data;
+ shared_kernel_test_data.kctx.value = (void __user *)kctx;
+ shared_kernel_test_data.mm.value = (void __user *)current->mm;
+ ukh->ret = MALI_ERROR_NONE;
+#endif /* MALI_UNIT_TEST */
+ break;
+ }
+
+ case KBASE_FUNC_INJECT_ERROR:
+ {
+#ifdef CONFIG_MALI_ERROR_INJECT
+ unsigned long flags;
+ struct kbase_error_params params = ((struct kbase_uk_error_params *)args)->params;
+
+ /*mutex lock */
+ spin_lock_irqsave(&kbdev->reg_op_lock, flags);
+ if (job_atom_inject_error(&params) != 0)
+ ukh->ret = MALI_ERROR_OUT_OF_MEMORY;
+ else
+ ukh->ret = MALI_ERROR_NONE;
+ spin_unlock_irqrestore(&kbdev->reg_op_lock, flags);
+ /*mutex unlock */
+#endif /* CONFIG_MALI_ERROR_INJECT */
+ break;
+ }
+
+ case KBASE_FUNC_MODEL_CONTROL:
+ {
+#ifdef CONFIG_MALI_NO_MALI
+ unsigned long flags;
+ struct kbase_model_control_params params =
+ ((struct kbase_uk_model_control_params *)args)->params;
+
+ /*mutex lock */
+ spin_lock_irqsave(&kbdev->reg_op_lock, flags);
+ if (gpu_model_control(kbdev->model, &params) != 0)
+ ukh->ret = MALI_ERROR_FUNCTION_FAILED;
+ else
+ ukh->ret = MALI_ERROR_NONE;
+ spin_unlock_irqrestore(&kbdev->reg_op_lock, flags);
+ /*mutex unlock */
+#endif /* CONFIG_MALI_NO_MALI */
+ break;
+ }
+
+#ifdef BASE_LEGACY_UK8_SUPPORT
+ case KBASE_FUNC_KEEP_GPU_POWERED:
+ {
+ dev_warn(kbdev->dev, "kbase_dispatch case KBASE_FUNC_KEEP_GPU_POWERED: function is deprecated and disabled\n");
+ ukh->ret = MALI_ERROR_FUNCTION_FAILED;
+ break;
+ }
+#endif /* BASE_LEGACY_UK8_SUPPORT */
+
+ case KBASE_FUNC_GET_PROFILING_CONTROLS:
+ {
+ struct kbase_uk_profiling_controls *controls =
+ (struct kbase_uk_profiling_controls *)args;
+ u32 i;
+
+ if (sizeof(*controls) != args_size)
+ goto bad_size;
+
+ for (i = FBDUMP_CONTROL_MIN; i < FBDUMP_CONTROL_MAX; i++)
+ controls->profiling_controls[i] = kbase_get_profiling_control(kbdev, i);
+
+ break;
+ }
+
+ /* used only for testing purposes; these controls are to be set by gator through gator API */
+ case KBASE_FUNC_SET_PROFILING_CONTROLS:
+ {
+ struct kbase_uk_profiling_controls *controls =
+ (struct kbase_uk_profiling_controls *)args;
+ u32 i;
+
+ if (sizeof(*controls) != args_size)
+ goto bad_size;
+
+ for (i = FBDUMP_CONTROL_MIN; i < FBDUMP_CONTROL_MAX; i++)
+ _mali_profiling_control(i, controls->profiling_controls[i]);
+
+ break;
+ }
+
+ case KBASE_FUNC_DEBUGFS_MEM_PROFILE_ADD:
+ {
+ struct kbase_uk_debugfs_mem_profile_add *add_data =
+ (struct kbase_uk_debugfs_mem_profile_add *)args;
+ char *buf;
+ char __user *user_buf;
+
+ if (sizeof(*add_data) != args_size)
+ goto bad_size;
+
+ if (add_data->len > KBASE_MEM_PROFILE_MAX_BUF_SIZE) {
+ dev_err(kbdev->dev, "buffer too big\n");
+ goto out_bad;
+ }
+
+#ifdef CONFIG_COMPAT
+ if (kbase_ctx_flag(kctx, KCTX_COMPAT))
+ user_buf = compat_ptr(add_data->buf.compat_value);
+ else
+#endif
+ user_buf = add_data->buf.value;
+
+ buf = kmalloc(add_data->len, GFP_KERNEL);
+ if (ZERO_OR_NULL_PTR(buf))
+ goto out_bad;
+
+ if (0 != copy_from_user(buf, user_buf, add_data->len)) {
+ ukh->ret = MALI_ERROR_FUNCTION_FAILED;
+ kfree(buf);
+ goto out_bad;
+ }
+
+ if (kbasep_mem_profile_debugfs_insert(kctx, buf,
+ add_data->len)) {
+ ukh->ret = MALI_ERROR_FUNCTION_FAILED;
+ kfree(buf);
+ goto out_bad;
+ }
+
+ break;
+ }
+
+#ifdef CONFIG_MALI_NO_MALI
+ case KBASE_FUNC_SET_PRFCNT_VALUES:
+ {
+
+ struct kbase_uk_prfcnt_values *params =
+ ((struct kbase_uk_prfcnt_values *)args);
+ gpu_model_set_dummy_prfcnt_sample(params->data,
+ params->size);
+
+ break;
+ }
+#endif /* CONFIG_MALI_NO_MALI */
+#ifdef BASE_LEGACY_UK10_4_SUPPORT
+ case KBASE_FUNC_TLSTREAM_ACQUIRE_V10_4:
+ {
+ struct kbase_uk_tlstream_acquire_v10_4 *tlstream_acquire
+ = args;
+
+ if (sizeof(*tlstream_acquire) != args_size)
+ goto bad_size;
+
+ if (0 != kbase_tlstream_acquire(
+ kctx,
+ &tlstream_acquire->fd, 0)) {
+ ukh->ret = MALI_ERROR_FUNCTION_FAILED;
+ } else if (0 <= tlstream_acquire->fd) {
+ /* Summary stream was cleared during acquire.
+ * Create static timeline objects that will be
+ * read by client. */
+ kbase_create_timeline_objects(kctx);
+ }
+ break;
+ }
+#endif /* BASE_LEGACY_UK10_4_SUPPORT */
+ case KBASE_FUNC_TLSTREAM_ACQUIRE:
+ {
+ struct kbase_uk_tlstream_acquire *tlstream_acquire =
+ args;
+
+ if (sizeof(*tlstream_acquire) != args_size)
+ goto bad_size;
+
+ if (tlstream_acquire->flags & ~BASE_TLSTREAM_FLAGS_MASK)
+ goto out_bad;
+
+ if (0 != kbase_tlstream_acquire(
+ kctx,
+ &tlstream_acquire->fd,
+ tlstream_acquire->flags)) {
+ ukh->ret = MALI_ERROR_FUNCTION_FAILED;
+ } else if (0 <= tlstream_acquire->fd) {
+ /* Summary stream was cleared during acquire.
+ * Create static timeline objects that will be
+ * read by client. */
+ kbase_create_timeline_objects(kctx);
+ }
+ break;
+ }
+ case KBASE_FUNC_TLSTREAM_FLUSH:
+ {
+ struct kbase_uk_tlstream_flush *tlstream_flush =
+ args;
+
+ if (sizeof(*tlstream_flush) != args_size)
+ goto bad_size;
+
+ kbase_tlstream_flush_streams();
+ break;
+ }
+#if MALI_UNIT_TEST
+ case KBASE_FUNC_TLSTREAM_TEST:
+ {
+ struct kbase_uk_tlstream_test *tlstream_test = args;
+
+ if (sizeof(*tlstream_test) != args_size)
+ goto bad_size;
+
+ kbase_tlstream_test(
+ tlstream_test->tpw_count,
+ tlstream_test->msg_delay,
+ tlstream_test->msg_count,
+ tlstream_test->aux_msg);
+ break;
+ }
+ case KBASE_FUNC_TLSTREAM_STATS:
+ {
+ struct kbase_uk_tlstream_stats *tlstream_stats = args;
+
+ if (sizeof(*tlstream_stats) != args_size)
+ goto bad_size;
+
+ kbase_tlstream_stats(
+ &tlstream_stats->bytes_collected,
+ &tlstream_stats->bytes_generated);
+ break;
+ }
+#endif /* MALI_UNIT_TEST */
+
+ case KBASE_FUNC_GET_CONTEXT_ID:
+ {
+ struct kbase_uk_context_id *info = args;
+
+ info->id = kctx->id;
+ break;
+ }
+
+ case KBASE_FUNC_SOFT_EVENT_UPDATE:
+ {
+ struct kbase_uk_soft_event_update *update = args;
+
+ if (sizeof(*update) != args_size)
+ goto bad_size;
+
+ if (((update->new_status != BASE_JD_SOFT_EVENT_SET) &&
+ (update->new_status != BASE_JD_SOFT_EVENT_RESET)) ||
+ (update->flags != 0))
+ goto out_bad;
+
+ if (kbase_soft_event_update(kctx, update->evt,
+ update->new_status))
+ ukh->ret = MALI_ERROR_FUNCTION_FAILED;
+
+ break;
+ }
+
+ default:
+ dev_err(kbdev->dev, "unknown ioctl %u\n", id);
+ goto out_bad;
+ }
+
+ return ret;
+
+ bad_size:
+ dev_err(kbdev->dev, "Wrong syscall size (%d) for %08x\n", args_size, id);
+ out_bad:
+ return -EINVAL;
+}
+
+static struct kbase_device *to_kbase_device(struct device *dev)
+{
+ return dev_get_drvdata(dev);
+}
+
+static int assign_irqs(struct platform_device *pdev)
+{
+ struct kbase_device *kbdev = to_kbase_device(&pdev->dev);
+ int i;
+
+ if (!kbdev)
+ return -ENODEV;
+
+ /* 3 IRQ resources */
+ for (i = 0; i < 3; i++) {
+ struct resource *irq_res;
+ int irqtag;
+
+ irq_res = platform_get_resource(pdev, IORESOURCE_IRQ, i);
+ if (!irq_res) {
+ dev_err(kbdev->dev, "No IRQ resource at index %d\n", i);
+ return -ENOENT;
+ }
+
+#ifdef CONFIG_OF
+ if (!strcmp(irq_res->name, "JOB")) {
+ irqtag = JOB_IRQ_TAG;
+ } else if (!strcmp(irq_res->name, "MMU")) {
+ irqtag = MMU_IRQ_TAG;
+ } else if (!strcmp(irq_res->name, "GPU")) {
+ irqtag = GPU_IRQ_TAG;
+ } else {
+ dev_err(&pdev->dev, "Invalid irq res name: '%s'\n",
+ irq_res->name);
+ return -EINVAL;
+ }
+#else
+ irqtag = i;
+#endif /* CONFIG_OF */
+ kbdev->irqs[irqtag].irq = irq_res->start;
+ kbdev->irqs[irqtag].flags = irq_res->flags & IRQF_TRIGGER_MASK;
+ }
+
+ return 0;
+}
+
+/*
+ * API to acquire device list mutex and
+ * return pointer to the device list head
+ */
+const struct list_head *kbase_dev_list_get(void)
+{
+ mutex_lock(&kbase_dev_list_lock);
+ return &kbase_dev_list;
+}
+KBASE_EXPORT_TEST_API(kbase_dev_list_get);
+
+/* API to release the device list mutex */
+void kbase_dev_list_put(const struct list_head *dev_list)
+{
+ mutex_unlock(&kbase_dev_list_lock);
+}
+KBASE_EXPORT_TEST_API(kbase_dev_list_put);
+
+/* Find a particular kbase device (as specified by minor number), or find the "first" device if -1 is specified */
+struct kbase_device *kbase_find_device(int minor)
+{
+ struct kbase_device *kbdev = NULL;
+ struct list_head *entry;
+ const struct list_head *dev_list = kbase_dev_list_get();
+
+ list_for_each(entry, dev_list) {
+ struct kbase_device *tmp;
+
+ tmp = list_entry(entry, struct kbase_device, entry);
+ if (tmp->mdev.minor == minor || minor == -1) {
+ kbdev = tmp;
+ get_device(kbdev->dev);
+ break;
+ }
+ }
+ kbase_dev_list_put(dev_list);
+
+ return kbdev;
+}
+EXPORT_SYMBOL(kbase_find_device);
+
+void kbase_release_device(struct kbase_device *kbdev)
+{
+ put_device(kbdev->dev);
+}
+EXPORT_SYMBOL(kbase_release_device);
+
+#if KERNEL_VERSION(4, 6, 0) > LINUX_VERSION_CODE
+/*
+ * Kernel versions before v4.6 don't have
+ * kstrtobool_from_user().
+ */
+static int kstrtobool_from_user(const char __user *s, size_t count, bool *res)
+{
+ char buf[32];
+
+ count = min(sizeof(buf), count);
+
+ if (copy_from_user(buf, s, count))
+ return -EFAULT;
+ buf[count] = '\0';
+
+ return strtobool(buf, res);
+}
+#endif
+
+static ssize_t write_ctx_infinite_cache(struct file *f, const char __user *ubuf, size_t size, loff_t *off)
+{
+ struct kbase_context *kctx = f->private_data;
+ int err;
+ bool value;
+
+ err = kstrtobool_from_user(ubuf, size, &value);
+ if (err)
+ return err;
+
+ if (value)
+ kbase_ctx_flag_set(kctx, KCTX_INFINITE_CACHE);
+ else
+ kbase_ctx_flag_clear(kctx, KCTX_INFINITE_CACHE);
+
+ return size;
+}
+
+static ssize_t read_ctx_infinite_cache(struct file *f, char __user *ubuf, size_t size, loff_t *off)
+{
+ struct kbase_context *kctx = f->private_data;
+ char buf[32];
+ int count;
+ bool value;
+
+ value = kbase_ctx_flag(kctx, KCTX_INFINITE_CACHE);
+
+ count = scnprintf(buf, sizeof(buf), "%s\n", value ? "Y" : "N");
+
+ return simple_read_from_buffer(ubuf, size, off, buf, count);
+}
+
+static const struct file_operations kbase_infinite_cache_fops = {
+ .open = simple_open,
+ .write = write_ctx_infinite_cache,
+ .read = read_ctx_infinite_cache,
+};
+
+static int kbase_open(struct inode *inode, struct file *filp)
+{
+ struct kbase_device *kbdev = NULL;
+ struct kbase_context *kctx;
+ int ret = 0;
+#ifdef CONFIG_DEBUG_FS
+ char kctx_name[64];
+#endif
+
+ kbdev = kbase_find_device(iminor(inode));
+
+ if (!kbdev)
+ return -ENODEV;
+
+ kctx = kbase_create_context(kbdev, is_compat_task());
+ if (!kctx) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ init_waitqueue_head(&kctx->event_queue);
+ filp->private_data = kctx;
+ kctx->filp = filp;
+
+ if (kbdev->infinite_cache_active_default)
+ kbase_ctx_flag_set(kctx, KCTX_INFINITE_CACHE);
+
+#ifdef CONFIG_DEBUG_FS
+ snprintf(kctx_name, 64, "%d_%d", kctx->tgid, kctx->id);
+
+ kctx->kctx_dentry = debugfs_create_dir(kctx_name,
+ kbdev->debugfs_ctx_directory);
+
+ if (IS_ERR_OR_NULL(kctx->kctx_dentry)) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+#ifdef CONFIG_MALI_COH_USER
+ /* if cache is completely coherent at hardware level, then remove the
+ * infinite cache control support from debugfs.
+ */
+#else
+ debugfs_create_file("infinite_cache", 0644, kctx->kctx_dentry,
+ kctx, &kbase_infinite_cache_fops);
+#endif /* CONFIG_MALI_COH_USER */
+
+ mutex_init(&kctx->mem_profile_lock);
+
+ kbasep_jd_debugfs_ctx_init(kctx);
+ kbase_debug_mem_view_init(filp);
+
+ kbase_debug_job_fault_context_init(kctx);
+
+ kbase_mem_pool_debugfs_init(kctx->kctx_dentry, &kctx->mem_pool);
+
+ kbase_jit_debugfs_init(kctx);
+#endif /* CONFIG_DEBUG_FS */
+
+ dev_dbg(kbdev->dev, "created base context\n");
+
+ {
+ struct kbasep_kctx_list_element *element;
+
+ element = kzalloc(sizeof(*element), GFP_KERNEL);
+ if (element) {
+ mutex_lock(&kbdev->kctx_list_lock);
+ element->kctx = kctx;
+ list_add(&element->link, &kbdev->kctx_list);
+ kbase_tlstream_tl_new_ctx(
+ element->kctx,
+ (u32)(element->kctx->id),
+ (u32)(element->kctx->tgid));
+ mutex_unlock(&kbdev->kctx_list_lock);
+ } else {
+ /* we don't treat this as a fail - just warn about it */
+ dev_warn(kbdev->dev, "couldn't add kctx to kctx_list\n");
+ }
+ }
+ return 0;
+
+ out:
+ kbase_release_device(kbdev);
+ return ret;
+}
+
+static int kbase_release(struct inode *inode, struct file *filp)
+{
+ struct kbase_context *kctx = filp->private_data;
+ struct kbase_device *kbdev = kctx->kbdev;
+ struct kbasep_kctx_list_element *element, *tmp;
+ bool found_element = false;
+
+ kbase_tlstream_tl_del_ctx(kctx);
+
+#ifdef CONFIG_DEBUG_FS
+ debugfs_remove_recursive(kctx->kctx_dentry);
+ kbasep_mem_profile_debugfs_remove(kctx);
+ kbase_debug_job_fault_context_term(kctx);
+#endif
+
+ mutex_lock(&kbdev->kctx_list_lock);
+ list_for_each_entry_safe(element, tmp, &kbdev->kctx_list, link) {
+ if (element->kctx == kctx) {
+ list_del(&element->link);
+ kfree(element);
+ found_element = true;
+ }
+ }
+ mutex_unlock(&kbdev->kctx_list_lock);
+ if (!found_element)
+ dev_warn(kbdev->dev, "kctx not in kctx_list\n");
+
+ filp->private_data = NULL;
+
+ mutex_lock(&kctx->vinstr_cli_lock);
+ /* If this client was performing hwcnt dumping and did not explicitly
+ * detach itself, remove it from the vinstr core now */
+ if (kctx->vinstr_cli) {
+ struct kbase_uk_hwcnt_setup setup;
+
+ setup.dump_buffer = 0llu;
+ kbase_vinstr_legacy_hwc_setup(
+ kbdev->vinstr_ctx, &kctx->vinstr_cli, &setup);
+ }
+ mutex_unlock(&kctx->vinstr_cli_lock);
+
+ kbase_destroy_context(kctx);
+
+ dev_dbg(kbdev->dev, "deleted base context\n");
+ kbase_release_device(kbdev);
+ return 0;
+}
+
+#define CALL_MAX_SIZE 536
+
+static long kbase_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
+{
+ u64 msg[(CALL_MAX_SIZE + 7) >> 3] = { 0xdeadbeefdeadbeefull }; /* alignment fixup */
+ u32 size = _IOC_SIZE(cmd);
+ struct kbase_context *kctx = filp->private_data;
+
+ if (size > CALL_MAX_SIZE)
+ return -ENOTTY;
+
+ if (0 != copy_from_user(&msg, (void __user *)arg, size)) {
+ dev_err(kctx->kbdev->dev, "failed to copy ioctl argument into kernel space\n");
+ return -EFAULT;
+ }
+
+ if (kbase_dispatch(kctx, &msg, size) != 0)
+ return -EFAULT;
+
+ if (0 != copy_to_user((void __user *)arg, &msg, size)) {
+ dev_err(kctx->kbdev->dev, "failed to copy results of UK call back to user space\n");
+ return -EFAULT;
+ }
+ return 0;
+}
+
+static ssize_t kbase_read(struct file *filp, char __user *buf, size_t count, loff_t *f_pos)
+{
+ struct kbase_context *kctx = filp->private_data;
+ struct base_jd_event_v2 uevent;
+ int out_count = 0;
+
+ if (count < sizeof(uevent))
+ return -ENOBUFS;
+
+ do {
+ while (kbase_event_dequeue(kctx, &uevent)) {
+ if (out_count > 0)
+ goto out;
+
+ if (filp->f_flags & O_NONBLOCK)
+ return -EAGAIN;
+
+ if (wait_event_interruptible(kctx->event_queue,
+ kbase_event_pending(kctx)) != 0)
+ return -ERESTARTSYS;
+ }
+ if (uevent.event_code == BASE_JD_EVENT_DRV_TERMINATED) {
+ if (out_count == 0)
+ return -EPIPE;
+ goto out;
+ }
+
+ if (copy_to_user(buf, &uevent, sizeof(uevent)) != 0)
+ return -EFAULT;
+
+ buf += sizeof(uevent);
+ out_count++;
+ count -= sizeof(uevent);
+ } while (count >= sizeof(uevent));
+
+ out:
+ return out_count * sizeof(uevent);
+}
+
+static unsigned int kbase_poll(struct file *filp, poll_table *wait)
+{
+ struct kbase_context *kctx = filp->private_data;
+
+ poll_wait(filp, &kctx->event_queue, wait);
+ if (kbase_event_pending(kctx))
+ return POLLIN | POLLRDNORM;
+
+ return 0;
+}
+
+void kbase_event_wakeup(struct kbase_context *kctx)
+{
+ KBASE_DEBUG_ASSERT(kctx);
+
+ wake_up_interruptible(&kctx->event_queue);
+}
+
+KBASE_EXPORT_TEST_API(kbase_event_wakeup);
+
+static int kbase_check_flags(int flags)
+{
+ /* Enforce that the driver keeps the O_CLOEXEC flag so that execve() always
+ * closes the file descriptor in a child process.
+ */
+ if (0 == (flags & O_CLOEXEC))
+ return -EINVAL;
+
+ return 0;
+}
+
+#ifdef CONFIG_64BIT
+/* The following function is taken from the kernel and just
+ * renamed. As it's not exported to modules we must copy-paste it here.
+ */
+
+static unsigned long kbase_unmapped_area_topdown(struct vm_unmapped_area_info
+ *info)
+{
+ struct mm_struct *mm = current->mm;
+ struct vm_area_struct *vma;
+ unsigned long length, low_limit, high_limit, gap_start, gap_end;
+
+ /* Adjust search length to account for worst case alignment overhead */
+ length = info->length + info->align_mask;
+ if (length < info->length)
+ return -ENOMEM;
+
+ /*
+ * Adjust search limits by the desired length.
+ * See implementation comment at top of unmapped_area().
+ */
+ gap_end = info->high_limit;
+ if (gap_end < length)
+ return -ENOMEM;
+ high_limit = gap_end - length;
+
+ if (info->low_limit > high_limit)
+ return -ENOMEM;
+ low_limit = info->low_limit + length;
+
+ /* Check highest gap, which does not precede any rbtree node */
+ gap_start = mm->highest_vm_end;
+ if (gap_start <= high_limit)
+ goto found_highest;
+
+ /* Check if rbtree root looks promising */
+ if (RB_EMPTY_ROOT(&mm->mm_rb))
+ return -ENOMEM;
+ vma = rb_entry(mm->mm_rb.rb_node, struct vm_area_struct, vm_rb);
+ if (vma->rb_subtree_gap < length)
+ return -ENOMEM;
+
+ while (true) {
+ /* Visit right subtree if it looks promising */
+ gap_start = vma->vm_prev ? vma->vm_prev->vm_end : 0;
+ if (gap_start <= high_limit && vma->vm_rb.rb_right) {
+ struct vm_area_struct *right =
+ rb_entry(vma->vm_rb.rb_right,
+ struct vm_area_struct, vm_rb);
+ if (right->rb_subtree_gap >= length) {
+ vma = right;
+ continue;
+ }
+ }
+
+check_current:
+ /* Check if current node has a suitable gap */
+ gap_end = vma->vm_start;
+ if (gap_end < low_limit)
+ return -ENOMEM;
+ if (gap_start <= high_limit && gap_end - gap_start >= length)
+ goto found;
+
+ /* Visit left subtree if it looks promising */
+ if (vma->vm_rb.rb_left) {
+ struct vm_area_struct *left =
+ rb_entry(vma->vm_rb.rb_left,
+ struct vm_area_struct, vm_rb);
+ if (left->rb_subtree_gap >= length) {
+ vma = left;
+ continue;
+ }
+ }
+
+ /* Go back up the rbtree to find next candidate node */
+ while (true) {
+ struct rb_node *prev = &vma->vm_rb;
+ if (!rb_parent(prev))
+ return -ENOMEM;
+ vma = rb_entry(rb_parent(prev),
+ struct vm_area_struct, vm_rb);
+ if (prev == vma->vm_rb.rb_right) {
+ gap_start = vma->vm_prev ?
+ vma->vm_prev->vm_end : 0;
+ goto check_current;
+ }
+ }
+ }
+
+found:
+ /* We found a suitable gap. Clip it with the original high_limit. */
+ if (gap_end > info->high_limit)
+ gap_end = info->high_limit;
+
+found_highest:
+ /* Compute highest gap address at the desired alignment */
+ gap_end -= info->length;
+ gap_end -= (gap_end - info->align_offset) & info->align_mask;
+
+ VM_BUG_ON(gap_end < info->low_limit);
+ VM_BUG_ON(gap_end < gap_start);
+ return gap_end;
+}
+
+
+static unsigned long kbase_get_unmapped_area(struct file *filp,
+ const unsigned long addr, const unsigned long len,
+ const unsigned long pgoff, const unsigned long flags)
+{
+ /* based on get_unmapped_area, but simplified slightly because some
+ * values are known in advance */
+ struct kbase_context *kctx = filp->private_data;
+ struct mm_struct *mm = current->mm;
+ struct vm_unmapped_area_info info;
+
+ /* err on fixed address */
+ if ((flags & MAP_FIXED) || addr)
+ return -EINVAL;
+
+ /* too big? */
+ if (len > TASK_SIZE - SZ_2M)
+ return -ENOMEM;
+
+ if (kbase_ctx_flag(kctx, KCTX_COMPAT))
+ return current->mm->get_unmapped_area(filp, addr, len, pgoff,
+ flags);
+
+ if (kbase_hw_has_feature(kctx->kbdev, BASE_HW_FEATURE_33BIT_VA)) {
+ info.high_limit = kctx->same_va_end << PAGE_SHIFT;
+ info.align_mask = 0;
+ info.align_offset = 0;
+ } else {
+ info.high_limit = min_t(unsigned long, mm->mmap_base,
+ (kctx->same_va_end << PAGE_SHIFT));
+ if (len >= SZ_2M) {
+ info.align_offset = SZ_2M;
+ info.align_mask = SZ_2M - 1;
+ } else {
+ info.align_mask = 0;
+ info.align_offset = 0;
+ }
+ }
+
+ info.flags = 0;
+ info.length = len;
+ info.low_limit = SZ_2M;
+ return kbase_unmapped_area_topdown(&info);
+}
+#endif
+
+static const struct file_operations kbase_fops = {
+ .owner = THIS_MODULE,
+ .open = kbase_open,
+ .release = kbase_release,
+ .read = kbase_read,
+ .poll = kbase_poll,
+ .unlocked_ioctl = kbase_ioctl,
+ .compat_ioctl = kbase_ioctl,
+ .mmap = kbase_mmap,
+ .check_flags = kbase_check_flags,
+#ifdef CONFIG_64BIT
+ .get_unmapped_area = kbase_get_unmapped_area,
+#endif
+};
+
+#ifndef CONFIG_MALI_NO_MALI
+void kbase_os_reg_write(struct kbase_device *kbdev, u16 offset, u32 value)
+{
+ writel(value, kbdev->reg + offset);
+}
+
+u32 kbase_os_reg_read(struct kbase_device *kbdev, u16 offset)
+{
+ return readl(kbdev->reg + offset);
+}
+#endif /* !CONFIG_MALI_NO_MALI */
+
+/** Show callback for the @c power_policy sysfs file.
+ *
+ * This function is called to get the contents of the @c power_policy sysfs
+ * file. This is a list of the available policies with the currently active one
+ * surrounded by square brackets.
+ *
+ * @param dev The device this sysfs file is for
+ * @param attr The attributes of the sysfs file
+ * @param buf The output buffer for the sysfs file contents
+ *
+ * @return The number of bytes output to @c buf.
+ */
+static ssize_t show_policy(struct device *dev, struct device_attribute *attr, char *const buf)
+{
+ struct kbase_device *kbdev;
+ const struct kbase_pm_policy *current_policy;
+ const struct kbase_pm_policy *const *policy_list;
+ int policy_count;
+ int i;
+ ssize_t ret = 0;
+
+ kbdev = to_kbase_device(dev);
+
+ if (!kbdev)
+ return -ENODEV;
+
+ current_policy = kbase_pm_get_policy(kbdev);
+
+ policy_count = kbase_pm_list_policies(&policy_list);
+
+ for (i = 0; i < policy_count && ret < PAGE_SIZE; i++) {
+ if (policy_list[i] == current_policy)
+ ret += scnprintf(buf + ret, PAGE_SIZE - ret, "[%s] ", policy_list[i]->name);
+ else
+ ret += scnprintf(buf + ret, PAGE_SIZE - ret, "%s ", policy_list[i]->name);
+ }
+
+ if (ret < PAGE_SIZE - 1) {
+ ret += scnprintf(buf + ret, PAGE_SIZE - ret, "\n");
+ } else {
+ buf[PAGE_SIZE - 2] = '\n';
+ buf[PAGE_SIZE - 1] = '\0';
+ ret = PAGE_SIZE - 1;
+ }
+
+ return ret;
+}
+
+/** Store callback for the @c power_policy sysfs file.
+ *
+ * This function is called when the @c power_policy sysfs file is written to.
+ * It matches the requested policy against the available policies and if a
+ * matching policy is found calls @ref kbase_pm_set_policy to change the
+ * policy.
+ *
+ * @param dev The device this sysfs file is for
+ * @param attr The attributes of the sysfs file
+ * @param buf The value written to the sysfs file
+ * @param count The number of bytes written to the sysfs file
+ *
+ * @return @c count if the function succeeded. An error code on failure.
+ */
+static ssize_t set_policy(struct device *dev, struct device_attribute *attr, const char *buf, size_t count)
+{
+ struct kbase_device *kbdev;
+ const struct kbase_pm_policy *new_policy = NULL;
+ const struct kbase_pm_policy *const *policy_list;
+ int policy_count;
+ int i;
+
+ kbdev = to_kbase_device(dev);
+
+ if (!kbdev)
+ return -ENODEV;
+
+ policy_count = kbase_pm_list_policies(&policy_list);
+
+ for (i = 0; i < policy_count; i++) {
+ if (sysfs_streq(policy_list[i]->name, buf)) {
+ new_policy = policy_list[i];
+ break;
+ }
+ }
+
+ if (!new_policy) {
+ dev_err(dev, "power_policy: policy not found\n");
+ return -EINVAL;
+ }
+
+ kbase_pm_set_policy(kbdev, new_policy);
+
+ return count;
+}
+
+/** The sysfs file @c power_policy.
+ *
+ * This is used for obtaining information about the available policies,
+ * determining which policy is currently active, and changing the active
+ * policy.
+ */
+static DEVICE_ATTR(power_policy, S_IRUGO | S_IWUSR, show_policy, set_policy);
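+
+/*
+ * Illustrative shell usage (a sketch; the sysfs path assumes the device
+ * registered as misc device "mali0", and the policy names are examples -
+ * the real list comes from kbase_pm_list_policies()):
+ *
+ *   # cat /sys/class/misc/mali0/device/power_policy
+ *   demand [coarse_demand] always_on
+ *   # echo always_on > /sys/class/misc/mali0/device/power_policy
+ */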
+
+/** Show callback for the @c core_availability_policy sysfs file.
+ *
+ * This function is called to get the contents of the @c core_availability_policy
+ * sysfs file. This is a list of the available policies with the currently
+ * active one surrounded by square brackets.
+ *
+ * @param dev The device this sysfs file is for
+ * @param attr The attributes of the sysfs file
+ * @param buf The output buffer for the sysfs file contents
+ *
+ * @return The number of bytes output to @c buf.
+ */
+static ssize_t show_ca_policy(struct device *dev, struct device_attribute *attr, char * const buf)
+{
+ struct kbase_device *kbdev;
+ const struct kbase_pm_ca_policy *current_policy;
+ const struct kbase_pm_ca_policy *const *policy_list;
+ int policy_count;
+ int i;
+ ssize_t ret = 0;
+
+ kbdev = to_kbase_device(dev);
+
+ if (!kbdev)
+ return -ENODEV;
+
+ current_policy = kbase_pm_ca_get_policy(kbdev);
+
+ policy_count = kbase_pm_ca_list_policies(&policy_list);
+
+ for (i = 0; i < policy_count && ret < PAGE_SIZE; i++) {
+ if (policy_list[i] == current_policy)
+ ret += scnprintf(buf + ret, PAGE_SIZE - ret, "[%s] ", policy_list[i]->name);
+ else
+ ret += scnprintf(buf + ret, PAGE_SIZE - ret, "%s ", policy_list[i]->name);
+ }
+
+ if (ret < PAGE_SIZE - 1) {
+ ret += scnprintf(buf + ret, PAGE_SIZE - ret, "\n");
+ } else {
+ buf[PAGE_SIZE - 2] = '\n';
+ buf[PAGE_SIZE - 1] = '\0';
+ ret = PAGE_SIZE - 1;
+ }
+
+ return ret;
+}
+
+/** Store callback for the @c core_availability_policy sysfs file.
+ *
+ * This function is called when the @c core_availability_policy sysfs file is
+ * written to. It matches the requested policy against the available policies
+ * and, if a matching policy is found, calls @ref kbase_pm_ca_set_policy to
+ * change the policy.
+ *
+ * @param dev The device this sysfs file is for
+ * @param attr The attributes of the sysfs file
+ * @param buf The value written to the sysfs file
+ * @param count The number of bytes written to the sysfs file
+ *
+ * @return @c count if the function succeeded. An error code on failure.
+ */
+static ssize_t set_ca_policy(struct device *dev, struct device_attribute *attr, const char *buf, size_t count)
+{
+ struct kbase_device *kbdev;
+ const struct kbase_pm_ca_policy *new_policy = NULL;
+ const struct kbase_pm_ca_policy *const *policy_list;
+ int policy_count;
+ int i;
+
+ kbdev = to_kbase_device(dev);
+
+ if (!kbdev)
+ return -ENODEV;
+
+ policy_count = kbase_pm_ca_list_policies(&policy_list);
+
+ for (i = 0; i < policy_count; i++) {
+ if (sysfs_streq(policy_list[i]->name, buf)) {
+ new_policy = policy_list[i];
+ break;
+ }
+ }
+
+ if (!new_policy) {
+ dev_err(dev, "core_availability_policy: policy not found\n");
+ return -EINVAL;
+ }
+
+ kbase_pm_ca_set_policy(kbdev, new_policy);
+
+ return count;
+}
+
+/** The sysfs file @c core_availability_policy
+ *
+ * This is used for obtaining information about the available policies,
+ * determining which policy is currently active, and changing the active
+ * policy.
+ */
+static DEVICE_ATTR(core_availability_policy, S_IRUGO | S_IWUSR, show_ca_policy, set_ca_policy);
+
+/** Show callback for the @c core_mask sysfs file.
+ *
+ * This function is called to get the contents of the @c core_mask sysfs
+ * file.
+ *
+ * @param dev The device this sysfs file is for
+ * @param attr The attributes of the sysfs file
+ * @param buf The output buffer for the sysfs file contents
+ *
+ * @return The number of bytes output to @c buf.
+ */
+static ssize_t show_core_mask(struct device *dev, struct device_attribute *attr, char * const buf)
+{
+ struct kbase_device *kbdev;
+ ssize_t ret = 0;
+
+ kbdev = to_kbase_device(dev);
+
+ if (!kbdev)
+ return -ENODEV;
+
+ ret += scnprintf(buf + ret, PAGE_SIZE - ret,
+ "Current core mask (JS0) : 0x%llX\n",
+ kbdev->pm.debug_core_mask[0]);
+ ret += scnprintf(buf + ret, PAGE_SIZE - ret,
+ "Current core mask (JS1) : 0x%llX\n",
+ kbdev->pm.debug_core_mask[1]);
+ ret += scnprintf(buf + ret, PAGE_SIZE - ret,
+ "Current core mask (JS2) : 0x%llX\n",
+ kbdev->pm.debug_core_mask[2]);
+ ret += scnprintf(buf + ret, PAGE_SIZE - ret,
+ "Available core mask : 0x%llX\n",
+ kbdev->gpu_props.props.raw_props.shader_present);
+
+ return ret;
+}
+
+/** Store callback for the @c core_mask sysfs file.
+ *
+ * This function is called when the @c core_mask sysfs file is written to.
+ *
+ * @param dev The device this sysfs file is for
+ * @param attr The attributes of the sysfs file
+ * @param buf The value written to the sysfs file
+ * @param count The number of bytes written to the sysfs file
+ *
+ * @return @c count if the function succeeded. An error code on failure.
+ */
+static ssize_t set_core_mask(struct device *dev, struct device_attribute *attr, const char *buf, size_t count)
+{
+ struct kbase_device *kbdev;
+ u64 new_core_mask[3];
+ int items;
+
+ kbdev = to_kbase_device(dev);
+
+ if (!kbdev)
+ return -ENODEV;
+
+ items = sscanf(buf, "%llx %llx %llx",
+ &new_core_mask[0], &new_core_mask[1],
+ &new_core_mask[2]);
+
+ if (items == 1)
+ new_core_mask[1] = new_core_mask[2] = new_core_mask[0];
+
+ if (items == 1 || items == 3) {
+ u64 shader_present =
+ kbdev->gpu_props.props.raw_props.shader_present;
+ u64 group0_core_mask =
+ kbdev->gpu_props.props.coherency_info.group[0].
+ core_mask;
+
+ if ((new_core_mask[0] & shader_present) != new_core_mask[0] ||
+ !(new_core_mask[0] & group0_core_mask) ||
+ (new_core_mask[1] & shader_present) !=
+ new_core_mask[1] ||
+ !(new_core_mask[1] & group0_core_mask) ||
+ (new_core_mask[2] & shader_present) !=
+ new_core_mask[2] ||
+ !(new_core_mask[2] & group0_core_mask)) {
+ dev_err(dev, "power_policy: invalid core specification\n");
+ return -EINVAL;
+ }
+
+ if (kbdev->pm.debug_core_mask[0] != new_core_mask[0] ||
+ kbdev->pm.debug_core_mask[1] !=
+ new_core_mask[1] ||
+ kbdev->pm.debug_core_mask[2] !=
+ new_core_mask[2]) {
+ unsigned long flags;
+
+ spin_lock_irqsave(&kbdev->hwaccess_lock, flags);
+
+ kbase_pm_set_debug_core_mask(kbdev, new_core_mask[0],
+ new_core_mask[1], new_core_mask[2]);
+
+ spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);
+ }
+
+ return count;
+ }
+
+ dev_err(kbdev->dev, "Couldn't process set_core_mask write operation.\n"
+ "Use format <core_mask>\n"
+ "or <core_mask_js0> <core_mask_js1> <core_mask_js2>\n");
+ return -EINVAL;
+}
+
+/** The sysfs file @c core_mask.
+ *
+ * This is used to restrict shader core availability for debugging purposes.
+ * Reading it will show the current core mask and the mask of cores available.
+ * Writing to it will set the current core mask.
+ */
+static DEVICE_ATTR(core_mask, S_IRUGO | S_IWUSR, show_core_mask, set_core_mask);
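+
+/*
+ * Illustrative shell usage (a sketch; the path assumes the device registered
+ * as "mali0" and the mask values are examples only):
+ *
+ *   # cat /sys/class/misc/mali0/device/core_mask
+ *   # echo 0xf > /sys/class/misc/mali0/device/core_mask
+ *   # echo "0xf 0x3 0x3" > /sys/class/misc/mali0/device/core_mask
+ *
+ * A single value applies to all three job slots; three values set the masks
+ * per slot. Each mask must be a subset of the cores present and must overlap
+ * core group 0, as checked in set_core_mask() above.
+ */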
+
+/**
+ * set_soft_job_timeout() - Store callback for the soft_job_timeout sysfs
+ * file.
+ *
+ * @dev: The device this sysfs file is for.
+ * @attr: The attributes of the sysfs file.
+ * @buf: The value written to the sysfs file.
+ * @count: The number of bytes written to the sysfs file.
+ *
+ * This allows setting the timeout for software jobs. Waiting soft event wait
+ * jobs will be cancelled after this period expires, while soft fence wait jobs
+ * will print debug information if the fence debug feature is enabled.
+ *
+ * This is expressed in milliseconds.
+ *
+ * Return: count if the function succeeded. An error code on failure.
+ */
+static ssize_t set_soft_job_timeout(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct kbase_device *kbdev;
+ int soft_job_timeout_ms;
+
+ kbdev = to_kbase_device(dev);
+ if (!kbdev)
+ return -ENODEV;
+
+ if ((kstrtoint(buf, 0, &soft_job_timeout_ms) != 0) ||
+ (soft_job_timeout_ms <= 0))
+ return -EINVAL;
+
+ atomic_set(&kbdev->js_data.soft_job_timeout_ms,
+ soft_job_timeout_ms);
+
+ return count;
+}
+
+/**
+ * show_soft_job_timeout() - Show callback for the soft_job_timeout sysfs
+ * file.
+ *
+ * This will return the timeout for the software jobs.
+ *
+ * @dev: The device this sysfs file is for.
+ * @attr: The attributes of the sysfs file.
+ * @buf: The output buffer for the sysfs file contents.
+ *
+ * Return: The number of bytes output to buf.
+ */
+static ssize_t show_soft_job_timeout(struct device *dev,
+ struct device_attribute *attr,
+ char * const buf)
+{
+ struct kbase_device *kbdev;
+
+ kbdev = to_kbase_device(dev);
+ if (!kbdev)
+ return -ENODEV;
+
+ return scnprintf(buf, PAGE_SIZE, "%i\n",
+ atomic_read(&kbdev->js_data.soft_job_timeout_ms));
+}
+
+static DEVICE_ATTR(soft_job_timeout, S_IRUGO | S_IWUSR,
+ show_soft_job_timeout, set_soft_job_timeout);
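+
+/*
+ * Illustrative shell usage (a sketch; the path assumes the device registered
+ * as "mali0"):
+ *
+ *   # echo 3000 > /sys/class/misc/mali0/device/soft_job_timeout
+ *
+ * sets the soft job timeout to 3000 ms; reading the file returns the value
+ * currently held in js_data.soft_job_timeout_ms.
+ */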
+
+static u32 timeout_ms_to_ticks(struct kbase_device *kbdev, long timeout_ms,
+ int default_ticks, u32 old_ticks)
+{
+ if (timeout_ms > 0) {
+ u64 ticks = timeout_ms * 1000000ULL;
+ do_div(ticks, kbdev->js_data.scheduling_period_ns);
+ if (!ticks)
+ return 1;
+ return ticks;
+ } else if (timeout_ms < 0) {
+ return default_ticks;
+ } else {
+ return old_ticks;
+ }
+}
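+
+/*
+ * Worked example (illustration only): with a scheduling period of 100 ms
+ * (scheduling_period_ns == 100000000), a timeout_ms of 450 gives
+ * 450 * 1000000 / 100000000 = 4 ticks after integer division; a negative
+ * timeout_ms selects default_ticks, and 0 keeps old_ticks unchanged.
+ */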
+
+/** Store callback for the @c js_timeouts sysfs file.
+ *
+ * This function is called when the @c js_timeouts sysfs file is written to.
+ * The file contains eight values separated by whitespace, corresponding to
+ * the JS_SOFT_STOP_TICKS, JS_SOFT_STOP_TICKS_CL, JS_HARD_STOP_TICKS_SS,
+ * JS_HARD_STOP_TICKS_CL, JS_HARD_STOP_TICKS_DUMPING, JS_RESET_TICKS_SS,
+ * JS_RESET_TICKS_CL and JS_RESET_TICKS_DUMPING configuration values (in that
+ * order), with the difference that the js_timeouts values are expressed in
+ * MILLISECONDS.
+ *
+ * The js_timeouts sysfs file allows the values currently in use by the job
+ * scheduler to be overridden. A value must be non-zero to override the
+ * current job scheduler value; writing -1 restores the default.
+ *
+ * @param dev The device this sysfs file is for
+ * @param attr The attributes of the sysfs file
+ * @param buf The value written to the sysfs file
+ * @param count The number of bytes written to the sysfs file
+ *
+ * @return @c count if the function succeeded. An error code on failure.
+ */
+static ssize_t set_js_timeouts(struct device *dev, struct device_attribute *attr, const char *buf, size_t count)
+{
+ struct kbase_device *kbdev;
+ int items;
+ long js_soft_stop_ms;
+ long js_soft_stop_ms_cl;
+ long js_hard_stop_ms_ss;
+ long js_hard_stop_ms_cl;
+ long js_hard_stop_ms_dumping;
+ long js_reset_ms_ss;
+ long js_reset_ms_cl;
+ long js_reset_ms_dumping;
+
+ kbdev = to_kbase_device(dev);
+ if (!kbdev)
+ return -ENODEV;
+
+ items = sscanf(buf, "%ld %ld %ld %ld %ld %ld %ld %ld",
+ &js_soft_stop_ms, &js_soft_stop_ms_cl,
+ &js_hard_stop_ms_ss, &js_hard_stop_ms_cl,
+ &js_hard_stop_ms_dumping, &js_reset_ms_ss,
+ &js_reset_ms_cl, &js_reset_ms_dumping);
+
+ if (items == 8) {
+ struct kbasep_js_device_data *js_data = &kbdev->js_data;
+ unsigned long flags;
+
+ spin_lock_irqsave(&kbdev->hwaccess_lock, flags);
+
+#define UPDATE_TIMEOUT(ticks_name, ms_name, default) do {\
+ js_data->ticks_name = timeout_ms_to_ticks(kbdev, ms_name, \
+ default, js_data->ticks_name); \
+ dev_dbg(kbdev->dev, "Overriding " #ticks_name \
+ " with %lu ticks (%lu ms)\n", \
+ (unsigned long)js_data->ticks_name, \
+ ms_name); \
+ } while (0)
+
+ UPDATE_TIMEOUT(soft_stop_ticks, js_soft_stop_ms,
+ DEFAULT_JS_SOFT_STOP_TICKS);
+ UPDATE_TIMEOUT(soft_stop_ticks_cl, js_soft_stop_ms_cl,
+ DEFAULT_JS_SOFT_STOP_TICKS_CL);
+ UPDATE_TIMEOUT(hard_stop_ticks_ss, js_hard_stop_ms_ss,
+ kbase_hw_has_issue(kbdev, BASE_HW_ISSUE_8408) ?
+ DEFAULT_JS_HARD_STOP_TICKS_SS_8408 :
+ DEFAULT_JS_HARD_STOP_TICKS_SS);
+ UPDATE_TIMEOUT(hard_stop_ticks_cl, js_hard_stop_ms_cl,
+ DEFAULT_JS_HARD_STOP_TICKS_CL);
+ UPDATE_TIMEOUT(hard_stop_ticks_dumping,
+ js_hard_stop_ms_dumping,
+ DEFAULT_JS_HARD_STOP_TICKS_DUMPING);
+ UPDATE_TIMEOUT(gpu_reset_ticks_ss, js_reset_ms_ss,
+ kbase_hw_has_issue(kbdev, BASE_HW_ISSUE_8408) ?
+ DEFAULT_JS_RESET_TICKS_SS_8408 :
+ DEFAULT_JS_RESET_TICKS_SS);
+ UPDATE_TIMEOUT(gpu_reset_ticks_cl, js_reset_ms_cl,
+ DEFAULT_JS_RESET_TICKS_CL);
+ UPDATE_TIMEOUT(gpu_reset_ticks_dumping, js_reset_ms_dumping,
+ DEFAULT_JS_RESET_TICKS_DUMPING);
+
+ kbase_js_set_timeouts(kbdev);
+
+ spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);
+
+ return count;
+ }
+
+ dev_err(kbdev->dev, "Couldn't process js_timeouts write operation.\n"
+ "Use format <soft_stop_ms> <soft_stop_ms_cl> <hard_stop_ms_ss> <hard_stop_ms_cl> <hard_stop_ms_dumping> <reset_ms_ss> <reset_ms_cl> <reset_ms_dumping>\n"
+ "Write 0 for no change, -1 to restore default timeout\n");
+ return -EINVAL;
+}
+
+static unsigned long get_js_timeout_in_ms(
+ u32 scheduling_period_ns,
+ u32 ticks)
+{
+ u64 ms = (u64)ticks * scheduling_period_ns;
+
+ do_div(ms, 1000000UL);
+ return ms;
+}
+
+/** Show callback for the @c js_timeouts sysfs file.
+ *
+ * This function is called to get the contents of the @c js_timeouts sysfs
+ * file. It returns the values last written to the js_timeouts sysfs file.
+ * If the file has not been written to yet, it returns the settings currently
+ * in use.
+ *
+ * @param dev The device this sysfs file is for
+ * @param attr The attributes of the sysfs file
+ * @param buf The output buffer for the sysfs file contents
+ *
+ * @return The number of bytes output to @c buf.
+ */
+static ssize_t show_js_timeouts(struct device *dev, struct device_attribute *attr, char * const buf)
+{
+ struct kbase_device *kbdev;
+ ssize_t ret;
+ unsigned long js_soft_stop_ms;
+ unsigned long js_soft_stop_ms_cl;
+ unsigned long js_hard_stop_ms_ss;
+ unsigned long js_hard_stop_ms_cl;
+ unsigned long js_hard_stop_ms_dumping;
+ unsigned long js_reset_ms_ss;
+ unsigned long js_reset_ms_cl;
+ unsigned long js_reset_ms_dumping;
+ u32 scheduling_period_ns;
+
+ kbdev = to_kbase_device(dev);
+ if (!kbdev)
+ return -ENODEV;
+
+ scheduling_period_ns = kbdev->js_data.scheduling_period_ns;
+
+#define GET_TIMEOUT(name) get_js_timeout_in_ms(\
+ scheduling_period_ns, \
+ kbdev->js_data.name)
+
+ js_soft_stop_ms = GET_TIMEOUT(soft_stop_ticks);
+ js_soft_stop_ms_cl = GET_TIMEOUT(soft_stop_ticks_cl);
+ js_hard_stop_ms_ss = GET_TIMEOUT(hard_stop_ticks_ss);
+ js_hard_stop_ms_cl = GET_TIMEOUT(hard_stop_ticks_cl);
+ js_hard_stop_ms_dumping = GET_TIMEOUT(hard_stop_ticks_dumping);
+ js_reset_ms_ss = GET_TIMEOUT(gpu_reset_ticks_ss);
+ js_reset_ms_cl = GET_TIMEOUT(gpu_reset_ticks_cl);
+ js_reset_ms_dumping = GET_TIMEOUT(gpu_reset_ticks_dumping);
+
+#undef GET_TIMEOUT
+
+ ret = scnprintf(buf, PAGE_SIZE, "%lu %lu %lu %lu %lu %lu %lu %lu\n",
+ js_soft_stop_ms, js_soft_stop_ms_cl,
+ js_hard_stop_ms_ss, js_hard_stop_ms_cl,
+ js_hard_stop_ms_dumping, js_reset_ms_ss,
+ js_reset_ms_cl, js_reset_ms_dumping);
+
+ if (ret >= PAGE_SIZE) {
+ buf[PAGE_SIZE - 2] = '\n';
+ buf[PAGE_SIZE - 1] = '\0';
+ ret = PAGE_SIZE - 1;
+ }
+
+ return ret;
+}
+
+/** The sysfs file @c js_timeouts.
+ *
+ * This is used to override the current job scheduler values for
+ * JS_SOFT_STOP_TICKS
+ * JS_SOFT_STOP_TICKS_CL
+ * JS_HARD_STOP_TICKS_SS
+ * JS_HARD_STOP_TICKS_CL
+ * JS_HARD_STOP_TICKS_DUMPING
+ * JS_RESET_TICKS_SS
+ * JS_RESET_TICKS_CL
+ * JS_RESET_TICKS_DUMPING.
+ */
+static DEVICE_ATTR(js_timeouts, S_IRUGO | S_IWUSR, show_js_timeouts, set_js_timeouts);
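+
+/*
+ * Illustrative shell usage (a sketch; the path assumes the device registered
+ * as "mali0" and the numbers are examples). The eight values are in
+ * milliseconds; 0 leaves a value unchanged and -1 restores its default:
+ *
+ *   # echo "500 500 1000 1000 2000 3000 3000 4000" > /sys/class/misc/mali0/device/js_timeouts
+ *   # echo "0 0 -1 0 0 0 0 0" > /sys/class/misc/mali0/device/js_timeouts
+ */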
+
+static u32 get_new_js_timeout(
+ u32 old_period,
+ u32 old_ticks,
+ u32 new_scheduling_period_ns)
+{
+ u64 ticks = (u64)old_period * (u64)old_ticks;
+ do_div(ticks, new_scheduling_period_ns);
+ return ticks ? ticks : 1;
+}
+
+/**
+ * set_js_scheduling_period - Store callback for the js_scheduling_period sysfs
+ * file
+ * @dev: The device the sysfs file is for
+ * @attr: The attributes of the sysfs file
+ * @buf: The value written to the sysfs file
+ * @count: The number of bytes written to the sysfs file
+ *
+ * This function is called when the js_scheduling_period sysfs file is written
+ * to. It checks the data written and, if valid, updates the
+ * js_scheduling_period value.
+ *
+ * Return: @c count if the function succeeded. An error code on failure.
+ */
+static ssize_t set_js_scheduling_period(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count)
+{
+ struct kbase_device *kbdev;
+ int ret;
+ unsigned int js_scheduling_period;
+ u32 new_scheduling_period_ns;
+ u32 old_period;
+ struct kbasep_js_device_data *js_data;
+ unsigned long flags;
+
+ kbdev = to_kbase_device(dev);
+ if (!kbdev)
+ return -ENODEV;
+
+ js_data = &kbdev->js_data;
+
+ ret = kstrtouint(buf, 0, &js_scheduling_period);
+ if (ret || !js_scheduling_period) {
+ dev_err(kbdev->dev, "Couldn't process js_scheduling_period write operation.\n"
+ "Use format <js_scheduling_period_ms>\n");
+ return -EINVAL;
+ }
+
+ new_scheduling_period_ns = js_scheduling_period * 1000000;
+
+ /* Update scheduling timeouts */
+ mutex_lock(&js_data->runpool_mutex);
+ spin_lock_irqsave(&kbdev->hwaccess_lock, flags);
+
+ /* If no contexts have been scheduled since js_timeouts was last written
+ * to, the new timeouts might not have been latched yet. So check if an
+ * update is pending and use the new values if necessary. */
+
+ /* Use previous 'new' scheduling period as a base if present. */
+ old_period = js_data->scheduling_period_ns;
+
+#define SET_TIMEOUT(name) \
+ (js_data->name = get_new_js_timeout(\
+ old_period, \
+ kbdev->js_data.name, \
+ new_scheduling_period_ns))
+
+ SET_TIMEOUT(soft_stop_ticks);
+ SET_TIMEOUT(soft_stop_ticks_cl);
+ SET_TIMEOUT(hard_stop_ticks_ss);
+ SET_TIMEOUT(hard_stop_ticks_cl);
+ SET_TIMEOUT(hard_stop_ticks_dumping);
+ SET_TIMEOUT(gpu_reset_ticks_ss);
+ SET_TIMEOUT(gpu_reset_ticks_cl);
+ SET_TIMEOUT(gpu_reset_ticks_dumping);
+
+#undef SET_TIMEOUT
+
+ js_data->scheduling_period_ns = new_scheduling_period_ns;
+
+ kbase_js_set_timeouts(kbdev);
+
+ spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);
+ mutex_unlock(&js_data->runpool_mutex);
+
+ dev_dbg(kbdev->dev, "JS scheduling period: %dms\n",
+ js_scheduling_period);
+
+ return count;
+}
+
+/**
+ * show_js_scheduling_period - Show callback for the js_scheduling_period sysfs
+ * entry.
+ * @dev: The device this sysfs file is for.
+ * @attr: The attributes of the sysfs file.
+ * @buf: The output buffer for the sysfs file contents.
+ *
+ * This function is called to get the current period used for JS scheduling,
+ * in milliseconds.
+ *
+ * Return: The number of bytes output to buf.
+ */
+static ssize_t show_js_scheduling_period(struct device *dev,
+ struct device_attribute *attr, char * const buf)
+{
+ struct kbase_device *kbdev;
+ u32 period;
+ ssize_t ret;
+
+ kbdev = to_kbase_device(dev);
+ if (!kbdev)
+ return -ENODEV;
+
+ period = kbdev->js_data.scheduling_period_ns;
+
+ ret = scnprintf(buf, PAGE_SIZE, "%d\n",
+ period / 1000000);
+
+ return ret;
+}
+
+static DEVICE_ATTR(js_scheduling_period, S_IRUGO | S_IWUSR,
+ show_js_scheduling_period, set_js_scheduling_period);
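+
+/*
+ * Illustrative shell usage (a sketch; the path assumes the device registered
+ * as "mali0"):
+ *
+ *   # echo 200 > /sys/class/misc/mali0/device/js_scheduling_period
+ *
+ * sets a 200 ms scheduling period and rescales the stored tick counts so
+ * the effective timeouts in milliseconds stay roughly the same.
+ */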
+
+#if !MALI_CUSTOMER_RELEASE
+/** Store callback for the @c force_replay sysfs file.
+ *
+ * @param dev The device this sysfs file is for
+ * @param attr The attributes of the sysfs file
+ * @param buf The value written to the sysfs file
+ * @param count The number of bytes written to the sysfs file
+ *
+ * @return @c count if the function succeeded. An error code on failure.
+ */
+static ssize_t set_force_replay(struct device *dev, struct device_attribute *attr, const char *buf, size_t count)
+{
+ struct kbase_device *kbdev;
+
+ kbdev = to_kbase_device(dev);
+ if (!kbdev)
+ return -ENODEV;
+
+ if (!strncmp("limit=", buf, MIN(6, count))) {
+ int force_replay_limit;
+ int items = sscanf(buf, "limit=%u", &force_replay_limit);
+
+ if (items == 1) {
+ kbdev->force_replay_random = false;
+ kbdev->force_replay_limit = force_replay_limit;
+ kbdev->force_replay_count = 0;
+
+ return count;
+ }
+ } else if (!strncmp("random_limit", buf, MIN(12, count))) {
+ kbdev->force_replay_random = true;
+ kbdev->force_replay_count = 0;
+
+ return count;
+ } else if (!strncmp("norandom_limit", buf, MIN(14, count))) {
+ kbdev->force_replay_random = false;
+ kbdev->force_replay_limit = KBASEP_FORCE_REPLAY_DISABLED;
+ kbdev->force_replay_count = 0;
+
+ return count;
+ } else if (!strncmp("core_req=", buf, MIN(9, count))) {
+ unsigned int core_req;
+ int items = sscanf(buf, "core_req=%x", &core_req);
+
+ if (items == 1) {
+ kbdev->force_replay_core_req = (base_jd_core_req)core_req;
+
+ return count;
+ }
+ }
+ dev_err(kbdev->dev, "Couldn't process force_replay write operation.\nPossible settings: limit=<limit>, random_limit, norandom_limit, core_req=<core_req>\n");
+ return -EINVAL;
+}
+
+/** Show callback for the @c force_replay sysfs file.
+ *
+ * This function is called to get the contents of the @c force_replay sysfs
+ * file. It returns the value last written to the force_replay sysfs file.
+ * If the file has not been written to yet, the values will be 0.
+ *
+ * @param dev The device this sysfs file is for
+ * @param attr The attributes of the sysfs file
+ * @param buf The output buffer for the sysfs file contents
+ *
+ * @return The number of bytes output to @c buf.
+ */
+static ssize_t show_force_replay(struct device *dev,
+ struct device_attribute *attr, char * const buf)
+{
+ struct kbase_device *kbdev;
+ ssize_t ret;
+
+ kbdev = to_kbase_device(dev);
+ if (!kbdev)
+ return -ENODEV;
+
+ if (kbdev->force_replay_random)
+ ret = scnprintf(buf, PAGE_SIZE,
+ "limit=0\nrandom_limit\ncore_req=%x\n",
+ kbdev->force_replay_core_req);
+ else
+ ret = scnprintf(buf, PAGE_SIZE,
+ "limit=%u\nnorandom_limit\ncore_req=%x\n",
+ kbdev->force_replay_limit,
+ kbdev->force_replay_core_req);
+
+ if (ret >= PAGE_SIZE) {
+ buf[PAGE_SIZE - 2] = '\n';
+ buf[PAGE_SIZE - 1] = '\0';
+ ret = PAGE_SIZE - 1;
+ }
+
+ return ret;
+}
+
+/** The sysfs file @c force_replay. */
+static DEVICE_ATTR(force_replay, S_IRUGO | S_IWUSR, show_force_replay,
+ set_force_replay);
+#endif /* !MALI_CUSTOMER_RELEASE */
+
+#ifdef CONFIG_MALI_DEBUG
+static ssize_t set_js_softstop_always(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count)
+{
+ struct kbase_device *kbdev;
+ int ret;
+ int softstop_always;
+
+ kbdev = to_kbase_device(dev);
+ if (!kbdev)
+ return -ENODEV;
+
+ ret = kstrtoint(buf, 0, &softstop_always);
+ if (ret || ((softstop_always != 0) && (softstop_always != 1))) {
+ dev_err(kbdev->dev, "Couldn't process js_softstop_always write operation.\n"
+ "Use format <soft_stop_always>\n");
+ return -EINVAL;
+ }
+
+ kbdev->js_data.softstop_always = (bool) softstop_always;
+ dev_dbg(kbdev->dev, "Support for softstop on a single context: %s\n",
+ (kbdev->js_data.softstop_always) ?
+ "Enabled" : "Disabled");
+ return count;
+}
+
+static ssize_t show_js_softstop_always(struct device *dev,
+ struct device_attribute *attr, char * const buf)
+{
+ struct kbase_device *kbdev;
+ ssize_t ret;
+
+ kbdev = to_kbase_device(dev);
+ if (!kbdev)
+ return -ENODEV;
+
+ ret = scnprintf(buf, PAGE_SIZE, "%d\n", kbdev->js_data.softstop_always);
+
+ if (ret >= PAGE_SIZE) {
+ buf[PAGE_SIZE - 2] = '\n';
+ buf[PAGE_SIZE - 1] = '\0';
+ ret = PAGE_SIZE - 1;
+ }
+
+ return ret;
+}
+
+/*
+ * By default, soft-stops are disabled when only a single context is present.
+ * Enabling soft-stops even when only a single context is present can be used
+ * for debug and unit-testing purposes (see the CL t6xx_stress_1 unit test for
+ * an example that uses this feature).
+ */
+static DEVICE_ATTR(js_softstop_always, S_IRUGO | S_IWUSR, show_js_softstop_always, set_js_softstop_always);
+#endif /* CONFIG_MALI_DEBUG */
+
+#ifdef CONFIG_MALI_DEBUG
+typedef void (kbasep_debug_command_func) (struct kbase_device *);
+
+enum kbasep_debug_command_code {
+ KBASEP_DEBUG_COMMAND_DUMPTRACE,
+
+ /* This must be the last enum */
+ KBASEP_DEBUG_COMMAND_COUNT
+};
+
+struct kbasep_debug_command {
+ char *str;
+ kbasep_debug_command_func *func;
+};
+
+/** Debug commands supported by the driver */
+static const struct kbasep_debug_command debug_commands[] = {
+ {
+ .str = "dumptrace",
+ .func = &kbasep_trace_dump,
+ }
+};
+
+/** Show callback for the @c debug_command sysfs file.
+ *
+ * This function is called to get the contents of the @c debug_command sysfs
+ * file. This is a list of the available debug commands, separated by newlines.
+ *
+ * @param dev The device this sysfs file is for
+ * @param attr The attributes of the sysfs file
+ * @param buf The output buffer for the sysfs file contents
+ *
+ * @return The number of bytes output to @c buf.
+ */
+static ssize_t show_debug(struct device *dev, struct device_attribute *attr, char * const buf)
+{
+ struct kbase_device *kbdev;
+ int i;
+ ssize_t ret = 0;
+
+ kbdev = to_kbase_device(dev);
+
+ if (!kbdev)
+ return -ENODEV;
+
+ for (i = 0; i < KBASEP_DEBUG_COMMAND_COUNT && ret < PAGE_SIZE; i++)
+ ret += scnprintf(buf + ret, PAGE_SIZE - ret, "%s\n", debug_commands[i].str);
+
+ if (ret >= PAGE_SIZE) {
+ buf[PAGE_SIZE - 2] = '\n';
+ buf[PAGE_SIZE - 1] = '\0';
+ ret = PAGE_SIZE - 1;
+ }
+
+ return ret;
+}
+
+/** Store callback for the @c debug_command sysfs file.
+ *
+ * This function is called when the @c debug_command sysfs file is written to.
+ * It matches the requested command against the available commands, and if
+ * a matching command is found calls the associated function from
+ * @ref debug_commands to issue the command.
+ *
+ * @param dev The device this sysfs file is for
+ * @param attr The attributes of the sysfs file
+ * @param buf The value written to the sysfs file
+ * @param count The number of bytes written to the sysfs file
+ *
+ * @return @c count if the function succeeded. An error code on failure.
+ */
+static ssize_t issue_debug(struct device *dev, struct device_attribute *attr, const char *buf, size_t count)
+{
+ struct kbase_device *kbdev;
+ int i;
+
+ kbdev = to_kbase_device(dev);
+
+ if (!kbdev)
+ return -ENODEV;
+
+ for (i = 0; i < KBASEP_DEBUG_COMMAND_COUNT; i++) {
+ if (sysfs_streq(debug_commands[i].str, buf)) {
+ debug_commands[i].func(kbdev);
+ return count;
+ }
+ }
+
+ /* Debug Command not found */
+ dev_err(dev, "debug_command: command not known\n");
+ return -EINVAL;
+}
+
+/** The sysfs file @c debug_command.
+ *
+ * This is used to issue general debug commands to the device driver.
+ * Reading it will produce a list of debug commands, separated by newlines.
+ * Writing to it with one of those commands will issue said command.
+ */
+static DEVICE_ATTR(debug_command, S_IRUGO | S_IWUSR, show_debug, issue_debug);
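+
+/*
+ * Illustrative shell usage (a sketch; the path assumes the device registered
+ * as "mali0"):
+ *
+ *   # cat /sys/class/misc/mali0/device/debug_command
+ *   dumptrace
+ *   # echo dumptrace > /sys/class/misc/mali0/device/debug_command
+ */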
+#endif /* CONFIG_MALI_DEBUG */
+
+/**
+ * kbase_show_gpuinfo - Show callback for the gpuinfo sysfs entry.
+ * @dev: The device this sysfs file is for.
+ * @attr: The attributes of the sysfs file.
+ * @buf: The output buffer to receive the GPU information.
+ *
+ * This function is called to get a description of the present Mali
+ * GPU via the gpuinfo sysfs entry. This includes the GPU family, the
+ * number of cores, the hardware version and the raw product id. For
+ * example:
+ *
+ * Mali-T60x MP4 r0p0 0x6956
+ *
+ * Return: The number of bytes output to buf.
+ */
+static ssize_t kbase_show_gpuinfo(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ static const struct gpu_product_id_name {
+ unsigned id;
+ char *name;
+ } gpu_product_id_names[] = {
+ { .id = GPU_ID_PI_T60X, .name = "Mali-T60x" },
+ { .id = GPU_ID_PI_T62X, .name = "Mali-T62x" },
+ { .id = GPU_ID_PI_T72X, .name = "Mali-T72x" },
+ { .id = GPU_ID_PI_T76X, .name = "Mali-T76x" },
+ { .id = GPU_ID_PI_T82X, .name = "Mali-T82x" },
+ { .id = GPU_ID_PI_T83X, .name = "Mali-T83x" },
+ { .id = GPU_ID_PI_T86X, .name = "Mali-T86x" },
+ { .id = GPU_ID_PI_TFRX, .name = "Mali-T88x" },
+ { .id = GPU_ID2_PRODUCT_TMIX >> GPU_ID_VERSION_PRODUCT_ID_SHIFT,
+ .name = "Mali-G71" },
+ { .id = GPU_ID2_PRODUCT_THEX >> GPU_ID_VERSION_PRODUCT_ID_SHIFT,
+ .name = "Mali-THEx" },
+ };
+ const char *product_name = "(Unknown Mali GPU)";
+ struct kbase_device *kbdev;
+ u32 gpu_id;
+ unsigned product_id, product_id_mask;
+ unsigned i;
+ bool is_new_format;
+
+ kbdev = to_kbase_device(dev);
+ if (!kbdev)
+ return -ENODEV;
+
+ gpu_id = kbdev->gpu_props.props.raw_props.gpu_id;
+ product_id = gpu_id >> GPU_ID_VERSION_PRODUCT_ID_SHIFT;
+ is_new_format = GPU_ID_IS_NEW_FORMAT(product_id);
+ product_id_mask =
+ (is_new_format ?
+ GPU_ID2_PRODUCT_MODEL :
+ GPU_ID_VERSION_PRODUCT_ID) >>
+ GPU_ID_VERSION_PRODUCT_ID_SHIFT;
+
+ for (i = 0; i < ARRAY_SIZE(gpu_product_id_names); ++i) {
+ const struct gpu_product_id_name *p = &gpu_product_id_names[i];
+
+ if ((GPU_ID_IS_NEW_FORMAT(p->id) == is_new_format) &&
+ (p->id & product_id_mask) ==
+ (product_id & product_id_mask)) {
+ product_name = p->name;
+ break;
+ }
+ }
+
+ return scnprintf(buf, PAGE_SIZE, "%s MP%d r%dp%d 0x%04X\n",
+ product_name, kbdev->gpu_props.num_cores,
+ (gpu_id & GPU_ID_VERSION_MAJOR) >> GPU_ID_VERSION_MAJOR_SHIFT,
+ (gpu_id & GPU_ID_VERSION_MINOR) >> GPU_ID_VERSION_MINOR_SHIFT,
+ product_id);
+}
+static DEVICE_ATTR(gpuinfo, S_IRUGO, kbase_show_gpuinfo, NULL);
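+
+/*
+ * Illustrative output (a sketch; the values shown depend entirely on the GPU
+ * present, and the path assumes the device registered as "mali0"):
+ *
+ *   # cat /sys/class/misc/mali0/device/gpuinfo
+ *   Mali-T76x MP4 r0p1 0x0750
+ *
+ * i.e. product name, core count, hardware revision and raw product id, as
+ * formatted by kbase_show_gpuinfo() above.
+ */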
+
+/**
+ * set_dvfs_period - Store callback for the dvfs_period sysfs file.
+ * @dev: The device this sysfs file is for
+ * @attr: The attributes of the sysfs file
+ * @buf: The value written to the sysfs file
+ * @count: The number of bytes written to the sysfs file
+ *
+ * This function is called when the dvfs_period sysfs file is written to. It
+ * checks the data written and, if valid, updates the DVFS period variable.
+ *
+ * Return: @c count if the function succeeded. An error code on failure.
+ */
+static ssize_t set_dvfs_period(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count)
+{
+ struct kbase_device *kbdev;
+ int ret;
+ int dvfs_period;
+
+ kbdev = to_kbase_device(dev);
+ if (!kbdev)
+ return -ENODEV;
+
+ ret = kstrtoint(buf, 0, &dvfs_period);
+ if (ret || dvfs_period <= 0) {
+ dev_err(kbdev->dev, "Couldn't process dvfs_period write operation.\n"
+ "Use format <dvfs_period_ms>\n");
+ return -EINVAL;
+ }
+
+ kbdev->pm.dvfs_period = dvfs_period;
+ dev_dbg(kbdev->dev, "DVFS period: %dms\n", dvfs_period);
+
+ return count;
+}
+
+/**
+ * show_dvfs_period - Show callback for the dvfs_period sysfs entry.
+ * @dev: The device this sysfs file is for.
+ * @attr: The attributes of the sysfs file.
+ * @buf: The output buffer for the sysfs file contents.
+ *
+ * This function is called to get the current period used for the DVFS sample
+ * timer.
+ *
+ * Return: The number of bytes output to buf.
+ */
+static ssize_t show_dvfs_period(struct device *dev,
+ struct device_attribute *attr, char * const buf)
+{
+ struct kbase_device *kbdev;
+ ssize_t ret;
+
+ kbdev = to_kbase_device(dev);
+ if (!kbdev)
+ return -ENODEV;
+
+ ret = scnprintf(buf, PAGE_SIZE, "%d\n", kbdev->pm.dvfs_period);
+
+ return ret;
+}
+
+static DEVICE_ATTR(dvfs_period, S_IRUGO | S_IWUSR, show_dvfs_period,
+ set_dvfs_period);
+
+/**
+ * set_pm_poweroff - Store callback for the pm_poweroff sysfs file.
+ * @dev: The device this sysfs file is for
+ * @attr: The attributes of the sysfs file
+ * @buf: The value written to the sysfs file
+ * @count: The number of bytes written to the sysfs file
+ *
+ * This function is called when the pm_poweroff sysfs file is written to.
+ *
+ * This file contains three values separated by whitespace. The values
+ * are gpu_poweroff_time (the period of the poweroff timer, in ns),
+ * poweroff_shader_ticks (the number of poweroff timer ticks before an idle
+ * shader is powered off), and poweroff_gpu_ticks (the number of poweroff timer
+ * ticks before the GPU is powered off), in that order.
+ *
+ * Return: @c count if the function succeeded. An error code on failure.
+ */
+static ssize_t set_pm_poweroff(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count)
+{
+ struct kbase_device *kbdev;
+ int items;
+ s64 gpu_poweroff_time;
+ int poweroff_shader_ticks, poweroff_gpu_ticks;
+
+ kbdev = to_kbase_device(dev);
+ if (!kbdev)
+ return -ENODEV;
+
+ items = sscanf(buf, "%llu %u %u", &gpu_poweroff_time,
+ &poweroff_shader_ticks,
+ &poweroff_gpu_ticks);
+ if (items != 3) {
+ dev_err(kbdev->dev, "Couldn't process pm_poweroff write operation.\n"
+ "Use format <gpu_poweroff_time_ns> <poweroff_shader_ticks> <poweroff_gpu_ticks>\n");
+ return -EINVAL;
+ }
+
+ kbdev->pm.gpu_poweroff_time = HR_TIMER_DELAY_NSEC(gpu_poweroff_time);
+ kbdev->pm.poweroff_shader_ticks = poweroff_shader_ticks;
+ kbdev->pm.poweroff_gpu_ticks = poweroff_gpu_ticks;
+
+ return count;
+}
+
+/**
+ * show_pm_poweroff - Show callback for the pm_poweroff sysfs entry.
+ * @dev: The device this sysfs file is for.
+ * @attr: The attributes of the sysfs file.
+ * @buf: The output buffer for the sysfs file contents.
+ *
+ * This function is called to get the current power-off timer settings: the
+ * timer period and the shader and GPU power-off tick counts.
+ *
+ * Return: The number of bytes output to buf.
+ */
+static ssize_t show_pm_poweroff(struct device *dev,
+ struct device_attribute *attr, char * const buf)
+{
+ struct kbase_device *kbdev;
+ ssize_t ret;
+
+ kbdev = to_kbase_device(dev);
+ if (!kbdev)
+ return -ENODEV;
+
+ ret = scnprintf(buf, PAGE_SIZE, "%llu %u %u\n",
+ ktime_to_ns(kbdev->pm.gpu_poweroff_time),
+ kbdev->pm.poweroff_shader_ticks,
+ kbdev->pm.poweroff_gpu_ticks);
+
+ return ret;
+}
+
+static DEVICE_ATTR(pm_poweroff, S_IRUGO | S_IWUSR, show_pm_poweroff,
+ set_pm_poweroff);
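+
+/*
+ * Illustrative shell usage (a sketch; the path assumes the device registered
+ * as "mali0" and the numbers are examples): a 100000 ns poweroff timer
+ * period, with shaders powered off after one idle tick and the GPU after two:
+ *
+ *   # echo "100000 1 2" > /sys/class/misc/mali0/device/pm_poweroff
+ *   # cat /sys/class/misc/mali0/device/pm_poweroff
+ *   100000 1 2
+ */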
+
+/**
+ * set_reset_timeout - Store callback for the reset_timeout sysfs file.
+ * @dev: The device this sysfs file is for
+ * @attr: The attributes of the sysfs file
+ * @buf: The value written to the sysfs file
+ * @count: The number of bytes written to the sysfs file
+ *
+ * This function is called when the reset_timeout sysfs file is written to. It
+ * checks the data written, and if valid updates the reset timeout.
+ *
+ * Return: @c count if the function succeeded. An error code on failure.
+ */
+static ssize_t set_reset_timeout(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count)
+{
+ struct kbase_device *kbdev;
+ int ret;
+ int reset_timeout;
+
+ kbdev = to_kbase_device(dev);
+ if (!kbdev)
+ return -ENODEV;
+
+ ret = kstrtoint(buf, 0, &reset_timeout);
+ if (ret || reset_timeout <= 0) {
+ dev_err(kbdev->dev, "Couldn't process reset_timeout write operation.\n"
+ "Use format <reset_timeout_ms>\n");
+ return -EINVAL;
+ }
+
+ kbdev->reset_timeout_ms = reset_timeout;
+ dev_dbg(kbdev->dev, "Reset timeout: %dms\n", reset_timeout);
+
+ return count;
+}
+
+/**
+ * show_reset_timeout - Show callback for the reset_timeout sysfs entry.
+ * @dev: The device this sysfs file is for.
+ * @attr: The attributes of the sysfs file.
+ * @buf: The output buffer for the sysfs file contents.
+ *
+ * This function is called to get the current reset timeout.
+ *
+ * Return: The number of bytes output to buf.
+ */
+static ssize_t show_reset_timeout(struct device *dev,
+ struct device_attribute *attr, char * const buf)
+{
+ struct kbase_device *kbdev;
+ ssize_t ret;
+
+ kbdev = to_kbase_device(dev);
+ if (!kbdev)
+ return -ENODEV;
+
+ ret = scnprintf(buf, PAGE_SIZE, "%d\n", kbdev->reset_timeout_ms);
+
+ return ret;
+}
+
+static DEVICE_ATTR(reset_timeout, S_IRUGO | S_IWUSR, show_reset_timeout,
+ set_reset_timeout);
+
+
+
+static ssize_t show_mem_pool_size(struct device *dev,
+ struct device_attribute *attr, char * const buf)
+{
+ struct kbase_device *kbdev;
+ ssize_t ret;
+
+ kbdev = to_kbase_device(dev);
+ if (!kbdev)
+ return -ENODEV;
+
+ ret = scnprintf(buf, PAGE_SIZE, "%zu\n",
+ kbase_mem_pool_size(&kbdev->mem_pool));
+
+ return ret;
+}
+
+static ssize_t set_mem_pool_size(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count)
+{
+ struct kbase_device *kbdev;
+ size_t new_size;
+ int err;
+
+ kbdev = to_kbase_device(dev);
+ if (!kbdev)
+ return -ENODEV;
+
+ err = kstrtoul(buf, 0, (unsigned long *)&new_size);
+ if (err)
+ return err;
+
+ kbase_mem_pool_trim(&kbdev->mem_pool, new_size);
+
+ return count;
+}
+
+static DEVICE_ATTR(mem_pool_size, S_IRUGO | S_IWUSR, show_mem_pool_size,
+ set_mem_pool_size);
+
+static ssize_t show_mem_pool_max_size(struct device *dev,
+ struct device_attribute *attr, char * const buf)
+{
+ struct kbase_device *kbdev;
+ ssize_t ret;
+
+ kbdev = to_kbase_device(dev);
+ if (!kbdev)
+ return -ENODEV;
+
+ ret = scnprintf(buf, PAGE_SIZE, "%zu\n",
+ kbase_mem_pool_max_size(&kbdev->mem_pool));
+
+ return ret;
+}
+
+static ssize_t set_mem_pool_max_size(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count)
+{
+ struct kbase_device *kbdev;
+ size_t new_max_size;
+ int err;
+
+ kbdev = to_kbase_device(dev);
+ if (!kbdev)
+ return -ENODEV;
+
+ err = kstrtoul(buf, 0, (unsigned long *)&new_max_size);
+ if (err)
+ return -EINVAL;
+
+ kbase_mem_pool_set_max_size(&kbdev->mem_pool, new_max_size);
+
+ return count;
+}
+
+static DEVICE_ATTR(mem_pool_max_size, S_IRUGO | S_IWUSR, show_mem_pool_max_size,
+ set_mem_pool_max_size);
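+
+/*
+ * Illustrative shell usage for the two memory pool attributes above (a
+ * sketch; the path assumes the device registered as "mali0" and the sizes,
+ * expressed in pages, are examples):
+ *
+ *   # cat /sys/class/misc/mali0/device/mem_pool_size
+ *   # echo 0 > /sys/class/misc/mali0/device/mem_pool_size
+ *   # echo 16384 > /sys/class/misc/mali0/device/mem_pool_max_size
+ *
+ * Writing mem_pool_size resizes the pool toward the requested number of
+ * pages; mem_pool_max_size caps how large the pool may grow.
+ */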
+
+
+static int kbasep_protected_mode_enter(struct kbase_device *kbdev)
+{
+ kbase_reg_write(kbdev, GPU_CONTROL_REG(GPU_COMMAND),
+ GPU_COMMAND_SET_PROTECTED_MODE, NULL);
+ return 0;
+}
+
+static bool kbasep_protected_mode_supported(struct kbase_device *kbdev)
+{
+ return true;
+}
+
+static struct kbase_protected_ops kbasep_protected_ops = {
+ .protected_mode_enter = kbasep_protected_mode_enter,
+ .protected_mode_reset = NULL,
+ .protected_mode_supported = kbasep_protected_mode_supported,
+};
+
+static void kbasep_protected_mode_init(struct kbase_device *kbdev)
+{
+ kbdev->protected_ops = NULL;
+
+ if (kbase_hw_has_feature(kbdev, BASE_HW_FEATURE_PROTECTED_MODE)) {
+ /* Use native protected ops */
+ kbdev->protected_ops = &kbasep_protected_ops;
+ }
+#ifdef PROTECTED_CALLBACKS
+ else
+ kbdev->protected_ops = PROTECTED_CALLBACKS;
+#endif
+
+ if (kbdev->protected_ops)
+ kbdev->protected_mode_support =
+ kbdev->protected_ops->protected_mode_supported(kbdev);
+ else
+ kbdev->protected_mode_support = false;
+}
+
+#ifdef CONFIG_MALI_NO_MALI
+static int kbase_common_reg_map(struct kbase_device *kbdev)
+{
+ return 0;
+}
+static void kbase_common_reg_unmap(struct kbase_device * const kbdev)
+{
+}
+#else /* CONFIG_MALI_NO_MALI */
+static int kbase_common_reg_map(struct kbase_device *kbdev)
+{
+ int err = -ENOMEM;
+
+ if (!request_mem_region(kbdev->reg_start, kbdev->reg_size, dev_name(kbdev->dev))) {
+ dev_err(kbdev->dev, "Register window unavailable\n");
+ err = -EIO;
+ goto out_region;
+ }
+
+ kbdev->reg = ioremap(kbdev->reg_start, kbdev->reg_size);
+ if (!kbdev->reg) {
+ dev_err(kbdev->dev, "Can't remap register window\n");
+ err = -EINVAL;
+ goto out_ioremap;
+ }
+
+ return 0;
+
+ out_ioremap:
+ release_mem_region(kbdev->reg_start, kbdev->reg_size);
+ out_region:
+ return err;
+}
+
+static void kbase_common_reg_unmap(struct kbase_device * const kbdev)
+{
+ if (kbdev->reg) {
+ iounmap(kbdev->reg);
+ release_mem_region(kbdev->reg_start, kbdev->reg_size);
+ kbdev->reg = NULL;
+ kbdev->reg_start = 0;
+ kbdev->reg_size = 0;
+ }
+}
+#endif /* CONFIG_MALI_NO_MALI */
+
+static int registers_map(struct kbase_device * const kbdev)
+{
+
+ /* the first memory resource is the physical address of the GPU
+ * registers */
+ struct platform_device *pdev = to_platform_device(kbdev->dev);
+ struct resource *reg_res;
+ int err;
+
+ reg_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!reg_res) {
+ dev_err(kbdev->dev, "Invalid register resource\n");
+ return -ENOENT;
+ }
+
+ kbdev->reg_start = reg_res->start;
+ kbdev->reg_size = resource_size(reg_res);
+
+ err = kbase_common_reg_map(kbdev);
+ if (err) {
+ dev_err(kbdev->dev, "Failed to map registers\n");
+ return err;
+ }
+
+ return 0;
+}
+
+static void registers_unmap(struct kbase_device *kbdev)
+{
+ kbase_common_reg_unmap(kbdev);
+}
+
+static int power_control_init(struct platform_device *pdev)
+{
+ struct kbase_device *kbdev = to_kbase_device(&pdev->dev);
+ int err = 0;
+
+ if (!kbdev)
+ return -ENODEV;
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 12, 0)) && defined(CONFIG_OF) \
+ && defined(CONFIG_REGULATOR)
+ kbdev->regulator = regulator_get_optional(kbdev->dev, "mali");
+ if (IS_ERR_OR_NULL(kbdev->regulator)) {
+ err = PTR_ERR(kbdev->regulator);
+ kbdev->regulator = NULL;
+ if (err == -EPROBE_DEFER) {
+ dev_err(&pdev->dev, "Failed to get regulator\n");
+ return err;
+ }
+ dev_info(kbdev->dev,
+ "Continuing without Mali regulator control\n");
+ /* Allow probe to continue without regulator */
+ }
+#endif /* LINUX_VERSION_CODE >= 3, 12, 0 */
+
+ kbdev->clock = clk_get(kbdev->dev, "clk_mali");
+ if (IS_ERR_OR_NULL(kbdev->clock)) {
+ err = PTR_ERR(kbdev->clock);
+ kbdev->clock = NULL;
+ if (err == -EPROBE_DEFER) {
+ dev_err(&pdev->dev, "Failed to get clock\n");
+ goto fail;
+ }
+ dev_info(kbdev->dev, "Continuing without Mali clock control\n");
+ /* Allow probe to continue without clock. */
+ } else {
+ err = clk_prepare_enable(kbdev->clock);
+ if (err) {
+ dev_err(kbdev->dev,
+ "Failed to prepare and enable clock (%d)\n",
+ err);
+ goto fail;
+ }
+ }
+
+#if defined(CONFIG_OF) && defined(CONFIG_PM_OPP)
+ /* Register the OPPs if they are available in device tree */
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 4, 0)) \
+ || defined(LSK_OPPV2_BACKPORT)
+ err = dev_pm_opp_of_add_table(kbdev->dev);
+#elif (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 7, 0))
+ err = of_init_opp_table(kbdev->dev);
+#else
+ err = 0;
+#endif /* LINUX_VERSION_CODE */
+ if (err)
+ dev_dbg(kbdev->dev, "OPP table not found\n");
+#endif /* CONFIG_OF && CONFIG_PM_OPP */
+
+ return 0;
+
+fail:
+ if (kbdev->clock != NULL) {
+ clk_put(kbdev->clock);
+ kbdev->clock = NULL;
+ }
+
+#ifdef CONFIG_REGULATOR
+ if (NULL != kbdev->regulator) {
+ regulator_put(kbdev->regulator);
+ kbdev->regulator = NULL;
+ }
+#endif
+
+ return err;
+}
+
+static void power_control_term(struct kbase_device *kbdev)
+{
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 4, 0))
+ dev_pm_opp_of_remove_table(kbdev->dev);
+#elif (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 19, 0))
+ of_free_opp_table(kbdev->dev);
+#endif
+
+ if (kbdev->clock) {
+ clk_disable_unprepare(kbdev->clock);
+ clk_put(kbdev->clock);
+ kbdev->clock = NULL;
+ }
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 12, 0)) && defined(CONFIG_OF) \
+ && defined(CONFIG_REGULATOR)
+ if (kbdev->regulator) {
+ regulator_put(kbdev->regulator);
+ kbdev->regulator = NULL;
+ }
+#endif /* LINUX_VERSION_CODE >= 3, 12, 0 */
+}
+
+#ifdef CONFIG_DEBUG_FS
+
+#if KBASE_GPU_RESET_EN
+#include <mali_kbase_hwaccess_jm.h>
+
+static void trigger_quirks_reload(struct kbase_device *kbdev)
+{
+ kbase_pm_context_active(kbdev);
+ if (kbase_prepare_to_reset_gpu(kbdev))
+ kbase_reset_gpu(kbdev);
+ kbase_pm_context_idle(kbdev);
+}
+
+#define MAKE_QUIRK_ACCESSORS(type) \
+static int type##_quirks_set(void *data, u64 val) \
+{ \
+ struct kbase_device *kbdev; \
+ kbdev = (struct kbase_device *)data; \
+ kbdev->hw_quirks_##type = (u32)val; \
+ trigger_quirks_reload(kbdev); \
+ return 0;\
+} \
+\
+static int type##_quirks_get(void *data, u64 *val) \
+{ \
+ struct kbase_device *kbdev;\
+ kbdev = (struct kbase_device *)data;\
+ *val = kbdev->hw_quirks_##type;\
+ return 0;\
+} \
+DEFINE_SIMPLE_ATTRIBUTE(fops_##type##_quirks, type##_quirks_get,\
+ type##_quirks_set, "%llu\n")
+
+MAKE_QUIRK_ACCESSORS(sc);
+MAKE_QUIRK_ACCESSORS(tiler);
+MAKE_QUIRK_ACCESSORS(mmu);
+
+#endif /* KBASE_GPU_RESET_EN */
+
+/**
+ * debugfs_protected_debug_mode_read - "protected_debug_mode" debugfs read
+ * @file: File object to read is for
+ * @buf: User buffer to populate with data
+ * @len: Length of user buffer
+ * @ppos: Offset within file object
+ *
+ * Retrieves the current status of protected debug mode
+ * (0 = disabled, 1 = enabled)
+ *
+ * Return: Number of bytes added to user buffer
+ */
+static ssize_t debugfs_protected_debug_mode_read(struct file *file,
+ char __user *buf, size_t len, loff_t *ppos)
+{
+ struct kbase_device *kbdev = (struct kbase_device *)file->private_data;
+ u32 gpu_status;
+ ssize_t ret_val;
+
+ kbase_pm_context_active(kbdev);
+ gpu_status = kbase_reg_read(kbdev, GPU_CONTROL_REG(GPU_STATUS), NULL);
+ kbase_pm_context_idle(kbdev);
+
+ if (gpu_status & GPU_DBGEN)
+ ret_val = simple_read_from_buffer(buf, len, ppos, "1\n", 2);
+ else
+ ret_val = simple_read_from_buffer(buf, len, ppos, "0\n", 2);
+
+ return ret_val;
+}
+
+/*
+ * struct fops_protected_debug_mode - "protected_debug_mode" debugfs fops
+ *
+ * Contains the file operations for the "protected_debug_mode" debugfs file
+ */
+static const struct file_operations fops_protected_debug_mode = {
+ .open = simple_open,
+ .read = debugfs_protected_debug_mode_read,
+ .llseek = default_llseek,
+};
+
+static int kbase_device_debugfs_init(struct kbase_device *kbdev)
+{
+ struct dentry *debugfs_ctx_defaults_directory;
+ int err;
+
+ kbdev->mali_debugfs_directory = debugfs_create_dir(kbdev->devname,
+ NULL);
+ if (!kbdev->mali_debugfs_directory) {
+ dev_err(kbdev->dev, "Couldn't create mali debugfs directory\n");
+ err = -ENOMEM;
+ goto out;
+ }
+
+ kbdev->debugfs_ctx_directory = debugfs_create_dir("ctx",
+ kbdev->mali_debugfs_directory);
+ if (!kbdev->debugfs_ctx_directory) {
+ dev_err(kbdev->dev, "Couldn't create mali debugfs ctx directory\n");
+ err = -ENOMEM;
+ goto out;
+ }
+
+ debugfs_ctx_defaults_directory = debugfs_create_dir("defaults",
+ kbdev->debugfs_ctx_directory);
+ if (!debugfs_ctx_defaults_directory) {
+ dev_err(kbdev->dev, "Couldn't create mali debugfs ctx defaults directory\n");
+ err = -ENOMEM;
+ goto out;
+ }
+
+#if !MALI_CUSTOMER_RELEASE
+ kbasep_regs_dump_debugfs_init(kbdev);
+#endif /* !MALI_CUSTOMER_RELEASE */
+ kbasep_regs_history_debugfs_init(kbdev);
+
+ kbase_debug_job_fault_debugfs_init(kbdev);
+ kbasep_gpu_memory_debugfs_init(kbdev);
+ kbase_as_fault_debugfs_init(kbdev);
+#if KBASE_GPU_RESET_EN
+ debugfs_create_file("quirks_sc", 0644,
+ kbdev->mali_debugfs_directory, kbdev,
+ &fops_sc_quirks);
+ debugfs_create_file("quirks_tiler", 0644,
+ kbdev->mali_debugfs_directory, kbdev,
+ &fops_tiler_quirks);
+ debugfs_create_file("quirks_mmu", 0644,
+ kbdev->mali_debugfs_directory, kbdev,
+ &fops_mmu_quirks);
+#endif /* KBASE_GPU_RESET_EN */
+
+#ifndef CONFIG_MALI_COH_USER
+ debugfs_create_bool("infinite_cache", 0644,
+ debugfs_ctx_defaults_directory,
+ &kbdev->infinite_cache_active_default);
+#endif /* CONFIG_MALI_COH_USER */
+
+ debugfs_create_size_t("mem_pool_max_size", 0644,
+ debugfs_ctx_defaults_directory,
+ &kbdev->mem_pool_max_size_default);
+
+ if (kbase_hw_has_feature(kbdev, BASE_HW_FEATURE_PROTECTED_DEBUG_MODE)) {
+ debugfs_create_file("protected_debug_mode", S_IRUGO,
+ kbdev->mali_debugfs_directory, kbdev,
+ &fops_protected_debug_mode);
+ }
+
+#if KBASE_TRACE_ENABLE
+ kbasep_trace_debugfs_init(kbdev);
+#endif /* KBASE_TRACE_ENABLE */
+
+#ifdef CONFIG_MALI_TRACE_TIMELINE
+ kbasep_trace_timeline_debugfs_init(kbdev);
+#endif /* CONFIG_MALI_TRACE_TIMELINE */
+
+ return 0;
+
+out:
+ debugfs_remove_recursive(kbdev->mali_debugfs_directory);
+ return err;
+}
+
+static void kbase_device_debugfs_term(struct kbase_device *kbdev)
+{
+ debugfs_remove_recursive(kbdev->mali_debugfs_directory);
+}
+
+#else /* CONFIG_DEBUG_FS */
+static inline int kbase_device_debugfs_init(struct kbase_device *kbdev)
+{
+ return 0;
+}
+
+static inline void kbase_device_debugfs_term(struct kbase_device *kbdev) { }
+#endif /* CONFIG_DEBUG_FS */
+
+static void kbase_device_coherency_init(struct kbase_device *kbdev, u32 gpu_id)
+{
+#ifdef CONFIG_OF
+ u32 supported_coherency_bitmap =
+ kbdev->gpu_props.props.raw_props.coherency_mode;
+ const void *coherency_override_dts;
+ u32 override_coherency;
+#endif /* CONFIG_OF */
+
+ kbdev->system_coherency = COHERENCY_NONE;
+
+ /* device tree may override the coherency */
+#ifdef CONFIG_OF
+ coherency_override_dts = of_get_property(kbdev->dev->of_node,
+ "system-coherency",
+ NULL);
+ if (coherency_override_dts) {
+
+ override_coherency = be32_to_cpup(coherency_override_dts);
+
+ if ((override_coherency <= COHERENCY_NONE) &&
+ (supported_coherency_bitmap &
+ COHERENCY_FEATURE_BIT(override_coherency))) {
+
+ kbdev->system_coherency = override_coherency;
+
+ dev_info(kbdev->dev,
+ "Using coherency mode %u set from dtb",
+ override_coherency);
+ } else
+ dev_warn(kbdev->dev,
+ "Ignoring unsupported coherency mode %u set from dtb",
+ override_coherency);
+ }
+
+#endif /* CONFIG_OF */
+
+ kbdev->gpu_props.props.raw_props.coherency_mode =
+ kbdev->system_coherency;
+}
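+
+/*
+ * Illustrative device-tree fragment (not part of this driver). The node
+ * name, unit address and compatible value below are examples only; the
+ * property read above is "system-coherency", whose value must be one of
+ * the COHERENCY_* modes advertised by the GPU. The value <31> is shown on
+ * the assumption that it corresponds to COHERENCY_NONE:
+ *
+ *   gpu@ffa30000 {
+ *           compatible = "arm,mali-midgard";
+ *           system-coherency = <31>;
+ *   };
+ */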
+
+#ifdef CONFIG_MALI_FPGA_BUS_LOGGER
+
+/* Callback used by the kbase bus logger client, to initiate a GPU reset
+ * when the bus log is restarted. GPU reset is used as a reference point
+ * in HW bus log analyses.
+ */
+static void kbase_logging_started_cb(void *data)
+{
+ struct kbase_device *kbdev = (struct kbase_device *)data;
+
+ if (kbase_prepare_to_reset_gpu(kbdev))
+ kbase_reset_gpu(kbdev);
+ dev_info(kbdev->dev, "KBASE - Bus logger restarted\n");
+}
+#endif
+
+static struct attribute *kbase_attrs[] = {
+#ifdef CONFIG_MALI_DEBUG
+ &dev_attr_debug_command.attr,
+ &dev_attr_js_softstop_always.attr,
+#endif
+#if !MALI_CUSTOMER_RELEASE
+ &dev_attr_force_replay.attr,
+#endif
+ &dev_attr_js_timeouts.attr,
+ &dev_attr_soft_job_timeout.attr,
+ &dev_attr_gpuinfo.attr,
+ &dev_attr_dvfs_period.attr,
+ &dev_attr_pm_poweroff.attr,
+ &dev_attr_reset_timeout.attr,
+ &dev_attr_js_scheduling_period.attr,
+ &dev_attr_power_policy.attr,
+ &dev_attr_core_availability_policy.attr,
+ &dev_attr_core_mask.attr,
+ &dev_attr_mem_pool_size.attr,
+ &dev_attr_mem_pool_max_size.attr,
+ NULL
+};
+
+static const struct attribute_group kbase_attr_group = {
+ .attrs = kbase_attrs,
+};
+
+static int kbase_platform_device_remove(struct platform_device *pdev)
+{
+ struct kbase_device *kbdev = to_kbase_device(&pdev->dev);
+ const struct list_head *dev_list;
+
+ if (!kbdev)
+ return -ENODEV;
+
+#ifdef CONFIG_MALI_FPGA_BUS_LOGGER
+ if (kbdev->inited_subsys & inited_buslogger) {
+ bl_core_client_unregister(kbdev->buslogger);
+ kbdev->inited_subsys &= ~inited_buslogger;
+ }
+#endif
+
+ if (kbdev->inited_subsys & inited_sysfs_group) {
+ sysfs_remove_group(&kbdev->dev->kobj, &kbase_attr_group);
+ kbdev->inited_subsys &= ~inited_sysfs_group;
+ }
+
+ if (kbdev->inited_subsys & inited_dev_list) {
+ dev_list = kbase_dev_list_get();
+ list_del(&kbdev->entry);
+ kbase_dev_list_put(dev_list);
+ kbdev->inited_subsys &= ~inited_dev_list;
+ }
+
+ if (kbdev->inited_subsys & inited_misc_register) {
+ misc_deregister(&kbdev->mdev);
+ kbdev->inited_subsys &= ~inited_misc_register;
+ }
+
+ if (kbdev->inited_subsys & inited_get_device) {
+ put_device(kbdev->dev);
+ kbdev->inited_subsys &= ~inited_get_device;
+ }
+
+ if (kbdev->inited_subsys & inited_debugfs) {
+ kbase_device_debugfs_term(kbdev);
+ kbdev->inited_subsys &= ~inited_debugfs;
+ }
+
+ if (kbdev->inited_subsys & inited_job_fault) {
+ kbase_debug_job_fault_dev_term(kbdev);
+ kbdev->inited_subsys &= ~inited_job_fault;
+ }
+
+#ifndef CONFIG_MALI_PRFCNT_SET_SECONDARY
+ if (kbdev->inited_subsys & inited_ipa) {
+ kbase_ipa_term(kbdev->ipa_ctx);
+ kbdev->inited_subsys &= ~inited_ipa;
+ }
+#endif /* CONFIG_MALI_PRFCNT_SET_SECONDARY */
+
+ if (kbdev->inited_subsys & inited_vinstr) {
+ kbase_vinstr_term(kbdev->vinstr_ctx);
+ kbdev->inited_subsys &= ~inited_vinstr;
+ }
+
+#ifdef CONFIG_MALI_DEVFREQ
+ if (kbdev->inited_subsys & inited_devfreq) {
+ kbase_devfreq_term(kbdev);
+ kbdev->inited_subsys &= ~inited_devfreq;
+ }
+#endif
+
+ if (kbdev->inited_subsys & inited_backend_late) {
+ kbase_backend_late_term(kbdev);
+ kbdev->inited_subsys &= ~inited_backend_late;
+ }
+
+ if (kbdev->inited_subsys & inited_tlstream) {
+ kbase_tlstream_term();
+ kbdev->inited_subsys &= ~inited_tlstream;
+ }
+
+ /* Bring job and mem sys to a halt before we continue termination */
+
+ if (kbdev->inited_subsys & inited_js)
+ kbasep_js_devdata_halt(kbdev);
+
+ if (kbdev->inited_subsys & inited_mem)
+ kbase_mem_halt(kbdev);
+
+ if (kbdev->inited_subsys & inited_js) {
+ kbasep_js_devdata_term(kbdev);
+ kbdev->inited_subsys &= ~inited_js;
+ }
+
+ if (kbdev->inited_subsys & inited_mem) {
+ kbase_mem_term(kbdev);
+ kbdev->inited_subsys &= ~inited_mem;
+ }
+
+ if (kbdev->inited_subsys & inited_pm_runtime_init) {
+ kbdev->pm.callback_power_runtime_term(kbdev);
+ kbdev->inited_subsys &= ~inited_pm_runtime_init;
+ }
+
+ if (kbdev->inited_subsys & inited_device) {
+ kbase_device_term(kbdev);
+ kbdev->inited_subsys &= ~inited_device;
+ }
+
+ if (kbdev->inited_subsys & inited_backend_early) {
+ kbase_backend_early_term(kbdev);
+ kbdev->inited_subsys &= ~inited_backend_early;
+ }
+
+ if (kbdev->inited_subsys & inited_io_history) {
+ kbase_io_history_term(&kbdev->io_history);
+ kbdev->inited_subsys &= ~inited_io_history;
+ }
+
+ if (kbdev->inited_subsys & inited_power_control) {
+ power_control_term(kbdev);
+ kbdev->inited_subsys &= ~inited_power_control;
+ }
+
+ if (kbdev->inited_subsys & inited_registers_map) {
+ registers_unmap(kbdev);
+ kbdev->inited_subsys &= ~inited_registers_map;
+ }
+
+#ifdef CONFIG_MALI_NO_MALI
+ if (kbdev->inited_subsys & inited_gpu_device) {
+ gpu_device_destroy(kbdev);
+ kbdev->inited_subsys &= ~inited_gpu_device;
+ }
+#endif /* CONFIG_MALI_NO_MALI */
+
+ if (kbdev->inited_subsys != 0)
+ dev_err(kbdev->dev, "Missing sub system termination\n");
+
+ kbase_device_free(kbdev);
+
+ return 0;
+}
+
+
+/* Number of register accesses for the buffer that we allocate during
+ * initialization time. The buffer size can be changed later via debugfs. */
+#define KBASEP_DEFAULT_REGISTER_HISTORY_SIZE ((u16)512)
+
+static int kbase_platform_device_probe(struct platform_device *pdev)
+{
+ struct kbase_device *kbdev;
+ struct mali_base_gpu_core_props *core_props;
+ u32 gpu_id;
+ const struct list_head *dev_list;
+ int err = 0;
+
+#ifdef CONFIG_OF
+ err = kbase_platform_early_init();
+ if (err) {
+ dev_err(&pdev->dev, "Early platform initialization failed\n");
+ kbase_platform_device_remove(pdev);
+ return err;
+ }
+#endif
+
+ kbdev = kbase_device_alloc();
+ if (!kbdev) {
+ dev_err(&pdev->dev, "Allocate device failed\n");
+ kbase_platform_device_remove(pdev);
+ return -ENOMEM;
+ }
+
+ kbdev->dev = &pdev->dev;
+ dev_set_drvdata(kbdev->dev, kbdev);
+
+#ifdef CONFIG_MALI_NO_MALI
+ err = gpu_device_create(kbdev);
+ if (err) {
+ dev_err(&pdev->dev, "Dummy model initialization failed\n");
+ kbase_platform_device_remove(pdev);
+ return err;
+ }
+ kbdev->inited_subsys |= inited_gpu_device;
+#endif /* CONFIG_MALI_NO_MALI */
+
+ err = assign_irqs(pdev);
+ if (err) {
+ dev_err(&pdev->dev, "IRQ search failed\n");
+ kbase_platform_device_remove(pdev);
+ return err;
+ }
+
+ err = registers_map(kbdev);
+ if (err) {
+ dev_err(&pdev->dev, "Register map failed\n");
+ kbase_platform_device_remove(pdev);
+ return err;
+ }
+ kbdev->inited_subsys |= inited_registers_map;
+
+ err = power_control_init(pdev);
+ if (err) {
+ dev_err(&pdev->dev, "Power control initialization failed\n");
+ kbase_platform_device_remove(pdev);
+ return err;
+ }
+ kbdev->inited_subsys |= inited_power_control;
+
+ err = kbase_io_history_init(&kbdev->io_history,
+ KBASEP_DEFAULT_REGISTER_HISTORY_SIZE);
+ if (err) {
+ dev_err(&pdev->dev, "Register access history initialization failed\n");
+ kbase_platform_device_remove(pdev);
+ return -ENOMEM;
+ }
+ kbdev->inited_subsys |= inited_io_history;
+
+ err = kbase_backend_early_init(kbdev);
+ if (err) {
+ dev_err(kbdev->dev, "Early backend initialization failed\n");
+ kbase_platform_device_remove(pdev);
+ return err;
+ }
+ kbdev->inited_subsys |= inited_backend_early;
+
+ scnprintf(kbdev->devname, DEVNAME_SIZE, "%s%d", kbase_drv_name,
+ kbase_dev_nr);
+
+ kbase_disjoint_init(kbdev);
+
+ /* obtain min/max configured gpu frequencies */
+ core_props = &(kbdev->gpu_props.props.core_props);
+ core_props->gpu_freq_khz_min = GPU_FREQ_KHZ_MIN;
+ core_props->gpu_freq_khz_max = GPU_FREQ_KHZ_MAX;
+
+ kbdev->gpu_props.irq_throttle_time_us = DEFAULT_IRQ_THROTTLE_TIME_US;
+
+ err = kbase_device_init(kbdev);
+ if (err) {
+ dev_err(kbdev->dev, "Device initialization failed (%d)\n", err);
+ kbase_platform_device_remove(pdev);
+ return err;
+ }
+ kbdev->inited_subsys |= inited_device;
+
+ if (kbdev->pm.callback_power_runtime_init) {
+ err = kbdev->pm.callback_power_runtime_init(kbdev);
+ if (err) {
+ dev_err(kbdev->dev,
+ "Runtime PM initialization failed\n");
+ kbase_platform_device_remove(pdev);
+ return err;
+ }
+ kbdev->inited_subsys |= inited_pm_runtime_init;
+ }
+
+ err = kbase_mem_init(kbdev);
+ if (err) {
+ dev_err(kbdev->dev, "Memory subsystem initialization failed\n");
+ kbase_platform_device_remove(pdev);
+ return err;
+ }
+ kbdev->inited_subsys |= inited_mem;
+
+ gpu_id = kbdev->gpu_props.props.raw_props.gpu_id;
+ gpu_id &= GPU_ID_VERSION_PRODUCT_ID;
+ gpu_id = gpu_id >> GPU_ID_VERSION_PRODUCT_ID_SHIFT;
+
+ kbase_device_coherency_init(kbdev, gpu_id);
+
+ kbasep_protected_mode_init(kbdev);
+
+ err = kbasep_js_devdata_init(kbdev);
+ if (err) {
+ dev_err(kbdev->dev, "Job JS devdata initialization failed\n");
+ kbase_platform_device_remove(pdev);
+ return err;
+ }
+ kbdev->inited_subsys |= inited_js;
+
+ err = kbase_tlstream_init();
+ if (err) {
+ dev_err(kbdev->dev, "Timeline stream initialization failed\n");
+ kbase_platform_device_remove(pdev);
+ return err;
+ }
+ kbdev->inited_subsys |= inited_tlstream;
+
+ err = kbase_backend_late_init(kbdev);
+ if (err) {
+ dev_err(kbdev->dev, "Late backend initialization failed\n");
+ kbase_platform_device_remove(pdev);
+ return err;
+ }
+ kbdev->inited_subsys |= inited_backend_late;
+
+#ifdef CONFIG_MALI_DEVFREQ
+ err = kbase_devfreq_init(kbdev);
+ if (err) {
+ dev_err(kbdev->dev, "Fevfreq initialization failed\n");
+ kbase_platform_device_remove(pdev);
+ return err;
+ }
+ kbdev->inited_subsys |= inited_devfreq;
+#endif /* CONFIG_MALI_DEVFREQ */
+
+ kbdev->vinstr_ctx = kbase_vinstr_init(kbdev);
+ if (!kbdev->vinstr_ctx) {
+ dev_err(kbdev->dev,
+ "Virtual instrumentation initialization failed\n");
+ kbase_platform_device_remove(pdev);
+ return -EINVAL;
+ }
+ kbdev->inited_subsys |= inited_vinstr;
+
+#ifndef CONFIG_MALI_PRFCNT_SET_SECONDARY
+ kbdev->ipa_ctx = kbase_ipa_init(kbdev);
+ if (!kbdev->ipa_ctx) {
+ dev_err(kbdev->dev, "IPA initialization failed\n");
+ kbase_platform_device_remove(pdev);
+ return -EINVAL;
+ }
+
+ kbdev->inited_subsys |= inited_ipa;
+#endif /* CONFIG_MALI_PRFCNT_SET_SECONDARY */
+
+ err = kbase_debug_job_fault_dev_init(kbdev);
+ if (err) {
+ dev_err(kbdev->dev, "Job fault debug initialization failed\n");
+ kbase_platform_device_remove(pdev);
+ return err;
+ }
+ kbdev->inited_subsys |= inited_job_fault;
+
+ err = kbase_device_debugfs_init(kbdev);
+ if (err) {
+ dev_err(kbdev->dev, "DebugFS initialization failed");
+ kbase_platform_device_remove(pdev);
+ return err;
+ }
+ kbdev->inited_subsys |= inited_debugfs;
+
+ /* initialize the kctx list */
+ mutex_init(&kbdev->kctx_list_lock);
+ INIT_LIST_HEAD(&kbdev->kctx_list);
+
+ kbdev->mdev.minor = MISC_DYNAMIC_MINOR;
+ kbdev->mdev.name = kbdev->devname;
+ kbdev->mdev.fops = &kbase_fops;
+ kbdev->mdev.parent = get_device(kbdev->dev);
+ kbdev->inited_subsys |= inited_get_device;
+
+ err = misc_register(&kbdev->mdev);
+ if (err) {
+ dev_err(kbdev->dev, "Misc device registration failed for %s\n",
+ kbdev->devname);
+ kbase_platform_device_remove(pdev);
+ return err;
+ }
+ kbdev->inited_subsys |= inited_misc_register;
+
+ dev_list = kbase_dev_list_get();
+ list_add(&kbdev->entry, &kbase_dev_list);
+ kbase_dev_list_put(dev_list);
+ kbdev->inited_subsys |= inited_dev_list;
+
+ err = sysfs_create_group(&kbdev->dev->kobj, &kbase_attr_group);
+ if (err) {
+ dev_err(&pdev->dev, "SysFS group creation failed\n");
+ kbase_platform_device_remove(pdev);
+ return err;
+ }
+ kbdev->inited_subsys |= inited_sysfs_group;
+
+#ifdef CONFIG_MALI_FPGA_BUS_LOGGER
+ err = bl_core_client_register(kbdev->devname,
+ kbase_logging_started_cb,
+ kbdev, &kbdev->buslogger,
+ THIS_MODULE, NULL);
+ if (err == 0) {
+ kbdev->inited_subsys |= inited_buslogger;
+ bl_core_set_threshold(kbdev->buslogger, 1024*1024*1024);
+ } else {
+ dev_warn(kbdev->dev, "Bus log client registration failed\n");
+ err = 0;
+ }
+#endif
+
+ dev_info(kbdev->dev,
+ "Probed as %s\n", dev_name(kbdev->mdev.this_device));
+
+ kbase_dev_nr++;
+
+ return err;
+}
+
+#undef KBASEP_DEFAULT_REGISTER_HISTORY_SIZE
+
+
+/** Suspend callback from the OS.
+ *
+ * This is called by Linux when the device should suspend.
+ *
+ * @param dev The device to suspend
+ *
+ * @return A standard Linux error code
+ */
+static int kbase_device_suspend(struct device *dev)
+{
+ struct kbase_device *kbdev = to_kbase_device(dev);
+
+ if (!kbdev)
+ return -ENODEV;
+
+#if defined(CONFIG_PM_DEVFREQ) && \
+ (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 8, 0))
+ devfreq_suspend_device(kbdev->devfreq);
+#endif
+
+ kbase_pm_suspend(kbdev);
+ return 0;
+}
+
+/** Resume callback from the OS.
+ *
+ * This is called by Linux when the device should resume from suspension.
+ *
+ * @param dev The device to resume
+ *
+ * @return A standard Linux error code
+ */
+static int kbase_device_resume(struct device *dev)
+{
+ struct kbase_device *kbdev = to_kbase_device(dev);
+
+ if (!kbdev)
+ return -ENODEV;
+
+ kbase_pm_resume(kbdev);
+
+#if defined(CONFIG_PM_DEVFREQ) && \
+ (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 8, 0))
+ devfreq_resume_device(kbdev->devfreq);
+#endif
+ return 0;
+}
+
+/** Runtime suspend callback from the OS.
+ *
+ * This is called by Linux when the device should prepare for a condition in which it will
+ * not be able to communicate with the CPU(s) and RAM due to power management.
+ *
+ * @param dev The device to suspend
+ *
+ * @return A standard Linux error code
+ */
+#ifdef KBASE_PM_RUNTIME
+static int kbase_device_runtime_suspend(struct device *dev)
+{
+ struct kbase_device *kbdev = to_kbase_device(dev);
+
+ if (!kbdev)
+ return -ENODEV;
+
+#if defined(CONFIG_PM_DEVFREQ) && \
+ (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 8, 0))
+ devfreq_suspend_device(kbdev->devfreq);
+#endif
+
+ if (kbdev->pm.backend.callback_power_runtime_off) {
+ kbdev->pm.backend.callback_power_runtime_off(kbdev);
+ dev_dbg(dev, "runtime suspend\n");
+ }
+ return 0;
+}
+#endif /* KBASE_PM_RUNTIME */
+
+/** Runtime resume callback from the OS.
+ *
+ * This is called by Linux when the device should go into a fully active state.
+ *
+ * @param dev The device to suspend
+ *
+ * @return A standard Linux error code
+ */
+
+#ifdef KBASE_PM_RUNTIME
+static int kbase_device_runtime_resume(struct device *dev)
+{
+ int ret = 0;
+ struct kbase_device *kbdev = to_kbase_device(dev);
+
+ if (!kbdev)
+ return -ENODEV;
+
+ if (kbdev->pm.backend.callback_power_runtime_on) {
+ ret = kbdev->pm.backend.callback_power_runtime_on(kbdev);
+ dev_dbg(dev, "runtime resume\n");
+ }
+
+#if defined(CONFIG_PM_DEVFREQ) && \
+ (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 8, 0))
+ devfreq_resume_device(kbdev->devfreq);
+#endif
+
+ return ret;
+}
+#endif /* KBASE_PM_RUNTIME */
+
+
+#ifdef KBASE_PM_RUNTIME
+/**
+ * kbase_device_runtime_idle - Runtime idle callback from the OS.
+ * @dev: The device to suspend
+ *
+ * This is called by Linux when the device appears to be inactive and it might
+ * be placed into a low power state.
+ *
+ * Return: 0 if device can be suspended, non-zero to avoid runtime autosuspend,
+ * otherwise a standard Linux error code
+ */
+static int kbase_device_runtime_idle(struct device *dev)
+{
+ struct kbase_device *kbdev = to_kbase_device(dev);
+
+ if (!kbdev)
+ return -ENODEV;
+
+ /* Use platform specific implementation if it exists. */
+ if (kbdev->pm.backend.callback_power_runtime_idle)
+ return kbdev->pm.backend.callback_power_runtime_idle(kbdev);
+
+ return 0;
+}
+#endif /* KBASE_PM_RUNTIME */
+
+/** The power management operations for the platform driver.
+ */
+static const struct dev_pm_ops kbase_pm_ops = {
+ .suspend = kbase_device_suspend,
+ .resume = kbase_device_resume,
+#ifdef KBASE_PM_RUNTIME
+ .runtime_suspend = kbase_device_runtime_suspend,
+ .runtime_resume = kbase_device_runtime_resume,
+ .runtime_idle = kbase_device_runtime_idle,
+#endif /* KBASE_PM_RUNTIME */
+};
+
+#ifdef CONFIG_OF
+static const struct of_device_id kbase_dt_ids[] = {
+ { .compatible = "arm,malit6xx" },
+ { .compatible = "arm,mali-midgard" },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, kbase_dt_ids);
+#endif
+
+static struct platform_driver kbase_platform_driver = {
+ .probe = kbase_platform_device_probe,
+ .remove = kbase_platform_device_remove,
+ .driver = {
+ .name = kbase_drv_name,
+ .owner = THIS_MODULE,
+ .pm = &kbase_pm_ops,
+ .of_match_table = of_match_ptr(kbase_dt_ids),
+ },
+};
+
+/*
+ * When using Device Tree, the driver no longer provides a shortcut to
+ * create the Mali platform device.
+ */
+#ifdef CONFIG_OF
+module_platform_driver(kbase_platform_driver);
+#else
+
+static int __init kbase_driver_init(void)
+{
+ int ret;
+
+ ret = kbase_platform_early_init();
+ if (ret)
+ return ret;
+
+#ifdef CONFIG_MALI_PLATFORM_FAKE
+ ret = kbase_platform_fake_register();
+ if (ret)
+ return ret;
+#endif
+ ret = platform_driver_register(&kbase_platform_driver);
+#ifdef CONFIG_MALI_PLATFORM_FAKE
+ if (ret)
+ kbase_platform_fake_unregister();
+#endif
+ return ret;
+}
+
+static void __exit kbase_driver_exit(void)
+{
+ platform_driver_unregister(&kbase_platform_driver);
+#ifdef CONFIG_MALI_PLATFORM_FAKE
+ kbase_platform_fake_unregister();
+#endif
+}
+
+module_init(kbase_driver_init);
+module_exit(kbase_driver_exit);
+
+#endif /* CONFIG_OF */
+
+MODULE_LICENSE("GPL");
+MODULE_VERSION(MALI_RELEASE_NAME " (UK version " \
+ __stringify(BASE_UK_VERSION_MAJOR) "." \
+ __stringify(BASE_UK_VERSION_MINOR) ")");
+
+#if defined(CONFIG_MALI_GATOR_SUPPORT) || defined(CONFIG_MALI_SYSTEM_TRACE)
+#define CREATE_TRACE_POINTS
+#endif
+
+#ifdef CONFIG_MALI_GATOR_SUPPORT
+/* Create the trace points (otherwise we just get code to call a tracepoint) */
+#include "mali_linux_trace.h"
+
+EXPORT_TRACEPOINT_SYMBOL_GPL(mali_job_slots_event);
+EXPORT_TRACEPOINT_SYMBOL_GPL(mali_pm_status);
+EXPORT_TRACEPOINT_SYMBOL_GPL(mali_pm_power_on);
+EXPORT_TRACEPOINT_SYMBOL_GPL(mali_pm_power_off);
+EXPORT_TRACEPOINT_SYMBOL_GPL(mali_page_fault_insert_pages);
+EXPORT_TRACEPOINT_SYMBOL_GPL(mali_mmu_as_in_use);
+EXPORT_TRACEPOINT_SYMBOL_GPL(mali_mmu_as_released);
+EXPORT_TRACEPOINT_SYMBOL_GPL(mali_total_alloc_pages_change);
+
+void kbase_trace_mali_pm_status(u32 event, u64 value)
+{
+ trace_mali_pm_status(event, value);
+}
+
+void kbase_trace_mali_pm_power_off(u32 event, u64 value)
+{
+ trace_mali_pm_power_off(event, value);
+}
+
+void kbase_trace_mali_pm_power_on(u32 event, u64 value)
+{
+ trace_mali_pm_power_on(event, value);
+}
+
+void kbase_trace_mali_job_slots_event(u32 event, const struct kbase_context *kctx, u8 atom_id)
+{
+ trace_mali_job_slots_event(event, (kctx != NULL ? kctx->tgid : 0), (kctx != NULL ? kctx->pid : 0), atom_id);
+}
+
+void kbase_trace_mali_page_fault_insert_pages(int event, u32 value)
+{
+ trace_mali_page_fault_insert_pages(event, value);
+}
+
+void kbase_trace_mali_mmu_as_in_use(int event)
+{
+ trace_mali_mmu_as_in_use(event);
+}
+
+void kbase_trace_mali_mmu_as_released(int event)
+{
+ trace_mali_mmu_as_released(event);
+}
+
+void kbase_trace_mali_total_alloc_pages_change(long long int event)
+{
+ trace_mali_total_alloc_pages_change(event);
+}
+#endif /* CONFIG_MALI_GATOR_SUPPORT */
+#ifdef CONFIG_MALI_SYSTEM_TRACE
+#include "mali_linux_kbase_trace.h"
+#endif
diff --git a/midgard-0.r2p0/mali_kbase_debug.c b/midgard-0.r2p0/mali_kbase_debug.c
new file mode 100755
index 0000000..fb57ac2
--- /dev/null
+++ b/midgard-0.r2p0/mali_kbase_debug.c
@@ -0,0 +1,39 @@
+/*
+ *
+ * (C) COPYRIGHT 2012-2014 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained
+ * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
+ * Boston, MA 02110-1301, USA.
+ *
+ */
+
+
+
+
+
+#include <mali_kbase.h>
+
+static struct kbasep_debug_assert_cb kbasep_debug_assert_registered_cb = {
+ NULL,
+ NULL
+};
+
+void kbase_debug_assert_register_hook(kbase_debug_assert_hook *func, void *param)
+{
+ kbasep_debug_assert_registered_cb.func = func;
+ kbasep_debug_assert_registered_cb.param = param;
+}
+
+void kbasep_debug_assert_call_hook(void)
+{
+ if (kbasep_debug_assert_registered_cb.func != NULL)
+ kbasep_debug_assert_registered_cb.func(kbasep_debug_assert_registered_cb.param);
+}
+KBASE_EXPORT_SYMBOL(kbasep_debug_assert_call_hook);
+
diff --git a/midgard-0.r2p0/mali_kbase_debug.h b/midgard-0.r2p0/mali_kbase_debug.h
new file mode 100755
index 0000000..5fff289
--- /dev/null
+++ b/midgard-0.r2p0/mali_kbase_debug.h
@@ -0,0 +1,164 @@
+/*
+ *
+ * (C) COPYRIGHT 2012-2015 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained
+ * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
+ * Boston, MA 02110-1301, USA.
+ *
+ */
+
+
+
+
+
+#ifndef _KBASE_DEBUG_H
+#define _KBASE_DEBUG_H
+
+#include <linux/bug.h>
+
+/** @brief If equal to 0, a trace containing the file, line, and function will be displayed before each message. */
+#define KBASE_DEBUG_SKIP_TRACE 0
+
+/** @brief If different from 0, the trace will only contain the file and line. */
+#define KBASE_DEBUG_SKIP_FUNCTION_NAME 0
+
+/** @brief Disable the assert tests if set to 1. The default is to disable asserts in release builds. */
+#ifndef KBASE_DEBUG_DISABLE_ASSERTS
+#ifdef CONFIG_MALI_DEBUG
+#define KBASE_DEBUG_DISABLE_ASSERTS 0
+#else
+#define KBASE_DEBUG_DISABLE_ASSERTS 1
+#endif
+#endif /* KBASE_DEBUG_DISABLE_ASSERTS */
+
+/** Function type that is called on a KBASE_DEBUG_ASSERT() or KBASE_DEBUG_ASSERT_MSG() */
+typedef void (kbase_debug_assert_hook) (void *);
+
+struct kbasep_debug_assert_cb {
+ kbase_debug_assert_hook *func;
+ void *param;
+};
+
+/**
+ * @def KBASEP_DEBUG_PRINT_TRACE
+ * @brief Private macro containing the format of the trace to display before every message
+ * @sa KBASE_DEBUG_SKIP_TRACE, KBASE_DEBUG_SKIP_FUNCTION_NAME
+ */
+#if !KBASE_DEBUG_SKIP_TRACE
+#define KBASEP_DEBUG_PRINT_TRACE \
+ "In file: " __FILE__ " line: " CSTD_STR2(__LINE__)
+#if !KBASE_DEBUG_SKIP_FUNCTION_NAME
+#define KBASEP_DEBUG_PRINT_FUNCTION __func__
+#else
+#define KBASEP_DEBUG_PRINT_FUNCTION ""
+#endif
+#else
+#define KBASEP_DEBUG_PRINT_TRACE ""
+#endif
+
+/**
+ * @def KBASEP_DEBUG_ASSERT_OUT(trace, function, ...)
+ * @brief (Private) system printing function associated with the @see KBASE_DEBUG_ASSERT_MSG event.
+ * @param trace location in the code from where the message is printed
+ * @param function function from where the message is printed
+ * @param ... Format string followed by format arguments.
+ * @note function parameter cannot be concatenated with other strings
+ */
+/* Select the correct system output function*/
+#ifdef CONFIG_MALI_DEBUG
+#define KBASEP_DEBUG_ASSERT_OUT(trace, function, ...)\
+ do { \
+ pr_err("Mali<ASSERT>: %s function:%s ", trace, function);\
+ pr_err(__VA_ARGS__);\
+ pr_err("\n");\
+ } while (false)
+#else
+#define KBASEP_DEBUG_ASSERT_OUT(trace, function, ...) CSTD_NOP()
+#endif
+
+#ifdef CONFIG_MALI_DEBUG
+#define KBASE_CALL_ASSERT_HOOK() kbasep_debug_assert_call_hook()
+#else
+#define KBASE_CALL_ASSERT_HOOK() CSTD_NOP()
+#endif
+
+/**
+ * @def KBASE_DEBUG_ASSERT(expr)
+ * @brief Calls @see KBASE_DEBUG_ASSERT_MSG and prints the expression @a expr if @a expr is false
+ *
+ * @note This macro does nothing if the flag @see KBASE_DEBUG_DISABLE_ASSERTS is set to 1
+ *
+ * @param expr Boolean expression
+ */
+#define KBASE_DEBUG_ASSERT(expr) \
+ KBASE_DEBUG_ASSERT_MSG(expr, #expr)
+
+#if KBASE_DEBUG_DISABLE_ASSERTS
+#define KBASE_DEBUG_ASSERT_MSG(expr, ...) CSTD_NOP()
+#else
+ /**
+ * @def KBASE_DEBUG_ASSERT_MSG(expr, ...)
+ * @brief Calls @see KBASEP_DEBUG_ASSERT_OUT and prints the given message if @a expr is false
+ *
+ * @note This macro does nothing if the flag @see KBASE_DEBUG_DISABLE_ASSERTS is set to 1
+ *
+ * @param expr Boolean expression
+ * @param ... Message to display when @a expr is false, as a format string followed by format arguments.
+ */
+#define KBASE_DEBUG_ASSERT_MSG(expr, ...) \
+ do { \
+ if (!(expr)) { \
+ KBASEP_DEBUG_ASSERT_OUT(KBASEP_DEBUG_PRINT_TRACE, KBASEP_DEBUG_PRINT_FUNCTION, __VA_ARGS__);\
+ KBASE_CALL_ASSERT_HOOK();\
+ BUG();\
+ } \
+ } while (false)
+#endif /* KBASE_DEBUG_DISABLE_ASSERTS */
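+
+/*
+ * Illustrative usage (hypothetical call sites, not part of this file):
+ *
+ *   KBASE_DEBUG_ASSERT(kbdev != NULL);
+ *   KBASE_DEBUG_ASSERT_MSG(nr_pages > 0, "bad page count %zu", nr_pages);
+ *
+ * When KBASE_DEBUG_DISABLE_ASSERTS is 0, a false expression reports the
+ * failure (in CONFIG_MALI_DEBUG builds), calls any registered assert hook
+ * and then triggers BUG(); otherwise both macros compile away to no-ops.
+ */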
+
+/**
+ * @def KBASE_DEBUG_CODE( X )
+ * @brief Executes the code inside the macro only in debug mode
+ *
+ * @param X Code to compile only in debug mode.
+ */
+#ifdef CONFIG_MALI_DEBUG
+#define KBASE_DEBUG_CODE(X) X
+#else
+#define KBASE_DEBUG_CODE(X) CSTD_NOP()
+#endif /* CONFIG_MALI_DEBUG */
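+
+/*
+ * Illustrative usage (hypothetical, not part of this file): compile a
+ * statement only into CONFIG_MALI_DEBUG builds, e.g.
+ *
+ *   KBASE_DEBUG_CODE(dev_dbg(kbdev->dev, "submitting to slot %d\n", js));
+ */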
+
+/** @} */
+
+/**
+ * @brief Register a function to call on ASSERT
+ *
+ * Such functions will \b only be called during Debug mode, and for debugging
+ * features \b only. Do not rely on them to be called in general use.
+ *
+ * To disable the hook, supply NULL to \a func.
+ *
+ * @note This function is not thread-safe, and should only be used to
+ * register/deregister once in the module's lifetime.
+ *
+ * @param[in] func the function to call when an assert is triggered.
+ * @param[in] param the parameter to pass to \a func when calling it
+ */
+void kbase_debug_assert_register_hook(kbase_debug_assert_hook *func, void *param);
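+
+/*
+ * Illustrative usage (hypothetical hook, not part of this file):
+ *
+ *   static void my_assert_hook(void *param)
+ *   {
+ *           struct kbase_device *kbdev = param;
+ *
+ *           dev_err(kbdev->dev, "kbase assert triggered\n");
+ *   }
+ *
+ *   kbase_debug_assert_register_hook(my_assert_hook, kbdev);
+ *   ...
+ *   // later, to deregister the hook:
+ *   kbase_debug_assert_register_hook(NULL, NULL);
+ */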
+
+/**
+ * @brief Call a debug assert hook previously registered with kbase_debug_assert_register_hook()
+ *
+ * @note This function is not thread-safe with respect to multiple threads
+ * registering functions and parameters with
+ * kbase_debug_assert_register_hook(). Otherwise, thread safety is the
+ * responsibility of the registered hook.
+ */
+void kbasep_debug_assert_call_hook(void);
+
+#endif /* _KBASE_DEBUG_H */
diff --git a/midgard-0.r2p0/mali_kbase_debug_job_fault.c b/midgard-0.r2p0/mali_kbase_debug_job_fault.c
new file mode 100755
index 0000000..83c5c37
--- /dev/null
+++ b/midgard-0.r2p0/mali_kbase_debug_job_fault.c
@@ -0,0 +1,502 @@
+/*
+ *
+ * (C) COPYRIGHT 2012-2016 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained
+ * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
+ * Boston, MA 02110-1301, USA.
+ *
+ */
+
+
+
+#include <mali_kbase.h>
+#include <linux/spinlock.h>
+
+#ifdef CONFIG_DEBUG_FS
+
+static bool kbase_is_job_fault_event_pending(struct kbase_device *kbdev)
+{
+ struct list_head *event_list = &kbdev->job_fault_event_list;
+ unsigned long flags;
+ bool ret;
+
+ spin_lock_irqsave(&kbdev->job_fault_event_lock, flags);
+ ret = !list_empty(event_list);
+ spin_unlock_irqrestore(&kbdev->job_fault_event_lock, flags);
+
+ return ret;
+}
+
+static bool kbase_ctx_has_no_event_pending(struct kbase_context *kctx)
+{
+ struct kbase_device *kbdev = kctx->kbdev;
+ struct list_head *event_list = &kctx->kbdev->job_fault_event_list;
+ struct base_job_fault_event *event;
+ unsigned long flags;
+
+ spin_lock_irqsave(&kbdev->job_fault_event_lock, flags);
+ if (list_empty(event_list)) {
+ spin_unlock_irqrestore(&kbdev->job_fault_event_lock, flags);
+ return true;
+ }
+ list_for_each_entry(event, event_list, head) {
+ if (event->katom->kctx == kctx) {
+ spin_unlock_irqrestore(&kbdev->job_fault_event_lock,
+ flags);
+ return false;
+ }
+ }
+ spin_unlock_irqrestore(&kbdev->job_fault_event_lock, flags);
+ return true;
+}
+
+/* Wait until a fault happens and copy the event */
+static int kbase_job_fault_event_wait(struct kbase_device *kbdev,
+ struct base_job_fault_event *event)
+{
+ struct list_head *event_list = &kbdev->job_fault_event_list;
+ struct base_job_fault_event *event_in;
+ unsigned long flags;
+
+ spin_lock_irqsave(&kbdev->job_fault_event_lock, flags);
+ if (list_empty(event_list)) {
+ spin_unlock_irqrestore(&kbdev->job_fault_event_lock, flags);
+ if (wait_event_interruptible(kbdev->job_fault_wq,
+ kbase_is_job_fault_event_pending(kbdev)))
+ return -ERESTARTSYS;
+ spin_lock_irqsave(&kbdev->job_fault_event_lock, flags);
+ }
+
+ event_in = list_entry(event_list->next,
+ struct base_job_fault_event, head);
+ event->event_code = event_in->event_code;
+ event->katom = event_in->katom;
+
+ spin_unlock_irqrestore(&kbdev->job_fault_event_lock, flags);
+
+ return 0;
+
+}
+
+/* remove the event from the queue */
+static struct base_job_fault_event *kbase_job_fault_event_dequeue(
+ struct kbase_device *kbdev, struct list_head *event_list)
+{
+ struct base_job_fault_event *event;
+
+ event = list_entry(event_list->next,
+ struct base_job_fault_event, head);
+ list_del(event_list->next);
+
+ return event;
+
+}
+
+/* Remove all atoms queued after the failed atom in the same context and
+ * call the postponed bottom half of job done for each of them.
+ * After that, this context can be rescheduled.
+ */
+static void kbase_job_fault_resume_event_cleanup(struct kbase_context *kctx)
+{
+ struct list_head *event_list = &kctx->job_fault_resume_event_list;
+
+ while (!list_empty(event_list)) {
+ struct base_job_fault_event *event;
+
+ event = kbase_job_fault_event_dequeue(kctx->kbdev,
+ &kctx->job_fault_resume_event_list);
+ kbase_jd_done_worker(&event->katom->work);
+ }
+
+}
+
+/* Remove all the failed atoms that belong to different contexts and
+ * resume all the contexts that were suspended due to a failed job
+ */
+static void kbase_job_fault_event_cleanup(struct kbase_device *kbdev)
+{
+ struct list_head *event_list = &kbdev->job_fault_event_list;
+ unsigned long flags;
+
+ spin_lock_irqsave(&kbdev->job_fault_event_lock, flags);
+ while (!list_empty(event_list)) {
+ kbase_job_fault_event_dequeue(kbdev, event_list);
+ spin_unlock_irqrestore(&kbdev->job_fault_event_lock, flags);
+ wake_up(&kbdev->job_fault_resume_wq);
+ spin_lock_irqsave(&kbdev->job_fault_event_lock, flags);
+ }
+ spin_unlock_irqrestore(&kbdev->job_fault_event_lock, flags);
+}
+
+static void kbase_job_fault_resume_worker(struct work_struct *data)
+{
+ struct base_job_fault_event *event = container_of(data,
+ struct base_job_fault_event, job_fault_work);
+ struct kbase_context *kctx;
+ struct kbase_jd_atom *katom;
+
+ katom = event->katom;
+ kctx = katom->kctx;
+
+ dev_info(kctx->kbdev->dev, "Job dumping wait\n");
+
+ /* Once woken up, check whether the queue is empty or the failed atom
+ * belongs to a different context. Either case means the failed job
+ * has been dumped. Note that the job_fault_event_list should never
+ * contain two atoms belonging to the same context.
+ */
+ wait_event(kctx->kbdev->job_fault_resume_wq,
+ kbase_ctx_has_no_event_pending(kctx));
+
+ atomic_set(&kctx->job_fault_count, 0);
+ kbase_jd_done_worker(&katom->work);
+
+ /* Atoms scheduled during the failed job dump had their job_done_worker
+ * held back; rerun it now that the dump has finished
+ */
+ kbase_job_fault_resume_event_cleanup(kctx);
+
+ dev_info(kctx->kbdev->dev, "Job dumping finish, resume scheduler\n");
+}
+
+static struct base_job_fault_event *kbase_job_fault_event_queue(
+ struct list_head *event_list,
+ struct kbase_jd_atom *atom,
+ u32 completion_code)
+{
+ struct base_job_fault_event *event;
+
+ event = &atom->fault_event;
+
+ event->katom = atom;
+ event->event_code = completion_code;
+
+ list_add_tail(&event->head, event_list);
+
+ return event;
+
+}
+
+static void kbase_job_fault_event_post(struct kbase_device *kbdev,
+ struct kbase_jd_atom *katom, u32 completion_code)
+{
+ struct base_job_fault_event *event;
+ unsigned long flags;
+
+ spin_lock_irqsave(&kbdev->job_fault_event_lock, flags);
+ event = kbase_job_fault_event_queue(&kbdev->job_fault_event_list,
+ katom, completion_code);
+ spin_unlock_irqrestore(&kbdev->job_fault_event_lock, flags);
+
+ wake_up_interruptible(&kbdev->job_fault_wq);
+
+ INIT_WORK(&event->job_fault_work, kbase_job_fault_resume_worker);
+ queue_work(kbdev->job_fault_resume_workq, &event->job_fault_work);
+
+ dev_info(katom->kctx->kbdev->dev, "Job fault happen, start dump: %d_%d",
+ katom->kctx->tgid, katom->kctx->id);
+
+}
+
+/*
+ * This function processes the job fault:
+ * - take a copy of the registers
+ * - send the failed job dump event
+ * - create a wait queue entry and wait until the job dump finishes
+ */
+
+bool kbase_debug_job_fault_process(struct kbase_jd_atom *katom,
+ u32 completion_code)
+{
+ struct kbase_context *kctx = katom->kctx;
+
+ /* Check whether a dump is already in progress. Only one atom per
+ * context can be dumped at a time; an atom that belongs to a
+ * different context can still be dumped.
+ */
+ if (atomic_read(&kctx->job_fault_count) > 0) {
+ kbase_job_fault_event_queue(
+ &kctx->job_fault_resume_event_list,
+ katom, completion_code);
+ dev_info(kctx->kbdev->dev, "queue:%d\n",
+ kbase_jd_atom_id(kctx, katom));
+ return true;
+ }
+
+ if (kctx->kbdev->job_fault_debug == true) {
+
+ if (completion_code != BASE_JD_EVENT_DONE) {
+
+ if (kbase_job_fault_get_reg_snapshot(kctx) == false) {
+ dev_warn(kctx->kbdev->dev, "get reg dump failed\n");
+ return false;
+ }
+
+ kbase_job_fault_event_post(kctx->kbdev, katom,
+ completion_code);
+ atomic_inc(&kctx->job_fault_count);
+ dev_info(kctx->kbdev->dev, "post:%d\n",
+ kbase_jd_atom_id(kctx, katom));
+ return true;
+
+ }
+ }
+ return false;
+
+}
+
+static int debug_job_fault_show(struct seq_file *m, void *v)
+{
+ struct kbase_device *kbdev = m->private;
+ struct base_job_fault_event *event = (struct base_job_fault_event *)v;
+ struct kbase_context *kctx = event->katom->kctx;
+ int i;
+
+ dev_info(kbdev->dev, "debug job fault seq show:%d_%d, %d",
+ kctx->tgid, kctx->id, event->reg_offset);
+
+ if (kctx->reg_dump == NULL) {
+ dev_warn(kbdev->dev, "reg dump is NULL");
+ return -1;
+ }
+
+ if (kctx->reg_dump[event->reg_offset] ==
+ REGISTER_DUMP_TERMINATION_FLAG) {
+ /* Return an error here to stop the read; the following next()
+ * will then not be called, and stop() can take the real event
+ * resource and release it
+ */
+ return -1;
+ }
+
+ if (event->reg_offset == 0)
+ seq_printf(m, "%d_%d\n", kctx->tgid, kctx->id);
+
+ for (i = 0; i < 50; i++) {
+ if (kctx->reg_dump[event->reg_offset] ==
+ REGISTER_DUMP_TERMINATION_FLAG) {
+ break;
+ }
+ seq_printf(m, "%08x: %08x\n",
+ kctx->reg_dump[event->reg_offset],
+ kctx->reg_dump[1+event->reg_offset]);
+ event->reg_offset += 2;
+
+ }
+
+
+ return 0;
+}
+static void *debug_job_fault_next(struct seq_file *m, void *v, loff_t *pos)
+{
+ struct kbase_device *kbdev = m->private;
+ struct base_job_fault_event *event = (struct base_job_fault_event *)v;
+
+ dev_info(kbdev->dev, "debug job fault seq next:%d, %d",
+ event->reg_offset, (int)*pos);
+
+ return event;
+}
+
+static void *debug_job_fault_start(struct seq_file *m, loff_t *pos)
+{
+ struct kbase_device *kbdev = m->private;
+ struct base_job_fault_event *event;
+
+ dev_info(kbdev->dev, "fault job seq start:%d", (int)*pos);
+
+ /* The condition here is tricky. It must ensure that either the
+ * fault hasn't happened and dumping hasn't been started, or that
+ * dumping has finished
+ */
+ if (*pos == 0) {
+ event = kmalloc(sizeof(*event), GFP_KERNEL);
+ if (!event)
+ return NULL;
+ event->reg_offset = 0;
+ if (kbase_job_fault_event_wait(kbdev, event)) {
+ kfree(event);
+ return NULL;
+ }
+
+ /* The cache flush workaround is normally run in the bottom half
+ * of job done, but it has been delayed. Clean the cache now so
+ * that the GPU memory dump is correct.
+ */
+ if (event->katom->need_cache_flush_cores_retained) {
+ kbase_gpu_cacheclean(kbdev, event->katom);
+ event->katom->need_cache_flush_cores_retained = 0;
+ }
+
+ } else
+ return NULL;
+
+ return event;
+}
+
+static void debug_job_fault_stop(struct seq_file *m, void *v)
+{
+ struct kbase_device *kbdev = m->private;
+
+ /* Wake up the kbase_jd_done_worker after stop. The debug daemon must
+ * take the memory dump before the register dump; otherwise, the
+ * memory dump may be incorrect.
+ */
+
+ if (v != NULL) {
+ kfree(v);
+ dev_info(kbdev->dev, "debug job fault seq stop stage 1");
+
+ } else {
+ unsigned long flags;
+
+ spin_lock_irqsave(&kbdev->job_fault_event_lock, flags);
+ if (!list_empty(&kbdev->job_fault_event_list)) {
+ kbase_job_fault_event_dequeue(kbdev,
+ &kbdev->job_fault_event_list);
+ wake_up(&kbdev->job_fault_resume_wq);
+ }
+ spin_unlock_irqrestore(&kbdev->job_fault_event_lock, flags);
+ dev_info(kbdev->dev, "debug job fault seq stop stage 2");
+ }
+
+}
+
+static const struct seq_operations ops = {
+ .start = debug_job_fault_start,
+ .next = debug_job_fault_next,
+ .stop = debug_job_fault_stop,
+ .show = debug_job_fault_show,
+};
+
+static int debug_job_fault_open(struct inode *in, struct file *file)
+{
+ struct kbase_device *kbdev = in->i_private;
+
+ seq_open(file, &ops);
+
+ ((struct seq_file *)file->private_data)->private = kbdev;
+ dev_info(kbdev->dev, "debug job fault seq open");
+
+ kbdev->job_fault_debug = true;
+
+ return 0;
+
+}
+
+static int debug_job_fault_release(struct inode *in, struct file *file)
+{
+ struct kbase_device *kbdev = in->i_private;
+
+ seq_release(in, file);
+
+ kbdev->job_fault_debug = false;
+
+ /* Clean up any unprocessed job faults so that all the suspended
+ * contexts can be rescheduled.
+ */
+ kbase_job_fault_event_cleanup(kbdev);
+
+ dev_info(kbdev->dev, "debug job fault seq close");
+
+ return 0;
+}
+
+static const struct file_operations kbasep_debug_job_fault_fops = {
+ .open = debug_job_fault_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = debug_job_fault_release,
+};
+
+/*
+ * Initialize debugfs entry for job fault dump
+ */
+void kbase_debug_job_fault_debugfs_init(struct kbase_device *kbdev)
+{
+ debugfs_create_file("job_fault", S_IRUGO,
+ kbdev->mali_debugfs_directory, kbdev,
+ &kbasep_debug_job_fault_fops);
+}
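+
+/*
+ * Illustrative usage from user space (not part of this driver; the path
+ * assumes the default "mali0" device name and debugfs mounted at
+ * /sys/kernel/debug):
+ *
+ *   cat /sys/kernel/debug/mali0/job_fault
+ *
+ * Opening the file enables job fault dumping; releasing it cleans up any
+ * unprocessed fault events so suspended contexts can be rescheduled.
+ */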
+
+
+int kbase_debug_job_fault_dev_init(struct kbase_device *kbdev)
+{
+
+ INIT_LIST_HEAD(&kbdev->job_fault_event_list);
+
+ init_waitqueue_head(&(kbdev->job_fault_wq));
+ init_waitqueue_head(&(kbdev->job_fault_resume_wq));
+ spin_lock_init(&kbdev->job_fault_event_lock);
+
+ kbdev->job_fault_resume_workq = alloc_workqueue(
+ "kbase_job_fault_resume_work_queue", WQ_MEM_RECLAIM, 1);
+ if (!kbdev->job_fault_resume_workq)
+ return -ENOMEM;
+
+ kbdev->job_fault_debug = false;
+
+ return 0;
+}
+
+/*
+ * Release the relevant resource per device
+ */
+void kbase_debug_job_fault_dev_term(struct kbase_device *kbdev)
+{
+ destroy_workqueue(kbdev->job_fault_resume_workq);
+}
+
+
+/*
+ * Initialize the relevant data structure per context
+ */
+void kbase_debug_job_fault_context_init(struct kbase_context *kctx)
+{
+
+ /* Allocate twice the register range, because this memory holds both
+ * the register addresses and their values
+ */
+ kctx->reg_dump = vmalloc(0x4000 * 2);
+ if (kctx->reg_dump == NULL)
+ return;
+
+ if (kbase_debug_job_fault_reg_snapshot_init(kctx, 0x4000) == false) {
+ vfree(kctx->reg_dump);
+ kctx->reg_dump = NULL;
+ }
+ INIT_LIST_HEAD(&kctx->job_fault_resume_event_list);
+ atomic_set(&kctx->job_fault_count, 0);
+
+}
+
+/*
+ * release the relevant resource per context
+ */
+void kbase_debug_job_fault_context_term(struct kbase_context *kctx)
+{
+ vfree(kctx->reg_dump);
+}
+
+#else /* CONFIG_DEBUG_FS */
+
+int kbase_debug_job_fault_dev_init(struct kbase_device *kbdev)
+{
+ kbdev->job_fault_debug = false;
+
+ return 0;
+}
+
+void kbase_debug_job_fault_dev_term(struct kbase_device *kbdev)
+{
+}
+
+#endif /* CONFIG_DEBUG_FS */
diff --git a/midgard-0.r2p0/mali_kbase_debug_job_fault.h b/midgard-0.r2p0/mali_kbase_debug_job_fault.h
new file mode 100755
index 0000000..a2bf898
--- /dev/null
+++ b/midgard-0.r2p0/mali_kbase_debug_job_fault.h
@@ -0,0 +1,96 @@
+/*
+ *
+ * (C) COPYRIGHT 2012-2016 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained
+ * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
+ * Boston, MA 02110-1301, USA.
+ *
+ */
+
+
+
+#ifndef _KBASE_DEBUG_JOB_FAULT_H
+#define _KBASE_DEBUG_JOB_FAULT_H
+
+#include <linux/debugfs.h>
+#include <linux/seq_file.h>
+
+#define REGISTER_DUMP_TERMINATION_FLAG 0xFFFFFFFF
+
+/**
+ * kbase_debug_job_fault_dev_init - Create the fault event wait queue
+ * per device and initialize the required lists.
+ * @kbdev: Device pointer
+ *
+ * Return: Zero on success or a negative error code.
+ */
+int kbase_debug_job_fault_dev_init(struct kbase_device *kbdev);
+
+/**
+ * kbase_debug_job_fault_debugfs_init - Initialize the job fault debugfs entry
+ * @kbdev: Device pointer
+ */
+void kbase_debug_job_fault_debugfs_init(struct kbase_device *kbdev);
+
+/**
+ * kbase_debug_job_fault_dev_term - Clean up resources created in
+ * kbase_debug_job_fault_dev_init.
+ * @kbdev: Device pointer
+ */
+void kbase_debug_job_fault_dev_term(struct kbase_device *kbdev);
+
+/**
+ * kbase_debug_job_fault_context_init - Initialize the relevant
+ * data structure per context
+ * @kctx: KBase context pointer
+ */
+void kbase_debug_job_fault_context_init(struct kbase_context *kctx);
+
+/**
+ * kbase_debug_job_fault_context_term - Release the relevant
+ * resource per context
+ * @kctx: KBase context pointer
+ */
+void kbase_debug_job_fault_context_term(struct kbase_context *kctx);
+
+/**
+ * kbase_debug_job_fault_process - Process the failed job.
+ *      It sends an event, wakes up the job fault wait queue and then
+ *      queues work to wait for the job dump to finish. This function
+ *      should be called in the interrupt handler, before jd_done, so
+ *      that jd_done_worker is delayed until the job dump finishes.
+ * @katom: The failed atom pointer
+ * @completion_code: the job status
+ * @return true if a dump is in progress
+ */
+bool kbase_debug_job_fault_process(struct kbase_jd_atom *katom,
+ u32 completion_code);
+
+
+/**
+ * kbase_debug_job_fault_reg_snapshot_init - Set the addresses of the
+ *      registers of interest for the job fault process; these registers
+ *      will be saved when a job fault happens
+ * @kctx: KBase context pointer
+ * @reg_range: Maximum register address space
+ * @return true if initialization succeeded
+ */
+bool kbase_debug_job_fault_reg_snapshot_init(struct kbase_context *kctx,
+ int reg_range);
+
+/**
+ * kbase_job_fault_get_reg_snapshot - Read the registers of interest for
+ *      the failed job dump
+ * @kctx: KBase context pointer
+ * @return true if the registers were read successfully
+ */
+bool kbase_job_fault_get_reg_snapshot(struct kbase_context *kctx);
+
+#endif /*_KBASE_DEBUG_JOB_FAULT_H*/
diff --git a/midgard-0.r2p0/mali_kbase_debug_mem_view.c b/midgard-0.r2p0/mali_kbase_debug_mem_view.c
new file mode 100755
index 0000000..a98355e
--- /dev/null
+++ b/midgard-0.r2p0/mali_kbase_debug_mem_view.c
@@ -0,0 +1,279 @@
+/*
+ *
+ * (C) COPYRIGHT 2013-2016 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained
+ * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
+ * Boston, MA 02110-1301, USA.
+ *
+ */
+
+
+
+/*
+ * Debugfs interface to dump the memory visible to the GPU
+ */
+
+#include "mali_kbase_debug_mem_view.h"
+#include "mali_kbase.h"
+
+#include <linux/list.h>
+#include <linux/file.h>
+
+#ifdef CONFIG_DEBUG_FS
+
+struct debug_mem_mapping {
+ struct list_head node;
+
+ struct kbase_mem_phy_alloc *alloc;
+ unsigned long flags;
+
+ u64 start_pfn;
+ size_t nr_pages;
+};
+
+struct debug_mem_data {
+ struct list_head mapping_list;
+ struct kbase_context *kctx;
+};
+
+struct debug_mem_seq_off {
+ struct list_head *lh;
+ size_t offset;
+};
+
+static void *debug_mem_start(struct seq_file *m, loff_t *_pos)
+{
+ struct debug_mem_data *mem_data = m->private;
+ struct debug_mem_seq_off *data;
+ struct debug_mem_mapping *map;
+ loff_t pos = *_pos;
+
+ list_for_each_entry(map, &mem_data->mapping_list, node) {
+ if (pos >= map->nr_pages) {
+ pos -= map->nr_pages;
+ } else {
+ data = kmalloc(sizeof(*data), GFP_KERNEL);
+ if (!data)
+ return NULL;
+ data->lh = &map->node;
+ data->offset = pos;
+ return data;
+ }
+ }
+
+ /* Beyond the end */
+ return NULL;
+}
+
+static void debug_mem_stop(struct seq_file *m, void *v)
+{
+ kfree(v);
+}
+
+static void *debug_mem_next(struct seq_file *m, void *v, loff_t *pos)
+{
+ struct debug_mem_data *mem_data = m->private;
+ struct debug_mem_seq_off *data = v;
+ struct debug_mem_mapping *map;
+
+ map = list_entry(data->lh, struct debug_mem_mapping, node);
+
+ if (data->offset < map->nr_pages - 1) {
+ data->offset++;
+ ++*pos;
+ return data;
+ }
+
+ if (list_is_last(data->lh, &mem_data->mapping_list))
+ return NULL;
+
+ data->lh = data->lh->next;
+ data->offset = 0;
+ ++*pos;
+
+ return data;
+}
+
+static int debug_mem_show(struct seq_file *m, void *v)
+{
+ struct debug_mem_data *mem_data = m->private;
+ struct debug_mem_seq_off *data = v;
+ struct debug_mem_mapping *map;
+ int i, j;
+ struct page *page;
+ uint32_t *mapping;
+ pgprot_t prot = PAGE_KERNEL;
+
+ map = list_entry(data->lh, struct debug_mem_mapping, node);
+
+ kbase_gpu_vm_lock(mem_data->kctx);
+
+ if (data->offset >= map->alloc->nents) {
+ seq_printf(m, "%016llx: Unbacked page\n\n", (map->start_pfn +
+ data->offset) << PAGE_SHIFT);
+ goto out;
+ }
+
+ if (!(map->flags & KBASE_REG_CPU_CACHED))
+ prot = pgprot_writecombine(prot);
+
+ page = pfn_to_page(PFN_DOWN(map->alloc->pages[data->offset]));
+ mapping = vmap(&page, 1, VM_MAP, prot);
+ if (!mapping)
+ goto out;
+
+ for (i = 0; i < PAGE_SIZE; i += 4*sizeof(*mapping)) {
+ seq_printf(m, "%016llx:", i + ((map->start_pfn +
+ data->offset) << PAGE_SHIFT));
+
+ for (j = 0; j < 4*sizeof(*mapping); j += sizeof(*mapping))
+ seq_printf(m, " %08x", mapping[(i+j)/sizeof(*mapping)]);
+ seq_putc(m, '\n');
+ }
+
+ vunmap(mapping);
+
+ seq_putc(m, '\n');
+
+out:
+ kbase_gpu_vm_unlock(mem_data->kctx);
+ return 0;
+}
+
+static const struct seq_operations ops = {
+ .start = debug_mem_start,
+ .next = debug_mem_next,
+ .stop = debug_mem_stop,
+ .show = debug_mem_show,
+};
+
+static int debug_mem_open(struct inode *i, struct file *file)
+{
+ struct file *kctx_file = i->i_private;
+ struct kbase_context *kctx = kctx_file->private_data;
+ struct rb_node *p;
+ struct debug_mem_data *mem_data;
+ int ret;
+
+ ret = seq_open(file, &ops);
+ if (ret)
+ return ret;
+
+ mem_data = kmalloc(sizeof(*mem_data), GFP_KERNEL);
+ if (!mem_data) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ mem_data->kctx = kctx;
+
+ INIT_LIST_HEAD(&mem_data->mapping_list);
+
+ get_file(kctx_file);
+
+ kbase_gpu_vm_lock(kctx);
+
+ for (p = rb_first(&kctx->reg_rbtree); p; p = rb_next(p)) {
+ struct kbase_va_region *reg;
+ struct debug_mem_mapping *mapping;
+
+ reg = rb_entry(p, struct kbase_va_region, rblink);
+
+ if (reg->gpu_alloc == NULL)
+ /* Empty region - ignore */
+ continue;
+
+ mapping = kmalloc(sizeof(*mapping), GFP_KERNEL);
+ if (!mapping) {
+ ret = -ENOMEM;
+ kbase_gpu_vm_unlock(kctx);
+ goto out;
+ }
+
+ mapping->alloc = kbase_mem_phy_alloc_get(reg->gpu_alloc);
+ mapping->start_pfn = reg->start_pfn;
+ mapping->nr_pages = reg->nr_pages;
+ mapping->flags = reg->flags;
+ list_add_tail(&mapping->node, &mem_data->mapping_list);
+ }
+
+ kbase_gpu_vm_unlock(kctx);
+
+ ((struct seq_file *)file->private_data)->private = mem_data;
+
+ return 0;
+
+out:
+ if (mem_data) {
+ while (!list_empty(&mem_data->mapping_list)) {
+ struct debug_mem_mapping *mapping;
+
+ mapping = list_first_entry(&mem_data->mapping_list,
+ struct debug_mem_mapping, node);
+ kbase_mem_phy_alloc_put(mapping->alloc);
+ list_del(&mapping->node);
+ kfree(mapping);
+ }
+ fput(kctx_file);
+ kfree(mem_data);
+ }
+ seq_release(i, file);
+ return ret;
+}
+
+static int debug_mem_release(struct inode *inode, struct file *file)
+{
+ struct file *kctx_file = inode->i_private;
+ struct seq_file *sfile = file->private_data;
+ struct debug_mem_data *mem_data = sfile->private;
+ struct debug_mem_mapping *mapping;
+
+ seq_release(inode, file);
+
+ while (!list_empty(&mem_data->mapping_list)) {
+ mapping = list_first_entry(&mem_data->mapping_list,
+ struct debug_mem_mapping, node);
+ kbase_mem_phy_alloc_put(mapping->alloc);
+ list_del(&mapping->node);
+ kfree(mapping);
+ }
+
+ kfree(mem_data);
+
+ fput(kctx_file);
+
+ return 0;
+}
+
+static const struct file_operations kbase_debug_mem_view_fops = {
+ .open = debug_mem_open,
+ .release = debug_mem_release,
+ .read = seq_read,
+ .llseek = seq_lseek
+};
+
+/**
+ * kbase_debug_mem_view_init - Initialise the mem_view debugfs file
+ * @kctx_file: The /dev/mali0 file instance for the context
+ *
+ * This function creates a "mem_view" file which can be used to get a view of
+ * the context's memory as the GPU sees it (i.e. using the GPU's page tables).
+ *
+ * The file is cleaned up by a call to debugfs_remove_recursive() deleting the
+ * parent directory.
+ */
+void kbase_debug_mem_view_init(struct file *kctx_file)
+{
+ struct kbase_context *kctx = kctx_file->private_data;
+
+ debugfs_create_file("mem_view", S_IRUGO, kctx->kctx_dentry, kctx_file,
+ &kbase_debug_mem_view_fops);
+}
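+
+/*
+ * Illustrative usage from user space (not part of this driver; the exact
+ * per-context directory name depends on the context's tgid and id, and
+ * debugfs is assumed to be mounted at /sys/kernel/debug):
+ *
+ *   cat /sys/kernel/debug/mali0/ctx/<tgid>_<id>/mem_view
+ */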
+
+#endif
diff --git a/midgard-0.r2p0/mali_kbase_debug_mem_view.h b/midgard-0.r2p0/mali_kbase_debug_mem_view.h
new file mode 100755
index 0000000..20ab51a
--- /dev/null
+++ b/midgard-0.r2p0/mali_kbase_debug_mem_view.h
@@ -0,0 +1,25 @@
+/*
+ *
+ * (C) COPYRIGHT 2013-2014 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained
+ * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
+ * Boston, MA 02110-1301, USA.
+ *
+ */
+
+
+
+#ifndef _KBASE_DEBUG_MEM_VIEW_H
+#define _KBASE_DEBUG_MEM_VIEW_H
+
+#include <mali_kbase.h>
+
+void kbase_debug_mem_view_init(struct file *kctx_file);
+
+#endif
diff --git a/midgard-0.r2p0/mali_kbase_defs.h b/midgard-0.r2p0/mali_kbase_defs.h
new file mode 100755
index 0000000..df1a6f0
--- /dev/null
+++ b/midgard-0.r2p0/mali_kbase_defs.h
@@ -0,0 +1,1539 @@
+/*
+ *
+ * (C) COPYRIGHT 2011-2016 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained
+ * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
+ * Boston, MA 02110-1301, USA.
+ *
+ */
+
+
+
+
+
+/**
+ * @file mali_kbase_defs.h
+ *
+ * Definitions (types, defines, etc.) common to Kbase. They are placed here to
+ * allow the hierarchy of header files to work.
+ */
+
+#ifndef _KBASE_DEFS_H_
+#define _KBASE_DEFS_H_
+
+#include <mali_kbase_config.h>
+#include <mali_base_hwconfig_features.h>
+#include <mali_base_hwconfig_issues.h>
+#include <mali_kbase_mem_lowlevel.h>
+#include <mali_kbase_mmu_hw.h>
+#include <mali_kbase_mmu_mode.h>
+#include <mali_kbase_instr_defs.h>
+#include <mali_kbase_pm.h>
+
+#include <linux/atomic.h>
+#include <linux/mempool.h>
+#include <linux/slab.h>
+#include <linux/file.h>
+
+#ifdef CONFIG_MALI_FPGA_BUS_LOGGER
+#include <linux/bus_logger.h>
+#endif
+
+
+#ifdef CONFIG_KDS
+#include <linux/kds.h>
+#endif /* CONFIG_KDS */
+
+#ifdef CONFIG_SYNC
+#include "sync.h"
+#endif /* CONFIG_SYNC */
+
+#include "mali_kbase_dma_fence.h"
+
+#ifdef CONFIG_DEBUG_FS
+#include <linux/debugfs.h>
+#endif /* CONFIG_DEBUG_FS */
+
+#ifdef CONFIG_PM_DEVFREQ
+#include <linux/devfreq.h>
+#endif /* CONFIG_PM_DEVFREQ */
+
+#include <linux/clk.h>
+#include <linux/regulator/consumer.h>
+
+#if defined(CONFIG_PM_RUNTIME) || \
+ (defined(CONFIG_PM) && LINUX_VERSION_CODE >= KERNEL_VERSION(3, 19, 0))
+#define KBASE_PM_RUNTIME 1
+#endif
+
+/** Enable SW tracing when set */
+#ifdef CONFIG_MALI_MIDGARD_ENABLE_TRACE
+#define KBASE_TRACE_ENABLE 1
+#endif
+
+#ifndef KBASE_TRACE_ENABLE
+#ifdef CONFIG_MALI_DEBUG
+#define KBASE_TRACE_ENABLE 1
+#else
+#define KBASE_TRACE_ENABLE 0
+#endif /* CONFIG_MALI_DEBUG */
+#endif /* KBASE_TRACE_ENABLE */
+
+/** Dump Job slot trace on error (only active if KBASE_TRACE_ENABLE != 0) */
+#define KBASE_TRACE_DUMP_ON_JOB_SLOT_ERROR 1
+
+/**
+ * Number of milliseconds before resetting the GPU when a job cannot be "zapped" from the hardware.
+ * Note that the time is actually ZAP_TIMEOUT+SOFT_STOP_RESET_TIMEOUT between the context zap starting and the GPU
+ * actually being reset to give other contexts time for their jobs to be soft-stopped and removed from the hardware
+ * before resetting.
+ */
+#define ZAP_TIMEOUT 1000
+
+/** Number of milliseconds before we time out on a GPU soft/hard reset */
+#define RESET_TIMEOUT 500
+
+/**
+ * Prevent soft-stops from occurring in scheduling situations
+ *
+ * This is not due to HW issues, but is used when scheduling needs to be more predictable.
+ *
+ * Note that soft stop may still be disabled separately due to HW issues.
+ *
+ * @note Soft stop will still be used for non-scheduling purposes e.g. when terminating a context.
+ *
+ * @note if not in use, define this value to 0 instead of \#undef'ing it
+ */
+#define KBASE_DISABLE_SCHEDULING_SOFT_STOPS 0
+
+/**
+ * Prevent hard-stops from occurring in scheduling situations
+ *
+ * This is not due to HW issues, but is used when scheduling needs to be more predictable.
+ *
+ * @note Hard stop will still be used for non-scheduling purposes e.g. when terminating a context.
+ *
+ * @note if not in use, define this value to 0 instead of \#undef'ing it
+ */
+#define KBASE_DISABLE_SCHEDULING_HARD_STOPS 0
+
+/**
+ * The maximum number of Job Slots to support in the Hardware.
+ *
+ * You can optimize this down if your target devices will only ever support a
+ * small number of job slots.
+ */
+#define BASE_JM_MAX_NR_SLOTS 3
+
+/**
+ * The maximum number of Address Spaces to support in the Hardware.
+ *
+ * You can optimize this down if your target devices will only ever support a
+ * small number of Address Spaces
+ */
+#define BASE_MAX_NR_AS 16
+
+/* mmu */
+#define MIDGARD_MMU_VA_BITS 48
+
+#if MIDGARD_MMU_VA_BITS > 39
+#define MIDGARD_MMU_TOPLEVEL 0
+#else
+#define MIDGARD_MMU_TOPLEVEL 1
+#endif
+
+#define MIDGARD_MMU_BOTTOMLEVEL 3
+
+#define GROWABLE_FLAGS_REQUIRED (KBASE_REG_PF_GROW | KBASE_REG_GPU_WR)
+
+/** setting in kbase_context::as_nr that indicates it's invalid */
+#define KBASEP_AS_NR_INVALID (-1)
+
+#define KBASE_LOCK_REGION_MAX_SIZE (63)
+#define KBASE_LOCK_REGION_MIN_SIZE (11)
+
+#define KBASE_TRACE_SIZE_LOG2 8 /* 256 entries */
+#define KBASE_TRACE_SIZE (1 << KBASE_TRACE_SIZE_LOG2)
+#define KBASE_TRACE_MASK ((1 << KBASE_TRACE_SIZE_LOG2)-1)
+
+#include "mali_kbase_js_defs.h"
+#include "mali_kbase_hwaccess_defs.h"
+
+#define KBASEP_FORCE_REPLAY_DISABLED 0
+
+/* Maximum force replay limit when randomization is enabled */
+#define KBASEP_FORCE_REPLAY_RANDOM_LIMIT 16
+
+/** Atom has been previously soft-stopped */
+#define KBASE_KATOM_FLAG_BEEN_SOFT_STOPPPED (1<<1)
+/** Atom has been previously retried to execute */
+#define KBASE_KATOM_FLAGS_RERUN (1<<2)
+#define KBASE_KATOM_FLAGS_JOBCHAIN (1<<3)
+/** Atom has been previously hard-stopped. */
+#define KBASE_KATOM_FLAG_BEEN_HARD_STOPPED (1<<4)
+/** Atom has caused us to enter disjoint state */
+#define KBASE_KATOM_FLAG_IN_DISJOINT (1<<5)
+/* Atom blocked on cross-slot dependency */
+#define KBASE_KATOM_FLAG_X_DEP_BLOCKED (1<<7)
+/* Atom has fail dependency on cross-slot dependency */
+#define KBASE_KATOM_FLAG_FAIL_BLOCKER (1<<8)
+/* Atom is currently in the list of atoms blocked on cross-slot dependencies */
+#define KBASE_KATOM_FLAG_JSCTX_IN_X_DEP_LIST (1<<9)
+/* Atom is currently holding a context reference */
+#define KBASE_KATOM_FLAG_HOLDING_CTX_REF (1<<10)
+/* Atom requires GPU to be in protected mode */
+#define KBASE_KATOM_FLAG_PROTECTED (1<<11)
+/* Atom has been stored in runnable_tree */
+#define KBASE_KATOM_FLAG_JSCTX_IN_TREE (1<<12)
+
+/* SW related flags about types of JS_COMMAND action
+ * NOTE: These must be masked off by JS_COMMAND_MASK */
+
+/** This command causes a disjoint event */
+#define JS_COMMAND_SW_CAUSES_DISJOINT 0x100
+
+/** Bitmask of all SW related flags */
+#define JS_COMMAND_SW_BITS (JS_COMMAND_SW_CAUSES_DISJOINT)
+
+#if (JS_COMMAND_SW_BITS & JS_COMMAND_MASK)
+#error JS_COMMAND_SW_BITS not masked off by JS_COMMAND_MASK. Must update JS_COMMAND_SW_<..> bitmasks
+#endif
+
+/** Soft-stop command that causes a Disjoint event. This of course isn't
+ * entirely masked off by JS_COMMAND_MASK */
+#define JS_COMMAND_SOFT_STOP_WITH_SW_DISJOINT \
+ (JS_COMMAND_SW_CAUSES_DISJOINT | JS_COMMAND_SOFT_STOP)
+
+#define KBASEP_ATOM_ID_INVALID BASE_JD_ATOM_COUNT
+
+#ifdef CONFIG_DEBUG_FS
+struct base_job_fault_event {
+
+ u32 event_code;
+ struct kbase_jd_atom *katom;
+ struct work_struct job_fault_work;
+ struct list_head head;
+ int reg_offset;
+};
+
+#endif
+
+struct kbase_jd_atom_dependency {
+ struct kbase_jd_atom *atom;
+ u8 dep_type;
+};
+
+/**
+ * struct kbase_io_access - holds information about 1 register access
+ *
+ * @addr: first bit indicates r/w (r=0, w=1)
+ * @value: value written or read
+ */
+struct kbase_io_access {
+ uintptr_t addr;
+ u32 value;
+};
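+
+/*
+ * Illustrative sketch (not built into the driver): decoding a recorded
+ * access using the encoding documented above, where bit 0 of @addr holds the
+ * direction (r=0, w=1) and, by assumption, the remaining bits carry the
+ * register address. The helper names below are hypothetical.
+ */
+#if 0
+static inline bool kbase_io_access_is_write(const struct kbase_io_access *io)
+{
+	/* r=0, w=1 as documented for kbase_io_access::addr */
+	return (io->addr & 1) != 0;
+}
+
+static inline uintptr_t kbase_io_access_address(const struct kbase_io_access *io)
+{
+	/* Strip the direction bit to recover the address */
+	return io->addr & ~(uintptr_t)1;
+}
+#endif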
+
+/**
+ * struct kbase_io_history - keeps track of all recent register accesses
+ *
+ * @enabled: true if register accesses are recorded, false otherwise
+ * @lock: spinlock protecting kbase_io_access array
+ * @count: number of registers read/written
+ * @size: number of elements in kbase_io_access array
+ * @buf: array of kbase_io_access
+ */
+struct kbase_io_history {
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 4, 0))
+ bool enabled;
+#else
+ u32 enabled;
+#endif
+
+ spinlock_t lock;
+ size_t count;
+ u16 size;
+ struct kbase_io_access *buf;
+};
+
+/**
+ * @brief The function retrieves a read-only reference to the atom field from
+ * the kbase_jd_atom_dependency structure
+ *
+ * @param[in] dep kbase jd atom dependency.
+ *
+ * @return read-only reference to the dependent ATOM.
+ */
+static inline const struct kbase_jd_atom * kbase_jd_katom_dep_atom(const struct kbase_jd_atom_dependency *dep)
+{
+ LOCAL_ASSERT(dep != NULL);
+
+ return (const struct kbase_jd_atom *)(dep->atom);
+}
+
+/**
+ * @brief The function retrieves a read-only reference to the dependency type field from
+ * the kbase_jd_atom_dependency structure
+ *
+ * @param[in] dep kbase jd atom dependency.
+ *
+ * @return A dependency type value.
+ */
+static inline u8 kbase_jd_katom_dep_type(const struct kbase_jd_atom_dependency *dep)
+{
+ LOCAL_ASSERT(dep != NULL);
+
+ return dep->dep_type;
+}
+
+/**
+ * @brief Setter for a dep_atom array entry in kbase_jd_atom
+ *
+ * @param[in] const_dep The kbase jd atom dependency.
+ * @param[in] a The ATOM to be set as a dependency.
+ * @param type The ATOM dependency type to be set.
+ *
+ */
+static inline void kbase_jd_katom_dep_set(const struct kbase_jd_atom_dependency *const_dep,
+ struct kbase_jd_atom *a, u8 type)
+{
+ struct kbase_jd_atom_dependency *dep;
+
+ LOCAL_ASSERT(const_dep != NULL);
+
+ dep = (struct kbase_jd_atom_dependency *)const_dep;
+
+ dep->atom = a;
+ dep->dep_type = type;
+}
+
+/**
+ * @brief Clears a dep_atom array entry in kbase_jd_atom
+ *
+ * @param[in] const_dep The kbase jd atom dependency to be cleared.
+ *
+ */
+static inline void kbase_jd_katom_dep_clear(const struct kbase_jd_atom_dependency *const_dep)
+{
+ struct kbase_jd_atom_dependency *dep;
+
+ LOCAL_ASSERT(const_dep != NULL);
+
+ dep = (struct kbase_jd_atom_dependency *)const_dep;
+
+ dep->atom = NULL;
+ dep->dep_type = BASE_JD_DEP_TYPE_INVALID;
+}
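+
+/*
+ * Minimal usage sketch (hypothetical caller, not part of the driver): setting,
+ * reading back and clearing a dependency slot through the accessors above.
+ */
+#if 0
+static void example_dep_usage(const struct kbase_jd_atom_dependency *dep,
+			      struct kbase_jd_atom *dep_atom, u8 dep_type)
+{
+	/* Record dep_atom and its dependency type in the slot */
+	kbase_jd_katom_dep_set(dep, dep_atom, dep_type);
+
+	/* The read-only accessors return what was stored */
+	WARN_ON(kbase_jd_katom_dep_atom(dep) != dep_atom);
+	WARN_ON(kbase_jd_katom_dep_type(dep) != dep_type);
+
+	/* Reset the slot to the invalid dependency type */
+	kbase_jd_katom_dep_clear(dep);
+}
+#endif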
+
+enum kbase_atom_gpu_rb_state {
+ /* Atom is not currently present in slot ringbuffer */
+ KBASE_ATOM_GPU_RB_NOT_IN_SLOT_RB,
+ /* Atom is in slot ringbuffer but is blocked on a previous atom */
+ KBASE_ATOM_GPU_RB_WAITING_BLOCKED,
+ /* Atom is in slot ringbuffer but is waiting for a previous protected
+ * mode transition to complete */
+ KBASE_ATOM_GPU_RB_WAITING_PROTECTED_MODE_PREV,
+	/* Atom is in slot ringbuffer but is waiting for protected mode
+ * transition */
+ KBASE_ATOM_GPU_RB_WAITING_PROTECTED_MODE_TRANSITION,
+ /* Atom is in slot ringbuffer but is waiting for cores to become
+ * available */
+ KBASE_ATOM_GPU_RB_WAITING_FOR_CORE_AVAILABLE,
+ /* Atom is in slot ringbuffer but is blocked on affinity */
+ KBASE_ATOM_GPU_RB_WAITING_AFFINITY,
+ /* Atom is in slot ringbuffer and ready to run */
+ KBASE_ATOM_GPU_RB_READY,
+ /* Atom is in slot ringbuffer and has been submitted to the GPU */
+ KBASE_ATOM_GPU_RB_SUBMITTED,
+ /* Atom must be returned to JS as soon as it reaches the head of the
+ * ringbuffer due to a previous failure */
+ KBASE_ATOM_GPU_RB_RETURN_TO_JS
+};
+
+enum kbase_atom_enter_protected_state {
+ /*
+ * Starting state:
+ * Check if a transition into protected mode is required.
+ *
+ * NOTE: The integer value of this must
+ * match KBASE_ATOM_EXIT_PROTECTED_CHECK.
+ */
+ KBASE_ATOM_ENTER_PROTECTED_CHECK = 0,
+ /* Wait for vinstr to suspend. */
+ KBASE_ATOM_ENTER_PROTECTED_VINSTR,
+ /* Wait for the L2 to become idle in preparation for
+ * the coherency change. */
+ KBASE_ATOM_ENTER_PROTECTED_IDLE_L2,
+ /* End state;
+ * Prepare coherency change. */
+ KBASE_ATOM_ENTER_PROTECTED_FINISHED,
+};
+
+enum kbase_atom_exit_protected_state {
+ /*
+ * Starting state:
+ * Check if a transition out of protected mode is required.
+ *
+ * NOTE: The integer value of this must
+ * match KBASE_ATOM_ENTER_PROTECTED_CHECK.
+ */
+ KBASE_ATOM_EXIT_PROTECTED_CHECK = 0,
+ /* Wait for the L2 to become idle in preparation
+ * for the reset. */
+ KBASE_ATOM_EXIT_PROTECTED_IDLE_L2,
+ /* Issue the protected reset. */
+ KBASE_ATOM_EXIT_PROTECTED_RESET,
+ /* End state;
+ * Wait for the reset to complete. */
+ KBASE_ATOM_EXIT_PROTECTED_RESET_WAIT,
+};
+
+struct kbase_ext_res {
+ u64 gpu_address;
+ struct kbase_mem_phy_alloc *alloc;
+};
+
+struct kbase_jd_atom {
+ struct work_struct work;
+ ktime_t start_timestamp;
+ u64 time_spent_us; /**< Total time spent on the GPU in microseconds */
+
+ struct base_jd_udata udata;
+ struct kbase_context *kctx;
+
+ struct list_head dep_head[2];
+ struct list_head dep_item[2];
+ const struct kbase_jd_atom_dependency dep[2];
+ /* List head used during job dispatch job_done processing - as
+ * dependencies may not be entirely resolved at this point, we need to
+ * use a separate list head. */
+ struct list_head jd_item;
+ /* true if atom's jd_item is currently on a list. Prevents atom being
+ * processed twice. */
+ bool in_jd_list;
+
+ u16 nr_extres;
+ struct kbase_ext_res *extres;
+
+ u32 device_nr;
+ u64 affinity;
+ u64 jc;
+ enum kbase_atom_coreref_state coreref_state;
+#ifdef CONFIG_KDS
+ struct list_head node;
+ struct kds_resource_set *kds_rset;
+ bool kds_dep_satisfied;
+#endif /* CONFIG_KDS */
+#ifdef CONFIG_SYNC
+ struct sync_fence *fence;
+ struct sync_fence_waiter sync_waiter;
+#endif /* CONFIG_SYNC */
+#ifdef CONFIG_MALI_DMA_FENCE
+ struct {
+ /* This points to the dma-buf fence for this atom. If this is
+ * NULL then there is no fence for this atom and the other
+ * fields related to dma_fence may have invalid data.
+ *
+ * The context and seqno fields contain the details for this
+ * fence.
+ *
+ * This fence is signaled when the katom is completed,
+ * regardless of the event_code of the katom (signal also on
+ * failure).
+ */
+ struct fence *fence;
+ /* The dma-buf fence context number for this atom. A unique
+ * context number is allocated to each katom in the context on
+ * context creation.
+ */
+ unsigned int context;
+ /* The dma-buf fence sequence number for this atom. This is
+ * increased every time this katom uses dma-buf fence.
+ */
+ atomic_t seqno;
+ /* This contains a list of all callbacks set up to wait on
+ * other fences. This atom must be held back from JS until all
+	 * these callbacks have been called and dep_count has reached
+ * 0. The initial value of dep_count must be equal to the
+ * number of callbacks on this list.
+ *
+ * This list is protected by jctx.lock. Callbacks are added to
+	 * this list when the atom is built and the waits are set up.
+ * All the callbacks then stay on the list until all callbacks
+ * have been called and the atom is queued, or cancelled, and
+ * then all callbacks are taken off the list and freed.
+ */
+ struct list_head callbacks;
+	/* Atomic counter of the number of outstanding dma-buf fence
+ * dependencies for this atom. When dep_count reaches 0 the
+ * atom may be queued.
+ *
+ * The special value "-1" may only be set after the count
+ * reaches 0, while holding jctx.lock. This indicates that the
+ * atom has been handled, either queued in JS or cancelled.
+ *
+ * If anyone but the dma-fence worker sets this to -1 they must
+ * ensure that any potentially queued worker must have
+ * completed before allowing the atom to be marked as unused.
+ * This can be done by flushing the fence work queue:
+ * kctx->dma_fence.wq.
+ */
+ atomic_t dep_count;
+ } dma_fence;
+#endif /* CONFIG_MALI_DMA_FENCE */
+
+ /* Note: refer to kbasep_js_atom_retained_state, which will take a copy of some of the following members */
+ enum base_jd_event_code event_code;
+ base_jd_core_req core_req; /**< core requirements */
+ /** Job Slot to retry submitting to if submission from IRQ handler failed
+ *
+	 * NOTE: see if this can be unified into another member, e.g. the event */
+ int retry_submit_on_slot;
+
+ union kbasep_js_policy_job_info sched_info;
+ /* JS atom priority with respect to other atoms on its kctx. */
+ int sched_priority;
+
+ int poking; /* BASE_HW_ISSUE_8316 */
+
+ wait_queue_head_t completed;
+ enum kbase_jd_atom_state status;
+#ifdef CONFIG_GPU_TRACEPOINTS
+ int work_id;
+#endif
+ /* Assigned after atom is completed. Used to check whether PRLAM-10676 workaround should be applied */
+ int slot_nr;
+
+ u32 atom_flags;
+
+ /* Number of times this atom has been retried. Used by replay soft job.
+ */
+ int retry_count;
+
+ enum kbase_atom_gpu_rb_state gpu_rb_state;
+
+ u64 need_cache_flush_cores_retained;
+
+ atomic_t blocked;
+
+ /* Pointer to atom that this atom has same-slot dependency on */
+ struct kbase_jd_atom *pre_dep;
+ /* Pointer to atom that has same-slot dependency on this atom */
+ struct kbase_jd_atom *post_dep;
+
+ /* Pointer to atom that this atom has cross-slot dependency on */
+ struct kbase_jd_atom *x_pre_dep;
+ /* Pointer to atom that has cross-slot dependency on this atom */
+ struct kbase_jd_atom *x_post_dep;
+
+ /* The GPU's flush count recorded at the time of submission, used for
+ * the cache flush optimisation */
+ u32 flush_id;
+
+ struct kbase_jd_atom_backend backend;
+#ifdef CONFIG_DEBUG_FS
+ struct base_job_fault_event fault_event;
+#endif
+
+ /* List head used for two different purposes:
+ * 1. Overflow list for JS ring buffers. If an atom is ready to run,
+ * but there is no room in the JS ring buffer, then the atom is put
+ * on the ring buffer's overflow list using this list node.
+ * 2. List of waiting soft jobs.
+ */
+ struct list_head queue;
+
+ struct kbase_va_region *jit_addr_reg;
+
+ /* If non-zero, this indicates that the atom will fail with the set
+ * event_code when the atom is processed. */
+ enum base_jd_event_code will_fail_event_code;
+
+ /* Atoms will only ever be transitioning into, or out of
+ * protected mode so we do not need two separate fields.
+ */
+ union {
+ enum kbase_atom_enter_protected_state enter;
+ enum kbase_atom_exit_protected_state exit;
+ } protected_state;
+
+ struct rb_node runnable_tree_node;
+
+ /* 'Age' of atom relative to other atoms in the context. */
+ u32 age;
+};
+
+static inline bool kbase_jd_katom_is_protected(const struct kbase_jd_atom *katom)
+{
+ return (bool)(katom->atom_flags & KBASE_KATOM_FLAG_PROTECTED);
+}
+
+/*
+ * Theory of operations:
+ *
+ * Atom objects are statically allocated within the context structure.
+ *
+ * Each atom is the head of two lists, one for the "left" set of dependencies, one for the "right" set.
+ */
+
+#define KBASE_JD_DEP_QUEUE_SIZE 256
+
+struct kbase_jd_context {
+ struct mutex lock;
+ struct kbasep_js_kctx_info sched_info;
+ struct kbase_jd_atom atoms[BASE_JD_ATOM_COUNT];
+
+ /** Tracks all job-dispatch jobs. This includes those not tracked by
+ * the scheduler: 'not ready to run' and 'dependency-only' jobs. */
+ u32 job_nr;
+
+ /** Waitq that reflects whether there are no jobs (including SW-only
+ * dependency jobs). This is set when no jobs are present on the ctx,
+ * and clear when there are jobs.
+ *
+ * @note: Job Dispatcher knows about more jobs than the Job Scheduler:
+ * the Job Scheduler is unaware of jobs that are blocked on dependencies,
+ * and SW-only dependency jobs.
+ *
+ * This waitq can be waited upon to find out when the context jobs are all
+ * done/cancelled (including those that might've been blocked on
+ * dependencies) - and so, whether it can be terminated. However, it should
+ * only be terminated once it is neither present in the policy-queue (see
+ * kbasep_js_policy_try_evict_ctx() ) nor the run-pool (see
+ * kbasep_js_kctx_info::ctx::is_scheduled).
+ *
+ * Since the waitq is only set under kbase_jd_context::lock,
+ * the waiter should also briefly obtain and drop kbase_jd_context::lock to
+	 * guarantee that the setter has completed its work on the kbase_context.
+ *
+ * This must be updated atomically with:
+ * - kbase_jd_context::job_nr */
+ wait_queue_head_t zero_jobs_wait;
+
+ /** Job Done workqueue. */
+ struct workqueue_struct *job_done_wq;
+
+ spinlock_t tb_lock;
+ u32 *tb;
+ size_t tb_wrap_offset;
+
+#ifdef CONFIG_KDS
+ struct kds_callback kds_cb;
+#endif /* CONFIG_KDS */
+#ifdef CONFIG_GPU_TRACEPOINTS
+ atomic_t work_id;
+#endif
+};
+
+struct kbase_device_info {
+ u32 features;
+};
+
+/** Poking state for BASE_HW_ISSUE_8316 */
+enum {
+ KBASE_AS_POKE_STATE_IN_FLIGHT = 1<<0,
+ KBASE_AS_POKE_STATE_KILLING_POKE = 1<<1
+};
+
+/** Poking state for BASE_HW_ISSUE_8316 */
+typedef u32 kbase_as_poke_state;
+
+struct kbase_mmu_setup {
+ u64 transtab;
+ u64 memattr;
+ u64 transcfg;
+};
+
+/**
+ * Important: The code assumes that a struct kbase_as is always located at
+ * kbase_device->as[number]. This is relied upon to recover the containing
+ * struct kbase_device from a struct kbase_as pointer.
+ *
+ * Therefore, struct kbase_as structures must not be allocated anywhere else.
+ */
+struct kbase_as {
+ int number;
+
+ struct workqueue_struct *pf_wq;
+ struct work_struct work_pagefault;
+ struct work_struct work_busfault;
+ enum kbase_mmu_fault_type fault_type;
+ bool protected_mode;
+ u32 fault_status;
+ u64 fault_addr;
+ u64 fault_extra_addr;
+
+ struct kbase_mmu_setup current_setup;
+
+ /* BASE_HW_ISSUE_8316 */
+ struct workqueue_struct *poke_wq;
+ struct work_struct poke_work;
+ /** Protected by hwaccess_lock */
+ int poke_refcount;
+ /** Protected by hwaccess_lock */
+ kbase_as_poke_state poke_state;
+ struct hrtimer poke_timer;
+};
+
+static inline int kbase_as_has_bus_fault(struct kbase_as *as)
+{
+ return as->fault_type == KBASE_MMU_FAULT_TYPE_BUS;
+}
+
+static inline int kbase_as_has_page_fault(struct kbase_as *as)
+{
+ return as->fault_type == KBASE_MMU_FAULT_TYPE_PAGE;
+}
+
+struct kbasep_mem_device {
+ atomic_t used_pages; /* Tracks usage of OS shared memory. Updated
+ when OS memory is allocated/freed. */
+
+};
+
+#define KBASE_TRACE_CODE(X) KBASE_TRACE_CODE_ ## X
+
+enum kbase_trace_code {
+ /* IMPORTANT: USE OF SPECIAL #INCLUDE OF NON-STANDARD HEADER FILE
+ * THIS MUST BE USED AT THE START OF THE ENUM */
+#define KBASE_TRACE_CODE_MAKE_CODE(X) KBASE_TRACE_CODE(X)
+#include "mali_kbase_trace_defs.h"
+#undef KBASE_TRACE_CODE_MAKE_CODE
+ /* Comma on its own, to extend the list */
+ ,
+ /* Must be the last in the enum */
+ KBASE_TRACE_CODE_COUNT
+};
+
+#define KBASE_TRACE_FLAG_REFCOUNT (((u8)1) << 0)
+#define KBASE_TRACE_FLAG_JOBSLOT (((u8)1) << 1)
+
+struct kbase_trace {
+ struct timespec timestamp;
+ u32 thread_id;
+ u32 cpu;
+ void *ctx;
+ bool katom;
+ int atom_number;
+ u64 atom_udata[2];
+ u64 gpu_addr;
+ unsigned long info_val;
+ u8 code;
+ u8 jobslot;
+ u8 refcount;
+ u8 flags;
+};
+
+/** Event IDs for the power management framework.
+ *
+ * Any of these events might be missed, so they should not be relied upon to
+ * find the precise state of the GPU at a particular time in the
+ * trace. Overall, we should get a high percentage of these events for
+ * statistical purposes, and so a few missing should not be a problem */
+enum kbase_timeline_pm_event {
+ /* helper for tests */
+ KBASEP_TIMELINE_PM_EVENT_FIRST,
+
+ /** Event reserved for backwards compatibility with 'init' events */
+ KBASE_TIMELINE_PM_EVENT_RESERVED_0 = KBASEP_TIMELINE_PM_EVENT_FIRST,
+
+ /** The power state of the device has changed.
+ *
+ * Specifically, the device has reached a desired or available state.
+ */
+ KBASE_TIMELINE_PM_EVENT_GPU_STATE_CHANGED,
+
+ /** The GPU is becoming active.
+ *
+ * This event is sent when the first context is about to use the GPU.
+ */
+ KBASE_TIMELINE_PM_EVENT_GPU_ACTIVE,
+
+ /** The GPU is becoming idle.
+ *
+ * This event is sent when the last context has finished using the GPU.
+ */
+ KBASE_TIMELINE_PM_EVENT_GPU_IDLE,
+
+ /** Event reserved for backwards compatibility with 'policy_change'
+ * events */
+ KBASE_TIMELINE_PM_EVENT_RESERVED_4,
+
+ /** Event reserved for backwards compatibility with 'system_suspend'
+ * events */
+ KBASE_TIMELINE_PM_EVENT_RESERVED_5,
+
+ /** Event reserved for backwards compatibility with 'system_resume'
+ * events */
+ KBASE_TIMELINE_PM_EVENT_RESERVED_6,
+
+ /** The job scheduler is requesting to power up/down cores.
+ *
+ * This event is sent when:
+ * - powered down cores are needed to complete a job
+ * - powered up cores are not needed anymore
+ */
+ KBASE_TIMELINE_PM_EVENT_CHANGE_GPU_STATE,
+
+ KBASEP_TIMELINE_PM_EVENT_LAST = KBASE_TIMELINE_PM_EVENT_CHANGE_GPU_STATE,
+};
+
+#ifdef CONFIG_MALI_TRACE_TIMELINE
+struct kbase_trace_kctx_timeline {
+ atomic_t jd_atoms_in_flight;
+ u32 owner_tgid;
+};
+
+struct kbase_trace_kbdev_timeline {
+ /* Note: strictly speaking, not needed, because it's in sync with
+ * kbase_device::jm_slots[]::submitted_nr
+ *
+ * But it's kept as an example of how to add global timeline tracking
+ * information
+ *
+ * The caller must hold hwaccess_lock when accessing this */
+ u8 slot_atoms_submitted[BASE_JM_MAX_NR_SLOTS];
+
+ /* Last UID for each PM event */
+ atomic_t pm_event_uid[KBASEP_TIMELINE_PM_EVENT_LAST+1];
+ /* Counter for generating PM event UIDs */
+ atomic_t pm_event_uid_counter;
+ /*
+ * L2 transition state - true indicates that the transition is ongoing
+ * Expected to be protected by hwaccess_lock */
+ bool l2_transitioning;
+};
+#endif /* CONFIG_MALI_TRACE_TIMELINE */
+
+
+struct kbasep_kctx_list_element {
+ struct list_head link;
+ struct kbase_context *kctx;
+};
+
+/**
+ * Data stored per device for power management.
+ *
+ * This structure contains data for the power management framework. There is one
+ * instance of this structure per device in the system.
+ */
+struct kbase_pm_device_data {
+ /**
+ * The lock protecting Power Management structures accessed outside of
+ * IRQ.
+ *
+ * This lock must also be held whenever the GPU is being powered on or
+ * off.
+ */
+ struct mutex lock;
+
+ /** The reference count of active contexts on this device. */
+ int active_count;
+ /** Flag indicating suspending/suspended */
+ bool suspending;
+ /* Wait queue set when active_count == 0 */
+ wait_queue_head_t zero_active_count_wait;
+
+ /**
+ * Bit masks identifying the available shader cores that are specified
+ * via sysfs. One mask per job slot.
+ */
+ u64 debug_core_mask[BASE_JM_MAX_NR_SLOTS];
+ u64 debug_core_mask_all;
+
+ /**
+ * Callback for initializing the runtime power management.
+ *
+ * @param kbdev The kbase device
+ *
+ * @return 0 on success, else error code
+ */
+ int (*callback_power_runtime_init)(struct kbase_device *kbdev);
+
+ /**
+ * Callback for terminating the runtime power management.
+ *
+ * @param kbdev The kbase device
+ */
+ void (*callback_power_runtime_term)(struct kbase_device *kbdev);
+
+ /* Time in milliseconds between each dvfs sample */
+ u32 dvfs_period;
+
+ /* Period of GPU poweroff timer */
+ ktime_t gpu_poweroff_time;
+
+ /* Number of ticks of GPU poweroff timer before shader is powered off */
+ int poweroff_shader_ticks;
+
+ /* Number of ticks of GPU poweroff timer before GPU is powered off */
+ int poweroff_gpu_ticks;
+
+ struct kbase_pm_backend_data backend;
+};
+
+/**
+ * struct kbase_protected_ops - Platform specific functions for GPU protected
+ * mode operations
+ * @protected_mode_enter: Callback to enter protected mode on the GPU
+ * @protected_mode_reset: Callback to reset the GPU and exit protected mode.
+ * @protected_mode_supported: Callback to check if protected mode is supported.
+ */
+struct kbase_protected_ops {
+ /**
+ * protected_mode_enter() - Enter protected mode on the GPU
+ * @kbdev: The kbase device
+ *
+ * Return: 0 on success, non-zero on error
+ */
+ int (*protected_mode_enter)(struct kbase_device *kbdev);
+
+ /**
+ * protected_mode_reset() - Reset the GPU and exit protected mode
+ * @kbdev: The kbase device
+ *
+ * Return: 0 on success, non-zero on error
+ */
+ int (*protected_mode_reset)(struct kbase_device *kbdev);
+
+ /**
+ * protected_mode_supported() - Check if protected mode is supported
+ * @kbdev: The kbase device
+ *
+	 * Return: true if protected mode is supported, false otherwise
+ */
+ bool (*protected_mode_supported)(struct kbase_device *kbdev);
+};
+
+
+/**
+ * struct kbase_mem_pool - Page based memory pool for kctx/kbdev
+ * @kbdev: Kbase device where memory is used
+ * @cur_size: Number of free pages currently in the pool (may exceed @max_size
+ * in some corner cases)
+ * @max_size: Maximum number of free pages in the pool
+ * @pool_lock: Lock protecting the pool - must be held when modifying @cur_size
+ * and @page_list
+ * @page_list: List of free pages in the pool
+ * @reclaim: Shrinker for kernel reclaim of free pages
+ * @next_pool: Pointer to next pool where pages can be allocated when this pool
+ * is empty. Pages will spill over to the next pool when this pool
+ * is full. Can be NULL if there is no next pool.
+ */
+struct kbase_mem_pool {
+ struct kbase_device *kbdev;
+ size_t cur_size;
+ size_t max_size;
+ spinlock_t pool_lock;
+ struct list_head page_list;
+ struct shrinker reclaim;
+
+ struct kbase_mem_pool *next_pool;
+};
+
+
+#define DEVNAME_SIZE 16
+
+struct kbase_device {
+ s8 slot_submit_count_irq[BASE_JM_MAX_NR_SLOTS];
+
+ u32 hw_quirks_sc;
+ u32 hw_quirks_tiler;
+ u32 hw_quirks_mmu;
+ u32 hw_quirks_jm;
+
+ struct list_head entry;
+ struct device *dev;
+ struct miscdevice mdev;
+ u64 reg_start;
+ size_t reg_size;
+ void __iomem *reg;
+
+ struct {
+ int irq;
+ int flags;
+ } irqs[3];
+
+ struct clk *clock;
+#ifdef CONFIG_REGULATOR
+ struct regulator *regulator;
+#endif
+ char devname[DEVNAME_SIZE];
+
+#ifdef CONFIG_MALI_NO_MALI
+ void *model;
+ struct kmem_cache *irq_slab;
+ struct workqueue_struct *irq_workq;
+ atomic_t serving_job_irq;
+ atomic_t serving_gpu_irq;
+ atomic_t serving_mmu_irq;
+ spinlock_t reg_op_lock;
+#endif /* CONFIG_MALI_NO_MALI */
+
+ struct kbase_pm_device_data pm;
+ struct kbasep_js_device_data js_data;
+ struct kbase_mem_pool mem_pool;
+ struct kbasep_mem_device memdev;
+ struct kbase_mmu_mode const *mmu_mode;
+
+ struct kbase_as as[BASE_MAX_NR_AS];
+
+ spinlock_t mmu_mask_change;
+
+ struct kbase_gpu_props gpu_props;
+
+ /** List of SW workarounds for HW issues */
+ unsigned long hw_issues_mask[(BASE_HW_ISSUE_END + BITS_PER_LONG - 1) / BITS_PER_LONG];
+ /** List of features available */
+ unsigned long hw_features_mask[(BASE_HW_FEATURE_END + BITS_PER_LONG - 1) / BITS_PER_LONG];
+
+ /* Bitmaps of cores that are currently in use (running jobs).
+ * These should be kept up to date by the job scheduler.
+ *
+ * pm.power_change_lock should be held when accessing these members.
+ *
+ * kbase_pm_check_transitions_nolock() should be called when bits are
+ * cleared to update the power management system and allow transitions to
+ * occur. */
+ u64 shader_inuse_bitmap;
+
+ /* Refcount for cores in use */
+ u32 shader_inuse_cnt[64];
+
+ /* Bitmaps of cores the JS needs for jobs ready to run */
+ u64 shader_needed_bitmap;
+
+ /* Refcount for cores needed */
+ u32 shader_needed_cnt[64];
+
+ u32 tiler_inuse_cnt;
+
+ u32 tiler_needed_cnt;
+
+ /* struct for keeping track of the disjoint information
+ *
+ * The state is > 0 if the GPU is in a disjoint state. Otherwise 0
+ * The count is the number of disjoint events that have occurred on the GPU
+ */
+ struct {
+ atomic_t count;
+ atomic_t state;
+ } disjoint_event;
+
+ /* Refcount for tracking users of the l2 cache, e.g. when using hardware counter instrumentation. */
+ u32 l2_users_count;
+
+	/* Bitmaps of cores that are currently available (powered up and the power policy is happy for jobs to be
+	 * submitted to these cores). These are updated by the power management code. The job scheduler should avoid
+	 * submitting new jobs to any cores that are not marked as available.
+ *
+ * pm.power_change_lock should be held when accessing these members.
+ */
+ u64 shader_available_bitmap;
+ u64 tiler_available_bitmap;
+ u64 l2_available_bitmap;
+
+ u64 shader_ready_bitmap;
+ u64 shader_transitioning_bitmap;
+
+ s8 nr_hw_address_spaces; /**< Number of address spaces in the GPU (constant after driver initialisation) */
+ s8 nr_user_address_spaces; /**< Number of address spaces available to user contexts */
+
+ /* Structure used for instrumentation and HW counters dumping */
+ struct kbase_hwcnt {
+ /* The lock should be used when accessing any of the following members */
+ spinlock_t lock;
+
+ struct kbase_context *kctx;
+ u64 addr;
+
+ struct kbase_instr_backend backend;
+ } hwcnt;
+
+ struct kbase_vinstr_context *vinstr_ctx;
+
+	/* Value to be written to the irq_throttle register each time an irq is served */
+ atomic_t irq_throttle_cycles;
+
+#if KBASE_TRACE_ENABLE
+ spinlock_t trace_lock;
+ u16 trace_first_out;
+ u16 trace_next_in;
+ struct kbase_trace *trace_rbuf;
+#endif
+
+ u32 reset_timeout_ms;
+
+ struct mutex cacheclean_lock;
+
+ /* Platform specific private data to be accessed by mali_kbase_config_xxx.c only */
+ void *platform_context;
+
+ /* List of kbase_contexts created */
+ struct list_head kctx_list;
+ struct mutex kctx_list_lock;
+
+#ifdef CONFIG_PM_DEVFREQ
+ struct devfreq_dev_profile devfreq_profile;
+ struct devfreq *devfreq;
+ unsigned long current_freq;
+ unsigned long current_voltage;
+#ifdef CONFIG_DEVFREQ_THERMAL
+#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 4, 0)
+ struct devfreq_cooling_device *devfreq_cooling;
+#else
+ struct thermal_cooling_device *devfreq_cooling;
+#endif
+#endif
+#endif
+
+ struct kbase_ipa_context *ipa_ctx;
+
+#ifdef CONFIG_MALI_TRACE_TIMELINE
+ struct kbase_trace_kbdev_timeline timeline;
+#endif
+
+ /*
+ * Control for enabling job dump on failure, set when control debugfs
+ * is opened.
+ */
+ bool job_fault_debug;
+
+#ifdef CONFIG_DEBUG_FS
+ /* directory for debugfs entries */
+ struct dentry *mali_debugfs_directory;
+ /* Root directory for per context entry */
+ struct dentry *debugfs_ctx_directory;
+
+#ifdef CONFIG_MALI_DEBUG
+ /* bit for each as, set if there is new data to report */
+ u64 debugfs_as_read_bitmap;
+#endif /* CONFIG_MALI_DEBUG */
+
+ /* failed job dump, used for separate debug process */
+ wait_queue_head_t job_fault_wq;
+ wait_queue_head_t job_fault_resume_wq;
+ struct workqueue_struct *job_fault_resume_workq;
+ struct list_head job_fault_event_list;
+ spinlock_t job_fault_event_lock;
+ struct kbase_context *kctx_fault;
+
+#if !MALI_CUSTOMER_RELEASE
+ /* Per-device data for register dumping interface */
+ struct {
+ u16 reg_offset; /* Offset of a GPU_CONTROL register to be
+ dumped upon request */
+ } regs_dump_debugfs_data;
+#endif /* !MALI_CUSTOMER_RELEASE */
+#endif /* CONFIG_DEBUG_FS */
+
+ /* fbdump profiling controls set by gator */
+ u32 kbase_profiling_controls[FBDUMP_CONTROL_MAX];
+
+
+#if MALI_CUSTOMER_RELEASE == 0
+ /* Number of jobs that are run before a job is forced to fail and
+ * replay. May be KBASEP_FORCE_REPLAY_DISABLED, to disable forced
+ * failures. */
+ int force_replay_limit;
+ /* Count of jobs between forced failures. Incremented on each job. A
+ * job is forced to fail once this is greater than or equal to
+ * force_replay_limit. */
+ int force_replay_count;
+ /* Core requirement for jobs to be failed and replayed. May be zero. */
+ base_jd_core_req force_replay_core_req;
+ /* true if force_replay_limit should be randomized. The random
+ * value will be in the range of 1 - KBASEP_FORCE_REPLAY_RANDOM_LIMIT.
+ */
+ bool force_replay_random;
+#endif
+
+ /* Total number of created contexts */
+ atomic_t ctx_num;
+
+#ifdef CONFIG_DEBUG_FS
+ /* Holds the most recent register accesses */
+ struct kbase_io_history io_history;
+#endif /* CONFIG_DEBUG_FS */
+
+ struct kbase_hwaccess_data hwaccess;
+
+ /* Count of page/bus faults waiting for workqueues to process */
+ atomic_t faults_pending;
+
+ /* true if GPU is powered off or power off operation is in progress */
+ bool poweroff_pending;
+
+
+ /* defaults for new context created for this device */
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 4, 0))
+ bool infinite_cache_active_default;
+#else
+ u32 infinite_cache_active_default;
+#endif
+ size_t mem_pool_max_size_default;
+
+ /* current gpu coherency mode */
+ u32 current_gpu_coherency_mode;
+ /* system coherency mode */
+ u32 system_coherency;
+ /* Flag to track when cci snoops have been enabled on the interface */
+ bool cci_snoop_enabled;
+
+ /* SMC function IDs to call into Trusted firmware to enable/disable
+ * cache snooping. Value of 0 indicates that they are not used
+ */
+ u32 snoop_enable_smc;
+ u32 snoop_disable_smc;
+
+ /* Protected operations */
+ struct kbase_protected_ops *protected_ops;
+
+ /*
+ * true when GPU is put into protected mode
+ */
+ bool protected_mode;
+
+ /*
+ * true when GPU is transitioning into or out of protected mode
+ */
+ bool protected_mode_transition;
+
+ /*
+ * true if protected mode is supported
+ */
+ bool protected_mode_support;
+
+
+#ifdef CONFIG_MALI_DEBUG
+ wait_queue_head_t driver_inactive_wait;
+ bool driver_inactive;
+#endif /* CONFIG_MALI_DEBUG */
+
+#ifdef CONFIG_MALI_FPGA_BUS_LOGGER
+ /*
+ * Bus logger integration.
+ */
+ struct bus_logger_client *buslogger;
+#endif
+ /* Boolean indicating if an IRQ flush during reset is in progress. */
+ bool irq_reset_flush;
+
+	/* List of initialised subsystems. Used during termination/error recovery */
+ u32 inited_subsys;
+
+ spinlock_t hwaccess_lock;
+
+ /* Protects access to MMU operations */
+ struct mutex mmu_hw_mutex;
+};
+
+/**
+ * struct jsctx_queue - JS context atom queue
+ * @runnable_tree: Root of RB-tree containing currently runnable atoms on this
+ * job slot.
+ * @x_dep_head: Head item of the linked list of atoms blocked on cross-slot
+ * dependencies. Atoms on this list will be moved to the
+ * runnable_tree when the blocking atom completes.
+ *
+ * hwaccess_lock must be held when accessing this structure.
+ */
+struct jsctx_queue {
+ struct rb_root runnable_tree;
+ struct list_head x_dep_head;
+};
+
+
+#define KBASE_API_VERSION(major, minor) ((((major) & 0xFFF) << 20) | \
+ (((minor) & 0xFFF) << 8) | \
+ ((0 & 0xFF) << 0))
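+
+/*
+ * For illustration (not an additional definition from the interface): the
+ * packing above places the major version in bits 31..20, the minor version in
+ * bits 19..8 and leaves bits 7..0 as zero, so for example
+ *
+ *	KBASE_API_VERSION(10, 6) == (10 << 20) | (6 << 8) == 0x00A00600
+ */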
+
+/**
+ * enum kbase_context_flags - Flags for kbase contexts
+ *
+ * @KCTX_COMPAT: Set when the context process is a compat process, 32-bit
+ * process on a 64-bit kernel.
+ *
+ * @KCTX_RUNNABLE_REF: Set when context is counted in
+ * kbdev->js_data.nr_contexts_runnable. Must hold queue_mutex when accessing.
+ *
+ * @KCTX_ACTIVE: Set when the context is active.
+ *
+ * @KCTX_PULLED: Set when last kick() caused atoms to be pulled from this
+ * context.
+ *
+ * @KCTX_MEM_PROFILE_INITIALIZED: Set when the context's memory profile has been
+ * initialized.
+ *
+ * @KCTX_INFINITE_CACHE: Set when infinite cache is to be enabled for new
+ * allocations. Existing allocations will not change.
+ *
+ * @KCTX_SUBMIT_DISABLED: Set to prevent context from submitting any jobs.
+ *
+ * @KCTX_PRIVILEGED: Set if the context uses an address space and should be kept
+ * scheduled in.
+ *
+ * @KCTX_SCHEDULED: Set when the context is scheduled on the Run Pool.
+ * This is only ever updated whilst the jsctx_mutex is held.
+ *
+ * @KCTX_DYING: Set when the context is in the process of being evicted.
+ *
+ * All members need to be separate bits. This enum is intended for use in a
+ * bitmask where multiple values get OR-ed together.
+ */
+enum kbase_context_flags {
+ KCTX_COMPAT = 1U << 0,
+ KCTX_RUNNABLE_REF = 1U << 1,
+ KCTX_ACTIVE = 1U << 2,
+ KCTX_PULLED = 1U << 3,
+ KCTX_MEM_PROFILE_INITIALIZED = 1U << 4,
+ KCTX_INFINITE_CACHE = 1U << 5,
+ KCTX_SUBMIT_DISABLED = 1U << 6,
+ KCTX_PRIVILEGED = 1U << 7,
+ KCTX_SCHEDULED = 1U << 8,
+ KCTX_DYING = 1U << 9,
+};
+
+struct kbase_context {
+ struct file *filp;
+ struct kbase_device *kbdev;
+ int id; /* System wide unique id */
+ unsigned long api_version;
+ phys_addr_t pgd;
+ struct list_head event_list;
+ struct list_head event_coalesce_list;
+ struct mutex event_mutex;
+ atomic_t event_closed;
+ struct workqueue_struct *event_workq;
+ atomic_t event_count;
+ int event_coalesce_count;
+
+ atomic_t flags;
+
+ atomic_t setup_complete;
+ atomic_t setup_in_progress;
+
+ u64 *mmu_teardown_pages;
+
+ struct page *aliasing_sink_page;
+
+ struct mutex mmu_lock;
+ struct mutex reg_lock; /* To be converted to a rwlock? */
+ struct rb_root reg_rbtree; /* Red-Black tree of GPU regions (live regions) */
+
+ unsigned long cookies;
+ struct kbase_va_region *pending_regions[BITS_PER_LONG];
+
+ wait_queue_head_t event_queue;
+ pid_t tgid;
+ pid_t pid;
+
+ struct kbase_jd_context jctx;
+ atomic_t used_pages;
+ atomic_t nonmapped_pages;
+
+ struct kbase_mem_pool mem_pool;
+
+ struct shrinker reclaim;
+ struct list_head evict_list;
+ struct mutex evict_lock;
+
+ struct list_head waiting_soft_jobs;
+ spinlock_t waiting_soft_jobs_lock;
+#ifdef CONFIG_KDS
+ struct list_head waiting_kds_resource;
+#endif
+#ifdef CONFIG_MALI_DMA_FENCE
+ struct {
+ struct list_head waiting_resource;
+ struct workqueue_struct *wq;
+ } dma_fence;
+#endif /* CONFIG_MALI_DMA_FENCE */
+ /** This is effectively part of the Run Pool, because it only has a valid
+ * setting (!=KBASEP_AS_NR_INVALID) whilst the context is scheduled in
+ *
+ * The hwaccess_lock must be held whilst accessing this.
+ *
+ * If the context relating to this as_nr is required, you must use
+ * kbasep_js_runpool_retain_ctx() to ensure that the context doesn't disappear
+ * whilst you're using it. Alternatively, just hold the hwaccess_lock
+ * to ensure the context doesn't disappear (but this has restrictions on what other locks
+ * you can take whilst doing this) */
+ int as_nr;
+
+ /* NOTE:
+ *
+ * Flags are in jctx.sched_info.ctx.flags
+ * Mutable flags *must* be accessed under jctx.sched_info.ctx.jsctx_mutex
+ *
+ * All other flags must be added there */
+ spinlock_t mm_update_lock;
+ struct mm_struct *process_mm;
+ /* End of the SAME_VA zone */
+ u64 same_va_end;
+
+#ifdef CONFIG_MALI_TRACE_TIMELINE
+ struct kbase_trace_kctx_timeline timeline;
+#endif
+#ifdef CONFIG_DEBUG_FS
+ /* Content of mem_profile file */
+ char *mem_profile_data;
+ /* Size of @c mem_profile_data */
+ size_t mem_profile_size;
+ /* Mutex guarding memory profile state */
+ struct mutex mem_profile_lock;
+ /* Memory profile directory under debugfs */
+ struct dentry *kctx_dentry;
+
+ /* for job fault debug */
+ unsigned int *reg_dump;
+ atomic_t job_fault_count;
+ /* This list will keep the following atoms during the dump
+ * in the same context
+ */
+ struct list_head job_fault_resume_event_list;
+
+#endif /* CONFIG_DEBUG_FS */
+
+ struct jsctx_queue jsctx_queue
+ [KBASE_JS_ATOM_SCHED_PRIO_COUNT][BASE_JM_MAX_NR_SLOTS];
+
+ /* Number of atoms currently pulled from this context */
+ atomic_t atoms_pulled;
+ /* Number of atoms currently pulled from this context, per slot */
+ atomic_t atoms_pulled_slot[BASE_JM_MAX_NR_SLOTS];
+ /* Bitmask of slots that can be pulled from */
+ u32 slots_pullable;
+
+ /* Backend specific data */
+ struct kbase_context_backend backend;
+
+ /* Work structure used for deferred ASID assignment */
+ struct work_struct work;
+
+ /* Only one userspace vinstr client per kbase context */
+ struct kbase_vinstr_client *vinstr_cli;
+ struct mutex vinstr_cli_lock;
+
+ /* List of completed jobs waiting for events to be posted */
+ struct list_head completed_jobs;
+ /* Number of work items currently pending on job_done_wq */
+ atomic_t work_count;
+
+ /* Waiting soft-jobs will fail when this timer expires */
+ struct timer_list soft_job_timeout;
+
+ /* JIT allocation management */
+ struct kbase_va_region *jit_alloc[256];
+ struct list_head jit_active_head;
+ struct list_head jit_pool_head;
+ struct list_head jit_destroy_head;
+ struct mutex jit_lock;
+ struct work_struct jit_work;
+
+ /* External sticky resource management */
+ struct list_head ext_res_meta_head;
+
+ /* Used to record that a drain was requested from atomic context */
+ atomic_t drain_pending;
+
+ /* Current age count, used to determine age for newly submitted atoms */
+ u32 age_count;
+};
+
+/**
+ * struct kbase_ctx_ext_res_meta - Structure which binds an external resource
+ * to a @kbase_context.
+ * @ext_res_node: List head for adding the metadata to a
+ * @kbase_context.
+ * @alloc: The physical memory allocation structure
+ * which is mapped.
+ * @gpu_addr: The GPU virtual address the resource is
+ * mapped to.
+ *
+ * External resources can be mapped into multiple contexts as well as the same
+ * context multiple times.
+ * As kbase_va_region itself isn't refcounted we can't attach our extra
+ * information to it as it could be removed under our feet leaving external
+ * resources pinned.
+ * This metadata structure binds a single external resource to a single
+ * context, ensuring that per context mapping is tracked separately so it can
+ * be overridden when needed, and so that abuse by the application (freeing the resource
+ * multiple times) does not affect the refcount of the physical allocation.
+ */
+struct kbase_ctx_ext_res_meta {
+ struct list_head ext_res_node;
+ struct kbase_mem_phy_alloc *alloc;
+ u64 gpu_addr;
+};
+
+enum kbase_reg_access_type {
+ REG_READ,
+ REG_WRITE
+};
+
+enum kbase_share_attr_bits {
+ /* (1ULL << 8) bit is reserved */
+ SHARE_BOTH_BITS = (2ULL << 8), /* inner and outer shareable coherency */
+ SHARE_INNER_BITS = (3ULL << 8) /* inner shareable coherency */
+};
+
+/**
+ * kbase_device_is_cpu_coherent - Returns if the device is CPU coherent.
+ * @kbdev: kbase device
+ *
+ * Return: true if the device access are coherent, false if not.
+ */
+static inline bool kbase_device_is_cpu_coherent(struct kbase_device *kbdev)
+{
+ if ((kbdev->system_coherency == COHERENCY_ACE_LITE) ||
+ (kbdev->system_coherency == COHERENCY_ACE))
+ return true;
+
+ return false;
+}
+
+/* Conversion helpers for setting up high resolution timers */
+#define HR_TIMER_DELAY_MSEC(x) (ns_to_ktime(((u64)(x))*1000000U))
+#define HR_TIMER_DELAY_NSEC(x) (ns_to_ktime(x))
+
+/* Maximum number of loops polling the GPU for a cache flush before we assume it must have completed */
+#define KBASE_CLEAN_CACHE_MAX_LOOPS 100000
+/* Maximum number of loops polling the GPU for an AS command to complete before we assume the GPU has hung */
+#define KBASE_AS_INACTIVE_MAX_LOOPS 100000
+
+/* Maximum number of times a job can be replayed */
+#define BASEP_JD_REPLAY_LIMIT 15
+
+/* JobDescriptorHeader - taken from the architecture specifications, the layout
+ * is currently identical for all GPU archs. */
+struct job_descriptor_header {
+ u32 exception_status;
+ u32 first_incomplete_task;
+ u64 fault_pointer;
+ u8 job_descriptor_size : 1;
+ u8 job_type : 7;
+ u8 job_barrier : 1;
+ u8 _reserved_01 : 1;
+ u8 _reserved_1 : 1;
+ u8 _reserved_02 : 1;
+ u8 _reserved_03 : 1;
+ u8 _reserved_2 : 1;
+ u8 _reserved_04 : 1;
+ u8 _reserved_05 : 1;
+ u16 job_index;
+ u16 job_dependency_index_1;
+ u16 job_dependency_index_2;
+ union {
+ u64 _64;
+ u32 _32;
+ } next_job;
+};
+
+#endif /* _KBASE_DEFS_H_ */
diff --git a/midgard-0.r2p0/mali_kbase_device.c b/midgard-0.r2p0/mali_kbase_device.c
new file mode 100755
index 0000000..7484eec
--- /dev/null
+++ b/midgard-0.r2p0/mali_kbase_device.c
@@ -0,0 +1,697 @@
+/*
+ *
+ * (C) COPYRIGHT 2010-2016 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained
+ * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
+ * Boston, MA 02110-1301, USA.
+ *
+ */
+
+
+
+
+
+/*
+ * Base kernel device APIs
+ */
+
+#include <linux/debugfs.h>
+#include <linux/dma-mapping.h>
+#include <linux/seq_file.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of_platform.h>
+
+#include <mali_kbase.h>
+#include <mali_kbase_defs.h>
+#include <mali_kbase_hwaccess_instr.h>
+#include <mali_kbase_hw.h>
+#include <mali_kbase_config_defaults.h>
+
+#include <mali_kbase_profiling_gator_api.h>
+
+/* NOTE: Magic - 0x45435254 (TRCE in ASCII).
+ * Supports tracing feature provided in the base module.
+ * Please keep it in sync with the value of base module.
+ */
+#define TRACE_BUFFER_HEADER_SPECIAL 0x45435254
+
+#if KBASE_TRACE_ENABLE
+static const char *kbasep_trace_code_string[] = {
+ /* IMPORTANT: USE OF SPECIAL #INCLUDE OF NON-STANDARD HEADER FILE
+ * THIS MUST BE USED AT THE START OF THE ARRAY */
+#define KBASE_TRACE_CODE_MAKE_CODE(X) # X
+#include "mali_kbase_trace_defs.h"
+#undef KBASE_TRACE_CODE_MAKE_CODE
+};
+#endif
+
+#define DEBUG_MESSAGE_SIZE 256
+
+static int kbasep_trace_init(struct kbase_device *kbdev);
+static void kbasep_trace_term(struct kbase_device *kbdev);
+static void kbasep_trace_hook_wrapper(void *param);
+
+struct kbase_device *kbase_device_alloc(void)
+{
+ return kzalloc(sizeof(struct kbase_device), GFP_KERNEL);
+}
+
+static int kbase_device_as_init(struct kbase_device *kbdev, int i)
+{
+ const char format[] = "mali_mmu%d";
+ char name[sizeof(format)];
+ const char poke_format[] = "mali_mmu%d_poker";
+ char poke_name[sizeof(poke_format)];
+
+ if (kbase_hw_has_issue(kbdev, BASE_HW_ISSUE_8316))
+ snprintf(poke_name, sizeof(poke_name), poke_format, i);
+
+ snprintf(name, sizeof(name), format, i);
+
+ kbdev->as[i].number = i;
+ kbdev->as[i].fault_addr = 0ULL;
+
+ kbdev->as[i].pf_wq = alloc_workqueue(name, 0, 1);
+ if (!kbdev->as[i].pf_wq)
+ return -EINVAL;
+
+ INIT_WORK(&kbdev->as[i].work_pagefault, page_fault_worker);
+ INIT_WORK(&kbdev->as[i].work_busfault, bus_fault_worker);
+
+ if (kbase_hw_has_issue(kbdev, BASE_HW_ISSUE_8316)) {
+ struct hrtimer *poke_timer = &kbdev->as[i].poke_timer;
+ struct work_struct *poke_work = &kbdev->as[i].poke_work;
+
+ kbdev->as[i].poke_wq = alloc_workqueue(poke_name, 0, 1);
+ if (!kbdev->as[i].poke_wq) {
+ destroy_workqueue(kbdev->as[i].pf_wq);
+ return -EINVAL;
+ }
+ KBASE_DEBUG_ASSERT(!object_is_on_stack(poke_work));
+ INIT_WORK(poke_work, kbasep_as_do_poke);
+
+ hrtimer_init(poke_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
+
+ poke_timer->function = kbasep_as_poke_timer_callback;
+
+ kbdev->as[i].poke_refcount = 0;
+ kbdev->as[i].poke_state = 0u;
+ }
+
+ return 0;
+}
+
+static void kbase_device_as_term(struct kbase_device *kbdev, int i)
+{
+ destroy_workqueue(kbdev->as[i].pf_wq);
+ if (kbase_hw_has_issue(kbdev, BASE_HW_ISSUE_8316))
+ destroy_workqueue(kbdev->as[i].poke_wq);
+}
+
+static int kbase_device_all_as_init(struct kbase_device *kbdev)
+{
+ int i, err;
+
+ for (i = 0; i < kbdev->nr_hw_address_spaces; i++) {
+ err = kbase_device_as_init(kbdev, i);
+ if (err)
+ goto free_workqs;
+ }
+
+ return 0;
+
+free_workqs:
+ for (; i > 0; i--)
+ kbase_device_as_term(kbdev, i);
+
+ return err;
+}
+
+static void kbase_device_all_as_term(struct kbase_device *kbdev)
+{
+ int i;
+
+ for (i = 0; i < kbdev->nr_hw_address_spaces; i++)
+ kbase_device_as_term(kbdev, i);
+}
+
+int kbase_device_init(struct kbase_device * const kbdev)
+{
+ int i, err;
+#ifdef CONFIG_ARM64
+ struct device_node *np = NULL;
+#endif /* CONFIG_ARM64 */
+
+ spin_lock_init(&kbdev->mmu_mask_change);
+ mutex_init(&kbdev->mmu_hw_mutex);
+#ifdef CONFIG_ARM64
+ kbdev->cci_snoop_enabled = false;
+ np = kbdev->dev->of_node;
+ if (np != NULL) {
+ if (of_property_read_u32(np, "snoop_enable_smc",
+ &kbdev->snoop_enable_smc))
+ kbdev->snoop_enable_smc = 0;
+ if (of_property_read_u32(np, "snoop_disable_smc",
+ &kbdev->snoop_disable_smc))
+ kbdev->snoop_disable_smc = 0;
+ /* Either both or none of the calls should be provided. */
+ if (!((kbdev->snoop_disable_smc == 0
+ && kbdev->snoop_enable_smc == 0)
+ || (kbdev->snoop_disable_smc != 0
+ && kbdev->snoop_enable_smc != 0))) {
+ WARN_ON(1);
+ err = -EINVAL;
+ goto fail;
+ }
+ }
+#endif /* CONFIG_ARM64 */
+ /* Get the list of workarounds for issues on the current HW
+ * (identified by the GPU_ID register)
+ */
+ err = kbase_hw_set_issues_mask(kbdev);
+ if (err)
+ goto fail;
+
+ /* Set the list of features available on the current HW
+ * (identified by the GPU_ID register)
+ */
+ kbase_hw_set_features_mask(kbdev);
+
+ kbase_gpuprops_set_features(kbdev);
+
+ /* On Linux 4.0+, dma coherency is determined from device tree */
+#if defined(CONFIG_ARM64) && LINUX_VERSION_CODE < KERNEL_VERSION(4, 0, 0)
+ set_dma_ops(kbdev->dev, &noncoherent_swiotlb_dma_ops);
+#endif
+
+ /* Workaround a pre-3.13 Linux issue, where dma_mask is NULL when our
+ * device structure was created by device-tree
+ */
+ if (!kbdev->dev->dma_mask)
+ kbdev->dev->dma_mask = &kbdev->dev->coherent_dma_mask;
+
+ err = dma_set_mask(kbdev->dev,
+ DMA_BIT_MASK(kbdev->gpu_props.mmu.pa_bits));
+ if (err)
+ goto dma_set_mask_failed;
+
+ err = dma_set_coherent_mask(kbdev->dev,
+ DMA_BIT_MASK(kbdev->gpu_props.mmu.pa_bits));
+ if (err)
+ goto dma_set_mask_failed;
+
+ kbdev->nr_hw_address_spaces = kbdev->gpu_props.num_address_spaces;
+
+ err = kbase_device_all_as_init(kbdev);
+ if (err)
+ goto as_init_failed;
+
+ spin_lock_init(&kbdev->hwcnt.lock);
+
+ err = kbasep_trace_init(kbdev);
+ if (err)
+ goto term_as;
+
+ mutex_init(&kbdev->cacheclean_lock);
+
+#ifdef CONFIG_MALI_TRACE_TIMELINE
+ for (i = 0; i < BASE_JM_MAX_NR_SLOTS; ++i)
+ kbdev->timeline.slot_atoms_submitted[i] = 0;
+
+ for (i = 0; i <= KBASEP_TIMELINE_PM_EVENT_LAST; ++i)
+ atomic_set(&kbdev->timeline.pm_event_uid[i], 0);
+#endif /* CONFIG_MALI_TRACE_TIMELINE */
+
+ /* fbdump profiling controls set to 0 - fbdump not enabled until changed by gator */
+ for (i = 0; i < FBDUMP_CONTROL_MAX; i++)
+ kbdev->kbase_profiling_controls[i] = 0;
+
+ kbase_debug_assert_register_hook(&kbasep_trace_hook_wrapper, kbdev);
+
+ atomic_set(&kbdev->ctx_num, 0);
+
+ err = kbase_instr_backend_init(kbdev);
+ if (err)
+ goto term_trace;
+
+ kbdev->pm.dvfs_period = DEFAULT_PM_DVFS_PERIOD;
+
+ kbdev->reset_timeout_ms = DEFAULT_RESET_TIMEOUT_MS;
+
+#ifdef CONFIG_MALI_GPU_MMU_AARCH64
+ kbdev->mmu_mode = kbase_mmu_mode_get_aarch64();
+#else
+ kbdev->mmu_mode = kbase_mmu_mode_get_lpae();
+#endif /* CONFIG_MALI_GPU_MMU_AARCH64 */
+
+#ifdef CONFIG_MALI_DEBUG
+ init_waitqueue_head(&kbdev->driver_inactive_wait);
+#endif /* CONFIG_MALI_DEBUG */
+
+ return 0;
+term_trace:
+ kbasep_trace_term(kbdev);
+term_as:
+ kbase_device_all_as_term(kbdev);
+as_init_failed:
+dma_set_mask_failed:
+fail:
+ return err;
+}
+
+void kbase_device_term(struct kbase_device *kbdev)
+{
+ KBASE_DEBUG_ASSERT(kbdev);
+
+#if KBASE_TRACE_ENABLE
+ kbase_debug_assert_register_hook(NULL, NULL);
+#endif
+
+ kbase_instr_backend_term(kbdev);
+
+ kbasep_trace_term(kbdev);
+
+ kbase_device_all_as_term(kbdev);
+}
+
+void kbase_device_free(struct kbase_device *kbdev)
+{
+ kfree(kbdev);
+}
+
+int kbase_device_trace_buffer_install(
+ struct kbase_context *kctx, u32 *tb, size_t size)
+{
+ unsigned long flags;
+
+ KBASE_DEBUG_ASSERT(kctx);
+ KBASE_DEBUG_ASSERT(tb);
+
+ /* Interface uses 16-bit value to track last accessed entry. Each entry
+ * is composed of two 32-bit words.
+ * This limits the size that can be handled without an overflow. */
+ if (0xFFFF * (2 * sizeof(u32)) < size)
+ return -EINVAL;
+
+ /* set up the header */
+ /* magic number in the first 4 bytes */
+ tb[0] = TRACE_BUFFER_HEADER_SPECIAL;
+ /* Store (write offset = 0, wrap counter = 0, transaction active = no)
+ * write offset 0 means never written.
+ * Offsets 1 to (wrap_offset - 1) used to store values when trace started
+ */
+ tb[1] = 0;
+
+ /* install trace buffer */
+ spin_lock_irqsave(&kctx->jctx.tb_lock, flags);
+ kctx->jctx.tb_wrap_offset = size / 8;
+ kctx->jctx.tb = tb;
+ spin_unlock_irqrestore(&kctx->jctx.tb_lock, flags);
+
+ return 0;
+}
+
+void kbase_device_trace_buffer_uninstall(struct kbase_context *kctx)
+{
+ unsigned long flags;
+
+ KBASE_DEBUG_ASSERT(kctx);
+ spin_lock_irqsave(&kctx->jctx.tb_lock, flags);
+ kctx->jctx.tb = NULL;
+ kctx->jctx.tb_wrap_offset = 0;
+ spin_unlock_irqrestore(&kctx->jctx.tb_lock, flags);
+}
+
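+/*
+ * Illustrative sketch (not built into the driver): how a consumer of the
+ * trace buffer could unpack the header word that
+ * kbase_device_trace_register_access() below maintains in tb[1]. The field
+ * positions are taken from that function; the helper name is hypothetical.
+ */
+#if 0
+static void example_decode_trace_header(const u32 *tb)
+{
+	u32 header_word = tb[1];
+	bool in_progress = (header_word & 0x1) != 0;	/* transaction active */
+	u16 wrap_count = (header_word >> 1) & 0x7FFF;	/* 15-bit wrap counter */
+	u16 write_offset = (header_word >> 16) & 0xFFFF;	/* last entry written */
+
+	pr_info("trace header: offset=%u wrap=%u busy=%d\n",
+		write_offset, wrap_count, in_progress);
+}
+#endif
+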
+void kbase_device_trace_register_access(struct kbase_context *kctx, enum kbase_reg_access_type type, u16 reg_offset, u32 reg_value)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&kctx->jctx.tb_lock, flags);
+ if (kctx->jctx.tb) {
+ u16 wrap_count;
+ u16 write_offset;
+ u32 *tb = kctx->jctx.tb;
+ u32 header_word;
+
+ header_word = tb[1];
+ KBASE_DEBUG_ASSERT(0 == (header_word & 0x1));
+
+ wrap_count = (header_word >> 1) & 0x7FFF;
+ write_offset = (header_word >> 16) & 0xFFFF;
+
+ /* mark as transaction in progress */
+ tb[1] |= 0x1;
+ mb();
+
+ /* calculate new offset */
+ write_offset++;
+ if (write_offset == kctx->jctx.tb_wrap_offset) {
+ /* wrap */
+ write_offset = 1;
+ wrap_count++;
+ wrap_count &= 0x7FFF; /* 15bit wrap counter */
+ }
+
+ /* store the trace entry at the selected offset */
+ tb[write_offset * 2 + 0] = (reg_offset & ~0x3) | ((type == REG_WRITE) ? 0x1 : 0x0);
+ tb[write_offset * 2 + 1] = reg_value;
+ mb();
+
+ /* new header word */
+ header_word = (write_offset << 16) | (wrap_count << 1) | 0x0; /* transaction complete */
+ tb[1] = header_word;
+ }
+ spin_unlock_irqrestore(&kctx->jctx.tb_lock, flags);
+}
+
+/*
+ * Device trace functions
+ */
+#if KBASE_TRACE_ENABLE
+
+static int kbasep_trace_init(struct kbase_device *kbdev)
+{
+ struct kbase_trace *rbuf;
+
+ rbuf = kmalloc_array(KBASE_TRACE_SIZE, sizeof(*rbuf), GFP_KERNEL);
+
+ if (!rbuf)
+ return -EINVAL;
+
+ kbdev->trace_rbuf = rbuf;
+ spin_lock_init(&kbdev->trace_lock);
+ return 0;
+}
+
+static void kbasep_trace_term(struct kbase_device *kbdev)
+{
+ kfree(kbdev->trace_rbuf);
+}
+
+static void kbasep_trace_format_msg(struct kbase_trace *trace_msg, char *buffer, int len)
+{
+ s32 written = 0;
+
+ /* Initial part of message */
+ written += MAX(snprintf(buffer + written, MAX(len - written, 0), "%d.%.6d,%d,%d,%s,%p,", (int)trace_msg->timestamp.tv_sec, (int)(trace_msg->timestamp.tv_nsec / 1000), trace_msg->thread_id, trace_msg->cpu, kbasep_trace_code_string[trace_msg->code], trace_msg->ctx), 0);
+
+ if (trace_msg->katom)
+ written += MAX(snprintf(buffer + written, MAX(len - written, 0), "atom %d (ud: 0x%llx 0x%llx)", trace_msg->atom_number, trace_msg->atom_udata[0], trace_msg->atom_udata[1]), 0);
+
+ written += MAX(snprintf(buffer + written, MAX(len - written, 0), ",%.8llx,", trace_msg->gpu_addr), 0);
+
+ /* NOTE: Could add function callbacks to handle different message types */
+ /* Jobslot present */
+ if (trace_msg->flags & KBASE_TRACE_FLAG_JOBSLOT)
+ written += MAX(snprintf(buffer + written, MAX(len - written, 0), "%d", trace_msg->jobslot), 0);
+
+ written += MAX(snprintf(buffer + written, MAX(len - written, 0), ","), 0);
+
+ /* Refcount present */
+ if (trace_msg->flags & KBASE_TRACE_FLAG_REFCOUNT)
+ written += MAX(snprintf(buffer + written, MAX(len - written, 0), "%d", trace_msg->refcount), 0);
+
+ written += MAX(snprintf(buffer + written, MAX(len - written, 0), ","), 0);
+
+ /* Rest of message */
+ written += MAX(snprintf(buffer + written, MAX(len - written, 0), "0x%.8lx", trace_msg->info_val), 0);
+}
+
+static void kbasep_trace_dump_msg(struct kbase_device *kbdev, struct kbase_trace *trace_msg)
+{
+ char buffer[DEBUG_MESSAGE_SIZE];
+
+ kbasep_trace_format_msg(trace_msg, buffer, DEBUG_MESSAGE_SIZE);
+ dev_dbg(kbdev->dev, "%s", buffer);
+}
+
+void kbasep_trace_add(struct kbase_device *kbdev, enum kbase_trace_code code, void *ctx, struct kbase_jd_atom *katom, u64 gpu_addr, u8 flags, int refcount, int jobslot, unsigned long info_val)
+{
+ unsigned long irqflags;
+ struct kbase_trace *trace_msg;
+
+ spin_lock_irqsave(&kbdev->trace_lock, irqflags);
+
+ trace_msg = &kbdev->trace_rbuf[kbdev->trace_next_in];
+
+ /* Fill the message */
+ trace_msg->thread_id = task_pid_nr(current);
+ trace_msg->cpu = task_cpu(current);
+
+ getnstimeofday(&trace_msg->timestamp);
+
+ trace_msg->code = code;
+ trace_msg->ctx = ctx;
+
+ if (NULL == katom) {
+ trace_msg->katom = false;
+ } else {
+ trace_msg->katom = true;
+ trace_msg->atom_number = kbase_jd_atom_id(katom->kctx, katom);
+ trace_msg->atom_udata[0] = katom->udata.blob[0];
+ trace_msg->atom_udata[1] = katom->udata.blob[1];
+ }
+
+ trace_msg->gpu_addr = gpu_addr;
+ trace_msg->jobslot = jobslot;
+ trace_msg->refcount = MIN((unsigned int)refcount, 0xFF);
+ trace_msg->info_val = info_val;
+ trace_msg->flags = flags;
+
+ /* Update the ringbuffer indices */
+ kbdev->trace_next_in = (kbdev->trace_next_in + 1) & KBASE_TRACE_MASK;
+ if (kbdev->trace_next_in == kbdev->trace_first_out)
+ kbdev->trace_first_out = (kbdev->trace_first_out + 1) & KBASE_TRACE_MASK;
+
+ /* Done */
+
+ spin_unlock_irqrestore(&kbdev->trace_lock, irqflags);
+}
+
+void kbasep_trace_clear(struct kbase_device *kbdev)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&kbdev->trace_lock, flags);
+ kbdev->trace_first_out = kbdev->trace_next_in;
+ spin_unlock_irqrestore(&kbdev->trace_lock, flags);
+}
+
+void kbasep_trace_dump(struct kbase_device *kbdev)
+{
+ unsigned long flags;
+ u32 start;
+ u32 end;
+
+ dev_dbg(kbdev->dev, "Dumping trace:\nsecs,nthread,cpu,code,ctx,katom,gpu_addr,jobslot,refcount,info_val");
+ spin_lock_irqsave(&kbdev->trace_lock, flags);
+ start = kbdev->trace_first_out;
+ end = kbdev->trace_next_in;
+
+ while (start != end) {
+ struct kbase_trace *trace_msg = &kbdev->trace_rbuf[start];
+
+ kbasep_trace_dump_msg(kbdev, trace_msg);
+
+ start = (start + 1) & KBASE_TRACE_MASK;
+ }
+ dev_dbg(kbdev->dev, "TRACE_END");
+
+ spin_unlock_irqrestore(&kbdev->trace_lock, flags);
+
+ KBASE_TRACE_CLEAR(kbdev);
+}
+
+static void kbasep_trace_hook_wrapper(void *param)
+{
+ struct kbase_device *kbdev = (struct kbase_device *)param;
+
+ kbasep_trace_dump(kbdev);
+}
+
+#ifdef CONFIG_DEBUG_FS
+struct trace_seq_state {
+ struct kbase_trace trace_buf[KBASE_TRACE_SIZE];
+ u32 start;
+ u32 end;
+};
+
+static void *kbasep_trace_seq_start(struct seq_file *s, loff_t *pos)
+{
+ struct trace_seq_state *state = s->private;
+ int i;
+
+ if (*pos > KBASE_TRACE_SIZE)
+ return NULL;
+ i = state->start + *pos;
+ if ((state->end >= state->start && i >= state->end) ||
+ i >= state->end + KBASE_TRACE_SIZE)
+ return NULL;
+
+ i &= KBASE_TRACE_MASK;
+
+ return &state->trace_buf[i];
+}
+
+static void kbasep_trace_seq_stop(struct seq_file *s, void *data)
+{
+}
+
+static void *kbasep_trace_seq_next(struct seq_file *s, void *data, loff_t *pos)
+{
+ struct trace_seq_state *state = s->private;
+ int i;
+
+ (*pos)++;
+
+ i = (state->start + *pos) & KBASE_TRACE_MASK;
+ if (i == state->end)
+ return NULL;
+
+ return &state->trace_buf[i];
+}
+
+static int kbasep_trace_seq_show(struct seq_file *s, void *data)
+{
+ struct kbase_trace *trace_msg = data;
+ char buffer[DEBUG_MESSAGE_SIZE];
+
+ kbasep_trace_format_msg(trace_msg, buffer, DEBUG_MESSAGE_SIZE);
+ seq_printf(s, "%s\n", buffer);
+ return 0;
+}
+
+static const struct seq_operations kbasep_trace_seq_ops = {
+ .start = kbasep_trace_seq_start,
+ .next = kbasep_trace_seq_next,
+ .stop = kbasep_trace_seq_stop,
+ .show = kbasep_trace_seq_show,
+};
+
+static int kbasep_trace_debugfs_open(struct inode *inode, struct file *file)
+{
+ struct kbase_device *kbdev = inode->i_private;
+ unsigned long flags;
+
+ struct trace_seq_state *state;
+
+ state = __seq_open_private(file, &kbasep_trace_seq_ops, sizeof(*state));
+ if (!state)
+ return -ENOMEM;
+
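+	/* Take a consistent snapshot of the ring buffer under the trace lock,
+	 * so the seq_file iterator can walk it without holding the lock.
+	 */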
+ spin_lock_irqsave(&kbdev->trace_lock, flags);
+ state->start = kbdev->trace_first_out;
+ state->end = kbdev->trace_next_in;
+ memcpy(state->trace_buf, kbdev->trace_rbuf, sizeof(state->trace_buf));
+ spin_unlock_irqrestore(&kbdev->trace_lock, flags);
+
+ return 0;
+}
+
+static const struct file_operations kbasep_trace_debugfs_fops = {
+ .open = kbasep_trace_debugfs_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = seq_release_private,
+};
+
+void kbasep_trace_debugfs_init(struct kbase_device *kbdev)
+{
+ debugfs_create_file("mali_trace", S_IRUGO,
+ kbdev->mali_debugfs_directory, kbdev,
+ &kbasep_trace_debugfs_fops);
+}
+
+#else
+void kbasep_trace_debugfs_init(struct kbase_device *kbdev)
+{
+}
+#endif /* CONFIG_DEBUG_FS */
+
+#else /* KBASE_TRACE_ENABLE */
+static int kbasep_trace_init(struct kbase_device *kbdev)
+{
+ CSTD_UNUSED(kbdev);
+ return 0;
+}
+
+static void kbasep_trace_term(struct kbase_device *kbdev)
+{
+ CSTD_UNUSED(kbdev);
+}
+
+static void kbasep_trace_hook_wrapper(void *param)
+{
+ CSTD_UNUSED(param);
+}
+
+void kbasep_trace_dump(struct kbase_device *kbdev)
+{
+ CSTD_UNUSED(kbdev);
+}
+#endif /* KBASE_TRACE_ENABLE */
+
+void kbase_set_profiling_control(struct kbase_device *kbdev, u32 control, u32 value)
+{
+ switch (control) {
+ case FBDUMP_CONTROL_ENABLE:
+ /* fall through */
+ case FBDUMP_CONTROL_RATE:
+ /* fall through */
+ case SW_COUNTER_ENABLE:
+ /* fall through */
+ case FBDUMP_CONTROL_RESIZE_FACTOR:
+ kbdev->kbase_profiling_controls[control] = value;
+ break;
+ default:
+ dev_err(kbdev->dev, "Profiling control %d not found\n", control);
+ break;
+ }
+}
+
+u32 kbase_get_profiling_control(struct kbase_device *kbdev, u32 control)
+{
+ u32 ret_value = 0;
+
+ switch (control) {
+ case FBDUMP_CONTROL_ENABLE:
+ /* fall through */
+ case FBDUMP_CONTROL_RATE:
+ /* fall through */
+ case SW_COUNTER_ENABLE:
+ /* fall through */
+ case FBDUMP_CONTROL_RESIZE_FACTOR:
+ ret_value = kbdev->kbase_profiling_controls[control];
+ break;
+ default:
+ dev_err(kbdev->dev, "Profiling control %d not found\n", control);
+ break;
+ }
+
+ return ret_value;
+}
+
+/*
+ * Called by gator to control the production of profiling information
+ * at runtime.
+ */
+void _mali_profiling_control(u32 action, u32 value)
+{
+ struct kbase_device *kbdev = NULL;
+
+	/* Find the first device, i.e. call with -1 */
+ kbdev = kbase_find_device(-1);
+
+ if (NULL != kbdev)
+ kbase_set_profiling_control(kbdev, action, value);
+}
+KBASE_EXPORT_SYMBOL(_mali_profiling_control);
+
diff --git a/midgard-0.r2p0/mali_kbase_disjoint_events.c b/midgard-0.r2p0/mali_kbase_disjoint_events.c
new file mode 100755
index 0000000..f70bccc
--- /dev/null
+++ b/midgard-0.r2p0/mali_kbase_disjoint_events.c
@@ -0,0 +1,76 @@
+/*
+ *
+ * (C) COPYRIGHT 2014 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained
+ * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
+ * Boston, MA 02110-1301, USA.
+ *
+ */
+
+
+
+/*
+ * Base kernel disjoint events helper functions
+ */
+
+#include <mali_kbase.h>
+
+void kbase_disjoint_init(struct kbase_device *kbdev)
+{
+ KBASE_DEBUG_ASSERT(kbdev != NULL);
+
+ atomic_set(&kbdev->disjoint_event.count, 0);
+ atomic_set(&kbdev->disjoint_event.state, 0);
+}
+
+/* increment the disjoint event count */
+void kbase_disjoint_event(struct kbase_device *kbdev)
+{
+ KBASE_DEBUG_ASSERT(kbdev != NULL);
+
+ atomic_inc(&kbdev->disjoint_event.count);
+}
+
+/* increment the state and the event counter */
+void kbase_disjoint_state_up(struct kbase_device *kbdev)
+{
+ KBASE_DEBUG_ASSERT(kbdev != NULL);
+
+ atomic_inc(&kbdev->disjoint_event.state);
+
+ kbase_disjoint_event(kbdev);
+}
+
+/* decrement the state */
+void kbase_disjoint_state_down(struct kbase_device *kbdev)
+{
+ KBASE_DEBUG_ASSERT(kbdev != NULL);
+ KBASE_DEBUG_ASSERT(atomic_read(&kbdev->disjoint_event.state) > 0);
+
+ kbase_disjoint_event(kbdev);
+
+ atomic_dec(&kbdev->disjoint_event.state);
+}
+
+/* increments the count only if the state is > 0 */
+void kbase_disjoint_event_potential(struct kbase_device *kbdev)
+{
+ KBASE_DEBUG_ASSERT(kbdev != NULL);
+
+ if (atomic_read(&kbdev->disjoint_event.state))
+ kbase_disjoint_event(kbdev);
+}
+
+u32 kbase_disjoint_event_get(struct kbase_device *kbdev)
+{
+ KBASE_DEBUG_ASSERT(kbdev != NULL);
+
+ return atomic_read(&kbdev->disjoint_event.count);
+}
+KBASE_EXPORT_TEST_API(kbase_disjoint_event_get);
diff --git a/midgard-0.r2p0/mali_kbase_dma_fence.c b/midgard-0.r2p0/mali_kbase_dma_fence.c
new file mode 100755
index 0000000..97bb6c5
--- /dev/null
+++ b/midgard-0.r2p0/mali_kbase_dma_fence.c
@@ -0,0 +1,606 @@
+/*
+ *
+ * (C) COPYRIGHT 2011-2016 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained
+ * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
+ * Boston, MA 02110-1301, USA.
+ *
+ */
+
+
+
+
+/* Include mali_kbase_dma_fence.h before checking for CONFIG_MALI_DMA_FENCE as
+ * it will be set there.
+ */
+#include "mali_kbase_dma_fence.h"
+
+#include <linux/atomic.h>
+#include <linux/fence.h>
+#include <linux/list.h>
+#include <linux/lockdep.h>
+#include <linux/mutex.h>
+#include <linux/reservation.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/workqueue.h>
+#include <linux/ww_mutex.h>
+
+#include <mali_kbase.h>
+
+
+/* Spin lock used as the fence->lock for all Mali fences. */
+static DEFINE_SPINLOCK(kbase_dma_fence_lock);
+
+static void
+kbase_dma_fence_work(struct work_struct *pwork);
+
+static void
+kbase_dma_fence_waiters_add(struct kbase_jd_atom *katom)
+{
+ struct kbase_context *kctx = katom->kctx;
+
+ list_add_tail(&katom->queue, &kctx->dma_fence.waiting_resource);
+}
+
+void
+kbase_dma_fence_waiters_remove(struct kbase_jd_atom *katom)
+{
+ list_del(&katom->queue);
+}
+
+static const char *
+kbase_dma_fence_get_driver_name(struct fence *fence)
+{
+ return kbase_drv_name;
+}
+
+static const char *
+kbase_dma_fence_get_timeline_name(struct fence *fence)
+{
+ return kbase_timeline_name;
+}
+
+static bool
+kbase_dma_fence_enable_signaling(struct fence *fence)
+{
+	/* If in the future we need to add code here, remember to take a
+	 * reference to the fence and release it when signaling, as stated
+	 * in fence.h.
+	 */
+ return true;
+}
+
+static void
+kbase_dma_fence_fence_value_str(struct fence *fence, char *str, int size)
+{
+ snprintf(str, size, "%u", fence->seqno);
+}
+
+static const struct fence_ops kbase_dma_fence_ops = {
+ .get_driver_name = kbase_dma_fence_get_driver_name,
+ .get_timeline_name = kbase_dma_fence_get_timeline_name,
+ .enable_signaling = kbase_dma_fence_enable_signaling,
+ /* Use the default wait */
+ .wait = fence_default_wait,
+ .fence_value_str = kbase_dma_fence_fence_value_str,
+};
+
+static struct fence *
+kbase_dma_fence_new(unsigned int context, unsigned int seqno)
+{
+ struct fence *fence;
+
+ fence = kzalloc(sizeof(*fence), GFP_KERNEL);
+ if (!fence)
+ return NULL;
+
+ fence_init(fence,
+ &kbase_dma_fence_ops,
+ &kbase_dma_fence_lock,
+ context,
+ seqno);
+
+ return fence;
+}
+
+static int
+kbase_dma_fence_lock_reservations(struct kbase_dma_fence_resv_info *info,
+ struct ww_acquire_ctx *ctx)
+{
+ struct reservation_object *content_res = NULL;
+ unsigned int content_res_idx = 0;
+ unsigned int r;
+ int err = 0;
+
+ ww_acquire_init(ctx, &reservation_ww_class);
+
+retry:
+ for (r = 0; r < info->dma_fence_resv_count; r++) {
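+		/* On a retry after -EDEADLK this object has already been
+		 * locked with ww_mutex_lock_slow(), so skip it here.
+		 */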
+ if (info->resv_objs[r] == content_res) {
+ content_res = NULL;
+ continue;
+ }
+
+ err = ww_mutex_lock(&info->resv_objs[r]->lock, ctx);
+ if (err)
+ goto error;
+ }
+
+ ww_acquire_done(ctx);
+ return err;
+
+error:
+ content_res_idx = r;
+
+	/* Unlock the ones we already locked */
+ while (r--)
+ ww_mutex_unlock(&info->resv_objs[r]->lock);
+
+ if (content_res)
+ ww_mutex_unlock(&content_res->lock);
+
+	/* If we hit a deadlock, lock the contended object with lock_slow and retry */
+ if (err == -EDEADLK) {
+ content_res = info->resv_objs[content_res_idx];
+ ww_mutex_lock_slow(&content_res->lock, ctx);
+ goto retry;
+ }
+
+	/* If we get here, the function has failed */
+ ww_acquire_fini(ctx);
+ return err;
+}
+
+static void
+kbase_dma_fence_unlock_reservations(struct kbase_dma_fence_resv_info *info,
+ struct ww_acquire_ctx *ctx)
+{
+ unsigned int r;
+
+ for (r = 0; r < info->dma_fence_resv_count; r++)
+ ww_mutex_unlock(&info->resv_objs[r]->lock);
+ ww_acquire_fini(ctx);
+}
+
+/**
+ * kbase_dma_fence_queue_work() - Queue work to handle @katom
+ * @katom: Pointer to atom for which to queue work
+ *
+ * Queue kbase_dma_fence_work() for @katom to clean up the fence callbacks and
+ * submit the atom.
+ */
+static void
+kbase_dma_fence_queue_work(struct kbase_jd_atom *katom)
+{
+ struct kbase_context *kctx = katom->kctx;
+ bool ret;
+
+ INIT_WORK(&katom->work, kbase_dma_fence_work);
+ ret = queue_work(kctx->dma_fence.wq, &katom->work);
+	/* Warn if the work was already queued; that should not happen. */
+ WARN_ON(!ret);
+}
+
+/**
+ * kbase_dma_fence_free_callbacks - Free dma-fence callbacks on a katom
+ * @katom: Pointer to katom
+ * @queue_worker: Boolean indicating if fence worker is to be queued when
+ * dep_count reaches 0.
+ *
+ * This function will free all fence callbacks on the katom's list of
+ * callbacks. Callbacks that have not yet been called, because their fence
+ * hasn't yet signaled, will first be removed from the fence.
+ *
+ * Locking: jctx.lock must be held when manipulating katom->dma_fence.callbacks.
+ */
+static void
+kbase_dma_fence_free_callbacks(struct kbase_jd_atom *katom, bool queue_worker)
+{
+ struct kbase_dma_fence_cb *cb, *tmp;
+
+ lockdep_assert_held(&katom->kctx->jctx.lock);
+
+ /* Clean up and free callbacks. */
+ list_for_each_entry_safe(cb, tmp, &katom->dma_fence.callbacks, node) {
+ bool ret;
+
+		/* Cancel callbacks that haven't been called yet. */
+ ret = fence_remove_callback(cb->fence, &cb->fence_cb);
+ if (ret) {
+ int ret;
+
+ /* Fence had not signaled, clean up after
+ * canceling.
+ */
+ ret = atomic_dec_return(&katom->dma_fence.dep_count);
+
+ if (unlikely(queue_worker && ret == 0)) {
+ /*
+ * dep_count went to zero and queue_worker is
+ * true. Queue the worker to handle the
+ * completion of the katom.
+ */
+ kbase_dma_fence_queue_work(katom);
+ }
+ }
+
+ /*
+ * Release the reference taken in
+ * kbase_dma_fence_add_callback().
+ */
+ fence_put(cb->fence);
+ list_del(&cb->node);
+ kfree(cb);
+ }
+}
+
+/**
+ * kbase_dma_fence_cancel_atom() - Cancels waiting on an atom
+ * @katom: Katom to cancel
+ *
+ * Locking: jctx.lock must be held when manipulating katom->dma_fence.callbacks.
+ */
+static void
+kbase_dma_fence_cancel_atom(struct kbase_jd_atom *katom)
+{
+ lockdep_assert_held(&katom->kctx->jctx.lock);
+
+ /* Cancel callbacks and clean up. */
+ kbase_dma_fence_free_callbacks(katom, false);
+
+ KBASE_DEBUG_ASSERT(atomic_read(&katom->dma_fence.dep_count) == 0);
+
+ /* Mark the atom as handled in case all fences signaled just before
+ * canceling the callbacks and the worker was queued.
+ */
+ atomic_set(&katom->dma_fence.dep_count, -1);
+
+	/* Prevent jd_done_nolock() from being called twice on an atom when
+	 * there is a race between job completion and cancellation.
+	 */
+
+ if (katom->status == KBASE_JD_ATOM_STATE_QUEUED) {
+ /* Wait was cancelled - zap the atom */
+ katom->event_code = BASE_JD_EVENT_JOB_CANCELLED;
+ if (jd_done_nolock(katom, NULL))
+ kbase_js_sched_all(katom->kctx->kbdev);
+ }
+}
+
+/**
+ * kbase_dma_fence_work() - Worker thread called when a fence is signaled
+ * @pwork: work_struct containing a pointer to a katom
+ *
+ * This function cleans up the fence callbacks and marks the atom's dma-fence dependencies as satisfied.
+ */
+static void
+kbase_dma_fence_work(struct work_struct *pwork)
+{
+ struct kbase_jd_atom *katom;
+ struct kbase_jd_context *ctx;
+
+ katom = container_of(pwork, struct kbase_jd_atom, work);
+ ctx = &katom->kctx->jctx;
+
+ mutex_lock(&ctx->lock);
+ if (atomic_read(&katom->dma_fence.dep_count) != 0)
+ goto out;
+
+ atomic_set(&katom->dma_fence.dep_count, -1);
+
+ /* Remove atom from list of dma-fence waiting atoms. */
+	/* Clean up callbacks. */
+ /* Cleanup callbacks. */
+ kbase_dma_fence_free_callbacks(katom, false);
+ /*
+ * Queue atom on GPU, unless it has already completed due to a failing
+ * dependency. Run jd_done_nolock() on the katom if it is completed.
+ */
+ if (unlikely(katom->status == KBASE_JD_ATOM_STATE_COMPLETED))
+ jd_done_nolock(katom, NULL);
+ else
+ kbase_jd_dep_clear_locked(katom);
+
+out:
+ mutex_unlock(&ctx->lock);
+}
+
+/**
+ * kbase_dma_fence_add_callback() - Add callback on @fence to block @katom
+ * @katom: Pointer to katom that will be blocked by @fence
+ * @fence: Pointer to fence on which to set up the callback
+ * @callback: Pointer to function to be called when fence is signaled
+ *
+ * The caller needs to hold a reference to @fence when calling this function,
+ * and the caller is responsible for releasing that reference. An additional
+ * reference to @fence is taken if the callback is successfully set up, and
+ * @fence then needs to be kept valid until the callback has been called and
+ * cleanup has been done.
+ *
+ * Return: 0 on success: the fence was either already signaled, or the
+ * callback was set up. A negative error code is returned on error.
+ */
+static int
+kbase_dma_fence_add_callback(struct kbase_jd_atom *katom,
+ struct fence *fence,
+ fence_func_t callback)
+{
+ int err = 0;
+ struct kbase_dma_fence_cb *kbase_fence_cb;
+
+ kbase_fence_cb = kmalloc(sizeof(*kbase_fence_cb), GFP_KERNEL);
+ if (!kbase_fence_cb)
+ return -ENOMEM;
+
+ kbase_fence_cb->fence = fence;
+ kbase_fence_cb->katom = katom;
+ INIT_LIST_HEAD(&kbase_fence_cb->node);
+
+ err = fence_add_callback(fence, &kbase_fence_cb->fence_cb, callback);
+ if (err == -ENOENT) {
+ /* Fence signaled, clear the error and return */
+ err = 0;
+ kbase_fence_cb->fence = NULL;
+ kfree(kbase_fence_cb);
+ } else if (err) {
+ kfree(kbase_fence_cb);
+ } else {
+ /*
+ * Get reference to fence that will be kept until callback gets
+ * cleaned up in kbase_dma_fence_free_callbacks().
+ */
+ fence_get(fence);
+ atomic_inc(&katom->dma_fence.dep_count);
+ /* Add callback to katom's list of callbacks */
+ list_add(&kbase_fence_cb->node, &katom->dma_fence.callbacks);
+ }
+
+ return err;
+}
+
+static void
+kbase_dma_fence_cb(struct fence *fence, struct fence_cb *cb)
+{
+ struct kbase_dma_fence_cb *kcb = container_of(cb,
+ struct kbase_dma_fence_cb,
+ fence_cb);
+ struct kbase_jd_atom *katom = kcb->katom;
+
+	/* If the atom is zapped, dep_count will have been forced to a negative
+	 * number, preventing this callback from ever scheduling work (which
+	 * would in turn reschedule the atom).
+ */
+ if (atomic_dec_and_test(&katom->dma_fence.dep_count))
+ kbase_dma_fence_queue_work(katom);
+}
+
+static int
+kbase_dma_fence_add_reservation_callback(struct kbase_jd_atom *katom,
+ struct reservation_object *resv,
+ bool exclusive)
+{
+ struct fence *excl_fence = NULL;
+ struct fence **shared_fences = NULL;
+ unsigned int shared_count = 0;
+ int err, i;
+
+ err = reservation_object_get_fences_rcu(resv,
+ &excl_fence,
+ &shared_count,
+ &shared_fences);
+ if (err)
+ return err;
+
+ if (excl_fence) {
+ err = kbase_dma_fence_add_callback(katom,
+ excl_fence,
+ kbase_dma_fence_cb);
+
+		/* Release our reference to the fence, taken by
+		 * reservation_object_get_fences_rcu(). We have set up our
+		 * callback (if that was possible), and it is the fence
+		 * owner's responsibility to signal the fence before allowing
+		 * it to disappear.
+ */
+ fence_put(excl_fence);
+
+ if (err)
+ goto out;
+ }
+
+ if (exclusive) {
+ for (i = 0; i < shared_count; i++) {
+ err = kbase_dma_fence_add_callback(katom,
+ shared_fences[i],
+ kbase_dma_fence_cb);
+ if (err)
+ goto out;
+ }
+ }
+
+ /* Release all our references to the shared fences, taken by
+ * reservation_object_get_fences_rcu(). We have set up our callback (if
+	 * that was possible), and it is the fence owner's responsibility to
+	 * signal the fence before allowing it to disappear.
+ */
+out:
+ for (i = 0; i < shared_count; i++)
+ fence_put(shared_fences[i]);
+ kfree(shared_fences);
+
+ if (err) {
+ /*
+		 * On error, cancel and clean up all callbacks that were set up
+ * before the error.
+ */
+ kbase_dma_fence_free_callbacks(katom, false);
+ }
+
+ return err;
+}
+
+void kbase_dma_fence_add_reservation(struct reservation_object *resv,
+ struct kbase_dma_fence_resv_info *info,
+ bool exclusive)
+{
+ unsigned int i;
+
+ for (i = 0; i < info->dma_fence_resv_count; i++) {
+ /* Duplicate resource, ignore */
+ if (info->resv_objs[i] == resv)
+ return;
+ }
+
+ info->resv_objs[info->dma_fence_resv_count] = resv;
+ if (exclusive)
+ set_bit(info->dma_fence_resv_count,
+ info->dma_fence_excl_bitmap);
+ (info->dma_fence_resv_count)++;
+}
+
+int kbase_dma_fence_wait(struct kbase_jd_atom *katom,
+ struct kbase_dma_fence_resv_info *info)
+{
+ int err, i;
+ struct fence *fence;
+ struct ww_acquire_ctx ww_ctx;
+
+ lockdep_assert_held(&katom->kctx->jctx.lock);
+
+ fence = kbase_dma_fence_new(katom->dma_fence.context,
+ atomic_inc_return(&katom->dma_fence.seqno));
+ if (!fence) {
+ err = -ENOMEM;
+ dev_err(katom->kctx->kbdev->dev,
+ "Error %d creating fence.\n", err);
+ return err;
+ }
+
+ katom->dma_fence.fence = fence;
+ atomic_set(&katom->dma_fence.dep_count, 1);
+
+ err = kbase_dma_fence_lock_reservations(info, &ww_ctx);
+ if (err) {
+ dev_err(katom->kctx->kbdev->dev,
+ "Error %d locking reservations.\n", err);
+ atomic_set(&katom->dma_fence.dep_count, -1);
+ fence_put(fence);
+ return err;
+ }
+
+ for (i = 0; i < info->dma_fence_resv_count; i++) {
+ struct reservation_object *obj = info->resv_objs[i];
+
+ if (!test_bit(i, info->dma_fence_excl_bitmap)) {
+ err = reservation_object_reserve_shared(obj);
+ if (err) {
+ dev_err(katom->kctx->kbdev->dev,
+ "Error %d reserving space for shared fence.\n", err);
+ goto end;
+ }
+
+ err = kbase_dma_fence_add_reservation_callback(katom, obj, false);
+ if (err) {
+ dev_err(katom->kctx->kbdev->dev,
+ "Error %d adding reservation to callback.\n", err);
+ goto end;
+ }
+
+ reservation_object_add_shared_fence(obj, katom->dma_fence.fence);
+ } else {
+ err = kbase_dma_fence_add_reservation_callback(katom, obj, true);
+ if (err) {
+ dev_err(katom->kctx->kbdev->dev,
+ "Error %d adding reservation to callback.\n", err);
+ goto end;
+ }
+
+ reservation_object_add_excl_fence(obj, katom->dma_fence.fence);
+ }
+ }
+
+end:
+ kbase_dma_fence_unlock_reservations(info, &ww_ctx);
+
+ if (likely(!err)) {
+ /* Test if the callbacks are already triggered */
+ if (atomic_dec_and_test(&katom->dma_fence.dep_count)) {
+ atomic_set(&katom->dma_fence.dep_count, -1);
+ kbase_dma_fence_free_callbacks(katom, false);
+ } else {
+ /* Add katom to the list of dma-buf fence waiting atoms
+ * only if it is still waiting.
+ */
+ kbase_dma_fence_waiters_add(katom);
+ }
+ } else {
+ /* There was an error, cancel callbacks, set dep_count to -1 to
+ * indicate that the atom has been handled (the caller will
+ * kill it for us), signal the fence, free callbacks and the
+ * fence.
+ */
+ kbase_dma_fence_free_callbacks(katom, false);
+ atomic_set(&katom->dma_fence.dep_count, -1);
+ kbase_dma_fence_signal(katom);
+ }
+
+ return err;
+}
+
+void kbase_dma_fence_cancel_all_atoms(struct kbase_context *kctx)
+{
+ struct list_head *list = &kctx->dma_fence.waiting_resource;
+
+ while (!list_empty(list)) {
+ struct kbase_jd_atom *katom;
+
+ katom = list_first_entry(list, struct kbase_jd_atom, queue);
+ kbase_dma_fence_waiters_remove(katom);
+ kbase_dma_fence_cancel_atom(katom);
+ }
+}
+
+void kbase_dma_fence_cancel_callbacks(struct kbase_jd_atom *katom)
+{
+ /* Cancel callbacks and clean up. */
+ kbase_dma_fence_free_callbacks(katom, true);
+}
+
+void kbase_dma_fence_signal(struct kbase_jd_atom *katom)
+{
+ if (!katom->dma_fence.fence)
+ return;
+
+ KBASE_DEBUG_ASSERT(atomic_read(&katom->dma_fence.dep_count) == -1);
+
+ /* Signal the atom's fence. */
+ fence_signal(katom->dma_fence.fence);
+ fence_put(katom->dma_fence.fence);
+ katom->dma_fence.fence = NULL;
+
+ kbase_dma_fence_free_callbacks(katom, false);
+}
+
+void kbase_dma_fence_term(struct kbase_context *kctx)
+{
+ destroy_workqueue(kctx->dma_fence.wq);
+ kctx->dma_fence.wq = NULL;
+}
+
+int kbase_dma_fence_init(struct kbase_context *kctx)
+{
+ INIT_LIST_HEAD(&kctx->dma_fence.waiting_resource);
+
+ kctx->dma_fence.wq = alloc_workqueue("mali-fence-%d",
+ WQ_UNBOUND, 1, kctx->pid);
+ if (!kctx->dma_fence.wq)
+ return -ENOMEM;
+
+ return 0;
+}
diff --git a/midgard-0.r2p0/mali_kbase_dma_fence.h b/midgard-0.r2p0/mali_kbase_dma_fence.h
new file mode 100755
index 0000000..3b0a69b
--- /dev/null
+++ b/midgard-0.r2p0/mali_kbase_dma_fence.h
@@ -0,0 +1,150 @@
+/*
+ *
+ * (C) COPYRIGHT 2010-2016 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained
+ * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
+ * Boston, MA 02110-1301, USA.
+ *
+ */
+
+
+
+#ifndef _KBASE_DMA_FENCE_H_
+#define _KBASE_DMA_FENCE_H_
+
+#ifdef CONFIG_MALI_DMA_FENCE
+
+#include <linux/fence.h>
+#include <linux/list.h>
+#include <linux/reservation.h>
+
+
+/* Forward declaration from mali_kbase_defs.h */
+struct kbase_jd_atom;
+struct kbase_context;
+
+/**
+ * struct kbase_dma_fence_cb - Mali dma-fence callback data struct
+ * @fence_cb: Callback function
+ * @katom: Pointer to katom that is waiting on this callback
+ * @fence: Pointer to the fence object on which this callback is waiting
+ * @node: List head for linking this callback to the katom
+ */
+struct kbase_dma_fence_cb {
+ struct fence_cb fence_cb;
+ struct kbase_jd_atom *katom;
+ struct fence *fence;
+ struct list_head node;
+};
+
+/**
+ * struct kbase_dma_fence_resv_info - Structure with list of reservation objects
+ * @resv_objs: Array of reservation objects to attach the
+ * new fence to.
+ * @dma_fence_resv_count: Number of reservation objects in the array.
+ * @dma_fence_excl_bitmap: Specifies which resv_obj are exclusive.
+ *
+ * This is used by some functions to pass around a collection of data about
+ * reservation objects.
+ */
+struct kbase_dma_fence_resv_info {
+ struct reservation_object **resv_objs;
+ unsigned int dma_fence_resv_count;
+ unsigned long *dma_fence_excl_bitmap;
+};
+
+/**
+ * kbase_dma_fence_add_reservation() - Adds a resv to the array of resv_objs
+ * @resv: Reservation object to add to the array.
+ * @info: Pointer to struct with current reservation info
+ * @exclusive: Boolean indicating if exclusive access is needed
+ *
+ * The function adds a new reservation_object to an existing array of
+ * reservation_objects. At the same time keeps track of which objects require
+ * exclusive access in dma_fence_excl_bitmap.
+ */
+void kbase_dma_fence_add_reservation(struct reservation_object *resv,
+ struct kbase_dma_fence_resv_info *info,
+ bool exclusive);
+
+/**
+ * kbase_dma_fence_wait() - Creates a new fence and attaches it to the resv_objs
+ * @katom: Katom with the external dependency.
+ * @info: Pointer to struct with current reservation info
+ *
+ * Return: An error code, or 0 on success.
+ */
+int kbase_dma_fence_wait(struct kbase_jd_atom *katom,
+ struct kbase_dma_fence_resv_info *info);
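+
+/*
+ * Illustrative usage sketch (not part of this driver): in pseudo code, a
+ * caller that has gathered the reservation objects of an atom's external
+ * resources might drive the two helpers above roughly as follows. The
+ * variable names and the allocation of the arrays are assumptions made for
+ * this example only.
+ *
+ *   info.resv_objs = array with room for one reservation object per resource
+ *   info.dma_fence_resv_count = 0
+ *   info.dma_fence_excl_bitmap = zero-initialised bitmap of the same length
+ *
+ *   for each external resource "res" of the atom:
+ *       kbase_dma_fence_add_reservation(res->resv, &info, res->exclusive)
+ *
+ *   err = kbase_dma_fence_wait(katom, &info)
+ *   if (err)
+ *       # the atom has already been handled; the caller kills it
+ */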
+
+/**
+ * kbase_dma_fence_cancel_all_atoms() - Cancel all dma-fence-blocked atoms on kctx
+ * @kctx: Pointer to kbase context
+ *
+ * This function will cancel and clean up all katoms on @kctx that are waiting
+ * on dma-buf fences.
+ *
+ * Locking: jctx.lock needs to be held when calling this function.
+ */
+void kbase_dma_fence_cancel_all_atoms(struct kbase_context *kctx);
+
+/**
+ * kbase_dma_fence_cancel_callbacks() - Cancel only callbacks on katom
+ * @katom: Pointer to katom whose callbacks are to be canceled
+ *
+ * This function cancels all dma-buf fence callbacks on @katom, but does not
+ * cancel the katom itself.
+ *
+ * The caller is responsible for ensuring that jd_done_nolock is called on
+ * @katom.
+ *
+ * Locking: jctx.lock must be held when calling this function.
+ */
+void kbase_dma_fence_cancel_callbacks(struct kbase_jd_atom *katom);
+
+/**
+ * kbase_dma_fence_signal() - Signal katom's fence and clean up after wait
+ * @katom: Pointer to katom to signal and clean up
+ *
+ * This function will signal the @katom's fence, if it has one, and clean up
+ * the callback data from the katom's wait on earlier fences.
+ *
+ * Locking: jctx.lock must be held while calling this function.
+ */
+void kbase_dma_fence_signal(struct kbase_jd_atom *katom);
+
+/**
+ * kbase_dma_fence_term() - Terminate Mali dma-fence context
+ * @kctx: kbase context to terminate
+ */
+void kbase_dma_fence_term(struct kbase_context *kctx);
+
+/**
+ * kbase_dma_fence_init() - Initialize Mali dma-fence context
+ * @kctx: kbase context to initialize
+ *
+ * Return: 0 on success, or -ENOMEM if the dma-fence workqueue cannot be
+ * allocated.
+ */
+int kbase_dma_fence_init(struct kbase_context *kctx);
+
+/**
+ * kbase_dma_fence_waiters_remove() - Remove katom from dma-fence wait list
+ * @katom: Pointer to katom to remove from list
+ */
+void kbase_dma_fence_waiters_remove(struct kbase_jd_atom *katom);
+
+#else /* CONFIG_MALI_DMA_FENCE */
+/* Dummy functions for when dma-buf fence isn't enabled. */
+
+static inline int kbase_dma_fence_init(struct kbase_context *kctx)
+{
+ return 0;
+}
+
+static inline void kbase_dma_fence_term(struct kbase_context *kctx) {}
+#endif /* CONFIG_MALI_DMA_FENCE */
+#endif
diff --git a/midgard-0.r2p0/mali_kbase_event.c b/midgard-0.r2p0/mali_kbase_event.c
new file mode 100755
index 0000000..f07406c
--- /dev/null
+++ b/midgard-0.r2p0/mali_kbase_event.c
@@ -0,0 +1,259 @@
+/*
+ *
+ * (C) COPYRIGHT 2010-2016 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained
+ * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
+ * Boston, MA 02110-1301, USA.
+ *
+ */
+
+
+
+
+
+#include <mali_kbase.h>
+#include <mali_kbase_debug.h>
+#include <mali_kbase_tlstream.h>
+
+static struct base_jd_udata kbase_event_process(struct kbase_context *kctx, struct kbase_jd_atom *katom)
+{
+ struct base_jd_udata data;
+
+ lockdep_assert_held(&kctx->jctx.lock);
+
+ KBASE_DEBUG_ASSERT(kctx != NULL);
+ KBASE_DEBUG_ASSERT(katom != NULL);
+ KBASE_DEBUG_ASSERT(katom->status == KBASE_JD_ATOM_STATE_COMPLETED);
+
+ data = katom->udata;
+
+ KBASE_TIMELINE_ATOMS_IN_FLIGHT(kctx, atomic_sub_return(1, &kctx->timeline.jd_atoms_in_flight));
+
+ kbase_tlstream_tl_nret_atom_ctx(katom, kctx);
+ kbase_tlstream_tl_del_atom(katom);
+
+ katom->status = KBASE_JD_ATOM_STATE_UNUSED;
+
+ wake_up(&katom->completed);
+
+ return data;
+}
+
+int kbase_event_pending(struct kbase_context *ctx)
+{
+ KBASE_DEBUG_ASSERT(ctx);
+
+ return (atomic_read(&ctx->event_count) != 0) ||
+ (atomic_read(&ctx->event_closed) != 0);
+}
+
+KBASE_EXPORT_TEST_API(kbase_event_pending);
+
+int kbase_event_dequeue(struct kbase_context *ctx, struct base_jd_event_v2 *uevent)
+{
+ struct kbase_jd_atom *atom;
+
+ KBASE_DEBUG_ASSERT(ctx);
+
+ mutex_lock(&ctx->event_mutex);
+
+ if (list_empty(&ctx->event_list)) {
+ if (!atomic_read(&ctx->event_closed)) {
+ mutex_unlock(&ctx->event_mutex);
+ return -1;
+ }
+
+ /* generate the BASE_JD_EVENT_DRV_TERMINATED message on the fly */
+ mutex_unlock(&ctx->event_mutex);
+ uevent->event_code = BASE_JD_EVENT_DRV_TERMINATED;
+ memset(&uevent->udata, 0, sizeof(uevent->udata));
+ dev_dbg(ctx->kbdev->dev,
+ "event system closed, returning BASE_JD_EVENT_DRV_TERMINATED(0x%X)\n",
+ BASE_JD_EVENT_DRV_TERMINATED);
+ return 0;
+ }
+
+ /* normal event processing */
+ atomic_dec(&ctx->event_count);
+ atom = list_entry(ctx->event_list.next, struct kbase_jd_atom, dep_item[0]);
+ list_del(ctx->event_list.next);
+
+ mutex_unlock(&ctx->event_mutex);
+
+ dev_dbg(ctx->kbdev->dev, "event dequeuing %p\n", (void *)atom);
+ uevent->event_code = atom->event_code;
+ uevent->atom_number = (atom - ctx->jctx.atoms);
+
+ if (atom->core_req & BASE_JD_REQ_EXTERNAL_RESOURCES)
+ kbase_jd_free_external_resources(atom);
+
+ mutex_lock(&ctx->jctx.lock);
+ uevent->udata = kbase_event_process(ctx, atom);
+ mutex_unlock(&ctx->jctx.lock);
+
+ return 0;
+}
+
+KBASE_EXPORT_TEST_API(kbase_event_dequeue);
+
+/**
+ * kbase_event_process_noreport_worker - Worker for processing atoms that do not
+ * return an event but do have external
+ * resources
+ * @data: Work structure
+ */
+static void kbase_event_process_noreport_worker(struct work_struct *data)
+{
+ struct kbase_jd_atom *katom = container_of(data, struct kbase_jd_atom,
+ work);
+ struct kbase_context *kctx = katom->kctx;
+
+ if (katom->core_req & BASE_JD_REQ_EXTERNAL_RESOURCES)
+ kbase_jd_free_external_resources(katom);
+
+ mutex_lock(&kctx->jctx.lock);
+ kbase_event_process(kctx, katom);
+ mutex_unlock(&kctx->jctx.lock);
+}
+
+/**
+ * kbase_event_process_noreport - Process atoms that do not return an event
+ * @kctx: Context pointer
+ * @katom: Atom to be processed
+ *
+ * Atoms that do not have external resources will be processed immediately.
+ * Atoms that do have external resources will be processed on a workqueue, in
+ * order to avoid locking issues.
+ */
+static void kbase_event_process_noreport(struct kbase_context *kctx,
+ struct kbase_jd_atom *katom)
+{
+ if (katom->core_req & BASE_JD_REQ_EXTERNAL_RESOURCES) {
+ INIT_WORK(&katom->work, kbase_event_process_noreport_worker);
+ queue_work(kctx->event_workq, &katom->work);
+ } else {
+ kbase_event_process(kctx, katom);
+ }
+}
+
+/**
+ * kbase_event_coalesce - Move pending events to the main event list
+ * @kctx: Context pointer
+ *
+ * kctx->event_list and kctx->event_coalesce_count must be protected
+ * by a lock unless this is the last thread using them
+ * (because the context is about to be terminated).
+ *
+ * Return: The number of pending events moved to the main event list
+ */
+static int kbase_event_coalesce(struct kbase_context *kctx)
+{
+ const int event_count = kctx->event_coalesce_count;
+
+ /* Join the list of pending events onto the tail of the main list
+ and reset it */
+ list_splice_tail_init(&kctx->event_coalesce_list, &kctx->event_list);
+ kctx->event_coalesce_count = 0;
+
+ /* Return the number of events moved */
+ return event_count;
+}
+
+void kbase_event_post(struct kbase_context *ctx, struct kbase_jd_atom *atom)
+{
+ if (atom->core_req & BASE_JD_REQ_EVENT_ONLY_ON_FAILURE) {
+ if (atom->event_code == BASE_JD_EVENT_DONE) {
+ /* Don't report the event */
+ kbase_event_process_noreport(ctx, atom);
+ return;
+ }
+ }
+
+ if (atom->core_req & BASEP_JD_REQ_EVENT_NEVER) {
+ /* Don't report the event */
+ kbase_event_process_noreport(ctx, atom);
+ return;
+ }
+ kbase_tlstream_tl_attrib_atom_state(atom, TL_ATOM_STATE_POSTED);
+ if (atom->core_req & BASE_JD_REQ_EVENT_COALESCE) {
+ /* Don't report the event until other event(s) have completed */
+ mutex_lock(&ctx->event_mutex);
+ list_add_tail(&atom->dep_item[0], &ctx->event_coalesce_list);
+ ++ctx->event_coalesce_count;
+ mutex_unlock(&ctx->event_mutex);
+ } else {
+ /* Report the event and any pending events now */
+ int event_count = 1;
+
+ mutex_lock(&ctx->event_mutex);
+ event_count += kbase_event_coalesce(ctx);
+ list_add_tail(&atom->dep_item[0], &ctx->event_list);
+ atomic_add(event_count, &ctx->event_count);
+ mutex_unlock(&ctx->event_mutex);
+
+ kbase_event_wakeup(ctx);
+ }
+}
+KBASE_EXPORT_TEST_API(kbase_event_post);
+
+void kbase_event_close(struct kbase_context *kctx)
+{
+ mutex_lock(&kctx->event_mutex);
+ atomic_set(&kctx->event_closed, true);
+ mutex_unlock(&kctx->event_mutex);
+ kbase_event_wakeup(kctx);
+}
+
+int kbase_event_init(struct kbase_context *kctx)
+{
+ KBASE_DEBUG_ASSERT(kctx);
+
+ INIT_LIST_HEAD(&kctx->event_list);
+ INIT_LIST_HEAD(&kctx->event_coalesce_list);
+ mutex_init(&kctx->event_mutex);
+ atomic_set(&kctx->event_count, 0);
+ kctx->event_coalesce_count = 0;
+ atomic_set(&kctx->event_closed, false);
+ kctx->event_workq = alloc_workqueue("kbase_event", WQ_MEM_RECLAIM, 1);
+
+ if (NULL == kctx->event_workq)
+ return -EINVAL;
+
+ return 0;
+}
+
+KBASE_EXPORT_TEST_API(kbase_event_init);
+
+void kbase_event_cleanup(struct kbase_context *kctx)
+{
+ int event_count;
+
+ KBASE_DEBUG_ASSERT(kctx);
+ KBASE_DEBUG_ASSERT(kctx->event_workq);
+
+ flush_workqueue(kctx->event_workq);
+ destroy_workqueue(kctx->event_workq);
+
+ /* We use kbase_event_dequeue to remove the remaining events as that
+ * deals with all the cleanup needed for the atoms.
+ *
+	 * Note: use of kctx->event_list without a lock is safe because this
+	 * must be the last thread using it (the context is being terminated).
+ */
+ event_count = kbase_event_coalesce(kctx);
+ atomic_add(event_count, &kctx->event_count);
+
+ while (!list_empty(&kctx->event_list)) {
+ struct base_jd_event_v2 event;
+
+ kbase_event_dequeue(kctx, &event);
+ }
+}
+
+KBASE_EXPORT_TEST_API(kbase_event_cleanup);
diff --git a/midgard-0.r2p0/mali_kbase_gator.h b/midgard-0.r2p0/mali_kbase_gator.h
new file mode 100755
index 0000000..ce65b55
--- /dev/null
+++ b/midgard-0.r2p0/mali_kbase_gator.h
@@ -0,0 +1,45 @@
+/*
+ *
+ * (C) COPYRIGHT 2011-2015 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained
+ * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
+ * Boston, MA 02110-1301, USA.
+ *
+ */
+
+
+
+/* NB taken from gator */
+/*
+ * List of possible actions to be controlled by DS-5 Streamline.
+ * The following numbers are used by gator to control the frame buffer dumping
+ * and s/w counter reporting. We cannot use the enums in mali_uk_types.h because
+ * they are unknown inside gator.
+ */
+#ifndef _KBASE_GATOR_H_
+#define _KBASE_GATOR_H_
+
+#ifdef CONFIG_MALI_GATOR_SUPPORT
+#define GATOR_MAKE_EVENT(type, number) (((type) << 24) | ((number) << 16))
+#define GATOR_JOB_SLOT_START 1
+#define GATOR_JOB_SLOT_STOP 2
+#define GATOR_JOB_SLOT_SOFT_STOPPED 3
+
+void kbase_trace_mali_job_slots_event(u32 event, const struct kbase_context *kctx, u8 atom_id);
+void kbase_trace_mali_pm_status(u32 event, u64 value);
+void kbase_trace_mali_pm_power_off(u32 event, u64 value);
+void kbase_trace_mali_pm_power_on(u32 event, u64 value);
+void kbase_trace_mali_page_fault_insert_pages(int event, u32 value);
+void kbase_trace_mali_mmu_as_in_use(int event);
+void kbase_trace_mali_mmu_as_released(int event);
+void kbase_trace_mali_total_alloc_pages_change(long long int event);
+
+#endif /* CONFIG_MALI_GATOR_SUPPORT */
+
+#endif /* _KBASE_GATOR_H_ */
diff --git a/midgard-0.r2p0/mali_kbase_gator_api.c b/midgard-0.r2p0/mali_kbase_gator_api.c
new file mode 100755
index 0000000..3292fa9
--- /dev/null
+++ b/midgard-0.r2p0/mali_kbase_gator_api.c
@@ -0,0 +1,330 @@
+/*
+ *
+ * (C) COPYRIGHT 2014-2016 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained
+ * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
+ * Boston, MA 02110-1301, USA.
+ *
+ */
+
+
+
+#include "mali_kbase.h"
+#include "mali_kbase_hw.h"
+#include "mali_kbase_mem_linux.h"
+#include "mali_kbase_gator_api.h"
+#include "mali_kbase_gator_hwcnt_names.h"
+
+#define MALI_MAX_CORES_PER_GROUP 4
+#define MALI_MAX_NUM_BLOCKS_PER_GROUP 8
+#define MALI_COUNTERS_PER_BLOCK 64
+#define MALI_BYTES_PER_COUNTER 4
+
+struct kbase_gator_hwcnt_handles {
+ struct kbase_device *kbdev;
+ struct kbase_vinstr_client *vinstr_cli;
+ void *vinstr_buffer;
+ struct work_struct dump_work;
+ int dump_complete;
+ spinlock_t dump_lock;
+};
+
+static void dump_worker(struct work_struct *work);
+
+const char * const *kbase_gator_hwcnt_init_names(uint32_t *total_counters)
+{
+ const char * const *hardware_counters;
+ struct kbase_device *kbdev;
+ uint32_t product_id;
+ uint32_t count;
+
+ if (!total_counters)
+ return NULL;
+
+ /* Get the first device - it doesn't matter in this case */
+ kbdev = kbase_find_device(-1);
+ if (!kbdev)
+ return NULL;
+
+ product_id = kbdev->gpu_props.props.core_props.product_id;
+
+ if (GPU_ID_IS_NEW_FORMAT(product_id)) {
+ switch (GPU_ID2_MODEL_MATCH_VALUE(product_id)) {
+ case GPU_ID2_PRODUCT_TMIX:
+ hardware_counters = hardware_counters_mali_tMIx;
+ count = ARRAY_SIZE(hardware_counters_mali_tMIx);
+ break;
+ case GPU_ID2_PRODUCT_THEX:
+ hardware_counters = hardware_counters_mali_tHEx;
+ count = ARRAY_SIZE(hardware_counters_mali_tHEx);
+ break;
+ default:
+ hardware_counters = NULL;
+ count = 0;
+ dev_err(kbdev->dev, "Unrecognized product ID: %u\n",
+ product_id);
+ break;
+ }
+ } else {
+ switch (product_id) {
+ /* If we are using a Mali-T60x device */
+ case GPU_ID_PI_T60X:
+ hardware_counters = hardware_counters_mali_t60x;
+ count = ARRAY_SIZE(hardware_counters_mali_t60x);
+ break;
+ /* If we are using a Mali-T62x device */
+ case GPU_ID_PI_T62X:
+ hardware_counters = hardware_counters_mali_t62x;
+ count = ARRAY_SIZE(hardware_counters_mali_t62x);
+ break;
+ /* If we are using a Mali-T72x device */
+ case GPU_ID_PI_T72X:
+ hardware_counters = hardware_counters_mali_t72x;
+ count = ARRAY_SIZE(hardware_counters_mali_t72x);
+ break;
+ /* If we are using a Mali-T76x device */
+ case GPU_ID_PI_T76X:
+ hardware_counters = hardware_counters_mali_t76x;
+ count = ARRAY_SIZE(hardware_counters_mali_t76x);
+ break;
+ /* If we are using a Mali-T82x device */
+ case GPU_ID_PI_T82X:
+ hardware_counters = hardware_counters_mali_t82x;
+ count = ARRAY_SIZE(hardware_counters_mali_t82x);
+ break;
+ /* If we are using a Mali-T83x device */
+ case GPU_ID_PI_T83X:
+ hardware_counters = hardware_counters_mali_t83x;
+ count = ARRAY_SIZE(hardware_counters_mali_t83x);
+ break;
+ /* If we are using a Mali-T86x device */
+ case GPU_ID_PI_T86X:
+ hardware_counters = hardware_counters_mali_t86x;
+ count = ARRAY_SIZE(hardware_counters_mali_t86x);
+ break;
+ /* If we are using a Mali-T88x device */
+ case GPU_ID_PI_TFRX:
+ hardware_counters = hardware_counters_mali_t88x;
+ count = ARRAY_SIZE(hardware_counters_mali_t88x);
+ break;
+ default:
+ hardware_counters = NULL;
+ count = 0;
+ dev_err(kbdev->dev, "Unrecognized product ID: %u\n",
+ product_id);
+ break;
+ }
+ }
+
+ /* Release the kbdev reference. */
+ kbase_release_device(kbdev);
+
+ *total_counters = count;
+
+ /* If we return a string array take a reference on the module (or fail). */
+ if (hardware_counters && !try_module_get(THIS_MODULE))
+ return NULL;
+
+ return hardware_counters;
+}
+KBASE_EXPORT_SYMBOL(kbase_gator_hwcnt_init_names);
+
+void kbase_gator_hwcnt_term_names(void)
+{
+ /* Release the module reference. */
+ module_put(THIS_MODULE);
+}
+KBASE_EXPORT_SYMBOL(kbase_gator_hwcnt_term_names);
+
+struct kbase_gator_hwcnt_handles *kbase_gator_hwcnt_init(struct kbase_gator_hwcnt_info *in_out_info)
+{
+ struct kbase_gator_hwcnt_handles *hand;
+ struct kbase_uk_hwcnt_reader_setup setup;
+ uint32_t dump_size = 0, i = 0;
+
+ if (!in_out_info)
+ return NULL;
+
+ hand = kzalloc(sizeof(*hand), GFP_KERNEL);
+ if (!hand)
+ return NULL;
+
+ INIT_WORK(&hand->dump_work, dump_worker);
+ spin_lock_init(&hand->dump_lock);
+
+ /* Get the first device */
+ hand->kbdev = kbase_find_device(-1);
+ if (!hand->kbdev)
+ goto free_hand;
+
+ dump_size = kbase_vinstr_dump_size(hand->kbdev);
+ hand->vinstr_buffer = kzalloc(dump_size, GFP_KERNEL);
+ if (!hand->vinstr_buffer)
+ goto release_device;
+ in_out_info->kernel_dump_buffer = hand->vinstr_buffer;
+
+ in_out_info->nr_cores = hand->kbdev->gpu_props.num_cores;
+ in_out_info->nr_core_groups = hand->kbdev->gpu_props.num_core_groups;
+ in_out_info->gpu_id = hand->kbdev->gpu_props.props.core_props.product_id;
+
+ /* If we are using a v4 device (Mali-T6xx or Mali-T72x) */
+ if (kbase_hw_has_feature(hand->kbdev, BASE_HW_FEATURE_V4)) {
+ uint32_t cg, j;
+ uint64_t core_mask;
+
+		/* There are 8 hardware counter blocks per core group */
+ in_out_info->hwc_layout = kmalloc(sizeof(enum hwc_type) *
+ MALI_MAX_NUM_BLOCKS_PER_GROUP *
+ in_out_info->nr_core_groups, GFP_KERNEL);
+
+ if (!in_out_info->hwc_layout)
+ goto free_vinstr_buffer;
+
+ dump_size = in_out_info->nr_core_groups *
+ MALI_MAX_NUM_BLOCKS_PER_GROUP *
+ MALI_COUNTERS_PER_BLOCK *
+ MALI_BYTES_PER_COUNTER;
+
+ for (cg = 0; cg < in_out_info->nr_core_groups; cg++) {
+ core_mask = hand->kbdev->gpu_props.props.coherency_info.group[cg].core_mask;
+
+ for (j = 0; j < MALI_MAX_CORES_PER_GROUP; j++) {
+ if (core_mask & (1u << j))
+ in_out_info->hwc_layout[i++] = SHADER_BLOCK;
+ else
+ in_out_info->hwc_layout[i++] = RESERVED_BLOCK;
+ }
+
+ in_out_info->hwc_layout[i++] = TILER_BLOCK;
+ in_out_info->hwc_layout[i++] = MMU_L2_BLOCK;
+
+ in_out_info->hwc_layout[i++] = RESERVED_BLOCK;
+
+ if (0 == cg)
+ in_out_info->hwc_layout[i++] = JM_BLOCK;
+ else
+ in_out_info->hwc_layout[i++] = RESERVED_BLOCK;
+ }
+ /* If we are using any other device */
+ } else {
+ uint32_t nr_l2, nr_sc_bits, j;
+ uint64_t core_mask;
+
+ nr_l2 = hand->kbdev->gpu_props.props.l2_props.num_l2_slices;
+
+ core_mask = hand->kbdev->gpu_props.props.coherency_info.group[0].core_mask;
+
+ nr_sc_bits = fls64(core_mask);
+
+ /* The job manager and tiler sets of counters
+ * are always present */
+ in_out_info->hwc_layout = kmalloc(sizeof(enum hwc_type) * (2 + nr_sc_bits + nr_l2), GFP_KERNEL);
+
+ if (!in_out_info->hwc_layout)
+ goto free_vinstr_buffer;
+
+ dump_size = (2 + nr_sc_bits + nr_l2) * MALI_COUNTERS_PER_BLOCK * MALI_BYTES_PER_COUNTER;
+
+ in_out_info->hwc_layout[i++] = JM_BLOCK;
+ in_out_info->hwc_layout[i++] = TILER_BLOCK;
+
+ for (j = 0; j < nr_l2; j++)
+ in_out_info->hwc_layout[i++] = MMU_L2_BLOCK;
+
+ while (core_mask != 0ull) {
+ if ((core_mask & 1ull) != 0ull)
+ in_out_info->hwc_layout[i++] = SHADER_BLOCK;
+ else
+ in_out_info->hwc_layout[i++] = RESERVED_BLOCK;
+ core_mask >>= 1;
+ }
+ }
+
+ in_out_info->nr_hwc_blocks = i;
+ in_out_info->size = dump_size;
+
+ setup.jm_bm = in_out_info->bitmask[0];
+ setup.tiler_bm = in_out_info->bitmask[1];
+ setup.shader_bm = in_out_info->bitmask[2];
+ setup.mmu_l2_bm = in_out_info->bitmask[3];
+ hand->vinstr_cli = kbase_vinstr_hwcnt_kernel_setup(hand->kbdev->vinstr_ctx,
+ &setup, hand->vinstr_buffer);
+ if (!hand->vinstr_cli) {
+ dev_err(hand->kbdev->dev, "Failed to register gator with vinstr core");
+ goto free_layout;
+ }
+
+ return hand;
+
+free_layout:
+ kfree(in_out_info->hwc_layout);
+
+free_vinstr_buffer:
+ kfree(hand->vinstr_buffer);
+
+release_device:
+ kbase_release_device(hand->kbdev);
+
+free_hand:
+ kfree(hand);
+ return NULL;
+}
+KBASE_EXPORT_SYMBOL(kbase_gator_hwcnt_init);
+
+void kbase_gator_hwcnt_term(struct kbase_gator_hwcnt_info *in_out_info, struct kbase_gator_hwcnt_handles *opaque_handles)
+{
+ if (in_out_info)
+ kfree(in_out_info->hwc_layout);
+
+ if (opaque_handles) {
+ cancel_work_sync(&opaque_handles->dump_work);
+ kbase_vinstr_detach_client(opaque_handles->vinstr_cli);
+ kfree(opaque_handles->vinstr_buffer);
+ kbase_release_device(opaque_handles->kbdev);
+ kfree(opaque_handles);
+ }
+}
+KBASE_EXPORT_SYMBOL(kbase_gator_hwcnt_term);
+
+static void dump_worker(struct work_struct *work)
+{
+ struct kbase_gator_hwcnt_handles *hand;
+
+ hand = container_of(work, struct kbase_gator_hwcnt_handles, dump_work);
+ if (!kbase_vinstr_hwc_dump(hand->vinstr_cli,
+ BASE_HWCNT_READER_EVENT_MANUAL)) {
+ spin_lock_bh(&hand->dump_lock);
+ hand->dump_complete = 1;
+ spin_unlock_bh(&hand->dump_lock);
+ } else {
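+		/* The dump could not be started this time round, so re-queue
+		 * the worker and try again later.
+		 */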
+ schedule_work(&hand->dump_work);
+ }
+}
+
+uint32_t kbase_gator_instr_hwcnt_dump_complete(
+ struct kbase_gator_hwcnt_handles *opaque_handles,
+ uint32_t * const success)
+{
+
+ if (opaque_handles && success) {
+ *success = opaque_handles->dump_complete;
+ opaque_handles->dump_complete = 0;
+ return *success;
+ }
+ return 0;
+}
+KBASE_EXPORT_SYMBOL(kbase_gator_instr_hwcnt_dump_complete);
+
+uint32_t kbase_gator_instr_hwcnt_dump_irq(struct kbase_gator_hwcnt_handles *opaque_handles)
+{
+ if (opaque_handles)
+ schedule_work(&opaque_handles->dump_work);
+ return 0;
+}
+KBASE_EXPORT_SYMBOL(kbase_gator_instr_hwcnt_dump_irq);
diff --git a/midgard-0.r2p0/mali_kbase_gator_api.h b/midgard-0.r2p0/mali_kbase_gator_api.h
new file mode 100755
index 0000000..ef9ac0f
--- /dev/null
+++ b/midgard-0.r2p0/mali_kbase_gator_api.h
@@ -0,0 +1,219 @@
+/*
+ *
+ * (C) COPYRIGHT 2014-2015 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained
+ * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
+ * Boston, MA 02110-1301, USA.
+ *
+ */
+
+
+
+#ifndef _KBASE_GATOR_API_H_
+#define _KBASE_GATOR_API_H_
+
+/**
+ * @brief This file describes the API used by Gator to fetch hardware counters.
+ */
+
+/* This define is used when compiling the gator kernel module to select which
+ * DDK API calling convention to use. If not defined (legacy DDK) gator assumes
+ * version 1. The version to DDK release mapping is:
+ * Version 1 API: DDK versions r1px, r2px
+ * Version 2 API: DDK versions r3px, r4px
+ * Version 3 API: DDK version r5p0 and newer
+ *
+ * API Usage
+ * =========
+ *
+ * 1] Call kbase_gator_hwcnt_init_names() to return the list of short counter
+ * names for the GPU present in this device.
+ *
+ * 2] Create a kbase_gator_hwcnt_info structure and set the counter enables for
+ * the counters you want enabled. The enables can all be set for simplicity in
+ * most use cases, but disabling some will let you minimize bandwidth impact.
+ *
+ * 3] Call kbase_gator_hwcnt_init() using the above structure, to create a
+ * counter context. On successful return the DDK will have populated the
+ * structure with a variety of useful information.
+ *
+ * 4] Call kbase_gator_instr_hwcnt_dump_irq() to queue a non-blocking request
+ * for a counter dump. If this returns a non-zero value the request has been
+ * queued, otherwise the driver has been unable to do so (typically because
+ * another user of the instrumentation exists concurrently).
+ *
+ * 5] Call kbase_gator_instr_hwcnt_dump_complete() to test whether the
+ * previously requested dump has completed. If this returns non-zero the
+ * counter dump has resolved, but the value of *success must also be tested as
+ * the dump may not have been successful. If it returns zero the counter dump
+ * was abandoned due to the device being busy (typically because another user
+ * of the instrumentation exists concurrently). A pseudo-code sketch of this
+ * polling sequence is shown after this comment block.
+ *
+ * 6] Process the counters stored in the buffer pointed to by ...
+ *
+ * kbase_gator_hwcnt_info->kernel_dump_buffer
+ *
+ * In pseudo code you can find all of the counters via this approach:
+ *
+ *
+ * hwcnt_info # pointer to kbase_gator_hwcnt_info structure
+ * hwcnt_name # pointer to name list
+ *
+ * u32 * hwcnt_data = (u32*)hwcnt_info->kernel_dump_buffer
+ *
+ * # Iterate over each 64-counter block in this GPU configuration
+ * for( i = 0; i < hwcnt_info->nr_hwc_blocks; i++) {
+ * hwc_type type = hwcnt_info->hwc_layout[i];
+ *
+ * # Skip reserved type blocks - they contain no counters at all
+ * if( type == RESERVED_BLOCK ) {
+ * continue;
+ * }
+ *
+ * size_t name_offset = type * 64;
+ * size_t data_offset = i * 64;
+ *
+ * # Iterate over the names of the counters in this block type
+ * for( j = 0; j < 64; j++) {
+ * const char * name = hwcnt_name[name_offset+j];
+ *
+ * # Skip empty name strings - there is no counter here
+ * if( name[0] == '\0' ) {
+ * continue;
+ * }
+ *
+ * u32 data = hwcnt_data[data_offset+j];
+ *
+ * printk( "COUNTER: %s DATA: %u\n", name, data );
+ * }
+ * }
+ *
+ *
+ * Note that in most implementations you typically want to either SUM or
+ * AVERAGE multiple instances of the same counter if, for example, you have
+ * multiple shader cores or multiple L2 caches. The most sensible view for
+ * analysis is to AVERAGE shader core counters, but SUM L2 cache and MMU
+ * counters.
+ *
+ * 7] Goto 4, repeating until you want to stop collecting counters.
+ *
+ * 8] Release the dump resources by calling kbase_gator_hwcnt_term().
+ *
+ * 9] Release the name table resources by calling
+ * kbase_gator_hwcnt_term_names(). This function must only be called if
+ * init_names() returned a non-NULL value.
+ **/
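+
+/*
+ * As a further illustration of steps 4 and 5 above (pseudo code only; the
+ * polling strategy shown is an arbitrary choice for this example):
+ *
+ *   kbase_gator_instr_hwcnt_dump_irq(handles);
+ *
+ *   do {
+ *       # sleep briefly
+ *   } while (!kbase_gator_instr_hwcnt_dump_complete(handles, &success));
+ *
+ *   if (success)
+ *       # process hwcnt_info->kernel_dump_buffer as shown in step 6
+ */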
+
+#define MALI_DDK_GATOR_API_VERSION 3
+
+enum hwc_type {
+ JM_BLOCK = 0,
+ TILER_BLOCK,
+ SHADER_BLOCK,
+ MMU_L2_BLOCK,
+ RESERVED_BLOCK
+};
+
+struct kbase_gator_hwcnt_info {
+ /* Passed from Gator to kbase */
+
+ /* the bitmask of enabled hardware counters for each counter block */
+ uint16_t bitmask[4];
+
+ /* Passed from kbase to Gator */
+
+ /* ptr to counter dump memory */
+ void *kernel_dump_buffer;
+
+ /* size of counter dump memory */
+ uint32_t size;
+
+ /* the ID of the Mali device */
+ uint32_t gpu_id;
+
+ /* the number of shader cores in the GPU */
+ uint32_t nr_cores;
+
+ /* the number of core groups */
+ uint32_t nr_core_groups;
+
+ /* the memory layout of the performance counters */
+ enum hwc_type *hwc_layout;
+
+	/* the total number of hardware counter blocks */
+ uint32_t nr_hwc_blocks;
+};
+
+/**
+ * @brief Opaque block of Mali data which Gator needs to return to the API later.
+ */
+struct kbase_gator_hwcnt_handles;
+
+/**
+ * @brief Initialize the resources Gator needs for performance profiling.
+ *
+ * @param in_out_info A pointer to a structure containing the enabled counters passed from Gator and all the Mali
+ * specific information that will be returned to Gator. On entry Gator must have populated the
+ * 'bitmask' field with the counters it wishes to enable for each class of counter block.
+ * Each entry in the array corresponds to a single counter class based on the "hwc_type"
+ * enumeration, and each bit corresponds to an enable for 4 sequential counters (LSB enables
+ *                    the first 4 counters in the block, and so on). See the GPU counter array as returned by
+ *                    kbase_gator_hwcnt_init_names() for the index values of each counter for the current GPU.
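+ *                    As an illustrative example (the value chosen is arbitrary), setting bitmask[2]
+ *                    (the SHADER_BLOCK class) to 0x3 would enable the first eight counters of every
+ *                    shader core block.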
+ *
+ * @return Pointer to an opaque handle block on success, NULL on error.
+ */
+extern struct kbase_gator_hwcnt_handles *kbase_gator_hwcnt_init(struct kbase_gator_hwcnt_info *in_out_info);
+
+/**
+ * @brief Free all resources once Gator has finished using performance counters.
+ *
+ * @param in_out_info A pointer to a structure containing the enabled counters passed from Gator and all the
+ * Mali specific information that will be returned to Gator.
+ * @param opaque_handles A wrapper structure for kbase structures.
+ */
+extern void kbase_gator_hwcnt_term(struct kbase_gator_hwcnt_info *in_out_info, struct kbase_gator_hwcnt_handles *opaque_handles);
+
+/**
+ * @brief Poll whether a counter dump is successful.
+ *
+ * @param opaque_handles A wrapper structure for kbase structures.
+ * @param[out] success Non-zero on success, zero on failure.
+ *
+ * @return Zero if the dump is still pending, non-zero if the dump has completed. Note that a
+ *         completed dump may not have dumped successfully, so the caller must test for both
+ * a completed and successful dump before processing counters.
+ */
+extern uint32_t kbase_gator_instr_hwcnt_dump_complete(struct kbase_gator_hwcnt_handles *opaque_handles, uint32_t * const success);
+
+/**
+ * @brief Request the generation of a new counter dump.
+ *
+ * @param opaque_handles A wrapper structure for kbase structures.
+ *
+ * @return Zero if the hardware device is busy and cannot handle the request, non-zero otherwise.
+ */
+extern uint32_t kbase_gator_instr_hwcnt_dump_irq(struct kbase_gator_hwcnt_handles *opaque_handles);
+
+/**
+ * @brief This function is used to fetch the names table based on the Mali device in use.
+ *
+ * @param[out] total_counters The total number of counters short names in the Mali devices' list.
+ *
+ * @return Pointer to an array of strings of length *total_counters.
+ */
+extern const char * const *kbase_gator_hwcnt_init_names(uint32_t *total_counters);
+
+/**
+ * @brief This function is used to terminate the use of the names table.
+ *
+ * This function must only be called if the initial call to kbase_gator_hwcnt_init_names returned a non-NULL value.
+ */
+extern void kbase_gator_hwcnt_term_names(void);
+
+#endif
diff --git a/midgard-0.r2p0/mali_kbase_gator_hwcnt_names.h b/midgard-0.r2p0/mali_kbase_gator_hwcnt_names.h
new file mode 100755
index 0000000..7ec05c1
--- /dev/null
+++ b/midgard-0.r2p0/mali_kbase_gator_hwcnt_names.h
@@ -0,0 +1,2164 @@
+/*
+ *
+ * (C) COPYRIGHT 2014-2016 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained
+ * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
+ * Boston, MA 02110-1301, USA.
+ *
+ */
+
+
+
+#ifndef _KBASE_GATOR_HWCNT_NAMES_H_
+#define _KBASE_GATOR_HWCNT_NAMES_H_
+
+/*
+ * "Short names" for hardware counters used by Streamline. Counter names are
+ * stored in accordance with their memory layout in the binary counter block
+ * emitted by the Mali GPU. Each "master" in the GPU emits a fixed-size block
+ * of 64 counters, and each GPU implements the same set of "masters" although
+ * the counters each master exposes within its block of 64 may vary.
+ *
+ * Counters which are an empty string are simply "holes" in the counter memory
+ * where no counter exists.
+ */
+
+static const char * const hardware_counters_mali_t60x[] = {
+ /* Job Manager */
+ "",
+ "",
+ "",
+ "",
+ "T60x_MESSAGES_SENT",
+ "T60x_MESSAGES_RECEIVED",
+ "T60x_GPU_ACTIVE",
+ "T60x_IRQ_ACTIVE",
+ "T60x_JS0_JOBS",
+ "T60x_JS0_TASKS",
+ "T60x_JS0_ACTIVE",
+ "",
+ "T60x_JS0_WAIT_READ",
+ "T60x_JS0_WAIT_ISSUE",
+ "T60x_JS0_WAIT_DEPEND",
+ "T60x_JS0_WAIT_FINISH",
+ "T60x_JS1_JOBS",
+ "T60x_JS1_TASKS",
+ "T60x_JS1_ACTIVE",
+ "",
+ "T60x_JS1_WAIT_READ",
+ "T60x_JS1_WAIT_ISSUE",
+ "T60x_JS1_WAIT_DEPEND",
+ "T60x_JS1_WAIT_FINISH",
+ "T60x_JS2_JOBS",
+ "T60x_JS2_TASKS",
+ "T60x_JS2_ACTIVE",
+ "",
+ "T60x_JS2_WAIT_READ",
+ "T60x_JS2_WAIT_ISSUE",
+ "T60x_JS2_WAIT_DEPEND",
+ "T60x_JS2_WAIT_FINISH",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+
+ /*Tiler */
+ "",
+ "",
+ "",
+ "T60x_TI_JOBS_PROCESSED",
+ "T60x_TI_TRIANGLES",
+ "T60x_TI_QUADS",
+ "T60x_TI_POLYGONS",
+ "T60x_TI_POINTS",
+ "T60x_TI_LINES",
+ "T60x_TI_VCACHE_HIT",
+ "T60x_TI_VCACHE_MISS",
+ "T60x_TI_FRONT_FACING",
+ "T60x_TI_BACK_FACING",
+ "T60x_TI_PRIM_VISIBLE",
+ "T60x_TI_PRIM_CULLED",
+ "T60x_TI_PRIM_CLIPPED",
+ "T60x_TI_LEVEL0",
+ "T60x_TI_LEVEL1",
+ "T60x_TI_LEVEL2",
+ "T60x_TI_LEVEL3",
+ "T60x_TI_LEVEL4",
+ "T60x_TI_LEVEL5",
+ "T60x_TI_LEVEL6",
+ "T60x_TI_LEVEL7",
+ "T60x_TI_COMMAND_1",
+ "T60x_TI_COMMAND_2",
+ "T60x_TI_COMMAND_3",
+ "T60x_TI_COMMAND_4",
+ "T60x_TI_COMMAND_4_7",
+ "T60x_TI_COMMAND_8_15",
+ "T60x_TI_COMMAND_16_63",
+ "T60x_TI_COMMAND_64",
+ "T60x_TI_COMPRESS_IN",
+ "T60x_TI_COMPRESS_OUT",
+ "T60x_TI_COMPRESS_FLUSH",
+ "T60x_TI_TIMESTAMPS",
+ "T60x_TI_PCACHE_HIT",
+ "T60x_TI_PCACHE_MISS",
+ "T60x_TI_PCACHE_LINE",
+ "T60x_TI_PCACHE_STALL",
+ "T60x_TI_WRBUF_HIT",
+ "T60x_TI_WRBUF_MISS",
+ "T60x_TI_WRBUF_LINE",
+ "T60x_TI_WRBUF_PARTIAL",
+ "T60x_TI_WRBUF_STALL",
+ "T60x_TI_ACTIVE",
+ "T60x_TI_LOADING_DESC",
+ "T60x_TI_INDEX_WAIT",
+ "T60x_TI_INDEX_RANGE_WAIT",
+ "T60x_TI_VERTEX_WAIT",
+ "T60x_TI_PCACHE_WAIT",
+ "T60x_TI_WRBUF_WAIT",
+ "T60x_TI_BUS_READ",
+ "T60x_TI_BUS_WRITE",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "T60x_TI_UTLB_STALL",
+ "T60x_TI_UTLB_REPLAY_MISS",
+ "T60x_TI_UTLB_REPLAY_FULL",
+ "T60x_TI_UTLB_NEW_MISS",
+ "T60x_TI_UTLB_HIT",
+
+ /* Shader Core */
+ "",
+ "",
+ "",
+ "",
+ "T60x_FRAG_ACTIVE",
+ "T60x_FRAG_PRIMITIVES",
+ "T60x_FRAG_PRIMITIVES_DROPPED",
+ "T60x_FRAG_CYCLES_DESC",
+ "T60x_FRAG_CYCLES_PLR",
+ "T60x_FRAG_CYCLES_VERT",
+ "T60x_FRAG_CYCLES_TRISETUP",
+ "T60x_FRAG_CYCLES_RAST",
+ "T60x_FRAG_THREADS",
+ "T60x_FRAG_DUMMY_THREADS",
+ "T60x_FRAG_QUADS_RAST",
+ "T60x_FRAG_QUADS_EZS_TEST",
+ "T60x_FRAG_QUADS_EZS_KILLED",
+ "T60x_FRAG_THREADS_LZS_TEST",
+ "T60x_FRAG_THREADS_LZS_KILLED",
+ "T60x_FRAG_CYCLES_NO_TILE",
+ "T60x_FRAG_NUM_TILES",
+ "T60x_FRAG_TRANS_ELIM",
+ "T60x_COMPUTE_ACTIVE",
+ "T60x_COMPUTE_TASKS",
+ "T60x_COMPUTE_THREADS",
+ "T60x_COMPUTE_CYCLES_DESC",
+ "T60x_TRIPIPE_ACTIVE",
+ "T60x_ARITH_WORDS",
+ "T60x_ARITH_CYCLES_REG",
+ "T60x_ARITH_CYCLES_L0",
+ "T60x_ARITH_FRAG_DEPEND",
+ "T60x_LS_WORDS",
+ "T60x_LS_ISSUES",
+ "T60x_LS_RESTARTS",
+ "T60x_LS_REISSUES_MISS",
+ "T60x_LS_REISSUES_VD",
+ "T60x_LS_REISSUE_ATTRIB_MISS",
+ "T60x_LS_NO_WB",
+ "T60x_TEX_WORDS",
+ "T60x_TEX_BUBBLES",
+ "T60x_TEX_WORDS_L0",
+ "T60x_TEX_WORDS_DESC",
+ "T60x_TEX_ISSUES",
+ "T60x_TEX_RECIRC_FMISS",
+ "T60x_TEX_RECIRC_DESC",
+ "T60x_TEX_RECIRC_MULTI",
+ "T60x_TEX_RECIRC_PMISS",
+ "T60x_TEX_RECIRC_CONF",
+ "T60x_LSC_READ_HITS",
+ "T60x_LSC_READ_MISSES",
+ "T60x_LSC_WRITE_HITS",
+ "T60x_LSC_WRITE_MISSES",
+ "T60x_LSC_ATOMIC_HITS",
+ "T60x_LSC_ATOMIC_MISSES",
+ "T60x_LSC_LINE_FETCHES",
+ "T60x_LSC_DIRTY_LINE",
+ "T60x_LSC_SNOOPS",
+ "T60x_AXI_TLB_STALL",
+ "T60x_AXI_TLB_MISS",
+ "T60x_AXI_TLB_TRANSACTION",
+ "T60x_LS_TLB_MISS",
+ "T60x_LS_TLB_HIT",
+ "T60x_AXI_BEATS_READ",
+ "T60x_AXI_BEATS_WRITTEN",
+
+ /*L2 and MMU */
+ "",
+ "",
+ "",
+ "",
+ "T60x_MMU_HIT",
+ "T60x_MMU_NEW_MISS",
+ "T60x_MMU_REPLAY_FULL",
+ "T60x_MMU_REPLAY_MISS",
+ "T60x_MMU_TABLE_WALK",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "T60x_UTLB_HIT",
+ "T60x_UTLB_NEW_MISS",
+ "T60x_UTLB_REPLAY_FULL",
+ "T60x_UTLB_REPLAY_MISS",
+ "T60x_UTLB_STALL",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "T60x_L2_EXT_WRITE_BEATS",
+ "T60x_L2_EXT_READ_BEATS",
+ "T60x_L2_ANY_LOOKUP",
+ "T60x_L2_READ_LOOKUP",
+ "T60x_L2_SREAD_LOOKUP",
+ "T60x_L2_READ_REPLAY",
+ "T60x_L2_READ_SNOOP",
+ "T60x_L2_READ_HIT",
+ "T60x_L2_CLEAN_MISS",
+ "T60x_L2_WRITE_LOOKUP",
+ "T60x_L2_SWRITE_LOOKUP",
+ "T60x_L2_WRITE_REPLAY",
+ "T60x_L2_WRITE_SNOOP",
+ "T60x_L2_WRITE_HIT",
+ "T60x_L2_EXT_READ_FULL",
+ "T60x_L2_EXT_READ_HALF",
+ "T60x_L2_EXT_WRITE_FULL",
+ "T60x_L2_EXT_WRITE_HALF",
+ "T60x_L2_EXT_READ",
+ "T60x_L2_EXT_READ_LINE",
+ "T60x_L2_EXT_WRITE",
+ "T60x_L2_EXT_WRITE_LINE",
+ "T60x_L2_EXT_WRITE_SMALL",
+ "T60x_L2_EXT_BARRIER",
+ "T60x_L2_EXT_AR_STALL",
+ "T60x_L2_EXT_R_BUF_FULL",
+ "T60x_L2_EXT_RD_BUF_FULL",
+ "T60x_L2_EXT_R_RAW",
+ "T60x_L2_EXT_W_STALL",
+ "T60x_L2_EXT_W_BUF_FULL",
+ "T60x_L2_EXT_R_W_HAZARD",
+ "T60x_L2_TAG_HAZARD",
+ "T60x_L2_SNOOP_FULL",
+ "T60x_L2_REPLAY_FULL"
+};
+static const char * const hardware_counters_mali_t62x[] = {
+ /* Job Manager */
+ "",
+ "",
+ "",
+ "",
+ "T62x_MESSAGES_SENT",
+ "T62x_MESSAGES_RECEIVED",
+ "T62x_GPU_ACTIVE",
+ "T62x_IRQ_ACTIVE",
+ "T62x_JS0_JOBS",
+ "T62x_JS0_TASKS",
+ "T62x_JS0_ACTIVE",
+ "",
+ "T62x_JS0_WAIT_READ",
+ "T62x_JS0_WAIT_ISSUE",
+ "T62x_JS0_WAIT_DEPEND",
+ "T62x_JS0_WAIT_FINISH",
+ "T62x_JS1_JOBS",
+ "T62x_JS1_TASKS",
+ "T62x_JS1_ACTIVE",
+ "",
+ "T62x_JS1_WAIT_READ",
+ "T62x_JS1_WAIT_ISSUE",
+ "T62x_JS1_WAIT_DEPEND",
+ "T62x_JS1_WAIT_FINISH",
+ "T62x_JS2_JOBS",
+ "T62x_JS2_TASKS",
+ "T62x_JS2_ACTIVE",
+ "",
+ "T62x_JS2_WAIT_READ",
+ "T62x_JS2_WAIT_ISSUE",
+ "T62x_JS2_WAIT_DEPEND",
+ "T62x_JS2_WAIT_FINISH",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+
+ /*Tiler */
+ "",
+ "",
+ "",
+ "T62x_TI_JOBS_PROCESSED",
+ "T62x_TI_TRIANGLES",
+ "T62x_TI_QUADS",
+ "T62x_TI_POLYGONS",
+ "T62x_TI_POINTS",
+ "T62x_TI_LINES",
+ "T62x_TI_VCACHE_HIT",
+ "T62x_TI_VCACHE_MISS",
+ "T62x_TI_FRONT_FACING",
+ "T62x_TI_BACK_FACING",
+ "T62x_TI_PRIM_VISIBLE",
+ "T62x_TI_PRIM_CULLED",
+ "T62x_TI_PRIM_CLIPPED",
+ "T62x_TI_LEVEL0",
+ "T62x_TI_LEVEL1",
+ "T62x_TI_LEVEL2",
+ "T62x_TI_LEVEL3",
+ "T62x_TI_LEVEL4",
+ "T62x_TI_LEVEL5",
+ "T62x_TI_LEVEL6",
+ "T62x_TI_LEVEL7",
+ "T62x_TI_COMMAND_1",
+ "T62x_TI_COMMAND_2",
+ "T62x_TI_COMMAND_3",
+ "T62x_TI_COMMAND_4",
+ "T62x_TI_COMMAND_5_7",
+ "T62x_TI_COMMAND_8_15",
+ "T62x_TI_COMMAND_16_63",
+ "T62x_TI_COMMAND_64",
+ "T62x_TI_COMPRESS_IN",
+ "T62x_TI_COMPRESS_OUT",
+ "T62x_TI_COMPRESS_FLUSH",
+ "T62x_TI_TIMESTAMPS",
+ "T62x_TI_PCACHE_HIT",
+ "T62x_TI_PCACHE_MISS",
+ "T62x_TI_PCACHE_LINE",
+ "T62x_TI_PCACHE_STALL",
+ "T62x_TI_WRBUF_HIT",
+ "T62x_TI_WRBUF_MISS",
+ "T62x_TI_WRBUF_LINE",
+ "T62x_TI_WRBUF_PARTIAL",
+ "T62x_TI_WRBUF_STALL",
+ "T62x_TI_ACTIVE",
+ "T62x_TI_LOADING_DESC",
+ "T62x_TI_INDEX_WAIT",
+ "T62x_TI_INDEX_RANGE_WAIT",
+ "T62x_TI_VERTEX_WAIT",
+ "T62x_TI_PCACHE_WAIT",
+ "T62x_TI_WRBUF_WAIT",
+ "T62x_TI_BUS_READ",
+ "T62x_TI_BUS_WRITE",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "T62x_TI_UTLB_STALL",
+ "T62x_TI_UTLB_REPLAY_MISS",
+ "T62x_TI_UTLB_REPLAY_FULL",
+ "T62x_TI_UTLB_NEW_MISS",
+ "T62x_TI_UTLB_HIT",
+
+ /* Shader Core */
+ "",
+ "",
+ "",
+ "T62x_SHADER_CORE_ACTIVE",
+ "T62x_FRAG_ACTIVE",
+ "T62x_FRAG_PRIMITIVES",
+ "T62x_FRAG_PRIMITIVES_DROPPED",
+ "T62x_FRAG_CYCLES_DESC",
+ "T62x_FRAG_CYCLES_FPKQ_ACTIVE",
+ "T62x_FRAG_CYCLES_VERT",
+ "T62x_FRAG_CYCLES_TRISETUP",
+ "T62x_FRAG_CYCLES_EZS_ACTIVE",
+ "T62x_FRAG_THREADS",
+ "T62x_FRAG_DUMMY_THREADS",
+ "T62x_FRAG_QUADS_RAST",
+ "T62x_FRAG_QUADS_EZS_TEST",
+ "T62x_FRAG_QUADS_EZS_KILLED",
+ "T62x_FRAG_THREADS_LZS_TEST",
+ "T62x_FRAG_THREADS_LZS_KILLED",
+ "T62x_FRAG_CYCLES_NO_TILE",
+ "T62x_FRAG_NUM_TILES",
+ "T62x_FRAG_TRANS_ELIM",
+ "T62x_COMPUTE_ACTIVE",
+ "T62x_COMPUTE_TASKS",
+ "T62x_COMPUTE_THREADS",
+ "T62x_COMPUTE_CYCLES_DESC",
+ "T62x_TRIPIPE_ACTIVE",
+ "T62x_ARITH_WORDS",
+ "T62x_ARITH_CYCLES_REG",
+ "T62x_ARITH_CYCLES_L0",
+ "T62x_ARITH_FRAG_DEPEND",
+ "T62x_LS_WORDS",
+ "T62x_LS_ISSUES",
+ "T62x_LS_RESTARTS",
+ "T62x_LS_REISSUES_MISS",
+ "T62x_LS_REISSUES_VD",
+ "T62x_LS_REISSUE_ATTRIB_MISS",
+ "T62x_LS_NO_WB",
+ "T62x_TEX_WORDS",
+ "T62x_TEX_BUBBLES",
+ "T62x_TEX_WORDS_L0",
+ "T62x_TEX_WORDS_DESC",
+ "T62x_TEX_ISSUES",
+ "T62x_TEX_RECIRC_FMISS",
+ "T62x_TEX_RECIRC_DESC",
+ "T62x_TEX_RECIRC_MULTI",
+ "T62x_TEX_RECIRC_PMISS",
+ "T62x_TEX_RECIRC_CONF",
+ "T62x_LSC_READ_HITS",
+ "T62x_LSC_READ_MISSES",
+ "T62x_LSC_WRITE_HITS",
+ "T62x_LSC_WRITE_MISSES",
+ "T62x_LSC_ATOMIC_HITS",
+ "T62x_LSC_ATOMIC_MISSES",
+ "T62x_LSC_LINE_FETCHES",
+ "T62x_LSC_DIRTY_LINE",
+ "T62x_LSC_SNOOPS",
+ "T62x_AXI_TLB_STALL",
+ "T62x_AXI_TLB_MISS",
+ "T62x_AXI_TLB_TRANSACTION",
+ "T62x_LS_TLB_MISS",
+ "T62x_LS_TLB_HIT",
+ "T62x_AXI_BEATS_READ",
+ "T62x_AXI_BEATS_WRITTEN",
+
+ /*L2 and MMU */
+ "",
+ "",
+ "",
+ "",
+ "T62x_MMU_HIT",
+ "T62x_MMU_NEW_MISS",
+ "T62x_MMU_REPLAY_FULL",
+ "T62x_MMU_REPLAY_MISS",
+ "T62x_MMU_TABLE_WALK",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "T62x_UTLB_HIT",
+ "T62x_UTLB_NEW_MISS",
+ "T62x_UTLB_REPLAY_FULL",
+ "T62x_UTLB_REPLAY_MISS",
+ "T62x_UTLB_STALL",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "T62x_L2_EXT_WRITE_BEATS",
+ "T62x_L2_EXT_READ_BEATS",
+ "T62x_L2_ANY_LOOKUP",
+ "T62x_L2_READ_LOOKUP",
+ "T62x_L2_SREAD_LOOKUP",
+ "T62x_L2_READ_REPLAY",
+ "T62x_L2_READ_SNOOP",
+ "T62x_L2_READ_HIT",
+ "T62x_L2_CLEAN_MISS",
+ "T62x_L2_WRITE_LOOKUP",
+ "T62x_L2_SWRITE_LOOKUP",
+ "T62x_L2_WRITE_REPLAY",
+ "T62x_L2_WRITE_SNOOP",
+ "T62x_L2_WRITE_HIT",
+ "T62x_L2_EXT_READ_FULL",
+ "T62x_L2_EXT_READ_HALF",
+ "T62x_L2_EXT_WRITE_FULL",
+ "T62x_L2_EXT_WRITE_HALF",
+ "T62x_L2_EXT_READ",
+ "T62x_L2_EXT_READ_LINE",
+ "T62x_L2_EXT_WRITE",
+ "T62x_L2_EXT_WRITE_LINE",
+ "T62x_L2_EXT_WRITE_SMALL",
+ "T62x_L2_EXT_BARRIER",
+ "T62x_L2_EXT_AR_STALL",
+ "T62x_L2_EXT_R_BUF_FULL",
+ "T62x_L2_EXT_RD_BUF_FULL",
+ "T62x_L2_EXT_R_RAW",
+ "T62x_L2_EXT_W_STALL",
+ "T62x_L2_EXT_W_BUF_FULL",
+ "T62x_L2_EXT_R_W_HAZARD",
+ "T62x_L2_TAG_HAZARD",
+ "T62x_L2_SNOOP_FULL",
+ "T62x_L2_REPLAY_FULL"
+};
+
+static const char * const hardware_counters_mali_t72x[] = {
+ /* Job Manager */
+ "",
+ "",
+ "",
+ "",
+ "T72x_GPU_ACTIVE",
+ "T72x_IRQ_ACTIVE",
+ "T72x_JS0_JOBS",
+ "T72x_JS0_TASKS",
+ "T72x_JS0_ACTIVE",
+ "T72x_JS1_JOBS",
+ "T72x_JS1_TASKS",
+ "T72x_JS1_ACTIVE",
+ "T72x_JS2_JOBS",
+ "T72x_JS2_TASKS",
+ "T72x_JS2_ACTIVE",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+
+ /*Tiler */
+ "",
+ "",
+ "",
+ "T72x_TI_JOBS_PROCESSED",
+ "T72x_TI_TRIANGLES",
+ "T72x_TI_QUADS",
+ "T72x_TI_POLYGONS",
+ "T72x_TI_POINTS",
+ "T72x_TI_LINES",
+ "T72x_TI_FRONT_FACING",
+ "T72x_TI_BACK_FACING",
+ "T72x_TI_PRIM_VISIBLE",
+ "T72x_TI_PRIM_CULLED",
+ "T72x_TI_PRIM_CLIPPED",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "T72x_TI_ACTIVE",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+
+ /* Shader Core */
+ "",
+ "",
+ "",
+ "",
+ "T72x_FRAG_ACTIVE",
+ "T72x_FRAG_PRIMITIVES",
+ "T72x_FRAG_PRIMITIVES_DROPPED",
+ "T72x_FRAG_THREADS",
+ "T72x_FRAG_DUMMY_THREADS",
+ "T72x_FRAG_QUADS_RAST",
+ "T72x_FRAG_QUADS_EZS_TEST",
+ "T72x_FRAG_QUADS_EZS_KILLED",
+ "T72x_FRAG_THREADS_LZS_TEST",
+ "T72x_FRAG_THREADS_LZS_KILLED",
+ "T72x_FRAG_CYCLES_NO_TILE",
+ "T72x_FRAG_NUM_TILES",
+ "T72x_FRAG_TRANS_ELIM",
+ "T72x_COMPUTE_ACTIVE",
+ "T72x_COMPUTE_TASKS",
+ "T72x_COMPUTE_THREADS",
+ "T72x_TRIPIPE_ACTIVE",
+ "T72x_ARITH_WORDS",
+ "T72x_ARITH_CYCLES_REG",
+ "T72x_LS_WORDS",
+ "T72x_LS_ISSUES",
+ "T72x_LS_RESTARTS",
+ "T72x_LS_REISSUES_MISS",
+ "T72x_TEX_WORDS",
+ "T72x_TEX_BUBBLES",
+ "T72x_TEX_ISSUES",
+ "T72x_LSC_READ_HITS",
+ "T72x_LSC_READ_MISSES",
+ "T72x_LSC_WRITE_HITS",
+ "T72x_LSC_WRITE_MISSES",
+ "T72x_LSC_ATOMIC_HITS",
+ "T72x_LSC_ATOMIC_MISSES",
+ "T72x_LSC_LINE_FETCHES",
+ "T72x_LSC_DIRTY_LINE",
+ "T72x_LSC_SNOOPS",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+
+ /*L2 and MMU */
+ "",
+ "",
+ "",
+ "",
+ "T72x_L2_EXT_WRITE_BEAT",
+ "T72x_L2_EXT_READ_BEAT",
+ "T72x_L2_READ_SNOOP",
+ "T72x_L2_READ_HIT",
+ "T72x_L2_WRITE_SNOOP",
+ "T72x_L2_WRITE_HIT",
+ "T72x_L2_EXT_WRITE_SMALL",
+ "T72x_L2_EXT_BARRIER",
+ "T72x_L2_EXT_AR_STALL",
+ "T72x_L2_EXT_W_STALL",
+ "T72x_L2_SNOOP_FULL",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ ""
+};
+
+static const char * const hardware_counters_mali_t76x[] = {
+ /* Job Manager */
+ "",
+ "",
+ "",
+ "",
+ "T76x_MESSAGES_SENT",
+ "T76x_MESSAGES_RECEIVED",
+ "T76x_GPU_ACTIVE",
+ "T76x_IRQ_ACTIVE",
+ "T76x_JS0_JOBS",
+ "T76x_JS0_TASKS",
+ "T76x_JS0_ACTIVE",
+ "",
+ "T76x_JS0_WAIT_READ",
+ "T76x_JS0_WAIT_ISSUE",
+ "T76x_JS0_WAIT_DEPEND",
+ "T76x_JS0_WAIT_FINISH",
+ "T76x_JS1_JOBS",
+ "T76x_JS1_TASKS",
+ "T76x_JS1_ACTIVE",
+ "",
+ "T76x_JS1_WAIT_READ",
+ "T76x_JS1_WAIT_ISSUE",
+ "T76x_JS1_WAIT_DEPEND",
+ "T76x_JS1_WAIT_FINISH",
+ "T76x_JS2_JOBS",
+ "T76x_JS2_TASKS",
+ "T76x_JS2_ACTIVE",
+ "",
+ "T76x_JS2_WAIT_READ",
+ "T76x_JS2_WAIT_ISSUE",
+ "T76x_JS2_WAIT_DEPEND",
+ "T76x_JS2_WAIT_FINISH",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+
+ /*Tiler */
+ "",
+ "",
+ "",
+ "T76x_TI_JOBS_PROCESSED",
+ "T76x_TI_TRIANGLES",
+ "T76x_TI_QUADS",
+ "T76x_TI_POLYGONS",
+ "T76x_TI_POINTS",
+ "T76x_TI_LINES",
+ "T76x_TI_VCACHE_HIT",
+ "T76x_TI_VCACHE_MISS",
+ "T76x_TI_FRONT_FACING",
+ "T76x_TI_BACK_FACING",
+ "T76x_TI_PRIM_VISIBLE",
+ "T76x_TI_PRIM_CULLED",
+ "T76x_TI_PRIM_CLIPPED",
+ "T76x_TI_LEVEL0",
+ "T76x_TI_LEVEL1",
+ "T76x_TI_LEVEL2",
+ "T76x_TI_LEVEL3",
+ "T76x_TI_LEVEL4",
+ "T76x_TI_LEVEL5",
+ "T76x_TI_LEVEL6",
+ "T76x_TI_LEVEL7",
+ "T76x_TI_COMMAND_1",
+ "T76x_TI_COMMAND_2",
+ "T76x_TI_COMMAND_3",
+ "T76x_TI_COMMAND_4",
+ "T76x_TI_COMMAND_5_7",
+ "T76x_TI_COMMAND_8_15",
+ "T76x_TI_COMMAND_16_63",
+ "T76x_TI_COMMAND_64",
+ "T76x_TI_COMPRESS_IN",
+ "T76x_TI_COMPRESS_OUT",
+ "T76x_TI_COMPRESS_FLUSH",
+ "T76x_TI_TIMESTAMPS",
+ "T76x_TI_PCACHE_HIT",
+ "T76x_TI_PCACHE_MISS",
+ "T76x_TI_PCACHE_LINE",
+ "T76x_TI_PCACHE_STALL",
+ "T76x_TI_WRBUF_HIT",
+ "T76x_TI_WRBUF_MISS",
+ "T76x_TI_WRBUF_LINE",
+ "T76x_TI_WRBUF_PARTIAL",
+ "T76x_TI_WRBUF_STALL",
+ "T76x_TI_ACTIVE",
+ "T76x_TI_LOADING_DESC",
+ "T76x_TI_INDEX_WAIT",
+ "T76x_TI_INDEX_RANGE_WAIT",
+ "T76x_TI_VERTEX_WAIT",
+ "T76x_TI_PCACHE_WAIT",
+ "T76x_TI_WRBUF_WAIT",
+ "T76x_TI_BUS_READ",
+ "T76x_TI_BUS_WRITE",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "T76x_TI_UTLB_HIT",
+ "T76x_TI_UTLB_NEW_MISS",
+ "T76x_TI_UTLB_REPLAY_FULL",
+ "T76x_TI_UTLB_REPLAY_MISS",
+ "T76x_TI_UTLB_STALL",
+
+ /* Shader Core */
+ "",
+ "",
+ "",
+ "",
+ "T76x_FRAG_ACTIVE",
+ "T76x_FRAG_PRIMITIVES",
+ "T76x_FRAG_PRIMITIVES_DROPPED",
+ "T76x_FRAG_CYCLES_DESC",
+ "T76x_FRAG_CYCLES_FPKQ_ACTIVE",
+ "T76x_FRAG_CYCLES_VERT",
+ "T76x_FRAG_CYCLES_TRISETUP",
+ "T76x_FRAG_CYCLES_EZS_ACTIVE",
+ "T76x_FRAG_THREADS",
+ "T76x_FRAG_DUMMY_THREADS",
+ "T76x_FRAG_QUADS_RAST",
+ "T76x_FRAG_QUADS_EZS_TEST",
+ "T76x_FRAG_QUADS_EZS_KILLED",
+ "T76x_FRAG_THREADS_LZS_TEST",
+ "T76x_FRAG_THREADS_LZS_KILLED",
+ "T76x_FRAG_CYCLES_NO_TILE",
+ "T76x_FRAG_NUM_TILES",
+ "T76x_FRAG_TRANS_ELIM",
+ "T76x_COMPUTE_ACTIVE",
+ "T76x_COMPUTE_TASKS",
+ "T76x_COMPUTE_THREADS",
+ "T76x_COMPUTE_CYCLES_DESC",
+ "T76x_TRIPIPE_ACTIVE",
+ "T76x_ARITH_WORDS",
+ "T76x_ARITH_CYCLES_REG",
+ "T76x_ARITH_CYCLES_L0",
+ "T76x_ARITH_FRAG_DEPEND",
+ "T76x_LS_WORDS",
+ "T76x_LS_ISSUES",
+ "T76x_LS_REISSUE_ATTR",
+ "T76x_LS_REISSUES_VARY",
+ "T76x_LS_VARY_RV_MISS",
+ "T76x_LS_VARY_RV_HIT",
+ "T76x_LS_NO_UNPARK",
+ "T76x_TEX_WORDS",
+ "T76x_TEX_BUBBLES",
+ "T76x_TEX_WORDS_L0",
+ "T76x_TEX_WORDS_DESC",
+ "T76x_TEX_ISSUES",
+ "T76x_TEX_RECIRC_FMISS",
+ "T76x_TEX_RECIRC_DESC",
+ "T76x_TEX_RECIRC_MULTI",
+ "T76x_TEX_RECIRC_PMISS",
+ "T76x_TEX_RECIRC_CONF",
+ "T76x_LSC_READ_HITS",
+ "T76x_LSC_READ_OP",
+ "T76x_LSC_WRITE_HITS",
+ "T76x_LSC_WRITE_OP",
+ "T76x_LSC_ATOMIC_HITS",
+ "T76x_LSC_ATOMIC_OP",
+ "T76x_LSC_LINE_FETCHES",
+ "T76x_LSC_DIRTY_LINE",
+ "T76x_LSC_SNOOPS",
+ "T76x_AXI_TLB_STALL",
+ "T76x_AXI_TLB_MISS",
+ "T76x_AXI_TLB_TRANSACTION",
+ "T76x_LS_TLB_MISS",
+ "T76x_LS_TLB_HIT",
+ "T76x_AXI_BEATS_READ",
+ "T76x_AXI_BEATS_WRITTEN",
+
+ /*L2 and MMU */
+ "",
+ "",
+ "",
+ "",
+ "T76x_MMU_HIT",
+ "T76x_MMU_NEW_MISS",
+ "T76x_MMU_REPLAY_FULL",
+ "T76x_MMU_REPLAY_MISS",
+ "T76x_MMU_TABLE_WALK",
+ "T76x_MMU_REQUESTS",
+ "",
+ "",
+ "T76x_UTLB_HIT",
+ "T76x_UTLB_NEW_MISS",
+ "T76x_UTLB_REPLAY_FULL",
+ "T76x_UTLB_REPLAY_MISS",
+ "T76x_UTLB_STALL",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "T76x_L2_EXT_WRITE_BEATS",
+ "T76x_L2_EXT_READ_BEATS",
+ "T76x_L2_ANY_LOOKUP",
+ "T76x_L2_READ_LOOKUP",
+ "T76x_L2_SREAD_LOOKUP",
+ "T76x_L2_READ_REPLAY",
+ "T76x_L2_READ_SNOOP",
+ "T76x_L2_READ_HIT",
+ "T76x_L2_CLEAN_MISS",
+ "T76x_L2_WRITE_LOOKUP",
+ "T76x_L2_SWRITE_LOOKUP",
+ "T76x_L2_WRITE_REPLAY",
+ "T76x_L2_WRITE_SNOOP",
+ "T76x_L2_WRITE_HIT",
+ "T76x_L2_EXT_READ_FULL",
+ "",
+ "T76x_L2_EXT_WRITE_FULL",
+ "T76x_L2_EXT_R_W_HAZARD",
+ "T76x_L2_EXT_READ",
+ "T76x_L2_EXT_READ_LINE",
+ "T76x_L2_EXT_WRITE",
+ "T76x_L2_EXT_WRITE_LINE",
+ "T76x_L2_EXT_WRITE_SMALL",
+ "T76x_L2_EXT_BARRIER",
+ "T76x_L2_EXT_AR_STALL",
+ "T76x_L2_EXT_R_BUF_FULL",
+ "T76x_L2_EXT_RD_BUF_FULL",
+ "T76x_L2_EXT_R_RAW",
+ "T76x_L2_EXT_W_STALL",
+ "T76x_L2_EXT_W_BUF_FULL",
+ "T76x_L2_EXT_R_BUF_FULL",
+ "T76x_L2_TAG_HAZARD",
+ "T76x_L2_SNOOP_FULL",
+ "T76x_L2_REPLAY_FULL"
+};
+
+static const char * const hardware_counters_mali_t82x[] = {
+ /* Job Manager */
+ "",
+ "",
+ "",
+ "",
+ "T82x_MESSAGES_SENT",
+ "T82x_MESSAGES_RECEIVED",
+ "T82x_GPU_ACTIVE",
+ "T82x_IRQ_ACTIVE",
+ "T82x_JS0_JOBS",
+ "T82x_JS0_TASKS",
+ "T82x_JS0_ACTIVE",
+ "",
+ "T82x_JS0_WAIT_READ",
+ "T82x_JS0_WAIT_ISSUE",
+ "T82x_JS0_WAIT_DEPEND",
+ "T82x_JS0_WAIT_FINISH",
+ "T82x_JS1_JOBS",
+ "T82x_JS1_TASKS",
+ "T82x_JS1_ACTIVE",
+ "",
+ "T82x_JS1_WAIT_READ",
+ "T82x_JS1_WAIT_ISSUE",
+ "T82x_JS1_WAIT_DEPEND",
+ "T82x_JS1_WAIT_FINISH",
+ "T82x_JS2_JOBS",
+ "T82x_JS2_TASKS",
+ "T82x_JS2_ACTIVE",
+ "",
+ "T82x_JS2_WAIT_READ",
+ "T82x_JS2_WAIT_ISSUE",
+ "T82x_JS2_WAIT_DEPEND",
+ "T82x_JS2_WAIT_FINISH",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+
+ /*Tiler */
+ "",
+ "",
+ "",
+ "T82x_TI_JOBS_PROCESSED",
+ "T82x_TI_TRIANGLES",
+ "T82x_TI_QUADS",
+ "T82x_TI_POLYGONS",
+ "T82x_TI_POINTS",
+ "T82x_TI_LINES",
+ "T82x_TI_FRONT_FACING",
+ "T82x_TI_BACK_FACING",
+ "T82x_TI_PRIM_VISIBLE",
+ "T82x_TI_PRIM_CULLED",
+ "T82x_TI_PRIM_CLIPPED",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "T82x_TI_ACTIVE",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+
+ /* Shader Core */
+ "",
+ "",
+ "",
+ "",
+ "T82x_FRAG_ACTIVE",
+ "T82x_FRAG_PRIMITIVES",
+ "T82x_FRAG_PRIMITIVES_DROPPED",
+ "T82x_FRAG_CYCLES_DESC",
+ "T82x_FRAG_CYCLES_FPKQ_ACTIVE",
+ "T82x_FRAG_CYCLES_VERT",
+ "T82x_FRAG_CYCLES_TRISETUP",
+ "T82x_FRAG_CYCLES_EZS_ACTIVE",
+ "T82x_FRAG_THREADS",
+ "T82x_FRAG_DUMMY_THREADS",
+ "T82x_FRAG_QUADS_RAST",
+ "T82x_FRAG_QUADS_EZS_TEST",
+ "T82x_FRAG_QUADS_EZS_KILLED",
+ "T82x_FRAG_THREADS_LZS_TEST",
+ "T82x_FRAG_THREADS_LZS_KILLED",
+ "T82x_FRAG_CYCLES_NO_TILE",
+ "T82x_FRAG_NUM_TILES",
+ "T82x_FRAG_TRANS_ELIM",
+ "T82x_COMPUTE_ACTIVE",
+ "T82x_COMPUTE_TASKS",
+ "T82x_COMPUTE_THREADS",
+ "T82x_COMPUTE_CYCLES_DESC",
+ "T82x_TRIPIPE_ACTIVE",
+ "T82x_ARITH_WORDS",
+ "T82x_ARITH_CYCLES_REG",
+ "T82x_ARITH_CYCLES_L0",
+ "T82x_ARITH_FRAG_DEPEND",
+ "T82x_LS_WORDS",
+ "T82x_LS_ISSUES",
+ "T82x_LS_REISSUE_ATTR",
+ "T82x_LS_REISSUES_VARY",
+ "T82x_LS_VARY_RV_MISS",
+ "T82x_LS_VARY_RV_HIT",
+ "T82x_LS_NO_UNPARK",
+ "T82x_TEX_WORDS",
+ "T82x_TEX_BUBBLES",
+ "T82x_TEX_WORDS_L0",
+ "T82x_TEX_WORDS_DESC",
+ "T82x_TEX_ISSUES",
+ "T82x_TEX_RECIRC_FMISS",
+ "T82x_TEX_RECIRC_DESC",
+ "T82x_TEX_RECIRC_MULTI",
+ "T82x_TEX_RECIRC_PMISS",
+ "T82x_TEX_RECIRC_CONF",
+ "T82x_LSC_READ_HITS",
+ "T82x_LSC_READ_OP",
+ "T82x_LSC_WRITE_HITS",
+ "T82x_LSC_WRITE_OP",
+ "T82x_LSC_ATOMIC_HITS",
+ "T82x_LSC_ATOMIC_OP",
+ "T82x_LSC_LINE_FETCHES",
+ "T82x_LSC_DIRTY_LINE",
+ "T82x_LSC_SNOOPS",
+ "T82x_AXI_TLB_STALL",
+ "T82x_AXI_TLB_MISS",
+ "T82x_AXI_TLB_TRANSACTION",
+ "T82x_LS_TLB_MISS",
+ "T82x_LS_TLB_HIT",
+ "T82x_AXI_BEATS_READ",
+ "T82x_AXI_BEATS_WRITTEN",
+
+ /*L2 and MMU */
+ "",
+ "",
+ "",
+ "",
+ "T82x_MMU_HIT",
+ "T82x_MMU_NEW_MISS",
+ "T82x_MMU_REPLAY_FULL",
+ "T82x_MMU_REPLAY_MISS",
+ "T82x_MMU_TABLE_WALK",
+ "T82x_MMU_REQUESTS",
+ "",
+ "",
+ "T82x_UTLB_HIT",
+ "T82x_UTLB_NEW_MISS",
+ "T82x_UTLB_REPLAY_FULL",
+ "T82x_UTLB_REPLAY_MISS",
+ "T82x_UTLB_STALL",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "T82x_L2_EXT_WRITE_BEATS",
+ "T82x_L2_EXT_READ_BEATS",
+ "T82x_L2_ANY_LOOKUP",
+ "T82x_L2_READ_LOOKUP",
+ "T82x_L2_SREAD_LOOKUP",
+ "T82x_L2_READ_REPLAY",
+ "T82x_L2_READ_SNOOP",
+ "T82x_L2_READ_HIT",
+ "T82x_L2_CLEAN_MISS",
+ "T82x_L2_WRITE_LOOKUP",
+ "T82x_L2_SWRITE_LOOKUP",
+ "T82x_L2_WRITE_REPLAY",
+ "T82x_L2_WRITE_SNOOP",
+ "T82x_L2_WRITE_HIT",
+ "T82x_L2_EXT_READ_FULL",
+ "",
+ "T82x_L2_EXT_WRITE_FULL",
+ "T82x_L2_EXT_R_W_HAZARD",
+ "T82x_L2_EXT_READ",
+ "T82x_L2_EXT_READ_LINE",
+ "T82x_L2_EXT_WRITE",
+ "T82x_L2_EXT_WRITE_LINE",
+ "T82x_L2_EXT_WRITE_SMALL",
+ "T82x_L2_EXT_BARRIER",
+ "T82x_L2_EXT_AR_STALL",
+ "T82x_L2_EXT_R_BUF_FULL",
+ "T82x_L2_EXT_RD_BUF_FULL",
+ "T82x_L2_EXT_R_RAW",
+ "T82x_L2_EXT_W_STALL",
+ "T82x_L2_EXT_W_BUF_FULL",
+ "T82x_L2_EXT_R_BUF_FULL",
+ "T82x_L2_TAG_HAZARD",
+ "T82x_L2_SNOOP_FULL",
+ "T82x_L2_REPLAY_FULL"
+};
+
+static const char * const hardware_counters_mali_t83x[] = {
+ /* Job Manager */
+ "",
+ "",
+ "",
+ "",
+ "T83x_MESSAGES_SENT",
+ "T83x_MESSAGES_RECEIVED",
+ "T83x_GPU_ACTIVE",
+ "T83x_IRQ_ACTIVE",
+ "T83x_JS0_JOBS",
+ "T83x_JS0_TASKS",
+ "T83x_JS0_ACTIVE",
+ "",
+ "T83x_JS0_WAIT_READ",
+ "T83x_JS0_WAIT_ISSUE",
+ "T83x_JS0_WAIT_DEPEND",
+ "T83x_JS0_WAIT_FINISH",
+ "T83x_JS1_JOBS",
+ "T83x_JS1_TASKS",
+ "T83x_JS1_ACTIVE",
+ "",
+ "T83x_JS1_WAIT_READ",
+ "T83x_JS1_WAIT_ISSUE",
+ "T83x_JS1_WAIT_DEPEND",
+ "T83x_JS1_WAIT_FINISH",
+ "T83x_JS2_JOBS",
+ "T83x_JS2_TASKS",
+ "T83x_JS2_ACTIVE",
+ "",
+ "T83x_JS2_WAIT_READ",
+ "T83x_JS2_WAIT_ISSUE",
+ "T83x_JS2_WAIT_DEPEND",
+ "T83x_JS2_WAIT_FINISH",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+
+ /*Tiler */
+ "",
+ "",
+ "",
+ "T83x_TI_JOBS_PROCESSED",
+ "T83x_TI_TRIANGLES",
+ "T83x_TI_QUADS",
+ "T83x_TI_POLYGONS",
+ "T83x_TI_POINTS",
+ "T83x_TI_LINES",
+ "T83x_TI_FRONT_FACING",
+ "T83x_TI_BACK_FACING",
+ "T83x_TI_PRIM_VISIBLE",
+ "T83x_TI_PRIM_CULLED",
+ "T83x_TI_PRIM_CLIPPED",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "T83x_TI_ACTIVE",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+
+ /* Shader Core */
+ "",
+ "",
+ "",
+ "",
+ "T83x_FRAG_ACTIVE",
+ "T83x_FRAG_PRIMITIVES",
+ "T83x_FRAG_PRIMITIVES_DROPPED",
+ "T83x_FRAG_CYCLES_DESC",
+ "T83x_FRAG_CYCLES_FPKQ_ACTIVE",
+ "T83x_FRAG_CYCLES_VERT",
+ "T83x_FRAG_CYCLES_TRISETUP",
+ "T83x_FRAG_CYCLES_EZS_ACTIVE",
+ "T83x_FRAG_THREADS",
+ "T83x_FRAG_DUMMY_THREADS",
+ "T83x_FRAG_QUADS_RAST",
+ "T83x_FRAG_QUADS_EZS_TEST",
+ "T83x_FRAG_QUADS_EZS_KILLED",
+ "T83x_FRAG_THREADS_LZS_TEST",
+ "T83x_FRAG_THREADS_LZS_KILLED",
+ "T83x_FRAG_CYCLES_NO_TILE",
+ "T83x_FRAG_NUM_TILES",
+ "T83x_FRAG_TRANS_ELIM",
+ "T83x_COMPUTE_ACTIVE",
+ "T83x_COMPUTE_TASKS",
+ "T83x_COMPUTE_THREADS",
+ "T83x_COMPUTE_CYCLES_DESC",
+ "T83x_TRIPIPE_ACTIVE",
+ "T83x_ARITH_WORDS",
+ "T83x_ARITH_CYCLES_REG",
+ "T83x_ARITH_CYCLES_L0",
+ "T83x_ARITH_FRAG_DEPEND",
+ "T83x_LS_WORDS",
+ "T83x_LS_ISSUES",
+ "T83x_LS_REISSUE_ATTR",
+ "T83x_LS_REISSUES_VARY",
+ "T83x_LS_VARY_RV_MISS",
+ "T83x_LS_VARY_RV_HIT",
+ "T83x_LS_NO_UNPARK",
+ "T83x_TEX_WORDS",
+ "T83x_TEX_BUBBLES",
+ "T83x_TEX_WORDS_L0",
+ "T83x_TEX_WORDS_DESC",
+ "T83x_TEX_ISSUES",
+ "T83x_TEX_RECIRC_FMISS",
+ "T83x_TEX_RECIRC_DESC",
+ "T83x_TEX_RECIRC_MULTI",
+ "T83x_TEX_RECIRC_PMISS",
+ "T83x_TEX_RECIRC_CONF",
+ "T83x_LSC_READ_HITS",
+ "T83x_LSC_READ_OP",
+ "T83x_LSC_WRITE_HITS",
+ "T83x_LSC_WRITE_OP",
+ "T83x_LSC_ATOMIC_HITS",
+ "T83x_LSC_ATOMIC_OP",
+ "T83x_LSC_LINE_FETCHES",
+ "T83x_LSC_DIRTY_LINE",
+ "T83x_LSC_SNOOPS",
+ "T83x_AXI_TLB_STALL",
+ "T83x_AXI_TLB_MISS",
+ "T83x_AXI_TLB_TRANSACTION",
+ "T83x_LS_TLB_MISS",
+ "T83x_LS_TLB_HIT",
+ "T83x_AXI_BEATS_READ",
+ "T83x_AXI_BEATS_WRITTEN",
+
+ /*L2 and MMU */
+ "",
+ "",
+ "",
+ "",
+ "T83x_MMU_HIT",
+ "T83x_MMU_NEW_MISS",
+ "T83x_MMU_REPLAY_FULL",
+ "T83x_MMU_REPLAY_MISS",
+ "T83x_MMU_TABLE_WALK",
+ "T83x_MMU_REQUESTS",
+ "",
+ "",
+ "T83x_UTLB_HIT",
+ "T83x_UTLB_NEW_MISS",
+ "T83x_UTLB_REPLAY_FULL",
+ "T83x_UTLB_REPLAY_MISS",
+ "T83x_UTLB_STALL",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "T83x_L2_EXT_WRITE_BEATS",
+ "T83x_L2_EXT_READ_BEATS",
+ "T83x_L2_ANY_LOOKUP",
+ "T83x_L2_READ_LOOKUP",
+ "T83x_L2_SREAD_LOOKUP",
+ "T83x_L2_READ_REPLAY",
+ "T83x_L2_READ_SNOOP",
+ "T83x_L2_READ_HIT",
+ "T83x_L2_CLEAN_MISS",
+ "T83x_L2_WRITE_LOOKUP",
+ "T83x_L2_SWRITE_LOOKUP",
+ "T83x_L2_WRITE_REPLAY",
+ "T83x_L2_WRITE_SNOOP",
+ "T83x_L2_WRITE_HIT",
+ "T83x_L2_EXT_READ_FULL",
+ "",
+ "T83x_L2_EXT_WRITE_FULL",
+ "T83x_L2_EXT_R_W_HAZARD",
+ "T83x_L2_EXT_READ",
+ "T83x_L2_EXT_READ_LINE",
+ "T83x_L2_EXT_WRITE",
+ "T83x_L2_EXT_WRITE_LINE",
+ "T83x_L2_EXT_WRITE_SMALL",
+ "T83x_L2_EXT_BARRIER",
+ "T83x_L2_EXT_AR_STALL",
+ "T83x_L2_EXT_R_BUF_FULL",
+ "T83x_L2_EXT_RD_BUF_FULL",
+ "T83x_L2_EXT_R_RAW",
+ "T83x_L2_EXT_W_STALL",
+ "T83x_L2_EXT_W_BUF_FULL",
+ "T83x_L2_EXT_R_BUF_FULL",
+ "T83x_L2_TAG_HAZARD",
+ "T83x_L2_SNOOP_FULL",
+ "T83x_L2_REPLAY_FULL"
+};
+
+static const char * const hardware_counters_mali_t86x[] = {
+ /* Job Manager */
+ "",
+ "",
+ "",
+ "",
+ "T86x_MESSAGES_SENT",
+ "T86x_MESSAGES_RECEIVED",
+ "T86x_GPU_ACTIVE",
+ "T86x_IRQ_ACTIVE",
+ "T86x_JS0_JOBS",
+ "T86x_JS0_TASKS",
+ "T86x_JS0_ACTIVE",
+ "",
+ "T86x_JS0_WAIT_READ",
+ "T86x_JS0_WAIT_ISSUE",
+ "T86x_JS0_WAIT_DEPEND",
+ "T86x_JS0_WAIT_FINISH",
+ "T86x_JS1_JOBS",
+ "T86x_JS1_TASKS",
+ "T86x_JS1_ACTIVE",
+ "",
+ "T86x_JS1_WAIT_READ",
+ "T86x_JS1_WAIT_ISSUE",
+ "T86x_JS1_WAIT_DEPEND",
+ "T86x_JS1_WAIT_FINISH",
+ "T86x_JS2_JOBS",
+ "T86x_JS2_TASKS",
+ "T86x_JS2_ACTIVE",
+ "",
+ "T86x_JS2_WAIT_READ",
+ "T86x_JS2_WAIT_ISSUE",
+ "T86x_JS2_WAIT_DEPEND",
+ "T86x_JS2_WAIT_FINISH",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+
+ /*Tiler */
+ "",
+ "",
+ "",
+ "T86x_TI_JOBS_PROCESSED",
+ "T86x_TI_TRIANGLES",
+ "T86x_TI_QUADS",
+ "T86x_TI_POLYGONS",
+ "T86x_TI_POINTS",
+ "T86x_TI_LINES",
+ "T86x_TI_VCACHE_HIT",
+ "T86x_TI_VCACHE_MISS",
+ "T86x_TI_FRONT_FACING",
+ "T86x_TI_BACK_FACING",
+ "T86x_TI_PRIM_VISIBLE",
+ "T86x_TI_PRIM_CULLED",
+ "T86x_TI_PRIM_CLIPPED",
+ "T86x_TI_LEVEL0",
+ "T86x_TI_LEVEL1",
+ "T86x_TI_LEVEL2",
+ "T86x_TI_LEVEL3",
+ "T86x_TI_LEVEL4",
+ "T86x_TI_LEVEL5",
+ "T86x_TI_LEVEL6",
+ "T86x_TI_LEVEL7",
+ "T86x_TI_COMMAND_1",
+ "T86x_TI_COMMAND_2",
+ "T86x_TI_COMMAND_3",
+ "T86x_TI_COMMAND_4",
+ "T86x_TI_COMMAND_5_7",
+ "T86x_TI_COMMAND_8_15",
+ "T86x_TI_COMMAND_16_63",
+ "T86x_TI_COMMAND_64",
+ "T86x_TI_COMPRESS_IN",
+ "T86x_TI_COMPRESS_OUT",
+ "T86x_TI_COMPRESS_FLUSH",
+ "T86x_TI_TIMESTAMPS",
+ "T86x_TI_PCACHE_HIT",
+ "T86x_TI_PCACHE_MISS",
+ "T86x_TI_PCACHE_LINE",
+ "T86x_TI_PCACHE_STALL",
+ "T86x_TI_WRBUF_HIT",
+ "T86x_TI_WRBUF_MISS",
+ "T86x_TI_WRBUF_LINE",
+ "T86x_TI_WRBUF_PARTIAL",
+ "T86x_TI_WRBUF_STALL",
+ "T86x_TI_ACTIVE",
+ "T86x_TI_LOADING_DESC",
+ "T86x_TI_INDEX_WAIT",
+ "T86x_TI_INDEX_RANGE_WAIT",
+ "T86x_TI_VERTEX_WAIT",
+ "T86x_TI_PCACHE_WAIT",
+ "T86x_TI_WRBUF_WAIT",
+ "T86x_TI_BUS_READ",
+ "T86x_TI_BUS_WRITE",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "T86x_TI_UTLB_HIT",
+ "T86x_TI_UTLB_NEW_MISS",
+ "T86x_TI_UTLB_REPLAY_FULL",
+ "T86x_TI_UTLB_REPLAY_MISS",
+ "T86x_TI_UTLB_STALL",
+
+ /* Shader Core */
+ "",
+ "",
+ "",
+ "",
+ "T86x_FRAG_ACTIVE",
+ "T86x_FRAG_PRIMITIVES",
+ "T86x_FRAG_PRIMITIVES_DROPPED",
+ "T86x_FRAG_CYCLES_DESC",
+ "T86x_FRAG_CYCLES_FPKQ_ACTIVE",
+ "T86x_FRAG_CYCLES_VERT",
+ "T86x_FRAG_CYCLES_TRISETUP",
+ "T86x_FRAG_CYCLES_EZS_ACTIVE",
+ "T86x_FRAG_THREADS",
+ "T86x_FRAG_DUMMY_THREADS",
+ "T86x_FRAG_QUADS_RAST",
+ "T86x_FRAG_QUADS_EZS_TEST",
+ "T86x_FRAG_QUADS_EZS_KILLED",
+ "T86x_FRAG_THREADS_LZS_TEST",
+ "T86x_FRAG_THREADS_LZS_KILLED",
+ "T86x_FRAG_CYCLES_NO_TILE",
+ "T86x_FRAG_NUM_TILES",
+ "T86x_FRAG_TRANS_ELIM",
+ "T86x_COMPUTE_ACTIVE",
+ "T86x_COMPUTE_TASKS",
+ "T86x_COMPUTE_THREADS",
+ "T86x_COMPUTE_CYCLES_DESC",
+ "T86x_TRIPIPE_ACTIVE",
+ "T86x_ARITH_WORDS",
+ "T86x_ARITH_CYCLES_REG",
+ "T86x_ARITH_CYCLES_L0",
+ "T86x_ARITH_FRAG_DEPEND",
+ "T86x_LS_WORDS",
+ "T86x_LS_ISSUES",
+ "T86x_LS_REISSUE_ATTR",
+ "T86x_LS_REISSUES_VARY",
+ "T86x_LS_VARY_RV_MISS",
+ "T86x_LS_VARY_RV_HIT",
+ "T86x_LS_NO_UNPARK",
+ "T86x_TEX_WORDS",
+ "T86x_TEX_BUBBLES",
+ "T86x_TEX_WORDS_L0",
+ "T86x_TEX_WORDS_DESC",
+ "T86x_TEX_ISSUES",
+ "T86x_TEX_RECIRC_FMISS",
+ "T86x_TEX_RECIRC_DESC",
+ "T86x_TEX_RECIRC_MULTI",
+ "T86x_TEX_RECIRC_PMISS",
+ "T86x_TEX_RECIRC_CONF",
+ "T86x_LSC_READ_HITS",
+ "T86x_LSC_READ_OP",
+ "T86x_LSC_WRITE_HITS",
+ "T86x_LSC_WRITE_OP",
+ "T86x_LSC_ATOMIC_HITS",
+ "T86x_LSC_ATOMIC_OP",
+ "T86x_LSC_LINE_FETCHES",
+ "T86x_LSC_DIRTY_LINE",
+ "T86x_LSC_SNOOPS",
+ "T86x_AXI_TLB_STALL",
+ "T86x_AXI_TLB_MISS",
+ "T86x_AXI_TLB_TRANSACTION",
+ "T86x_LS_TLB_MISS",
+ "T86x_LS_TLB_HIT",
+ "T86x_AXI_BEATS_READ",
+ "T86x_AXI_BEATS_WRITTEN",
+
+ /*L2 and MMU */
+ "",
+ "",
+ "",
+ "",
+ "T86x_MMU_HIT",
+ "T86x_MMU_NEW_MISS",
+ "T86x_MMU_REPLAY_FULL",
+ "T86x_MMU_REPLAY_MISS",
+ "T86x_MMU_TABLE_WALK",
+ "T86x_MMU_REQUESTS",
+ "",
+ "",
+ "T86x_UTLB_HIT",
+ "T86x_UTLB_NEW_MISS",
+ "T86x_UTLB_REPLAY_FULL",
+ "T86x_UTLB_REPLAY_MISS",
+ "T86x_UTLB_STALL",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "T86x_L2_EXT_WRITE_BEATS",
+ "T86x_L2_EXT_READ_BEATS",
+ "T86x_L2_ANY_LOOKUP",
+ "T86x_L2_READ_LOOKUP",
+ "T86x_L2_SREAD_LOOKUP",
+ "T86x_L2_READ_REPLAY",
+ "T86x_L2_READ_SNOOP",
+ "T86x_L2_READ_HIT",
+ "T86x_L2_CLEAN_MISS",
+ "T86x_L2_WRITE_LOOKUP",
+ "T86x_L2_SWRITE_LOOKUP",
+ "T86x_L2_WRITE_REPLAY",
+ "T86x_L2_WRITE_SNOOP",
+ "T86x_L2_WRITE_HIT",
+ "T86x_L2_EXT_READ_FULL",
+ "",
+ "T86x_L2_EXT_WRITE_FULL",
+ "T86x_L2_EXT_R_W_HAZARD",
+ "T86x_L2_EXT_READ",
+ "T86x_L2_EXT_READ_LINE",
+ "T86x_L2_EXT_WRITE",
+ "T86x_L2_EXT_WRITE_LINE",
+ "T86x_L2_EXT_WRITE_SMALL",
+ "T86x_L2_EXT_BARRIER",
+ "T86x_L2_EXT_AR_STALL",
+ "T86x_L2_EXT_R_BUF_FULL",
+ "T86x_L2_EXT_RD_BUF_FULL",
+ "T86x_L2_EXT_R_RAW",
+ "T86x_L2_EXT_W_STALL",
+ "T86x_L2_EXT_W_BUF_FULL",
+ "T86x_L2_EXT_R_BUF_FULL",
+ "T86x_L2_TAG_HAZARD",
+ "T86x_L2_SNOOP_FULL",
+ "T86x_L2_REPLAY_FULL"
+};
+
+static const char * const hardware_counters_mali_t88x[] = {
+ /* Job Manager */
+ "",
+ "",
+ "",
+ "",
+ "T88x_MESSAGES_SENT",
+ "T88x_MESSAGES_RECEIVED",
+ "T88x_GPU_ACTIVE",
+ "T88x_IRQ_ACTIVE",
+ "T88x_JS0_JOBS",
+ "T88x_JS0_TASKS",
+ "T88x_JS0_ACTIVE",
+ "",
+ "T88x_JS0_WAIT_READ",
+ "T88x_JS0_WAIT_ISSUE",
+ "T88x_JS0_WAIT_DEPEND",
+ "T88x_JS0_WAIT_FINISH",
+ "T88x_JS1_JOBS",
+ "T88x_JS1_TASKS",
+ "T88x_JS1_ACTIVE",
+ "",
+ "T88x_JS1_WAIT_READ",
+ "T88x_JS1_WAIT_ISSUE",
+ "T88x_JS1_WAIT_DEPEND",
+ "T88x_JS1_WAIT_FINISH",
+ "T88x_JS2_JOBS",
+ "T88x_JS2_TASKS",
+ "T88x_JS2_ACTIVE",
+ "",
+ "T88x_JS2_WAIT_READ",
+ "T88x_JS2_WAIT_ISSUE",
+ "T88x_JS2_WAIT_DEPEND",
+ "T88x_JS2_WAIT_FINISH",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+
+ /*Tiler */
+ "",
+ "",
+ "",
+ "T88x_TI_JOBS_PROCESSED",
+ "T88x_TI_TRIANGLES",
+ "T88x_TI_QUADS",
+ "T88x_TI_POLYGONS",
+ "T88x_TI_POINTS",
+ "T88x_TI_LINES",
+ "T88x_TI_VCACHE_HIT",
+ "T88x_TI_VCACHE_MISS",
+ "T88x_TI_FRONT_FACING",
+ "T88x_TI_BACK_FACING",
+ "T88x_TI_PRIM_VISIBLE",
+ "T88x_TI_PRIM_CULLED",
+ "T88x_TI_PRIM_CLIPPED",
+ "T88x_TI_LEVEL0",
+ "T88x_TI_LEVEL1",
+ "T88x_TI_LEVEL2",
+ "T88x_TI_LEVEL3",
+ "T88x_TI_LEVEL4",
+ "T88x_TI_LEVEL5",
+ "T88x_TI_LEVEL6",
+ "T88x_TI_LEVEL7",
+ "T88x_TI_COMMAND_1",
+ "T88x_TI_COMMAND_2",
+ "T88x_TI_COMMAND_3",
+ "T88x_TI_COMMAND_4",
+ "T88x_TI_COMMAND_5_7",
+ "T88x_TI_COMMAND_8_15",
+ "T88x_TI_COMMAND_16_63",
+ "T88x_TI_COMMAND_64",
+ "T88x_TI_COMPRESS_IN",
+ "T88x_TI_COMPRESS_OUT",
+ "T88x_TI_COMPRESS_FLUSH",
+ "T88x_TI_TIMESTAMPS",
+ "T88x_TI_PCACHE_HIT",
+ "T88x_TI_PCACHE_MISS",
+ "T88x_TI_PCACHE_LINE",
+ "T88x_TI_PCACHE_STALL",
+ "T88x_TI_WRBUF_HIT",
+ "T88x_TI_WRBUF_MISS",
+ "T88x_TI_WRBUF_LINE",
+ "T88x_TI_WRBUF_PARTIAL",
+ "T88x_TI_WRBUF_STALL",
+ "T88x_TI_ACTIVE",
+ "T88x_TI_LOADING_DESC",
+ "T88x_TI_INDEX_WAIT",
+ "T88x_TI_INDEX_RANGE_WAIT",
+ "T88x_TI_VERTEX_WAIT",
+ "T88x_TI_PCACHE_WAIT",
+ "T88x_TI_WRBUF_WAIT",
+ "T88x_TI_BUS_READ",
+ "T88x_TI_BUS_WRITE",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "T88x_TI_UTLB_HIT",
+ "T88x_TI_UTLB_NEW_MISS",
+ "T88x_TI_UTLB_REPLAY_FULL",
+ "T88x_TI_UTLB_REPLAY_MISS",
+ "T88x_TI_UTLB_STALL",
+
+ /* Shader Core */
+ "",
+ "",
+ "",
+ "",
+ "T88x_FRAG_ACTIVE",
+ "T88x_FRAG_PRIMITIVES",
+ "T88x_FRAG_PRIMITIVES_DROPPED",
+ "T88x_FRAG_CYCLES_DESC",
+ "T88x_FRAG_CYCLES_FPKQ_ACTIVE",
+ "T88x_FRAG_CYCLES_VERT",
+ "T88x_FRAG_CYCLES_TRISETUP",
+ "T88x_FRAG_CYCLES_EZS_ACTIVE",
+ "T88x_FRAG_THREADS",
+ "T88x_FRAG_DUMMY_THREADS",
+ "T88x_FRAG_QUADS_RAST",
+ "T88x_FRAG_QUADS_EZS_TEST",
+ "T88x_FRAG_QUADS_EZS_KILLED",
+ "T88x_FRAG_THREADS_LZS_TEST",
+ "T88x_FRAG_THREADS_LZS_KILLED",
+ "T88x_FRAG_CYCLES_NO_TILE",
+ "T88x_FRAG_NUM_TILES",
+ "T88x_FRAG_TRANS_ELIM",
+ "T88x_COMPUTE_ACTIVE",
+ "T88x_COMPUTE_TASKS",
+ "T88x_COMPUTE_THREADS",
+ "T88x_COMPUTE_CYCLES_DESC",
+ "T88x_TRIPIPE_ACTIVE",
+ "T88x_ARITH_WORDS",
+ "T88x_ARITH_CYCLES_REG",
+ "T88x_ARITH_CYCLES_L0",
+ "T88x_ARITH_FRAG_DEPEND",
+ "T88x_LS_WORDS",
+ "T88x_LS_ISSUES",
+ "T88x_LS_REISSUE_ATTR",
+ "T88x_LS_REISSUES_VARY",
+ "T88x_LS_VARY_RV_MISS",
+ "T88x_LS_VARY_RV_HIT",
+ "T88x_LS_NO_UNPARK",
+ "T88x_TEX_WORDS",
+ "T88x_TEX_BUBBLES",
+ "T88x_TEX_WORDS_L0",
+ "T88x_TEX_WORDS_DESC",
+ "T88x_TEX_ISSUES",
+ "T88x_TEX_RECIRC_FMISS",
+ "T88x_TEX_RECIRC_DESC",
+ "T88x_TEX_RECIRC_MULTI",
+ "T88x_TEX_RECIRC_PMISS",
+ "T88x_TEX_RECIRC_CONF",
+ "T88x_LSC_READ_HITS",
+ "T88x_LSC_READ_OP",
+ "T88x_LSC_WRITE_HITS",
+ "T88x_LSC_WRITE_OP",
+ "T88x_LSC_ATOMIC_HITS",
+ "T88x_LSC_ATOMIC_OP",
+ "T88x_LSC_LINE_FETCHES",
+ "T88x_LSC_DIRTY_LINE",
+ "T88x_LSC_SNOOPS",
+ "T88x_AXI_TLB_STALL",
+ "T88x_AXI_TLB_MISS",
+ "T88x_AXI_TLB_TRANSACTION",
+ "T88x_LS_TLB_MISS",
+ "T88x_LS_TLB_HIT",
+ "T88x_AXI_BEATS_READ",
+ "T88x_AXI_BEATS_WRITTEN",
+
+ /*L2 and MMU */
+ "",
+ "",
+ "",
+ "",
+ "T88x_MMU_HIT",
+ "T88x_MMU_NEW_MISS",
+ "T88x_MMU_REPLAY_FULL",
+ "T88x_MMU_REPLAY_MISS",
+ "T88x_MMU_TABLE_WALK",
+ "T88x_MMU_REQUESTS",
+ "",
+ "",
+ "T88x_UTLB_HIT",
+ "T88x_UTLB_NEW_MISS",
+ "T88x_UTLB_REPLAY_FULL",
+ "T88x_UTLB_REPLAY_MISS",
+ "T88x_UTLB_STALL",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "T88x_L2_EXT_WRITE_BEATS",
+ "T88x_L2_EXT_READ_BEATS",
+ "T88x_L2_ANY_LOOKUP",
+ "T88x_L2_READ_LOOKUP",
+ "T88x_L2_SREAD_LOOKUP",
+ "T88x_L2_READ_REPLAY",
+ "T88x_L2_READ_SNOOP",
+ "T88x_L2_READ_HIT",
+ "T88x_L2_CLEAN_MISS",
+ "T88x_L2_WRITE_LOOKUP",
+ "T88x_L2_SWRITE_LOOKUP",
+ "T88x_L2_WRITE_REPLAY",
+ "T88x_L2_WRITE_SNOOP",
+ "T88x_L2_WRITE_HIT",
+ "T88x_L2_EXT_READ_FULL",
+ "",
+ "T88x_L2_EXT_WRITE_FULL",
+ "T88x_L2_EXT_R_W_HAZARD",
+ "T88x_L2_EXT_READ",
+ "T88x_L2_EXT_READ_LINE",
+ "T88x_L2_EXT_WRITE",
+ "T88x_L2_EXT_WRITE_LINE",
+ "T88x_L2_EXT_WRITE_SMALL",
+ "T88x_L2_EXT_BARRIER",
+ "T88x_L2_EXT_AR_STALL",
+ "T88x_L2_EXT_R_BUF_FULL",
+ "T88x_L2_EXT_RD_BUF_FULL",
+ "T88x_L2_EXT_R_RAW",
+ "T88x_L2_EXT_W_STALL",
+ "T88x_L2_EXT_W_BUF_FULL",
+ "T88x_L2_EXT_R_BUF_FULL",
+ "T88x_L2_TAG_HAZARD",
+ "T88x_L2_SNOOP_FULL",
+ "T88x_L2_REPLAY_FULL"
+};
+
+#include "mali_kbase_gator_hwcnt_names_tmix.h"
+
+#include "mali_kbase_gator_hwcnt_names_thex.h"
+
+
+#endif
diff --git a/midgard-0.r2p0/mali_kbase_gator_hwcnt_names_thex.h b/midgard-0.r2p0/mali_kbase_gator_hwcnt_names_thex.h
new file mode 100755
index 0000000..bcceef4
--- /dev/null
+++ b/midgard-0.r2p0/mali_kbase_gator_hwcnt_names_thex.h
@@ -0,0 +1,291 @@
+/*
+ *
+ * (C) COPYRIGHT 2016 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained
+ * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
+ * Boston, MA 02110-1301, USA.
+ *
+ */
+
+
+
+/*
+ * This header was autogenerated, it should not be edited.
+ */
+
+#ifndef _KBASE_GATOR_HWCNT_NAMES_THEX_H_
+#define _KBASE_GATOR_HWCNT_NAMES_THEX_H_
+
+static const char * const hardware_counters_mali_tHEx[] = {
+ /* Performance counters for the Job Manager */
+ "",
+ "",
+ "",
+ "",
+ "THEx_MESSAGES_SENT",
+ "THEx_MESSAGES_RECEIVED",
+ "THEx_GPU_ACTIVE",
+ "THEx_IRQ_ACTIVE",
+ "THEx_JS0_JOBS",
+ "THEx_JS0_TASKS",
+ "THEx_JS0_ACTIVE",
+ "",
+ "THEx_JS0_WAIT_READ",
+ "THEx_JS0_WAIT_ISSUE",
+ "THEx_JS0_WAIT_DEPEND",
+ "THEx_JS0_WAIT_FINISH",
+ "THEx_JS1_JOBS",
+ "THEx_JS1_TASKS",
+ "THEx_JS1_ACTIVE",
+ "",
+ "THEx_JS1_WAIT_READ",
+ "THEx_JS1_WAIT_ISSUE",
+ "THEx_JS1_WAIT_DEPEND",
+ "THEx_JS1_WAIT_FINISH",
+ "THEx_JS2_JOBS",
+ "THEx_JS2_TASKS",
+ "THEx_JS2_ACTIVE",
+ "",
+ "THEx_JS2_WAIT_READ",
+ "THEx_JS2_WAIT_ISSUE",
+ "THEx_JS2_WAIT_DEPEND",
+ "THEx_JS2_WAIT_FINISH",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+
+ /* Performance counters for the Tiler */
+ "",
+ "",
+ "",
+ "",
+ "THEx_TILER_ACTIVE",
+ "THEx_JOBS_PROCESSED",
+ "THEx_TRIANGLES",
+ "THEx_LINES",
+ "THEx_POINTS",
+ "THEx_FRONT_FACING",
+ "THEx_BACK_FACING",
+ "THEx_PRIM_VISIBLE",
+ "THEx_PRIM_CULLED",
+ "THEx_PRIM_CLIPPED",
+ "THEx_PRIM_SAT_CULLED",
+ "",
+ "",
+ "THEx_BUS_READ",
+ "",
+ "THEx_BUS_WRITE",
+ "THEx_LOADING_DESC",
+ "THEx_IDVS_POS_SHAD_REQ",
+ "THEx_IDVS_POS_SHAD_WAIT",
+ "THEx_IDVS_POS_SHAD_STALL",
+ "THEx_IDVS_POS_FIFO_FULL",
+ "THEx_PREFETCH_STALL",
+ "THEx_VCACHE_HIT",
+ "THEx_VCACHE_MISS",
+ "THEx_VCACHE_LINE_WAIT",
+ "THEx_VFETCH_POS_READ_WAIT",
+ "THEx_VFETCH_VERTEX_WAIT",
+ "THEx_VFETCH_STALL",
+ "THEx_PRIMASSY_STALL",
+ "THEx_BBOX_GEN_STALL",
+ "THEx_IDVS_VBU_HIT",
+ "THEx_IDVS_VBU_MISS",
+ "THEx_IDVS_VBU_LINE_DEALLOCATE",
+ "THEx_IDVS_VAR_SHAD_REQ",
+ "THEx_IDVS_VAR_SHAD_STALL",
+ "THEx_BINNER_STALL",
+ "THEx_ITER_STALL",
+ "THEx_COMPRESS_MISS",
+ "THEx_COMPRESS_STALL",
+ "THEx_PCACHE_HIT",
+ "THEx_PCACHE_MISS",
+ "THEx_PCACHE_MISS_STALL",
+ "THEx_PCACHE_EVICT_STALL",
+ "THEx_PMGR_PTR_WR_STALL",
+ "THEx_PMGR_PTR_RD_STALL",
+ "THEx_PMGR_CMD_WR_STALL",
+ "THEx_WRBUF_ACTIVE",
+ "THEx_WRBUF_HIT",
+ "THEx_WRBUF_MISS",
+ "THEx_WRBUF_NO_FREE_LINE_STALL",
+ "THEx_WRBUF_NO_AXI_ID_STALL",
+ "THEx_WRBUF_AXI_STALL",
+ "",
+ "",
+ "",
+ "THEx_UTLB_TRANS",
+ "THEx_UTLB_TRANS_HIT",
+ "THEx_UTLB_TRANS_STALL",
+ "THEx_UTLB_TRANS_MISS_DELAY",
+ "THEx_UTLB_MMU_REQ",
+
+ /* Performance counters for the Shader Core */
+ "",
+ "",
+ "",
+ "",
+ "THEx_FRAG_ACTIVE",
+ "THEx_FRAG_PRIMITIVES",
+ "THEx_FRAG_PRIM_RAST",
+ "THEx_FRAG_FPK_ACTIVE",
+ "THEx_FRAG_STARVING",
+ "THEx_FRAG_WARPS",
+ "THEx_FRAG_PARTIAL_WARPS",
+ "THEx_FRAG_QUADS_RAST",
+ "THEx_FRAG_QUADS_EZS_TEST",
+ "THEx_FRAG_QUADS_EZS_UPDATE",
+ "THEx_FRAG_QUADS_EZS_KILL",
+ "THEx_FRAG_LZS_TEST",
+ "THEx_FRAG_LZS_KILL",
+ "",
+ "THEx_FRAG_PTILES",
+ "THEx_FRAG_TRANS_ELIM",
+ "THEx_QUAD_FPK_KILLER",
+ "",
+ "THEx_COMPUTE_ACTIVE",
+ "THEx_COMPUTE_TASKS",
+ "THEx_COMPUTE_WARPS",
+ "THEx_COMPUTE_STARVING",
+ "THEx_EXEC_CORE_ACTIVE",
+ "THEx_EXEC_ACTIVE",
+ "THEx_EXEC_INSTR_COUNT",
+ "THEx_EXEC_INSTR_DIVERGED",
+ "THEx_EXEC_INSTR_STARVING",
+ "THEx_ARITH_INSTR_SINGLE_FMA",
+ "THEx_ARITH_INSTR_DOUBLE",
+ "THEx_ARITH_INSTR_MSG",
+ "THEx_ARITH_INSTR_MSG_ONLY",
+ "THEx_TEX_INSTR",
+ "THEx_TEX_INSTR_MIPMAP",
+ "THEx_TEX_INSTR_COMPRESSED",
+ "THEx_TEX_INSTR_3D",
+ "THEx_TEX_INSTR_TRILINEAR",
+ "THEx_TEX_COORD_ISSUE",
+ "THEx_TEX_COORD_STALL",
+ "THEx_TEX_STARVE_CACHE",
+ "THEx_TEX_STARVE_FILTER",
+ "THEx_LS_MEM_READ_FULL",
+ "THEx_LS_MEM_READ_SHORT",
+ "THEx_LS_MEM_WRITE_FULL",
+ "THEx_LS_MEM_WRITE_SHORT",
+ "THEx_LS_MEM_ATOMIC",
+ "THEx_VARY_INSTR",
+ "THEx_VARY_SLOT_32",
+ "THEx_VARY_SLOT_16",
+ "THEx_ATTR_INSTR",
+ "THEx_ARITH_INSTR_FP_MUL",
+ "THEx_BEATS_RD_FTC",
+ "THEx_BEATS_RD_FTC_EXT",
+ "THEx_BEATS_RD_LSC",
+ "THEx_BEATS_RD_LSC_EXT",
+ "THEx_BEATS_RD_TEX",
+ "THEx_BEATS_RD_TEX_EXT",
+ "THEx_BEATS_RD_OTHER",
+ "THEx_BEATS_WR_LSC",
+ "THEx_BEATS_WR_TIB",
+ "",
+
+ /* Performance counters for the Memory System */
+ "",
+ "",
+ "",
+ "",
+ "THEx_MMU_REQUESTS",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "THEx_L2_RD_MSG_IN",
+ "THEx_L2_RD_MSG_IN_STALL",
+ "THEx_L2_WR_MSG_IN",
+ "THEx_L2_WR_MSG_IN_STALL",
+ "THEx_L2_SNP_MSG_IN",
+ "THEx_L2_SNP_MSG_IN_STALL",
+ "THEx_L2_RD_MSG_OUT",
+ "THEx_L2_RD_MSG_OUT_STALL",
+ "THEx_L2_WR_MSG_OUT",
+ "THEx_L2_ANY_LOOKUP",
+ "THEx_L2_READ_LOOKUP",
+ "THEx_L2_WRITE_LOOKUP",
+ "THEx_L2_EXT_SNOOP_LOOKUP",
+ "THEx_L2_EXT_READ",
+ "THEx_L2_EXT_READ_NOSNP",
+ "THEx_L2_EXT_READ_UNIQUE",
+ "THEx_L2_EXT_READ_BEATS",
+ "THEx_L2_EXT_AR_STALL",
+ "THEx_L2_EXT_AR_CNT_Q1",
+ "THEx_L2_EXT_AR_CNT_Q2",
+ "THEx_L2_EXT_AR_CNT_Q3",
+ "THEx_L2_EXT_RRESP_0_127",
+ "THEx_L2_EXT_RRESP_128_191",
+ "THEx_L2_EXT_RRESP_192_255",
+ "THEx_L2_EXT_RRESP_256_319",
+ "THEx_L2_EXT_RRESP_320_383",
+ "THEx_L2_EXT_WRITE",
+ "THEx_L2_EXT_WRITE_NOSNP_FULL",
+ "THEx_L2_EXT_WRITE_NOSNP_PTL",
+ "THEx_L2_EXT_WRITE_SNP_FULL",
+ "THEx_L2_EXT_WRITE_SNP_PTL",
+ "THEx_L2_EXT_WRITE_BEATS",
+ "THEx_L2_EXT_W_STALL",
+ "THEx_L2_EXT_AW_CNT_Q1",
+ "THEx_L2_EXT_AW_CNT_Q2",
+ "THEx_L2_EXT_AW_CNT_Q3",
+ "THEx_L2_EXT_SNOOP",
+ "THEx_L2_EXT_SNOOP_STALL",
+ "THEx_L2_EXT_SNOOP_RESP_CLEAN",
+ "THEx_L2_EXT_SNOOP_RESP_DATA",
+ "THEx_L2_EXT_SNOOP_INTERNAL",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+};
+
+#endif /* _KBASE_GATOR_HWCNT_NAMES_THEX_H_ */
diff --git a/midgard-0.r2p0/mali_kbase_gator_hwcnt_names_tmix.h b/midgard-0.r2p0/mali_kbase_gator_hwcnt_names_tmix.h
new file mode 100755
index 0000000..5ea0677
--- /dev/null
+++ b/midgard-0.r2p0/mali_kbase_gator_hwcnt_names_tmix.h
@@ -0,0 +1,291 @@
+/*
+ *
+ * (C) COPYRIGHT 2016 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained
+ * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
+ * Boston, MA 02110-1301, USA.
+ *
+ */
+
+
+
+/*
+ * This header was autogenerated, it should not be edited.
+ */
+
+#ifndef _KBASE_GATOR_HWCNT_NAMES_TMIX_H_
+#define _KBASE_GATOR_HWCNT_NAMES_TMIX_H_
+
+static const char * const hardware_counters_mali_tMIx[] = {
+ /* Performance counters for the Job Manager */
+ "",
+ "",
+ "",
+ "",
+ "TMIx_MESSAGES_SENT",
+ "TMIx_MESSAGES_RECEIVED",
+ "TMIx_GPU_ACTIVE",
+ "TMIx_IRQ_ACTIVE",
+ "TMIx_JS0_JOBS",
+ "TMIx_JS0_TASKS",
+ "TMIx_JS0_ACTIVE",
+ "",
+ "TMIx_JS0_WAIT_READ",
+ "TMIx_JS0_WAIT_ISSUE",
+ "TMIx_JS0_WAIT_DEPEND",
+ "TMIx_JS0_WAIT_FINISH",
+ "TMIx_JS1_JOBS",
+ "TMIx_JS1_TASKS",
+ "TMIx_JS1_ACTIVE",
+ "",
+ "TMIx_JS1_WAIT_READ",
+ "TMIx_JS1_WAIT_ISSUE",
+ "TMIx_JS1_WAIT_DEPEND",
+ "TMIx_JS1_WAIT_FINISH",
+ "TMIx_JS2_JOBS",
+ "TMIx_JS2_TASKS",
+ "TMIx_JS2_ACTIVE",
+ "",
+ "TMIx_JS2_WAIT_READ",
+ "TMIx_JS2_WAIT_ISSUE",
+ "TMIx_JS2_WAIT_DEPEND",
+ "TMIx_JS2_WAIT_FINISH",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+
+ /* Performance counters for the Tiler */
+ "",
+ "",
+ "",
+ "",
+ "TMIx_TILER_ACTIVE",
+ "TMIx_JOBS_PROCESSED",
+ "TMIx_TRIANGLES",
+ "TMIx_LINES",
+ "TMIx_POINTS",
+ "TMIx_FRONT_FACING",
+ "TMIx_BACK_FACING",
+ "TMIx_PRIM_VISIBLE",
+ "TMIx_PRIM_CULLED",
+ "TMIx_PRIM_CLIPPED",
+ "TMIx_PRIM_SAT_CULLED",
+ "",
+ "",
+ "TMIx_BUS_READ",
+ "",
+ "TMIx_BUS_WRITE",
+ "TMIx_LOADING_DESC",
+ "TMIx_IDVS_POS_SHAD_REQ",
+ "TMIx_IDVS_POS_SHAD_WAIT",
+ "TMIx_IDVS_POS_SHAD_STALL",
+ "TMIx_IDVS_POS_FIFO_FULL",
+ "TMIx_PREFETCH_STALL",
+ "TMIx_VCACHE_HIT",
+ "TMIx_VCACHE_MISS",
+ "TMIx_VCACHE_LINE_WAIT",
+ "TMIx_VFETCH_POS_READ_WAIT",
+ "TMIx_VFETCH_VERTEX_WAIT",
+ "TMIx_VFETCH_STALL",
+ "TMIx_PRIMASSY_STALL",
+ "TMIx_BBOX_GEN_STALL",
+ "TMIx_IDVS_VBU_HIT",
+ "TMIx_IDVS_VBU_MISS",
+ "TMIx_IDVS_VBU_LINE_DEALLOCATE",
+ "TMIx_IDVS_VAR_SHAD_REQ",
+ "TMIx_IDVS_VAR_SHAD_STALL",
+ "TMIx_BINNER_STALL",
+ "TMIx_ITER_STALL",
+ "TMIx_COMPRESS_MISS",
+ "TMIx_COMPRESS_STALL",
+ "TMIx_PCACHE_HIT",
+ "TMIx_PCACHE_MISS",
+ "TMIx_PCACHE_MISS_STALL",
+ "TMIx_PCACHE_EVICT_STALL",
+ "TMIx_PMGR_PTR_WR_STALL",
+ "TMIx_PMGR_PTR_RD_STALL",
+ "TMIx_PMGR_CMD_WR_STALL",
+ "TMIx_WRBUF_ACTIVE",
+ "TMIx_WRBUF_HIT",
+ "TMIx_WRBUF_MISS",
+ "TMIx_WRBUF_NO_FREE_LINE_STALL",
+ "TMIx_WRBUF_NO_AXI_ID_STALL",
+ "TMIx_WRBUF_AXI_STALL",
+ "",
+ "",
+ "",
+ "TMIx_UTLB_TRANS",
+ "TMIx_UTLB_TRANS_HIT",
+ "TMIx_UTLB_TRANS_STALL",
+ "TMIx_UTLB_TRANS_MISS_DELAY",
+ "TMIx_UTLB_MMU_REQ",
+
+ /* Performance counters for the Shader Core */
+ "",
+ "",
+ "",
+ "",
+ "TMIx_FRAG_ACTIVE",
+ "TMIx_FRAG_PRIMITIVES",
+ "TMIx_FRAG_PRIM_RAST",
+ "TMIx_FRAG_FPK_ACTIVE",
+ "TMIx_FRAG_STARVING",
+ "TMIx_FRAG_WARPS",
+ "TMIx_FRAG_PARTIAL_WARPS",
+ "TMIx_FRAG_QUADS_RAST",
+ "TMIx_FRAG_QUADS_EZS_TEST",
+ "TMIx_FRAG_QUADS_EZS_UPDATE",
+ "TMIx_FRAG_QUADS_EZS_KILL",
+ "TMIx_FRAG_LZS_TEST",
+ "TMIx_FRAG_LZS_KILL",
+ "",
+ "TMIx_FRAG_PTILES",
+ "TMIx_FRAG_TRANS_ELIM",
+ "TMIx_QUAD_FPK_KILLER",
+ "",
+ "TMIx_COMPUTE_ACTIVE",
+ "TMIx_COMPUTE_TASKS",
+ "TMIx_COMPUTE_WARPS",
+ "TMIx_COMPUTE_STARVING",
+ "TMIx_EXEC_CORE_ACTIVE",
+ "TMIx_EXEC_ACTIVE",
+ "TMIx_EXEC_INSTR_COUNT",
+ "TMIx_EXEC_INSTR_DIVERGED",
+ "TMIx_EXEC_INSTR_STARVING",
+ "TMIx_ARITH_INSTR_SINGLE_FMA",
+ "TMIx_ARITH_INSTR_DOUBLE",
+ "TMIx_ARITH_INSTR_MSG",
+ "TMIx_ARITH_INSTR_MSG_ONLY",
+ "TMIx_TEX_INSTR",
+ "TMIx_TEX_INSTR_MIPMAP",
+ "TMIx_TEX_INSTR_COMPRESSED",
+ "TMIx_TEX_INSTR_3D",
+ "TMIx_TEX_INSTR_TRILINEAR",
+ "TMIx_TEX_COORD_ISSUE",
+ "TMIx_TEX_COORD_STALL",
+ "TMIx_TEX_STARVE_CACHE",
+ "TMIx_TEX_STARVE_FILTER",
+ "TMIx_LS_MEM_READ_FULL",
+ "TMIx_LS_MEM_READ_SHORT",
+ "TMIx_LS_MEM_WRITE_FULL",
+ "TMIx_LS_MEM_WRITE_SHORT",
+ "TMIx_LS_MEM_ATOMIC",
+ "TMIx_VARY_INSTR",
+ "TMIx_VARY_SLOT_32",
+ "TMIx_VARY_SLOT_16",
+ "TMIx_ATTR_INSTR",
+ "TMIx_ARITH_INSTR_FP_MUL",
+ "TMIx_BEATS_RD_FTC",
+ "TMIx_BEATS_RD_FTC_EXT",
+ "TMIx_BEATS_RD_LSC",
+ "TMIx_BEATS_RD_LSC_EXT",
+ "TMIx_BEATS_RD_TEX",
+ "TMIx_BEATS_RD_TEX_EXT",
+ "TMIx_BEATS_RD_OTHER",
+ "TMIx_BEATS_WR_LSC",
+ "TMIx_BEATS_WR_TIB",
+ "",
+
+ /* Performance counters for the Memory System */
+ "",
+ "",
+ "",
+ "",
+ "TMIx_MMU_REQUESTS",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "TMIx_L2_RD_MSG_IN",
+ "TMIx_L2_RD_MSG_IN_STALL",
+ "TMIx_L2_WR_MSG_IN",
+ "TMIx_L2_WR_MSG_IN_STALL",
+ "TMIx_L2_SNP_MSG_IN",
+ "TMIx_L2_SNP_MSG_IN_STALL",
+ "TMIx_L2_RD_MSG_OUT",
+ "TMIx_L2_RD_MSG_OUT_STALL",
+ "TMIx_L2_WR_MSG_OUT",
+ "TMIx_L2_ANY_LOOKUP",
+ "TMIx_L2_READ_LOOKUP",
+ "TMIx_L2_WRITE_LOOKUP",
+ "TMIx_L2_EXT_SNOOP_LOOKUP",
+ "TMIx_L2_EXT_READ",
+ "TMIx_L2_EXT_READ_NOSNP",
+ "TMIx_L2_EXT_READ_UNIQUE",
+ "TMIx_L2_EXT_READ_BEATS",
+ "TMIx_L2_EXT_AR_STALL",
+ "TMIx_L2_EXT_AR_CNT_Q1",
+ "TMIx_L2_EXT_AR_CNT_Q2",
+ "TMIx_L2_EXT_AR_CNT_Q3",
+ "TMIx_L2_EXT_RRESP_0_127",
+ "TMIx_L2_EXT_RRESP_128_191",
+ "TMIx_L2_EXT_RRESP_192_255",
+ "TMIx_L2_EXT_RRESP_256_319",
+ "TMIx_L2_EXT_RRESP_320_383",
+ "TMIx_L2_EXT_WRITE",
+ "TMIx_L2_EXT_WRITE_NOSNP_FULL",
+ "TMIx_L2_EXT_WRITE_NOSNP_PTL",
+ "TMIx_L2_EXT_WRITE_SNP_FULL",
+ "TMIx_L2_EXT_WRITE_SNP_PTL",
+ "TMIx_L2_EXT_WRITE_BEATS",
+ "TMIx_L2_EXT_W_STALL",
+ "TMIx_L2_EXT_AW_CNT_Q1",
+ "TMIx_L2_EXT_AW_CNT_Q2",
+ "TMIx_L2_EXT_AW_CNT_Q3",
+ "TMIx_L2_EXT_SNOOP",
+ "TMIx_L2_EXT_SNOOP_STALL",
+ "TMIx_L2_EXT_SNOOP_RESP_CLEAN",
+ "TMIx_L2_EXT_SNOOP_RESP_DATA",
+ "TMIx_L2_EXT_SNOOP_INTERNAL",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+};
+
+#endif /* _KBASE_GATOR_HWCNT_NAMES_TMIX_H_ */
diff --git a/midgard-0.r2p0/mali_kbase_gpu_id.h b/midgard-0.r2p0/mali_kbase_gpu_id.h
new file mode 100755
index 0000000..a3377b2
--- /dev/null
+++ b/midgard-0.r2p0/mali_kbase_gpu_id.h
@@ -0,0 +1,113 @@
+/*
+ *
+ * (C) COPYRIGHT 2015-2016 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained
+ * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
+ * Boston, MA 02110-1301, USA.
+ *
+ */
+
+
+#ifndef _KBASE_GPU_ID_H_
+#define _KBASE_GPU_ID_H_
+
+/* GPU_ID register */
+#define GPU_ID_VERSION_STATUS_SHIFT 0
+#define GPU_ID_VERSION_MINOR_SHIFT 4
+#define GPU_ID_VERSION_MAJOR_SHIFT 12
+#define GPU_ID_VERSION_PRODUCT_ID_SHIFT 16
+#define GPU_ID_VERSION_STATUS (0xF << GPU_ID_VERSION_STATUS_SHIFT)
+#define GPU_ID_VERSION_MINOR (0xFF << GPU_ID_VERSION_MINOR_SHIFT)
+#define GPU_ID_VERSION_MAJOR (0xF << GPU_ID_VERSION_MAJOR_SHIFT)
+#define GPU_ID_VERSION_PRODUCT_ID (0xFFFF << GPU_ID_VERSION_PRODUCT_ID_SHIFT)
+
+/* Values for GPU_ID_VERSION_PRODUCT_ID bitfield */
+#define GPU_ID_PI_T60X 0x6956
+#define GPU_ID_PI_T62X 0x0620
+#define GPU_ID_PI_T76X 0x0750
+#define GPU_ID_PI_T72X 0x0720
+#define GPU_ID_PI_TFRX 0x0880
+#define GPU_ID_PI_T86X 0x0860
+#define GPU_ID_PI_T82X 0x0820
+#define GPU_ID_PI_T83X 0x0830
+
+/* New GPU ID format when PRODUCT_ID is >= 0x1000 (and not 0x6956) */
+#define GPU_ID_PI_NEW_FORMAT_START 0x1000
+#define GPU_ID_IS_NEW_FORMAT(product_id) ((product_id) != GPU_ID_PI_T60X && \
+ (product_id) >= \
+ GPU_ID_PI_NEW_FORMAT_START)
+
+#define GPU_ID2_VERSION_STATUS_SHIFT 0
+#define GPU_ID2_VERSION_MINOR_SHIFT 4
+#define GPU_ID2_VERSION_MAJOR_SHIFT 12
+#define GPU_ID2_PRODUCT_MAJOR_SHIFT 16
+#define GPU_ID2_ARCH_REV_SHIFT 20
+#define GPU_ID2_ARCH_MINOR_SHIFT 24
+#define GPU_ID2_ARCH_MAJOR_SHIFT 28
+#define GPU_ID2_VERSION_STATUS (0xF << GPU_ID2_VERSION_STATUS_SHIFT)
+#define GPU_ID2_VERSION_MINOR (0xFF << GPU_ID2_VERSION_MINOR_SHIFT)
+#define GPU_ID2_VERSION_MAJOR (0xF << GPU_ID2_VERSION_MAJOR_SHIFT)
+#define GPU_ID2_PRODUCT_MAJOR (0xF << GPU_ID2_PRODUCT_MAJOR_SHIFT)
+#define GPU_ID2_ARCH_REV (0xF << GPU_ID2_ARCH_REV_SHIFT)
+#define GPU_ID2_ARCH_MINOR (0xF << GPU_ID2_ARCH_MINOR_SHIFT)
+#define GPU_ID2_ARCH_MAJOR (0xF << GPU_ID2_ARCH_MAJOR_SHIFT)
+#define GPU_ID2_PRODUCT_MODEL (GPU_ID2_ARCH_MAJOR | GPU_ID2_PRODUCT_MAJOR)
+
+/* Helper macro to create a partial GPU_ID (new format) that defines
+ a product ignoring its version. */
+#define GPU_ID2_PRODUCT_MAKE(arch_major, arch_minor, arch_rev, product_major) \
+ (((arch_major) << GPU_ID2_ARCH_MAJOR_SHIFT) | \
+ ((arch_minor) << GPU_ID2_ARCH_MINOR_SHIFT) | \
+ ((arch_rev) << GPU_ID2_ARCH_REV_SHIFT) | \
+ ((product_major) << GPU_ID2_PRODUCT_MAJOR_SHIFT))
+
+/* Helper macro to create a partial GPU_ID (new format) that specifies the
+ revision (major, minor, status) of a product */
+#define GPU_ID2_VERSION_MAKE(version_major, version_minor, version_status) \
+ (((version_major) << GPU_ID2_VERSION_MAJOR_SHIFT) | \
+ ((version_minor) << GPU_ID2_VERSION_MINOR_SHIFT) | \
+ ((version_status) << GPU_ID2_VERSION_STATUS_SHIFT))
+
+/* Helper macro to create a complete GPU_ID (new format) */
+#define GPU_ID2_MAKE(arch_major, arch_minor, arch_rev, product_major, \
+ version_major, version_minor, version_status) \
+ (GPU_ID2_PRODUCT_MAKE(arch_major, arch_minor, arch_rev, \
+ product_major) | \
+ GPU_ID2_VERSION_MAKE(version_major, version_minor, \
+ version_status))
+
+/* Helper macro to create a partial GPU_ID (new format) that identifies
+ a particular GPU model by its arch_major and product_major. */
+#define GPU_ID2_MODEL_MAKE(arch_major, product_major) \
+ (((arch_major) << GPU_ID2_ARCH_MAJOR_SHIFT) | \
+ ((product_major) << GPU_ID2_PRODUCT_MAJOR_SHIFT))
+
+/* Strip off the non-relevant bits from a product_id value and make it suitable
+ for comparison against the GPU_ID2_PRODUCT_xxx values which identify a GPU
+ model. */
+#define GPU_ID2_MODEL_MATCH_VALUE(product_id) \
+ (((product_id) << GPU_ID2_PRODUCT_MAJOR_SHIFT) & \
+ GPU_ID2_PRODUCT_MODEL)
+
+#define GPU_ID2_PRODUCT_TMIX GPU_ID2_MODEL_MAKE(6, 0)
+#define GPU_ID2_PRODUCT_THEX GPU_ID2_MODEL_MAKE(6, 1)
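
A hedged sketch of how the macros above combine when classifying a raw GPU_ID register value; gpu_is_model() is invented for illustration and uses only the masks and helpers defined in this file.

/* Illustrative only: does a raw GPU_ID value name the given new-format
 * product model (e.g. GPU_ID2_PRODUCT_TMIX)? */
static inline bool gpu_is_model(u32 gpu_id, u32 model)
{
	u32 product_id = (gpu_id & GPU_ID_VERSION_PRODUCT_ID) >>
			GPU_ID_VERSION_PRODUCT_ID_SHIFT;

	if (!GPU_ID_IS_NEW_FORMAT(product_id))
		return false;

	/* Shift the product bits back into place and keep only the
	 * arch_major/product_major fields for the comparison. */
	return GPU_ID2_MODEL_MATCH_VALUE(product_id) == model;
}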
+
+/* Values for GPU_ID_VERSION_STATUS field for PRODUCT_ID GPU_ID_PI_T60X */
+#define GPU_ID_S_15DEV0 0x1
+#define GPU_ID_S_EAC 0x2
+
+/* Helper macro to create a GPU_ID assuming valid values for id, major,
+ minor, status */
+#define GPU_ID_MAKE(id, major, minor, status) \
+ (((id) << GPU_ID_VERSION_PRODUCT_ID_SHIFT) | \
+ ((major) << GPU_ID_VERSION_MAJOR_SHIFT) | \
+ ((minor) << GPU_ID_VERSION_MINOR_SHIFT) | \
+ ((status) << GPU_ID_VERSION_STATUS_SHIFT))
+
+#endif /* _KBASE_GPU_ID_H_ */
diff --git a/midgard-0.r2p0/mali_kbase_gpu_memory_debugfs.c b/midgard-0.r2p0/mali_kbase_gpu_memory_debugfs.c
new file mode 100755
index 0000000..6df0a1c
--- /dev/null
+++ b/midgard-0.r2p0/mali_kbase_gpu_memory_debugfs.c
@@ -0,0 +1,97 @@
+/*
+ *
+ * (C) COPYRIGHT 2012-2016 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained
+ * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
+ * Boston, MA 02110-1301, USA.
+ *
+ */
+
+
+
+#include <mali_kbase.h>
+
+#ifdef CONFIG_DEBUG_FS
+/** Show callback for the @c gpu_memory debugfs file.
+ *
+ * This function is called to get the contents of the @c gpu_memory debugfs
+ * file. This is a report of current gpu memory usage.
+ *
+ * @param sfile The debugfs entry
+ * @param data Data associated with the entry
+ *
+ * @return 0 if the data was successfully printed to the debugfs entry file,
+ * -1 if an error was encountered
+ */
+
+static int kbasep_gpu_memory_seq_show(struct seq_file *sfile, void *data)
+{
+ struct list_head *entry;
+ const struct list_head *kbdev_list;
+
+ kbdev_list = kbase_dev_list_get();
+ list_for_each(entry, kbdev_list) {
+ struct kbase_device *kbdev = NULL;
+ struct kbasep_kctx_list_element *element;
+
+ kbdev = list_entry(entry, struct kbase_device, entry);
+ /* output the total memory usage and cap for this device */
+ seq_printf(sfile, "%-16s %10u\n",
+ kbdev->devname,
+ atomic_read(&(kbdev->memdev.used_pages)));
+ mutex_lock(&kbdev->kctx_list_lock);
+ list_for_each_entry(element, &kbdev->kctx_list, link) {
+ /* output the memory usage and cap for each kctx
+ * opened on this device */
+ seq_printf(sfile, " %s-0x%p %10u\n",
+ "kctx",
+ element->kctx,
+ atomic_read(&(element->kctx->used_pages)));
+ }
+ mutex_unlock(&kbdev->kctx_list_lock);
+ }
+ kbase_dev_list_put(kbdev_list);
+ return 0;
+}
+
+/*
+ * File operations related to debugfs entry for gpu_memory
+ */
+static int kbasep_gpu_memory_debugfs_open(struct inode *in, struct file *file)
+{
+	return single_open(file, kbasep_gpu_memory_seq_show, NULL);
+}
+
+static const struct file_operations kbasep_gpu_memory_debugfs_fops = {
+ .open = kbasep_gpu_memory_debugfs_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+/*
+ * Initialize debugfs entry for gpu_memory
+ */
+void kbasep_gpu_memory_debugfs_init(struct kbase_device *kbdev)
+{
+ debugfs_create_file("gpu_memory", S_IRUGO,
+ kbdev->mali_debugfs_directory, NULL,
+ &kbasep_gpu_memory_debugfs_fops);
+ return;
+}
+
+#else
+/*
+ * Stub functions for when debugfs is disabled
+ */
+void kbasep_gpu_memory_debugfs_init(struct kbase_device *kbdev)
+{
+ return;
+}
+#endif
diff --git a/midgard-0.r2p0/mali_kbase_gpu_memory_debugfs.h b/midgard-0.r2p0/mali_kbase_gpu_memory_debugfs.h
new file mode 100755
index 0000000..7045693
--- /dev/null
+++ b/midgard-0.r2p0/mali_kbase_gpu_memory_debugfs.h
@@ -0,0 +1,37 @@
+/*
+ *
+ * (C) COPYRIGHT 2012-2014, 2016 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained
+ * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
+ * Boston, MA 02110-1301, USA.
+ *
+ */
+
+
+
+
+
+/**
+ * @file mali_kbase_gpu_memory_debugfs.h
+ * Header file for gpu_memory entry in debugfs
+ *
+ */
+
+#ifndef _KBASE_GPU_MEMORY_DEBUGFS_H
+#define _KBASE_GPU_MEMORY_DEBUGFS_H
+
+#include <linux/debugfs.h>
+#include <linux/seq_file.h>
+
+/**
+ * @brief Initialize gpu_memory debugfs entry
+ */
+void kbasep_gpu_memory_debugfs_init(struct kbase_device *kbdev);
+
+#endif /*_KBASE_GPU_MEMORY_DEBUGFS_H*/
diff --git a/midgard-0.r2p0/mali_kbase_gpuprops.c b/midgard-0.r2p0/mali_kbase_gpuprops.c
new file mode 100755
index 0000000..7f77dba
--- /dev/null
+++ b/midgard-0.r2p0/mali_kbase_gpuprops.c
@@ -0,0 +1,314 @@
+/*
+ *
+ * (C) COPYRIGHT 2011-2015 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained
+ * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
+ * Boston, MA 02110-1301, USA.
+ *
+ */
+
+
+
+
+
+/*
+ * Base kernel property query APIs
+ */
+
+#include <mali_kbase.h>
+#include <mali_midg_regmap.h>
+#include <mali_kbase_gpuprops.h>
+#include <mali_kbase_config_defaults.h>
+#include <mali_kbase_hwaccess_gpuprops.h>
+#include <linux/clk.h>
+
+/**
+ * KBASE_UBFX32 - Extracts bits from a 32-bit bitfield.
+ * @value: The value from which to extract bits.
+ * @offset: The first bit to extract (0 being the LSB).
+ * @size: The number of bits to extract.
+ *
+ * Context: @offset + @size <= 32.
+ *
+ * Return: Bits [@offset, @offset + @size) from @value.
+ */
+/* from mali_cdsb.h */
+#define KBASE_UBFX32(value, offset, size) \
+ (((u32)(value) >> (u32)(offset)) & (u32)((1ULL << (u32)(size)) - 1))
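+
+/*
+ * Worked example (for illustration only): with value = 0x12345678,
+ * KBASE_UBFX32(value, 4, 8) first shifts right by 4 to give 0x01234567, then
+ * masks with ((1ULL << 8) - 1) = 0xff, yielding 0x67, which is bits [4, 12)
+ * of the original value.
+ */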
+
+int kbase_gpuprops_uk_get_props(struct kbase_context *kctx, struct kbase_uk_gpuprops * const kbase_props)
+{
+ kbase_gpu_clk_speed_func get_gpu_speed_mhz;
+ u32 gpu_speed_mhz;
+ int rc = 1;
+
+ KBASE_DEBUG_ASSERT(NULL != kctx);
+ KBASE_DEBUG_ASSERT(NULL != kbase_props);
+
+ /* Current GPU speed is requested from the system integrator via the GPU_SPEED_FUNC function.
+ * If that function fails, or the function is not provided by the system integrator, we report the maximum
+ * GPU speed as specified by GPU_FREQ_KHZ_MAX.
+ */
+ get_gpu_speed_mhz = (kbase_gpu_clk_speed_func) GPU_SPEED_FUNC;
+ if (get_gpu_speed_mhz != NULL) {
+ rc = get_gpu_speed_mhz(&gpu_speed_mhz);
+#ifdef CONFIG_MALI_DEBUG
+ /* Issue a warning message when the reported GPU speed falls outside the min/max range */
+ if (rc == 0) {
+ u32 gpu_speed_khz = gpu_speed_mhz * 1000;
+
+ if (gpu_speed_khz < kctx->kbdev->gpu_props.props.core_props.gpu_freq_khz_min ||
+ gpu_speed_khz > kctx->kbdev->gpu_props.props.core_props.gpu_freq_khz_max)
+ dev_warn(kctx->kbdev->dev, "GPU Speed is outside of min/max range (got %lu kHz, min %lu kHz, max %lu kHz)\n",
+ (unsigned long)gpu_speed_khz,
+ (unsigned long)kctx->kbdev->gpu_props.props.core_props.gpu_freq_khz_min,
+ (unsigned long)kctx->kbdev->gpu_props.props.core_props.gpu_freq_khz_max);
+ }
+#endif /* CONFIG_MALI_DEBUG */
+ }
+ if (kctx->kbdev->clock) {
+ gpu_speed_mhz = clk_get_rate(kctx->kbdev->clock) / 1000000;
+ rc = 0;
+ }
+ if (rc != 0)
+ gpu_speed_mhz = kctx->kbdev->gpu_props.props.core_props.gpu_freq_khz_max / 1000;
+
+ kctx->kbdev->gpu_props.props.core_props.gpu_speed_mhz = gpu_speed_mhz;
+
+ memcpy(&kbase_props->props, &kctx->kbdev->gpu_props.props, sizeof(kbase_props->props));
+
+ /* Before API 8.2, callers expected L3 cache info here, which was always 0 */
+ if (kctx->api_version < KBASE_API_VERSION(8, 2))
+ kbase_props->props.raw_props.suspend_size = 0;
+
+ return 0;
+}
+
+static void kbase_gpuprops_construct_coherent_groups(base_gpu_props * const props)
+{
+ struct mali_base_gpu_coherent_group *current_group;
+ u64 group_present;
+ u64 group_mask;
+ u64 first_set, first_set_prev;
+ u32 num_groups = 0;
+
+ KBASE_DEBUG_ASSERT(NULL != props);
+
+ props->coherency_info.coherency = props->raw_props.mem_features;
+ props->coherency_info.num_core_groups = hweight64(props->raw_props.l2_present);
+
+ if (props->coherency_info.coherency & GROUPS_L2_COHERENT) {
+ /* Group is l2 coherent */
+ group_present = props->raw_props.l2_present;
+ } else {
+ /* Group is l1 coherent */
+ group_present = props->raw_props.shader_present;
+ }
+
+ /*
+ * The coherent group mask can be computed from the l2 present
+ * register.
+ *
+ * For the coherent group n:
+ * group_mask[n] = (first_set[n] - 1) & ~(first_set[n-1] - 1)
+ * where first_set is group_present with only its nth set-bit kept
+ * (i.e. the position from where a new group starts).
+ *
+ * For instance if the groups are l2 coherent and l2_present=0x0..01111:
+ * The first mask is:
+ * group_mask[1] = (first_set[1] - 1) & ~(first_set[0] - 1)
+ * = (0x0..010 - 1) & ~(0x0..01 - 1)
+ * = 0x0..00f
+ * The second mask is:
+ * group_mask[2] = (first_set[2] - 1) & ~(first_set[1] - 1)
+ * = (0x0..100 - 1) & ~(0x0..010 - 1)
+ * = 0x0..0f0
+ * And so on until all the bits from group_present have been cleared
+ * (i.e. there is no group left).
+ */
+
+ current_group = props->coherency_info.group;
+ first_set = group_present & ~(group_present - 1);
+
+ while (group_present != 0 && num_groups < BASE_MAX_COHERENT_GROUPS) {
+ group_present -= first_set; /* Clear the current group bit */
+ first_set_prev = first_set;
+
+ first_set = group_present & ~(group_present - 1);
+ group_mask = (first_set - 1) & ~(first_set_prev - 1);
+
+ /* Populate the coherent_group structure for each group */
+ current_group->core_mask = group_mask & props->raw_props.shader_present;
+ current_group->num_cores = hweight64(current_group->core_mask);
+
+ num_groups++;
+ current_group++;
+ }
+
+ if (group_present != 0)
+ pr_warn("Too many coherent groups (keeping only %d groups).\n", BASE_MAX_COHERENT_GROUPS);
+
+ props->coherency_info.num_groups = num_groups;
+}
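+
+/*
+ * Standalone user-space sketch (illustration only, not driver code; the
+ * group_present/shader_present values below are made up) reproducing the
+ * mask derivation described above:
+ *
+ *   #include <stdio.h>
+ *   #include <stdint.h>
+ *
+ *   int main(void)
+ *   {
+ *           uint64_t group_present = 0x1111;   // assume 4 L2-coherent groups
+ *           uint64_t shader_present = 0xffff;  // assume 16 shader cores
+ *           uint64_t first_set = group_present & ~(group_present - 1);
+ *           uint64_t first_set_prev, group_mask;
+ *           int n = 0;
+ *
+ *           while (group_present != 0) {
+ *                   group_present -= first_set;  // clear current group bit
+ *                   first_set_prev = first_set;
+ *                   first_set = group_present & ~(group_present - 1);
+ *                   group_mask = (first_set - 1) & ~(first_set_prev - 1);
+ *                   printf("group %d core_mask 0x%llx\n", n++,
+ *                          (unsigned long long)(group_mask & shader_present));
+ *           }
+ *           return 0;  // prints 0xf, 0xf0, 0xf00, 0xf000 for these values
+ *   }
+ */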
+
+/**
+ * kbase_gpuprops_get_props - Get the GPU configuration
+ * @gpu_props: The &base_gpu_props structure
+ * @kbdev: The &struct kbase_device structure for the device
+ *
+ * Fill the &base_gpu_props structure with values from the GPU configuration
+ * registers. Only the raw properties are filled in this function
+ */
+static void kbase_gpuprops_get_props(base_gpu_props * const gpu_props, struct kbase_device *kbdev)
+{
+ struct kbase_gpuprops_regdump regdump;
+ int i;
+
+ KBASE_DEBUG_ASSERT(NULL != kbdev);
+ KBASE_DEBUG_ASSERT(NULL != gpu_props);
+
+ /* Dump relevant registers */
+ kbase_backend_gpuprops_get(kbdev, &regdump);
+
+ gpu_props->raw_props.gpu_id = regdump.gpu_id;
+ gpu_props->raw_props.tiler_features = regdump.tiler_features;
+ gpu_props->raw_props.mem_features = regdump.mem_features;
+ gpu_props->raw_props.mmu_features = regdump.mmu_features;
+ gpu_props->raw_props.l2_features = regdump.l2_features;
+ gpu_props->raw_props.suspend_size = regdump.suspend_size;
+
+ gpu_props->raw_props.as_present = regdump.as_present;
+ gpu_props->raw_props.js_present = regdump.js_present;
+ gpu_props->raw_props.shader_present = ((u64) regdump.shader_present_hi << 32) + regdump.shader_present_lo;
+ gpu_props->raw_props.tiler_present = ((u64) regdump.tiler_present_hi << 32) + regdump.tiler_present_lo;
+ gpu_props->raw_props.l2_present = ((u64) regdump.l2_present_hi << 32) + regdump.l2_present_lo;
+
+ for (i = 0; i < GPU_MAX_JOB_SLOTS; i++)
+ gpu_props->raw_props.js_features[i] = regdump.js_features[i];
+
+ for (i = 0; i < BASE_GPU_NUM_TEXTURE_FEATURES_REGISTERS; i++)
+ gpu_props->raw_props.texture_features[i] = regdump.texture_features[i];
+
+ gpu_props->raw_props.thread_max_barrier_size = regdump.thread_max_barrier_size;
+ gpu_props->raw_props.thread_max_threads = regdump.thread_max_threads;
+ gpu_props->raw_props.thread_max_workgroup_size = regdump.thread_max_workgroup_size;
+ gpu_props->raw_props.thread_features = regdump.thread_features;
+}
+
+/**
+ * kbase_gpuprops_calculate_props - Calculate the derived properties
+ * @gpu_props: The &base_gpu_props structure
+ * @kbdev: The &struct kbase_device structure for the device
+ *
+ * Fill the &base_gpu_props structure with values derived from the GPU
+ * configuration registers
+ */
+static void kbase_gpuprops_calculate_props(base_gpu_props * const gpu_props, struct kbase_device *kbdev)
+{
+ int i;
+
+ /* Populate the base_gpu_props structure */
+ gpu_props->core_props.version_status = KBASE_UBFX32(gpu_props->raw_props.gpu_id, 0U, 4);
+ gpu_props->core_props.minor_revision = KBASE_UBFX32(gpu_props->raw_props.gpu_id, 4U, 8);
+ gpu_props->core_props.major_revision = KBASE_UBFX32(gpu_props->raw_props.gpu_id, 12U, 4);
+ gpu_props->core_props.product_id = KBASE_UBFX32(gpu_props->raw_props.gpu_id, 16U, 16);
+ gpu_props->core_props.log2_program_counter_size = KBASE_GPU_PC_SIZE_LOG2;
+ gpu_props->core_props.gpu_available_memory_size = totalram_pages << PAGE_SHIFT;
+
+ for (i = 0; i < BASE_GPU_NUM_TEXTURE_FEATURES_REGISTERS; i++)
+ gpu_props->core_props.texture_features[i] = gpu_props->raw_props.texture_features[i];
+
+ gpu_props->l2_props.log2_line_size = KBASE_UBFX32(gpu_props->raw_props.l2_features, 0U, 8);
+ gpu_props->l2_props.log2_cache_size = KBASE_UBFX32(gpu_props->raw_props.l2_features, 16U, 8);
+
+ /* The field with the number of L2 slices was added to the MEM_FEATURES
+ * register from t76x onwards. The code below assumes that, for older GPUs,
+ * the reserved bits read back as zero. */
+ gpu_props->l2_props.num_l2_slices =
+ KBASE_UBFX32(gpu_props->raw_props.mem_features, 8U, 4) + 1;
+
+ gpu_props->tiler_props.bin_size_bytes = 1 << KBASE_UBFX32(gpu_props->raw_props.tiler_features, 0U, 6);
+ gpu_props->tiler_props.max_active_levels = KBASE_UBFX32(gpu_props->raw_props.tiler_features, 8U, 4);
+
+ if (gpu_props->raw_props.thread_max_threads == 0)
+ gpu_props->thread_props.max_threads = THREAD_MT_DEFAULT;
+ else
+ gpu_props->thread_props.max_threads = gpu_props->raw_props.thread_max_threads;
+
+ if (gpu_props->raw_props.thread_max_workgroup_size == 0)
+ gpu_props->thread_props.max_workgroup_size = THREAD_MWS_DEFAULT;
+ else
+ gpu_props->thread_props.max_workgroup_size = gpu_props->raw_props.thread_max_workgroup_size;
+
+ if (gpu_props->raw_props.thread_max_barrier_size == 0)
+ gpu_props->thread_props.max_barrier_size = THREAD_MBS_DEFAULT;
+ else
+ gpu_props->thread_props.max_barrier_size = gpu_props->raw_props.thread_max_barrier_size;
+
+ gpu_props->thread_props.max_registers = KBASE_UBFX32(gpu_props->raw_props.thread_features, 0U, 16);
+ gpu_props->thread_props.max_task_queue = KBASE_UBFX32(gpu_props->raw_props.thread_features, 16U, 8);
+ gpu_props->thread_props.max_thread_group_split = KBASE_UBFX32(gpu_props->raw_props.thread_features, 24U, 6);
+ gpu_props->thread_props.impl_tech = KBASE_UBFX32(gpu_props->raw_props.thread_features, 30U, 2);
+
+ /* If values are not specified, then use defaults */
+ if (gpu_props->thread_props.max_registers == 0) {
+ gpu_props->thread_props.max_registers = THREAD_MR_DEFAULT;
+ gpu_props->thread_props.max_task_queue = THREAD_MTQ_DEFAULT;
+ gpu_props->thread_props.max_thread_group_split = THREAD_MTGS_DEFAULT;
+ }
+ /* Initialize the coherent_group structure for each group */
+ kbase_gpuprops_construct_coherent_groups(gpu_props);
+}
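+
+/*
+ * Worked example of the gpu_id decode above (the value is made up for
+ * illustration): for raw_props.gpu_id = 0x08201021,
+ *   product_id     = KBASE_UBFX32(gpu_id, 16U, 16) = 0x0820
+ *   major_revision = KBASE_UBFX32(gpu_id, 12U,  4) = 0x1
+ *   minor_revision = KBASE_UBFX32(gpu_id,  4U,  8) = 0x02
+ *   version_status = KBASE_UBFX32(gpu_id,  0U,  4) = 0x1
+ */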
+
+void kbase_gpuprops_set(struct kbase_device *kbdev)
+{
+ struct kbase_gpu_props *gpu_props;
+ struct gpu_raw_gpu_props *raw;
+
+ KBASE_DEBUG_ASSERT(NULL != kbdev);
+ gpu_props = &kbdev->gpu_props;
+ raw = &gpu_props->props.raw_props;
+
+ /* Initialize the base_gpu_props structure from the hardware */
+ kbase_gpuprops_get_props(&gpu_props->props, kbdev);
+
+ /* Populate the derived properties */
+ kbase_gpuprops_calculate_props(&gpu_props->props, kbdev);
+
+ /* Populate kbase-only fields */
+ gpu_props->l2_props.associativity = KBASE_UBFX32(raw->l2_features, 8U, 8);
+ gpu_props->l2_props.external_bus_width = KBASE_UBFX32(raw->l2_features, 24U, 8);
+
+ gpu_props->mem.core_group = KBASE_UBFX32(raw->mem_features, 0U, 1);
+
+ gpu_props->mmu.va_bits = KBASE_UBFX32(raw->mmu_features, 0U, 8);
+ gpu_props->mmu.pa_bits = KBASE_UBFX32(raw->mmu_features, 8U, 8);
+
+ gpu_props->num_cores = hweight64(raw->shader_present);
+ gpu_props->num_core_groups = hweight64(raw->l2_present);
+ gpu_props->num_address_spaces = hweight32(raw->as_present);
+ gpu_props->num_job_slots = hweight32(raw->js_present);
+}
+
+void kbase_gpuprops_set_features(struct kbase_device *kbdev)
+{
+ base_gpu_props *gpu_props;
+ struct kbase_gpuprops_regdump regdump;
+
+ gpu_props = &kbdev->gpu_props.props;
+
+ /* Dump relevant registers */
+ kbase_backend_gpuprops_get_features(kbdev, &regdump);
+
+ /*
+ * Copy the raw value from the register, later this will get turned
+ * into the selected coherency mode.
+ */
+ gpu_props->raw_props.coherency_mode = regdump.coherency_features;
+}
diff --git a/midgard-0.r2p0/mali_kbase_gpuprops.h b/midgard-0.r2p0/mali_kbase_gpuprops.h
new file mode 100755
index 0000000..f3c95cc
--- /dev/null
+++ b/midgard-0.r2p0/mali_kbase_gpuprops.h
@@ -0,0 +1,64 @@
+/*
+ *
+ * (C) COPYRIGHT 2011-2015 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained
+ * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
+ * Boston, MA 02110-1301, USA.
+ *
+ */
+
+
+
+
+
+/**
+ * @file mali_kbase_gpuprops.h
+ * Base kernel property query APIs
+ */
+
+#ifndef _KBASE_GPUPROPS_H_
+#define _KBASE_GPUPROPS_H_
+
+#include "mali_kbase_gpuprops_types.h"
+
+/* Forward definition - see mali_kbase.h */
+struct kbase_device;
+
+/**
+ * @brief Set up Kbase GPU properties.
+ *
+ * Set up Kbase GPU properties with information from the GPU registers
+ *
+ * @param kbdev The struct kbase_device structure for the device
+ */
+void kbase_gpuprops_set(struct kbase_device *kbdev);
+
+/**
+ * kbase_gpuprops_set_features - Set up Kbase GPU properties
+ * @kbdev: Device pointer
+ *
+ * This function sets up GPU properties that are dependent on the hardware
+ * features bitmask. This function must be preceded by a call to
+ * kbase_hw_set_features_mask().
+ */
+void kbase_gpuprops_set_features(struct kbase_device *kbdev);
+
+/**
+ * @brief Provide GPU properties to user side through the UK call.
+ *
+ * Fill the struct kbase_uk_gpuprops with values from GPU configuration registers.
+ *
+ * @param kctx The struct kbase_context structure
+ * @param kbase_props A copy of the struct kbase_uk_gpuprops structure from userspace
+ *
+ * @return 0 on success. Any other value indicates failure.
+ */
+int kbase_gpuprops_uk_get_props(struct kbase_context *kctx, struct kbase_uk_gpuprops * const kbase_props);
+
+#endif /* _KBASE_GPUPROPS_H_ */
diff --git a/midgard-0.r2p0/mali_kbase_gpuprops_types.h b/midgard-0.r2p0/mali_kbase_gpuprops_types.h
new file mode 100755
index 0000000..f42e91b
--- /dev/null
+++ b/midgard-0.r2p0/mali_kbase_gpuprops_types.h
@@ -0,0 +1,92 @@
+/*
+ *
+ * (C) COPYRIGHT 2011-2016 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained
+ * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
+ * Boston, MA 02110-1301, USA.
+ *
+ */
+
+
+
+
+
+/**
+ * @file mali_kbase_gpuprops_types.h
+ * Base kernel property query APIs
+ */
+
+#ifndef _KBASE_GPUPROPS_TYPES_H_
+#define _KBASE_GPUPROPS_TYPES_H_
+
+#include "mali_base_kernel.h"
+
+#define KBASE_GPU_SPEED_MHZ 123
+#define KBASE_GPU_PC_SIZE_LOG2 24U
+
+struct kbase_gpuprops_regdump {
+ u32 gpu_id;
+ u32 l2_features;
+ u32 suspend_size; /* API 8.2+ */
+ u32 tiler_features;
+ u32 mem_features;
+ u32 mmu_features;
+ u32 as_present;
+ u32 js_present;
+ u32 thread_max_threads;
+ u32 thread_max_workgroup_size;
+ u32 thread_max_barrier_size;
+ u32 thread_features;
+ u32 texture_features[BASE_GPU_NUM_TEXTURE_FEATURES_REGISTERS];
+ u32 js_features[GPU_MAX_JOB_SLOTS];
+ u32 shader_present_lo;
+ u32 shader_present_hi;
+ u32 tiler_present_lo;
+ u32 tiler_present_hi;
+ u32 l2_present_lo;
+ u32 l2_present_hi;
+ u32 coherency_features;
+};
+
+struct kbase_gpu_cache_props {
+ u8 associativity;
+ u8 external_bus_width;
+};
+
+struct kbase_gpu_mem_props {
+ u8 core_group;
+};
+
+struct kbase_gpu_mmu_props {
+ u8 va_bits;
+ u8 pa_bits;
+};
+
+struct kbase_gpu_props {
+ /* kernel-only properties */
+ u8 num_cores;
+ u8 num_core_groups;
+ u8 num_address_spaces;
+ u8 num_job_slots;
+
+ struct kbase_gpu_cache_props l2_props;
+
+ struct kbase_gpu_mem_props mem;
+ struct kbase_gpu_mmu_props mmu;
+
+ /**
+ * Implementation specific irq throttle value (us), should be adjusted during integration.
+ */
+ int irq_throttle_time_us;
+
+ /* Properties shared with userspace */
+ base_gpu_props props;
+};
+
+#endif /* _KBASE_GPUPROPS_TYPES_H_ */
diff --git a/midgard-0.r2p0/mali_kbase_hw.c b/midgard-0.r2p0/mali_kbase_hw.c
new file mode 100755
index 0000000..1d7e5e9
--- /dev/null
+++ b/midgard-0.r2p0/mali_kbase_hw.c
@@ -0,0 +1,272 @@
+/*
+ *
+ * (C) COPYRIGHT 2012-2016 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained
+ * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
+ * Boston, MA 02110-1301, USA.
+ *
+ */
+
+
+
+
+
+/*
+ * Run-time work-arounds helpers
+ */
+
+#include <mali_base_hwconfig_features.h>
+#include <mali_base_hwconfig_issues.h>
+#include <mali_midg_regmap.h>
+#include "mali_kbase.h"
+#include "mali_kbase_hw.h"
+
+void kbase_hw_set_features_mask(struct kbase_device *kbdev)
+{
+ const enum base_hw_feature *features;
+ u32 gpu_id;
+ u32 product_id;
+
+ gpu_id = kbdev->gpu_props.props.raw_props.gpu_id;
+ product_id = gpu_id & GPU_ID_VERSION_PRODUCT_ID;
+ product_id >>= GPU_ID_VERSION_PRODUCT_ID_SHIFT;
+
+ if (GPU_ID_IS_NEW_FORMAT(product_id)) {
+ switch (gpu_id & GPU_ID2_PRODUCT_MODEL) {
+ case GPU_ID2_PRODUCT_TMIX:
+ features = base_hw_features_tMIx;
+ break;
+ case GPU_ID2_PRODUCT_THEX:
+ features = base_hw_features_tHEx;
+ break;
+ default:
+ features = base_hw_features_generic;
+ break;
+ }
+ } else {
+ switch (product_id) {
+ case GPU_ID_PI_TFRX:
+ /* FALLTHROUGH */
+ case GPU_ID_PI_T86X:
+ features = base_hw_features_tFxx;
+ break;
+ case GPU_ID_PI_T83X:
+ features = base_hw_features_t83x;
+ break;
+ case GPU_ID_PI_T82X:
+ features = base_hw_features_t82x;
+ break;
+ case GPU_ID_PI_T76X:
+ features = base_hw_features_t76x;
+ break;
+ case GPU_ID_PI_T72X:
+ features = base_hw_features_t72x;
+ break;
+ case GPU_ID_PI_T62X:
+ features = base_hw_features_t62x;
+ break;
+ case GPU_ID_PI_T60X:
+ features = base_hw_features_t60x;
+ break;
+ default:
+ features = base_hw_features_generic;
+ break;
+ }
+ }
+
+ for (; *features != BASE_HW_FEATURE_END; features++)
+ set_bit(*features, &kbdev->hw_features_mask[0]);
+}
+
+int kbase_hw_set_issues_mask(struct kbase_device *kbdev)
+{
+ const enum base_hw_issue *issues;
+ u32 gpu_id;
+ u32 product_id;
+ u32 impl_tech;
+
+ gpu_id = kbdev->gpu_props.props.raw_props.gpu_id;
+ product_id = gpu_id & GPU_ID_VERSION_PRODUCT_ID;
+ product_id >>= GPU_ID_VERSION_PRODUCT_ID_SHIFT;
+ impl_tech = kbdev->gpu_props.props.thread_props.impl_tech;
+
+ if (impl_tech != IMPLEMENTATION_MODEL) {
+ if (GPU_ID_IS_NEW_FORMAT(product_id)) {
+ switch (gpu_id) {
+ case GPU_ID2_MAKE(6, 0, 10, 0, 0, 0, 1):
+ issues = base_hw_issues_tMIx_r0p0_05dev0;
+ break;
+ case GPU_ID2_MAKE(6, 0, 10, 0, 0, 0, 2):
+ issues = base_hw_issues_tMIx_r0p0;
+ break;
+ default:
+ if ((gpu_id & GPU_ID2_PRODUCT_MODEL) ==
+ GPU_ID2_PRODUCT_TMIX) {
+ issues = base_hw_issues_tMIx_r0p0;
+ } else if ((gpu_id & GPU_ID2_PRODUCT_MODEL) ==
+ GPU_ID2_PRODUCT_THEX) {
+ issues = base_hw_issues_tHEx_r0p0;
+ } else {
+ dev_err(kbdev->dev,
+ "Unknown GPU ID %x", gpu_id);
+ return -EINVAL;
+ }
+ }
+ } else {
+ switch (gpu_id) {
+ case GPU_ID_MAKE(GPU_ID_PI_T60X, 0, 0, GPU_ID_S_15DEV0):
+ issues = base_hw_issues_t60x_r0p0_15dev0;
+ break;
+ case GPU_ID_MAKE(GPU_ID_PI_T60X, 0, 0, GPU_ID_S_EAC):
+ issues = base_hw_issues_t60x_r0p0_eac;
+ break;
+ case GPU_ID_MAKE(GPU_ID_PI_T60X, 0, 1, 0):
+ issues = base_hw_issues_t60x_r0p1;
+ break;
+ case GPU_ID_MAKE(GPU_ID_PI_T62X, 0, 1, 0):
+ issues = base_hw_issues_t62x_r0p1;
+ break;
+ case GPU_ID_MAKE(GPU_ID_PI_T62X, 1, 0, 0):
+ case GPU_ID_MAKE(GPU_ID_PI_T62X, 1, 0, 1):
+ issues = base_hw_issues_t62x_r1p0;
+ break;
+ case GPU_ID_MAKE(GPU_ID_PI_T62X, 1, 1, 0):
+ issues = base_hw_issues_t62x_r1p1;
+ break;
+ case GPU_ID_MAKE(GPU_ID_PI_T76X, 0, 0, 1):
+ issues = base_hw_issues_t76x_r0p0;
+ break;
+ case GPU_ID_MAKE(GPU_ID_PI_T76X, 0, 1, 1):
+ issues = base_hw_issues_t76x_r0p1;
+ break;
+ case GPU_ID_MAKE(GPU_ID_PI_T76X, 0, 1, 9):
+ issues = base_hw_issues_t76x_r0p1_50rel0;
+ break;
+ case GPU_ID_MAKE(GPU_ID_PI_T76X, 0, 2, 1):
+ issues = base_hw_issues_t76x_r0p2;
+ break;
+ case GPU_ID_MAKE(GPU_ID_PI_T76X, 0, 3, 1):
+ issues = base_hw_issues_t76x_r0p3;
+ break;
+ case GPU_ID_MAKE(GPU_ID_PI_T76X, 1, 0, 0):
+ issues = base_hw_issues_t76x_r1p0;
+ break;
+ case GPU_ID_MAKE(GPU_ID_PI_T72X, 0, 0, 0):
+ case GPU_ID_MAKE(GPU_ID_PI_T72X, 0, 0, 1):
+ case GPU_ID_MAKE(GPU_ID_PI_T72X, 0, 0, 2):
+ issues = base_hw_issues_t72x_r0p0;
+ break;
+ case GPU_ID_MAKE(GPU_ID_PI_T72X, 1, 0, 0):
+ issues = base_hw_issues_t72x_r1p0;
+ break;
+ case GPU_ID_MAKE(GPU_ID_PI_T72X, 1, 1, 0):
+ issues = base_hw_issues_t72x_r1p1;
+ break;
+ case GPU_ID_MAKE(GPU_ID_PI_TFRX, 0, 1, 2):
+ issues = base_hw_issues_tFRx_r0p1;
+ break;
+ case GPU_ID_MAKE(GPU_ID_PI_TFRX, 0, 2, 0):
+ issues = base_hw_issues_tFRx_r0p2;
+ break;
+ case GPU_ID_MAKE(GPU_ID_PI_TFRX, 1, 0, 0):
+ case GPU_ID_MAKE(GPU_ID_PI_TFRX, 1, 0, 8):
+ issues = base_hw_issues_tFRx_r1p0;
+ break;
+ case GPU_ID_MAKE(GPU_ID_PI_TFRX, 2, 0, 0):
+ issues = base_hw_issues_tFRx_r2p0;
+ break;
+ case GPU_ID_MAKE(GPU_ID_PI_T86X, 0, 2, 0):
+ issues = base_hw_issues_t86x_r0p2;
+ break;
+ case GPU_ID_MAKE(GPU_ID_PI_T86X, 1, 0, 0):
+ case GPU_ID_MAKE(GPU_ID_PI_T86X, 1, 0, 8):
+ issues = base_hw_issues_t86x_r1p0;
+ break;
+ case GPU_ID_MAKE(GPU_ID_PI_T86X, 2, 0, 0):
+ issues = base_hw_issues_t86x_r2p0;
+ break;
+ case GPU_ID_MAKE(GPU_ID_PI_T83X, 0, 1, 0):
+ issues = base_hw_issues_t83x_r0p1;
+ break;
+ case GPU_ID_MAKE(GPU_ID_PI_T83X, 1, 0, 0):
+ case GPU_ID_MAKE(GPU_ID_PI_T83X, 1, 0, 8):
+ issues = base_hw_issues_t83x_r1p0;
+ break;
+ case GPU_ID_MAKE(GPU_ID_PI_T82X, 0, 0, 0):
+ issues = base_hw_issues_t82x_r0p0;
+ break;
+ case GPU_ID_MAKE(GPU_ID_PI_T82X, 0, 1, 0):
+ issues = base_hw_issues_t82x_r0p1;
+ break;
+ case GPU_ID_MAKE(GPU_ID_PI_T82X, 1, 0, 0):
+ case GPU_ID_MAKE(GPU_ID_PI_T82X, 1, 0, 8):
+ issues = base_hw_issues_t82x_r1p0;
+ break;
+ default:
+ dev_err(kbdev->dev,
+ "Unknown GPU ID %x", gpu_id);
+ return -EINVAL;
+ }
+ }
+ } else {
+ /* Software model */
+ if (GPU_ID_IS_NEW_FORMAT(product_id)) {
+ switch (gpu_id & GPU_ID2_PRODUCT_MODEL) {
+ case GPU_ID2_PRODUCT_TMIX:
+ issues = base_hw_issues_model_tMIx;
+ break;
+ case GPU_ID2_PRODUCT_THEX:
+ issues = base_hw_issues_model_tHEx;
+ break;
+ default:
+ dev_err(kbdev->dev,
+ "Unknown GPU ID %x", gpu_id);
+ return -EINVAL;
+ }
+ } else {
+ switch (product_id) {
+ case GPU_ID_PI_T60X:
+ issues = base_hw_issues_model_t60x;
+ break;
+ case GPU_ID_PI_T62X:
+ issues = base_hw_issues_model_t62x;
+ break;
+ case GPU_ID_PI_T72X:
+ issues = base_hw_issues_model_t72x;
+ break;
+ case GPU_ID_PI_T76X:
+ issues = base_hw_issues_model_t76x;
+ break;
+ case GPU_ID_PI_TFRX:
+ issues = base_hw_issues_model_tFRx;
+ break;
+ case GPU_ID_PI_T86X:
+ issues = base_hw_issues_model_t86x;
+ break;
+ case GPU_ID_PI_T83X:
+ issues = base_hw_issues_model_t83x;
+ break;
+ case GPU_ID_PI_T82X:
+ issues = base_hw_issues_model_t82x;
+ break;
+ default:
+ dev_err(kbdev->dev, "Unknown GPU ID %x",
+ gpu_id);
+ return -EINVAL;
+ }
+ }
+ }
+
+ dev_info(kbdev->dev, "GPU identified as 0x%04x r%dp%d status %d", (gpu_id & GPU_ID_VERSION_PRODUCT_ID) >> GPU_ID_VERSION_PRODUCT_ID_SHIFT, (gpu_id & GPU_ID_VERSION_MAJOR) >> GPU_ID_VERSION_MAJOR_SHIFT, (gpu_id & GPU_ID_VERSION_MINOR) >> GPU_ID_VERSION_MINOR_SHIFT, (gpu_id & GPU_ID_VERSION_STATUS) >> GPU_ID_VERSION_STATUS_SHIFT);
+
+ for (; *issues != BASE_HW_ISSUE_END; issues++)
+ set_bit(*issues, &kbdev->hw_issues_mask[0]);
+
+ return 0;
+}
diff --git a/midgard-0.r2p0/mali_kbase_hw.h b/midgard-0.r2p0/mali_kbase_hw.h
new file mode 100755
index 0000000..fce7d29
--- /dev/null
+++ b/midgard-0.r2p0/mali_kbase_hw.h
@@ -0,0 +1,52 @@
+/*
+ *
+ * (C) COPYRIGHT 2012-2015 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained
+ * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
+ * Boston, MA 02110-1301, USA.
+ *
+ */
+
+
+
+
+
+/**
+ * @file
+ * Run-time work-arounds helpers
+ */
+
+#ifndef _KBASE_HW_H_
+#define _KBASE_HW_H_
+
+#include "mali_kbase_defs.h"
+
+/**
+ * @brief Tell whether a work-around should be enabled
+ */
+#define kbase_hw_has_issue(kbdev, issue)\
+ test_bit(issue, &(kbdev)->hw_issues_mask[0])
+
+/**
+ * @brief Tell whether a feature is supported
+ */
+#define kbase_hw_has_feature(kbdev, feature)\
+ test_bit(feature, &(kbdev)->hw_features_mask[0])
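+
+/*
+ * Example usage (illustrative): callers test for a specific work-around or
+ * feature before taking the corresponding code path, e.g.
+ *
+ *   if (kbase_hw_has_issue(kbdev, BASE_HW_ISSUE_6367))
+ *           kbase_wait_write_flush(kctx);
+ */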
+
+/**
+ * @brief Set the HW issues mask depending on the GPU ID
+ */
+int kbase_hw_set_issues_mask(struct kbase_device *kbdev);
+
+/**
+ * @brief Set the features mask depending on the GPU ID
+ */
+void kbase_hw_set_features_mask(struct kbase_device *kbdev);
+
+#endif /* _KBASE_HW_H_ */
diff --git a/midgard-0.r2p0/mali_kbase_hwaccess_backend.h b/midgard-0.r2p0/mali_kbase_hwaccess_backend.h
new file mode 100755
index 0000000..b09be99
--- /dev/null
+++ b/midgard-0.r2p0/mali_kbase_hwaccess_backend.h
@@ -0,0 +1,54 @@
+/*
+ *
+ * (C) COPYRIGHT 2014-2015 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained
+ * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
+ * Boston, MA 02110-1301, USA.
+ *
+ */
+
+
+
+
+/*
+ * HW access backend common APIs
+ */
+
+#ifndef _KBASE_HWACCESS_BACKEND_H_
+#define _KBASE_HWACCESS_BACKEND_H_
+
+/**
+ * kbase_backend_early_init - Perform any backend-specific initialization.
+ * @kbdev: Device pointer
+ *
+ * Return: 0 on success, or an error code on failure.
+ */
+int kbase_backend_early_init(struct kbase_device *kbdev);
+
+/**
+ * kbase_backend_late_init - Perform any backend-specific initialization.
+ * @kbdev: Device pointer
+ *
+ * Return: 0 on success, or an error code on failure.
+ */
+int kbase_backend_late_init(struct kbase_device *kbdev);
+
+/**
+ * kbase_backend_early_term - Perform any backend-specific termination.
+ * @kbdev: Device pointer
+ */
+void kbase_backend_early_term(struct kbase_device *kbdev);
+
+/**
+ * kbase_backend_late_term - Perform any backend-specific termination.
+ * @kbdev: Device pointer
+ */
+void kbase_backend_late_term(struct kbase_device *kbdev);
+
+#endif /* _KBASE_HWACCESS_BACKEND_H_ */
diff --git a/midgard-0.r2p0/mali_kbase_hwaccess_defs.h b/midgard-0.r2p0/mali_kbase_hwaccess_defs.h
new file mode 100755
index 0000000..0acf297
--- /dev/null
+++ b/midgard-0.r2p0/mali_kbase_hwaccess_defs.h
@@ -0,0 +1,36 @@
+/*
+ *
+ * (C) COPYRIGHT 2014, 2016 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained
+ * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
+ * Boston, MA 02110-1301, USA.
+ *
+ */
+
+
+
+
+/**
+ * @file mali_kbase_hwaccess_defs.h
+ * HW access common definitions
+ */
+
+#ifndef _KBASE_HWACCESS_DEFS_H_
+#define _KBASE_HWACCESS_DEFS_H_
+
+#include <mali_kbase_jm_defs.h>
+
+/* The hwaccess_lock (a spinlock) must be held when accessing this structure */
+struct kbase_hwaccess_data {
+ struct kbase_context *active_kctx;
+
+ struct kbase_backend_data backend;
+};
+
+#endif /* _KBASE_HWACCESS_DEFS_H_ */
diff --git a/midgard-0.r2p0/mali_kbase_hwaccess_gpuprops.h b/midgard-0.r2p0/mali_kbase_hwaccess_gpuprops.h
new file mode 100755
index 0000000..cf8a813
--- /dev/null
+++ b/midgard-0.r2p0/mali_kbase_hwaccess_gpuprops.h
@@ -0,0 +1,47 @@
+/*
+ *
+ * (C) COPYRIGHT 2014-2015 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained
+ * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
+ * Boston, MA 02110-1301, USA.
+ *
+ */
+
+
+
+
+/**
+ * Base kernel property query backend APIs
+ */
+
+#ifndef _KBASE_HWACCESS_GPUPROPS_H_
+#define _KBASE_HWACCESS_GPUPROPS_H_
+
+/**
+ * kbase_backend_gpuprops_get() - Fill @regdump with GPU properties read from
+ * GPU
+ * @kbdev: Device pointer
+ * @regdump: Pointer to struct kbase_gpuprops_regdump structure
+ */
+void kbase_backend_gpuprops_get(struct kbase_device *kbdev,
+ struct kbase_gpuprops_regdump *regdump);
+
+/**
+ * kbase_backend_gpuprops_get_features - Fill @regdump with GPU properties read from GPU
+ * @kbdev: Device pointer
+ * @regdump: Pointer to struct kbase_gpuprops_regdump structure
+ *
+ * This function reads GPU properties that are dependent on the hardware
+ * features bitmask
+ */
+void kbase_backend_gpuprops_get_features(struct kbase_device *kbdev,
+ struct kbase_gpuprops_regdump *regdump);
+
+
+#endif /* _KBASE_HWACCESS_GPUPROPS_H_ */
diff --git a/midgard-0.r2p0/mali_kbase_hwaccess_instr.h b/midgard-0.r2p0/mali_kbase_hwaccess_instr.h
new file mode 100755
index 0000000..5de2b75
--- /dev/null
+++ b/midgard-0.r2p0/mali_kbase_hwaccess_instr.h
@@ -0,0 +1,116 @@
+/*
+ *
+ * (C) COPYRIGHT 2014-2015 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained
+ * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
+ * Boston, MA 02110-1301, USA.
+ *
+ */
+
+
+
+
+
+/*
+ * HW Access instrumentation common APIs
+ */
+
+#ifndef _KBASE_HWACCESS_INSTR_H_
+#define _KBASE_HWACCESS_INSTR_H_
+
+#include <mali_kbase_instr_defs.h>
+
+/**
+ * kbase_instr_hwcnt_enable_internal - Enable HW counters collection
+ * @kbdev: Kbase device
+ * @kctx: Kbase context
+ * @setup: HW counter setup parameters
+ *
+ * Context: might sleep, waiting for reset to complete
+ *
+ * Return: 0 on success
+ */
+int kbase_instr_hwcnt_enable_internal(struct kbase_device *kbdev,
+ struct kbase_context *kctx,
+ struct kbase_uk_hwcnt_setup *setup);
+
+/**
+ * kbase_instr_hwcnt_disable_internal - Disable HW counters collection
+ * @kctx: Kbase context
+ *
+ * Context: might sleep, waiting for an ongoing dump to complete
+ *
+ * Return: 0 on success
+ */
+int kbase_instr_hwcnt_disable_internal(struct kbase_context *kctx);
+
+/**
+ * kbase_instr_hwcnt_request_dump() - Request HW counter dump from GPU
+ * @kctx: Kbase context
+ *
+ * Caller must either wait for kbase_instr_hwcnt_dump_complete() to return true,
+ * or call kbase_instr_hwcnt_wait_for_dump().
+ *
+ * Return: 0 on success
+ */
+int kbase_instr_hwcnt_request_dump(struct kbase_context *kctx);
+
+/**
+ * kbase_instr_hwcnt_wait_for_dump() - Wait until pending HW counter dump has
+ * completed.
+ * @kctx: Kbase context
+ *
+ * Context: will sleep, waiting for dump to complete
+ *
+ * Return: 0 on success
+ */
+int kbase_instr_hwcnt_wait_for_dump(struct kbase_context *kctx);
+
+/**
+ * kbase_instr_hwcnt_dump_complete - Tell whether the HW counters dump has
+ * completed
+ * @kctx: Kbase context
+ * @success: Set to true if successful
+ *
+ * Context: does not sleep.
+ *
+ * Return: true if the dump is complete
+ */
+bool kbase_instr_hwcnt_dump_complete(struct kbase_context *kctx,
+ bool * const success);
+
+/**
+ * kbase_instr_hwcnt_clear() - Clear HW counters
+ * @kctx: Kbase context
+ *
+ * Context: might sleep, waiting for reset to complete
+ *
+ * Return: 0 on success
+ */
+int kbase_instr_hwcnt_clear(struct kbase_context *kctx);
+
+/**
+ * kbase_instr_backend_init() - Initialise the instrumentation backend
+ * @kbdev: Kbase device
+ *
+ * This function should be called during driver initialization.
+ *
+ * Return: 0 on success
+ */
+int kbase_instr_backend_init(struct kbase_device *kbdev);
+
+/**
+ * kbase_instr_backend_term() - Terminate the instrumentation backend
+ * @kbdev: Kbase device
+ *
+ * This function should be called during driver termination.
+ */
+void kbase_instr_backend_term(struct kbase_device *kbdev);
+
+#endif /* _KBASE_HWACCESS_INSTR_H_ */
diff --git a/midgard-0.r2p0/mali_kbase_hwaccess_jm.h b/midgard-0.r2p0/mali_kbase_hwaccess_jm.h
new file mode 100755
index 0000000..c2c3909
--- /dev/null
+++ b/midgard-0.r2p0/mali_kbase_hwaccess_jm.h
@@ -0,0 +1,377 @@
+/*
+ *
+ * (C) COPYRIGHT 2014-2016 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained
+ * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
+ * Boston, MA 02110-1301, USA.
+ *
+ */
+
+
+
+
+/*
+ * HW access job manager common APIs
+ */
+
+#ifndef _KBASE_HWACCESS_JM_H_
+#define _KBASE_HWACCESS_JM_H_
+
+/**
+ * kbase_backend_run_atom() - Run an atom on the GPU
+ * @kbdev: Device pointer
+ * @katom: Atom to run
+ *
+ * Caller must hold the HW access lock
+ */
+void kbase_backend_run_atom(struct kbase_device *kbdev,
+ struct kbase_jd_atom *katom);
+
+/**
+ * kbase_backend_slot_update - Update state based on slot ringbuffers
+ *
+ * @kbdev: Device pointer
+ *
+ * Inspect the jobs in the slot ringbuffers and update state.
+ *
+ * This will cause jobs to be submitted to hardware if they are unblocked
+ */
+void kbase_backend_slot_update(struct kbase_device *kbdev);
+
+/**
+ * kbase_backend_find_free_address_space() - Find a free address space.
+ * @kbdev: Device pointer
+ * @kctx: Context pointer
+ *
+ * If no address spaces are currently free, then this function can evict an
+ * idle context from the runpool, freeing up the address space it was using.
+ *
+ * The address space is marked as in use. The caller must either assign a
+ * context using kbase_backend_use_ctx(), or release it using
+ * kbase_backend_release_free_address_space().
+ *
+ * Return: Number of the free address space, or KBASEP_AS_NR_INVALID if none
+ * available
+ */
+int kbase_backend_find_free_address_space(struct kbase_device *kbdev,
+ struct kbase_context *kctx);
+
+/**
+ * kbase_backend_release_free_address_space() - Release an address space.
+ * @kbdev: Device pointer
+ * @as_nr: Address space to release
+ *
+ * The address space must have been returned by
+ * kbase_backend_find_free_address_space().
+ */
+void kbase_backend_release_free_address_space(struct kbase_device *kbdev,
+ int as_nr);
+
+/**
+ * kbase_backend_use_ctx() - Activate a currently unscheduled context, using the
+ * provided address space.
+ * @kbdev: Device pointer
+ * @kctx: Context pointer. May be NULL
+ * @as_nr: Free address space to use
+ *
+ * kbase_gpu_next_job() will pull atoms from the active context.
+ *
+ * Return: true if successful, false if ASID not assigned.
+ */
+bool kbase_backend_use_ctx(struct kbase_device *kbdev,
+ struct kbase_context *kctx,
+ int as_nr);
+
+/**
+ * kbase_backend_use_ctx_sched() - Activate a context.
+ * @kbdev: Device pointer
+ * @kctx: Context pointer
+ *
+ * kbase_gpu_next_job() will pull atoms from the active context.
+ *
+ * The context must already be scheduled and assigned to an address space. If
+ * the context is not scheduled, then kbase_backend_use_ctx() should be used
+ * instead.
+ *
+ * Caller must hold hwaccess_lock
+ *
+ * Return: true if context is now active, false otherwise (i.e. if context does
+ * not have an address space assigned)
+ */
+bool kbase_backend_use_ctx_sched(struct kbase_device *kbdev,
+ struct kbase_context *kctx);
+
+/**
+ * kbase_backend_release_ctx_irq - Release a context from the GPU. This will
+ * de-assign the assigned address space.
+ * @kbdev: Device pointer
+ * @kctx: Context pointer
+ *
+ * Caller must hold kbase_device->mmu_hw_mutex and hwaccess_lock
+ */
+void kbase_backend_release_ctx_irq(struct kbase_device *kbdev,
+ struct kbase_context *kctx);
+
+/**
+ * kbase_backend_release_ctx_noirq - Release a context from the GPU. This will
+ * de-assign the assigned address space.
+ * @kbdev: Device pointer
+ * @kctx: Context pointer
+ *
+ * Caller must hold kbase_device->mmu_hw_mutex
+ *
+ * This function must perform any operations that could not be performed in IRQ
+ * context by kbase_backend_release_ctx_irq().
+ */
+void kbase_backend_release_ctx_noirq(struct kbase_device *kbdev,
+ struct kbase_context *kctx);
+
+/**
+ * kbase_backend_complete_wq() - Perform backend-specific actions required on
+ * completing an atom.
+ * @kbdev: Device pointer
+ * @katom: Pointer to the atom to complete
+ *
+ * This function should only be called from kbase_jd_done_worker() or
+ * js_return_worker().
+ *
+ * Return: true if atom has completed, false if atom should be re-submitted
+ */
+void kbase_backend_complete_wq(struct kbase_device *kbdev,
+ struct kbase_jd_atom *katom);
+
+/**
+ * kbase_backend_complete_wq_post_sched - Perform backend-specific actions
+ * required on completing an atom, after
+ * any scheduling has taken place.
+ * @kbdev: Device pointer
+ * @core_req: Core requirements of atom
+ * @affinity: Affinity of atom
+ * @coreref_state: Coreref state of atom
+ *
+ * This function should only be called from kbase_jd_done_worker() or
+ * js_return_worker().
+ */
+void kbase_backend_complete_wq_post_sched(struct kbase_device *kbdev,
+ base_jd_core_req core_req, u64 affinity,
+ enum kbase_atom_coreref_state coreref_state);
+
+/**
+ * kbase_backend_reset() - The GPU is being reset. Cancel all jobs on the GPU
+ * and remove any others from the ringbuffers.
+ * @kbdev: Device pointer
+ * @end_timestamp: Timestamp of reset
+ */
+void kbase_backend_reset(struct kbase_device *kbdev, ktime_t *end_timestamp);
+
+/**
+ * kbase_backend_inspect_head() - Return the atom currently at the head of slot
+ * @js
+ * @kbdev: Device pointer
+ * @js: Job slot to inspect
+ *
+ * Return: Atom currently at the head of slot @js, or NULL
+ */
+struct kbase_jd_atom *kbase_backend_inspect_head(struct kbase_device *kbdev,
+ int js);
+
+/**
+ * kbase_backend_inspect_tail - Return the atom currently at the tail of slot
+ * @js
+ * @kbdev: Device pointer
+ * @js: Job slot to inspect
+ *
+ * Return: Atom currently at the tail of slot @js, or NULL
+ */
+struct kbase_jd_atom *kbase_backend_inspect_tail(struct kbase_device *kbdev,
+ int js);
+
+/**
+ * kbase_backend_nr_atoms_on_slot() - Return the number of atoms currently on a
+ * slot.
+ * @kbdev: Device pointer
+ * @js: Job slot to inspect
+ *
+ * Return: Number of atoms currently on slot
+ */
+int kbase_backend_nr_atoms_on_slot(struct kbase_device *kbdev, int js);
+
+/**
+ * kbase_backend_nr_atoms_submitted() - Return the number of atoms on a slot
+ * that are currently on the GPU.
+ * @kbdev: Device pointer
+ * @js: Job slot to inspect
+ *
+ * Return: Number of atoms on slot @js that are currently on the GPU.
+ */
+int kbase_backend_nr_atoms_submitted(struct kbase_device *kbdev, int js);
+
+/**
+ * kbase_backend_ctx_count_changed() - Number of contexts ready to submit jobs
+ * has changed.
+ * @kbdev: Device pointer
+ *
+ * Perform any required backend-specific actions (eg starting/stopping
+ * scheduling timers).
+ */
+void kbase_backend_ctx_count_changed(struct kbase_device *kbdev);
+
+/**
+ * kbase_backend_timeouts_changed() - Job Scheduler timeouts have changed.
+ * @kbdev: Device pointer
+ *
+ * Perform any required backend-specific actions (eg updating timeouts of
+ * currently running atoms).
+ */
+void kbase_backend_timeouts_changed(struct kbase_device *kbdev);
+
+/**
+ * kbase_backend_slot_free() - Return the number of jobs that can be currently
+ * submitted to slot @js.
+ * @kbdev: Device pointer
+ * @js: Job slot to inspect
+ *
+ * Return: Number of jobs that can be submitted.
+ */
+int kbase_backend_slot_free(struct kbase_device *kbdev, int js);
+
+/**
+ * kbase_job_check_leave_disjoint - potentially leave disjoint state
+ * @kbdev: kbase device
+ * @target_katom: atom which is finishing
+ *
+ * Work out whether to leave disjoint state when finishing an atom that was
+ * originated by kbase_job_check_enter_disjoint().
+ */
+void kbase_job_check_leave_disjoint(struct kbase_device *kbdev,
+ struct kbase_jd_atom *target_katom);
+
+/**
+ * kbase_backend_jm_kill_jobs_from_kctx - Kill all jobs that are currently
+ * running from a context
+ * @kctx: Context pointer
+ *
+ * This is used in response to a page fault to remove all jobs belonging to the
+ * faulting context from the hardware.
+ */
+void kbase_backend_jm_kill_jobs_from_kctx(struct kbase_context *kctx);
+
+/**
+ * kbase_jm_wait_for_zero_jobs - Wait for context to have zero jobs running, and
+ * to be descheduled.
+ * @kctx: Context pointer
+ *
+ * This should be called following kbase_js_zap_context(), to ensure the context
+ * can be safely destroyed.
+ */
+void kbase_jm_wait_for_zero_jobs(struct kbase_context *kctx);
+
+/**
+ * kbase_backend_get_current_flush_id - Return the current flush ID
+ *
+ * @kbdev: Device pointer
+ *
+ * Return: the current flush ID to be recorded for each job chain
+ */
+u32 kbase_backend_get_current_flush_id(struct kbase_device *kbdev);
+
+#if KBASE_GPU_RESET_EN
+/**
+ * kbase_prepare_to_reset_gpu - Prepare for resetting the GPU.
+ * @kbdev: Device pointer
+ *
+ * This function just soft-stops all the slots to ensure that as many jobs as
+ * possible are saved.
+ *
+ * Return: a boolean which should be interpreted as follows:
+ * - true - Prepared for reset, kbase_reset_gpu should be called.
+ * - false - Another thread is performing a reset, kbase_reset_gpu should
+ * not be called.
+ */
+bool kbase_prepare_to_reset_gpu(struct kbase_device *kbdev);
+
+/**
+ * kbase_reset_gpu - Reset the GPU
+ * @kbdev: Device pointer
+ *
+ * This function should be called after kbase_prepare_to_reset_gpu if it returns
+ * true. It should never be called without a corresponding call to
+ * kbase_prepare_to_reset_gpu.
+ *
+ * After this function is called (or not called if kbase_prepare_to_reset_gpu
+ * returned false), the caller should wait for kbdev->reset_waitq to be
+ * signalled to know when the reset has completed.
+ */
+void kbase_reset_gpu(struct kbase_device *kbdev);
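+
+/*
+ * Typical calling pattern (sketch only; the exact wait condition is internal
+ * to the driver and shown here as a placeholder):
+ *
+ *   if (kbase_prepare_to_reset_gpu(kbdev))
+ *           kbase_reset_gpu(kbdev);
+ *   wait_event(kbdev->reset_waitq, <reset has completed>);
+ */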
+
+/**
+ * kbase_prepare_to_reset_gpu_locked - Prepare for resetting the GPU.
+ * @kbdev: Device pointer
+ *
+ * This function just soft-stops all the slots to ensure that as many jobs as
+ * possible are saved.
+ *
+ * Return: a boolean which should be interpreted as follows:
+ * - true - Prepared for reset, kbase_reset_gpu should be called.
+ * - false - Another thread is performing a reset, kbase_reset_gpu should
+ * not be called.
+ */
+bool kbase_prepare_to_reset_gpu_locked(struct kbase_device *kbdev);
+
+/**
+ * kbase_reset_gpu_locked - Reset the GPU
+ * @kbdev: Device pointer
+ *
+ * This function should be called after kbase_prepare_to_reset_gpu if it
+ * returns true. It should never be called without a corresponding call to
+ * kbase_prepare_to_reset_gpu.
+ *
+ * After this function is called (or not called if kbase_prepare_to_reset_gpu
+ * returned false), the caller should wait for kbdev->reset_waitq to be
+ * signalled to know when the reset has completed.
+ */
+void kbase_reset_gpu_locked(struct kbase_device *kbdev);
+
+/**
+ * kbase_reset_gpu_silent - Reset the GPU silently
+ * @kbdev: Device pointer
+ *
+ * Reset the GPU without trying to cancel jobs and don't emit messages into
+ * the kernel log while doing the reset.
+ *
+ * This function should be used in cases where we are doing a controlled reset
+ * of the GPU as part of normal processing (e.g. exiting protected mode) where
+ * the driver will have ensured the scheduler has been idled and all other
+ * users of the GPU (e.g. instrumentation) have been suspended.
+ */
+void kbase_reset_gpu_silent(struct kbase_device *kbdev);
+
+/**
+ * kbase_reset_gpu_active - Reports if the GPU is being reset
+ * @kbdev: Device pointer
+ *
+ * Return: True if the GPU is in the process of being reset.
+ */
+bool kbase_reset_gpu_active(struct kbase_device *kbdev);
+#endif
+
+/**
+ * kbase_job_slot_hardstop - Hard-stop the specified job slot
+ * @kctx: The kbase context that contains the job(s) that should
+ * be hard-stopped
+ * @js: The job slot to hard-stop
+ * @target_katom: The job that should be hard-stopped (or NULL for all
+ * jobs from the context)
+ * Context:
+ * The job slot lock must be held when calling this function.
+ */
+void kbase_job_slot_hardstop(struct kbase_context *kctx, int js,
+ struct kbase_jd_atom *target_katom);
+
+#endif /* _KBASE_HWACCESS_JM_H_ */
diff --git a/midgard-0.r2p0/mali_kbase_hwaccess_pm.h b/midgard-0.r2p0/mali_kbase_hwaccess_pm.h
new file mode 100755
index 0000000..71c7d49
--- /dev/null
+++ b/midgard-0.r2p0/mali_kbase_hwaccess_pm.h
@@ -0,0 +1,209 @@
+/*
+ *
+ * (C) COPYRIGHT 2014-2015 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained
+ * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
+ * Boston, MA 02110-1301, USA.
+ *
+ */
+
+
+
+
+/**
+ * @file mali_kbase_hwaccess_pm.h
+ * HW access power manager common APIs
+ */
+
+#ifndef _KBASE_HWACCESS_PM_H_
+#define _KBASE_HWACCESS_PM_H_
+
+#include <mali_midg_regmap.h>
+#include <linux/atomic.h>
+
+#include <mali_kbase_pm_defs.h>
+
+/* Forward definition - see mali_kbase.h */
+struct kbase_device;
+
+/* Functions common to all HW access backends */
+
+/**
+ * Initialize the power management framework.
+ *
+ * Must be called before any other power management function
+ *
+ * @param kbdev The kbase device structure for the device (must be a valid
+ * pointer)
+ *
+ * @return 0 if the power management framework was successfully
+ * initialized.
+ */
+int kbase_hwaccess_pm_init(struct kbase_device *kbdev);
+
+/**
+ * Terminate the power management framework.
+ *
+ * No power management functions may be called after this (except
+ * @ref kbase_pm_init)
+ *
+ * @param kbdev The kbase device structure for the device (must be a valid
+ * pointer)
+ */
+void kbase_hwaccess_pm_term(struct kbase_device *kbdev);
+
+/**
+ * kbase_hwaccess_pm_powerup - Power up the GPU.
+ * @kbdev: The kbase device structure for the device (must be a valid pointer)
+ * @flags: Flags to pass on to kbase_pm_init_hw
+ *
+ * Power up GPU after all modules have been initialized and interrupt handlers
+ * installed.
+ *
+ * Return: 0 if powerup was successful.
+ */
+int kbase_hwaccess_pm_powerup(struct kbase_device *kbdev,
+ unsigned int flags);
+
+/**
+ * Halt the power management framework.
+ *
+ * Should ensure that no new interrupts are generated, but allow any currently
+ * running interrupt handlers to complete successfully. The GPU is forced off by
+ * the time this function returns, regardless of whether or not the active power
+ * policy asks for the GPU to be powered off.
+ *
+ * @param kbdev The kbase device structure for the device (must be a valid
+ * pointer)
+ */
+void kbase_hwaccess_pm_halt(struct kbase_device *kbdev);
+
+/**
+ * Perform any backend-specific actions to suspend the GPU
+ *
+ * @param kbdev The kbase device structure for the device (must be a valid
+ * pointer)
+ */
+void kbase_hwaccess_pm_suspend(struct kbase_device *kbdev);
+
+/**
+ * Perform any backend-specific actions to resume the GPU from a suspend
+ *
+ * @param kbdev The kbase device structure for the device (must be a valid
+ * pointer)
+ */
+void kbase_hwaccess_pm_resume(struct kbase_device *kbdev);
+
+/**
+ * Perform any required actions for activating the GPU. Called when the first
+ * context goes active.
+ *
+ * @param kbdev The kbase device structure for the device (must be a valid
+ * pointer)
+ */
+void kbase_hwaccess_pm_gpu_active(struct kbase_device *kbdev);
+
+/**
+ * Perform any required actions for idling the GPU. Called when the last
+ * context goes idle.
+ *
+ * @param kbdev The kbase device structure for the device (must be a valid
+ * pointer)
+ */
+void kbase_hwaccess_pm_gpu_idle(struct kbase_device *kbdev);
+
+
+/**
+ * Set the debug core mask.
+ *
+ * This determines which cores the power manager is allowed to use.
+ *
+ * @param kbdev The kbase device structure for the device (must be a
+ * valid pointer)
+ * @param new_core_mask_js0 The core mask to use for job slot 0
+ * @param new_core_mask_js1 The core mask to use for job slot 1
+ * @param new_core_mask_js2 The core mask to use for job slot 2
+ */
+void kbase_pm_set_debug_core_mask(struct kbase_device *kbdev,
+ u64 new_core_mask_js0, u64 new_core_mask_js1,
+ u64 new_core_mask_js2);
+
+
+/**
+ * Get the current policy.
+ *
+ * Returns the policy that is currently active.
+ *
+ * @param kbdev The kbase device structure for the device (must be a valid
+ * pointer)
+ *
+ * @return The current policy
+ */
+const struct kbase_pm_ca_policy
+*kbase_pm_ca_get_policy(struct kbase_device *kbdev);
+
+/**
+ * Change the policy to the one specified.
+ *
+ * @param kbdev The kbase device structure for the device (must be a valid
+ * pointer)
+ * @param policy The policy to change to (valid pointer returned from
+ * @ref kbase_pm_ca_list_policies)
+ */
+void kbase_pm_ca_set_policy(struct kbase_device *kbdev,
+ const struct kbase_pm_ca_policy *policy);
+
+/**
+ * Retrieve a static list of the available policies.
+ *
+ * @param[out] policies An array pointer to take the list of policies. This may
+ * be NULL. The contents of this array must not be
+ * modified.
+ *
+ * @return The number of policies
+ */
+int
+kbase_pm_ca_list_policies(const struct kbase_pm_ca_policy * const **policies);
+
+
+/**
+ * Get the current policy.
+ *
+ * Returns the policy that is currently active.
+ *
+ * @param kbdev The kbase device structure for the device (must be a valid
+ * pointer)
+ *
+ * @return The current policy
+ */
+const struct kbase_pm_policy *kbase_pm_get_policy(struct kbase_device *kbdev);
+
+/**
+ * Change the policy to the one specified.
+ *
+ * @param kbdev The kbase device structure for the device (must be a valid
+ * pointer)
+ * @param policy The policy to change to (valid pointer returned from
+ * @ref kbase_pm_list_policies)
+ */
+void kbase_pm_set_policy(struct kbase_device *kbdev,
+ const struct kbase_pm_policy *policy);
+
+/**
+ * Retrieve a static list of the available policies.
+ *
+ * @param[out] policies An array pointer to take the list of policies. This may
+ * be NULL. The contents of this array must not be
+ * modified.
+ *
+ * @return The number of policies
+ */
+int kbase_pm_list_policies(const struct kbase_pm_policy * const **policies);
+
+#endif /* _KBASE_HWACCESS_PM_H_ */
diff --git a/midgard-0.r2p0/mali_kbase_hwaccess_time.h b/midgard-0.r2p0/mali_kbase_hwaccess_time.h
new file mode 100755
index 0000000..89d26ea
--- /dev/null
+++ b/midgard-0.r2p0/mali_kbase_hwaccess_time.h
@@ -0,0 +1,53 @@
+/*
+ *
+ * (C) COPYRIGHT 2014 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained
+ * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
+ * Boston, MA 02110-1301, USA.
+ *
+ */
+
+
+
+
+/**
+ * @file mali_kbase_hwaccess_time.h
+ * HW access backend time APIs
+ */
+
+#ifndef _KBASE_BACKEND_TIME_H_
+#define _KBASE_BACKEND_TIME_H_
+
+/**
+ * kbase_backend_get_gpu_time() - Get current GPU time
+ * @kbdev: Device pointer
+ * @cycle_counter: Pointer to u64 to store cycle counter in
+ * @system_time: Pointer to u64 to store system time in
+ * @ts: Pointer to struct timespec to store current monotonic
+ * time in
+ */
+void kbase_backend_get_gpu_time(struct kbase_device *kbdev, u64 *cycle_counter,
+ u64 *system_time, struct timespec *ts);
+
+/**
+ * kbase_wait_write_flush() - Wait for GPU write flush
+ * @kctx: Context pointer
+ *
+ * Wait 1000 GPU clock cycles. This delay is known to give the GPU time to flush
+ * its write buffer.
+ *
+ * If a GPU reset occurs then the counters are reset to zero and the delay may
+ * not be as expected.
+ *
+ * This function is only in use for BASE_HW_ISSUE_6367
+ */
+#ifndef CONFIG_MALI_NO_MALI
+void kbase_wait_write_flush(struct kbase_context *kctx);
+#endif
+
+#endif /* _KBASE_BACKEND_TIME_H_ */
diff --git a/midgard-0.r2p0/mali_kbase_hwcnt_reader.h b/midgard-0.r2p0/mali_kbase_hwcnt_reader.h
new file mode 100755
index 0000000..cf7bf1b
--- /dev/null
+++ b/midgard-0.r2p0/mali_kbase_hwcnt_reader.h
@@ -0,0 +1,66 @@
+/*
+ *
+ * (C) COPYRIGHT 2015 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained
+ * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
+ * Boston, MA 02110-1301, USA.
+ *
+ */
+
+
+
+#ifndef _KBASE_HWCNT_READER_H_
+#define _KBASE_HWCNT_READER_H_
+
+/* The ids of ioctl commands. */
+#define KBASE_HWCNT_READER 0xBE
+#define KBASE_HWCNT_READER_GET_HWVER _IOR(KBASE_HWCNT_READER, 0x00, u32)
+#define KBASE_HWCNT_READER_GET_BUFFER_SIZE _IOR(KBASE_HWCNT_READER, 0x01, u32)
+#define KBASE_HWCNT_READER_DUMP _IOW(KBASE_HWCNT_READER, 0x10, u32)
+#define KBASE_HWCNT_READER_CLEAR _IOW(KBASE_HWCNT_READER, 0x11, u32)
+#define KBASE_HWCNT_READER_GET_BUFFER _IOR(KBASE_HWCNT_READER, 0x20,\
+ struct kbase_hwcnt_reader_metadata)
+#define KBASE_HWCNT_READER_PUT_BUFFER _IOW(KBASE_HWCNT_READER, 0x21,\
+ struct kbase_hwcnt_reader_metadata)
+#define KBASE_HWCNT_READER_SET_INTERVAL _IOW(KBASE_HWCNT_READER, 0x30, u32)
+#define KBASE_HWCNT_READER_ENABLE_EVENT _IOW(KBASE_HWCNT_READER, 0x40, u32)
+#define KBASE_HWCNT_READER_DISABLE_EVENT _IOW(KBASE_HWCNT_READER, 0x41, u32)
+#define KBASE_HWCNT_READER_GET_API_VERSION _IOW(KBASE_HWCNT_READER, 0xFF, u32)
+
+/**
+ * struct kbase_hwcnt_reader_metadata - hwcnt reader sample buffer metadata
+ * @timestamp: time when sample was collected
+ * @event_id: id of an event that triggered sample collection
+ * @buffer_idx: position in sampling area where sample buffer was stored
+ */
+struct kbase_hwcnt_reader_metadata {
+ u64 timestamp;
+ u32 event_id;
+ u32 buffer_idx;
+};
+
+/**
+ * enum base_hwcnt_reader_event - hwcnt dumping events
+ * @BASE_HWCNT_READER_EVENT_MANUAL: manual request for dump
+ * @BASE_HWCNT_READER_EVENT_PERIODIC: periodic dump
+ * @BASE_HWCNT_READER_EVENT_PREJOB: prejob dump request
+ * @BASE_HWCNT_READER_EVENT_POSTJOB: postjob dump request
+ * @BASE_HWCNT_READER_EVENT_COUNT: number of supported events
+ */
+enum base_hwcnt_reader_event {
+ BASE_HWCNT_READER_EVENT_MANUAL,
+ BASE_HWCNT_READER_EVENT_PERIODIC,
+ BASE_HWCNT_READER_EVENT_PREJOB,
+ BASE_HWCNT_READER_EVENT_POSTJOB,
+
+ BASE_HWCNT_READER_EVENT_COUNT
+};
+
+#endif /* _KBASE_HWCNT_READER_H_ */
+
diff --git a/midgard-0.r2p0/mali_kbase_ipa.c b/midgard-0.r2p0/mali_kbase_ipa.c
new file mode 100755
index 0000000..c579d0a
--- /dev/null
+++ b/midgard-0.r2p0/mali_kbase_ipa.c
@@ -0,0 +1,431 @@
+/*
+ *
+ * (C) COPYRIGHT 2015-2016 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained
+ * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
+ * Boston, MA 02110-1301, USA.
+ *
+ */
+
+
+#include <linux/of.h>
+#include <linux/sysfs.h>
+
+#include <mali_kbase.h>
+
+#define NR_IPA_GROUPS 8
+
+struct kbase_ipa_context;
+
+/**
+ * struct ipa_group - represents a single IPA group
+ * @name: name of the IPA group
+ * @capacitance: capacitance constant for IPA group
+ * @calc_power: function to calculate power for IPA group
+ */
+struct ipa_group {
+ const char *name;
+ u32 capacitance;
+ u32 (*calc_power)(struct kbase_ipa_context *,
+ struct ipa_group *);
+};
+
+#include <mali_kbase_ipa_tables.h>
+
+/**
+ * struct kbase_ipa_context - IPA context per device
+ * @kbdev: pointer to kbase device
+ * @groups: array of IPA groups for this context
+ * @vinstr_cli: vinstr client handle
+ * @vinstr_buffer: buffer to dump hardware counters onto
+ * @ipa_lock: protects the entire IPA context
+ */
+struct kbase_ipa_context {
+ struct kbase_device *kbdev;
+ struct ipa_group groups[NR_IPA_GROUPS];
+ struct kbase_vinstr_client *vinstr_cli;
+ void *vinstr_buffer;
+ struct mutex ipa_lock;
+};
+
+static ssize_t show_ipa_group(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct kbase_device *kbdev = dev_get_drvdata(dev);
+ struct kbase_ipa_context *ctx = kbdev->ipa_ctx;
+ ssize_t count = -EINVAL;
+ size_t i;
+
+ mutex_lock(&ctx->ipa_lock);
+ for (i = 0; i < ARRAY_SIZE(ctx->groups); i++) {
+ if (!strcmp(ctx->groups[i].name, attr->attr.name)) {
+ count = snprintf(buf, PAGE_SIZE, "%lu\n",
+ (unsigned long)ctx->groups[i].capacitance);
+ break;
+ }
+ }
+ mutex_unlock(&ctx->ipa_lock);
+ return count;
+}
+
+static ssize_t set_ipa_group(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf,
+ size_t count)
+{
+ struct kbase_device *kbdev = dev_get_drvdata(dev);
+ struct kbase_ipa_context *ctx = kbdev->ipa_ctx;
+ unsigned long capacitance;
+ size_t i;
+ int err;
+
+ err = kstrtoul(buf, 0, &capacitance);
+ if (err < 0)
+ return err;
+ if (capacitance > U32_MAX)
+ return -ERANGE;
+
+ mutex_lock(&ctx->ipa_lock);
+ for (i = 0; i < ARRAY_SIZE(ctx->groups); i++) {
+ if (!strcmp(ctx->groups[i].name, attr->attr.name)) {
+ ctx->groups[i].capacitance = capacitance;
+ mutex_unlock(&ctx->ipa_lock);
+ return count;
+ }
+ }
+ mutex_unlock(&ctx->ipa_lock);
+ return -EINVAL;
+}
+
+static DEVICE_ATTR(group0, S_IRUGO | S_IWUSR, show_ipa_group, set_ipa_group);
+static DEVICE_ATTR(group1, S_IRUGO | S_IWUSR, show_ipa_group, set_ipa_group);
+static DEVICE_ATTR(group2, S_IRUGO | S_IWUSR, show_ipa_group, set_ipa_group);
+static DEVICE_ATTR(group3, S_IRUGO | S_IWUSR, show_ipa_group, set_ipa_group);
+static DEVICE_ATTR(group4, S_IRUGO | S_IWUSR, show_ipa_group, set_ipa_group);
+static DEVICE_ATTR(group5, S_IRUGO | S_IWUSR, show_ipa_group, set_ipa_group);
+static DEVICE_ATTR(group6, S_IRUGO | S_IWUSR, show_ipa_group, set_ipa_group);
+static DEVICE_ATTR(group7, S_IRUGO | S_IWUSR, show_ipa_group, set_ipa_group);
+
+static struct attribute *kbase_ipa_attrs[] = {
+ &dev_attr_group0.attr,
+ &dev_attr_group1.attr,
+ &dev_attr_group2.attr,
+ &dev_attr_group3.attr,
+ &dev_attr_group4.attr,
+ &dev_attr_group5.attr,
+ &dev_attr_group6.attr,
+ &dev_attr_group7.attr,
+ NULL,
+};
+
+static struct attribute_group kbase_ipa_attr_group = {
+ .name = "ipa",
+ .attrs = kbase_ipa_attrs,
+};
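+
+/*
+ * These attributes are exposed through sysfs in an "ipa" group on the GPU
+ * device, one read/write file per IPA group; for example (path illustrative
+ * only):
+ *
+ *	echo 687 > /sys/devices/platform/<gpu>/ipa/group0
+ */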
+
+static void init_ipa_groups(struct kbase_ipa_context *ctx)
+{
+ memcpy(ctx->groups, ipa_groups_def, sizeof(ctx->groups));
+}
+
+#if defined(CONFIG_OF) && (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 7, 0))
+static int update_ipa_groups_from_dt(struct kbase_ipa_context *ctx)
+{
+ struct kbase_device *kbdev = ctx->kbdev;
+ struct device_node *np, *child;
+ struct ipa_group *group;
+ size_t nr_groups;
+ size_t i;
+ int err;
+
+ np = of_get_child_by_name(kbdev->dev->of_node, "ipa-groups");
+ if (!np)
+ return 0;
+
+ nr_groups = 0;
+ for_each_available_child_of_node(np, child)
+ nr_groups++;
+ if (!nr_groups || nr_groups > ARRAY_SIZE(ctx->groups)) {
+ dev_err(kbdev->dev, "invalid number of IPA groups: %zu", nr_groups);
+ err = -EINVAL;
+ goto err0;
+ }
+
+ for_each_available_child_of_node(np, child) {
+ const char *name;
+ u32 capacitance;
+
+ name = of_get_property(child, "label", NULL);
+ if (!name) {
+ dev_err(kbdev->dev, "label missing for IPA group");
+ err = -EINVAL;
+ goto err0;
+ }
+ err = of_property_read_u32(child, "capacitance",
+ &capacitance);
+ if (err < 0) {
+ dev_err(kbdev->dev, "capacitance missing for IPA group");
+ goto err0;
+ }
+
+ for (i = 0; i < ARRAY_SIZE(ctx->groups); i++) {
+ group = &ctx->groups[i];
+ if (!strcmp(group->name, name)) {
+ group->capacitance = capacitance;
+ break;
+ }
+ }
+ }
+
+ of_node_put(np);
+ return 0;
+err0:
+ of_node_put(np);
+ return err;
+}
+#else
+static int update_ipa_groups_from_dt(struct kbase_ipa_context *ctx)
+{
+ return 0;
+}
+#endif
+
+static int reset_ipa_groups(struct kbase_ipa_context *ctx)
+{
+ init_ipa_groups(ctx);
+ return update_ipa_groups_from_dt(ctx);
+}
+
+static inline u32 read_hwcnt(struct kbase_ipa_context *ctx,
+ u32 offset)
+{
+ u8 *p = ctx->vinstr_buffer;
+
+ return *(u32 *)&p[offset];
+}
+
+static inline u32 add_saturate(u32 a, u32 b)
+{
+ if (U32_MAX - a < b)
+ return U32_MAX;
+ return a + b;
+}
+
+/*
+ * Calculate power estimation based on hardware counter `c'
+ * across all shader cores.
+ */
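+/*
+ * For each core enabled in the coherency group mask this accumulates, with
+ * saturating addition, (counter * capacitance) / GPU_ACTIVE, i.e. events per
+ * GPU-active cycle scaled by the group capacitance; `base' advances by one
+ * counter block per shader core.
+ */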
+static u32 calc_power_sc_single(struct kbase_ipa_context *ctx,
+ struct ipa_group *group, u32 c)
+{
+ struct kbase_device *kbdev = ctx->kbdev;
+ u64 core_mask;
+ u32 base = 0, r = 0;
+
+ core_mask = kbdev->gpu_props.props.coherency_info.group[0].core_mask;
+ while (core_mask != 0ull) {
+ if ((core_mask & 1ull) != 0ull) {
+ u64 n = read_hwcnt(ctx, base + c);
+ u32 d = read_hwcnt(ctx, GPU_ACTIVE);
+ u32 s = group->capacitance;
+
+ r = add_saturate(r, div_u64(n * s, d));
+ }
+ base += NR_CNT_PER_BLOCK * NR_BYTES_PER_CNT;
+ core_mask >>= 1;
+ }
+ return r;
+}
+
+/*
+ * Calculate power estimation based on hardware counter `c1'
+ * and `c2' across all shader cores.
+ */
+static u32 calc_power_sc_double(struct kbase_ipa_context *ctx,
+ struct ipa_group *group, u32 c1, u32 c2)
+{
+ struct kbase_device *kbdev = ctx->kbdev;
+ u64 core_mask;
+ u32 base = 0, r = 0;
+
+ core_mask = kbdev->gpu_props.props.coherency_info.group[0].core_mask;
+ while (core_mask != 0ull) {
+ if ((core_mask & 1ull) != 0ull) {
+ u64 n = read_hwcnt(ctx, base + c1);
+ u32 d = read_hwcnt(ctx, GPU_ACTIVE);
+ u32 s = group->capacitance;
+
+ r = add_saturate(r, div_u64(n * s, d));
+ n = read_hwcnt(ctx, base + c2);
+ r = add_saturate(r, div_u64(n * s, d));
+ }
+ base += NR_CNT_PER_BLOCK * NR_BYTES_PER_CNT;
+ core_mask >>= 1;
+ }
+ return r;
+}
+
+static u32 calc_power_single(struct kbase_ipa_context *ctx,
+ struct ipa_group *group, u32 c)
+{
+ u64 n = read_hwcnt(ctx, c);
+ u32 d = read_hwcnt(ctx, GPU_ACTIVE);
+ u32 s = group->capacitance;
+
+ return div_u64(n * s, d);
+}
+
+static u32 calc_power_group0(struct kbase_ipa_context *ctx,
+ struct ipa_group *group)
+{
+ return calc_power_single(ctx, group, L2_ANY_LOOKUP);
+}
+
+static u32 calc_power_group1(struct kbase_ipa_context *ctx,
+ struct ipa_group *group)
+{
+ return calc_power_single(ctx, group, TILER_ACTIVE);
+}
+
+static u32 calc_power_group2(struct kbase_ipa_context *ctx,
+ struct ipa_group *group)
+{
+ return calc_power_sc_single(ctx, group, FRAG_ACTIVE);
+}
+
+static u32 calc_power_group3(struct kbase_ipa_context *ctx,
+ struct ipa_group *group)
+{
+ return calc_power_sc_double(ctx, group, VARY_SLOT_32,
+ VARY_SLOT_16);
+}
+
+static u32 calc_power_group4(struct kbase_ipa_context *ctx,
+ struct ipa_group *group)
+{
+ return calc_power_sc_single(ctx, group, TEX_COORD_ISSUE);
+}
+
+static u32 calc_power_group5(struct kbase_ipa_context *ctx,
+ struct ipa_group *group)
+{
+ return calc_power_sc_single(ctx, group, EXEC_INSTR_COUNT);
+}
+
+static u32 calc_power_group6(struct kbase_ipa_context *ctx,
+ struct ipa_group *group)
+{
+ return calc_power_sc_double(ctx, group, BEATS_RD_LSC,
+ BEATS_WR_LSC);
+}
+
+static u32 calc_power_group7(struct kbase_ipa_context *ctx,
+ struct ipa_group *group)
+{
+ return calc_power_sc_single(ctx, group, EXEC_CORE_ACTIVE);
+}
+
+static int attach_vinstr(struct kbase_ipa_context *ctx)
+{
+ struct kbase_device *kbdev = ctx->kbdev;
+ struct kbase_uk_hwcnt_reader_setup setup;
+ size_t dump_size;
+
+ dump_size = kbase_vinstr_dump_size(kbdev);
+ ctx->vinstr_buffer = kzalloc(dump_size, GFP_KERNEL);
+ if (!ctx->vinstr_buffer) {
+ dev_err(kbdev->dev, "Failed to allocate IPA dump buffer");
+ return -1;
+ }
+
+ setup.jm_bm = ~0u;
+ setup.shader_bm = ~0u;
+ setup.tiler_bm = ~0u;
+ setup.mmu_l2_bm = ~0u;
+ ctx->vinstr_cli = kbase_vinstr_hwcnt_kernel_setup(kbdev->vinstr_ctx,
+ &setup, ctx->vinstr_buffer);
+ if (!ctx->vinstr_cli) {
+ dev_err(kbdev->dev, "Failed to register IPA with vinstr core");
+ kfree(ctx->vinstr_buffer);
+ ctx->vinstr_buffer = NULL;
+ return -1;
+ }
+ return 0;
+}
+
+static void detach_vinstr(struct kbase_ipa_context *ctx)
+{
+ if (ctx->vinstr_cli)
+ kbase_vinstr_detach_client(ctx->vinstr_cli);
+ ctx->vinstr_cli = NULL;
+ kfree(ctx->vinstr_buffer);
+ ctx->vinstr_buffer = NULL;
+}
+
+struct kbase_ipa_context *kbase_ipa_init(struct kbase_device *kbdev)
+{
+ struct kbase_ipa_context *ctx;
+ int err;
+
+ ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
+ if (!ctx)
+ return NULL;
+
+ mutex_init(&ctx->ipa_lock);
+ ctx->kbdev = kbdev;
+
+ err = reset_ipa_groups(ctx);
+ if (err < 0)
+ goto err0;
+
+ err = sysfs_create_group(&kbdev->dev->kobj, &kbase_ipa_attr_group);
+ if (err < 0)
+ goto err0;
+
+ return ctx;
+err0:
+ kfree(ctx);
+ return NULL;
+}
+
+void kbase_ipa_term(struct kbase_ipa_context *ctx)
+{
+ struct kbase_device *kbdev = ctx->kbdev;
+
+ detach_vinstr(ctx);
+ sysfs_remove_group(&kbdev->dev->kobj, &kbase_ipa_attr_group);
+ kfree(ctx);
+}
+
+u32 kbase_ipa_dynamic_power(struct kbase_ipa_context *ctx, int *err)
+{
+ struct ipa_group *group;
+ u32 power = 0;
+ size_t i;
+
+ mutex_lock(&ctx->ipa_lock);
+ if (!ctx->vinstr_cli) {
+ *err = attach_vinstr(ctx);
+ if (*err < 0)
+ goto err0;
+ }
+ *err = kbase_vinstr_hwc_dump(ctx->vinstr_cli,
+ BASE_HWCNT_READER_EVENT_MANUAL);
+ if (*err)
+ goto err0;
+ for (i = 0; i < ARRAY_SIZE(ctx->groups); i++) {
+ group = &ctx->groups[i];
+ power = add_saturate(power, group->calc_power(ctx, group));
+ }
+err0:
+ mutex_unlock(&ctx->ipa_lock);
+ return power;
+}
+KBASE_EXPORT_TEST_API(kbase_ipa_dynamic_power);
diff --git a/midgard-0.r2p0/mali_kbase_ipa.h b/midgard-0.r2p0/mali_kbase_ipa.h
new file mode 100755
index 0000000..e2234d1
--- /dev/null
+++ b/midgard-0.r2p0/mali_kbase_ipa.h
@@ -0,0 +1,41 @@
+/*
+ *
+ * (C) COPYRIGHT 2015 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained
+ * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
+ * Boston, MA 02110-1301, USA.
+ *
+ */
+
+
+
+struct kbase_ipa_context;
+
+/**
+ * kbase_ipa_init - initialize the kbase ipa core
+ * @kbdev: kbase device
+ *
+ * Return: pointer to the IPA context or NULL on failure
+ */
+struct kbase_ipa_context *kbase_ipa_init(struct kbase_device *kbdev);
+
+/**
+ * kbase_ipa_term - terminate the kbase ipa core
+ * @ctx: pointer to the IPA context
+ */
+void kbase_ipa_term(struct kbase_ipa_context *ctx);
+
+/**
+ * kbase_ipa_dynamic_power - calculate power
+ * @ctx: pointer to the IPA context
+ * @err: set to 0 on success or a negative value on failure
+ *
+ * Return: power consumption in mW, normalised to 1 GHz and 1 V
+ */
+u32 kbase_ipa_dynamic_power(struct kbase_ipa_context *ctx, int *err);
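+
+/*
+ * Example caller (illustrative only):
+ *
+ *	int err;
+ *	u32 mw = kbase_ipa_dynamic_power(kbdev->ipa_ctx, &err);
+ *
+ *	if (!err)
+ *		... scale mw by the actual operating frequency and voltage ...
+ */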
diff --git a/midgard-0.r2p0/mali_kbase_ipa_tables.h b/midgard-0.r2p0/mali_kbase_ipa_tables.h
new file mode 100755
index 0000000..101abfe
--- /dev/null
+++ b/midgard-0.r2p0/mali_kbase_ipa_tables.h
@@ -0,0 +1,104 @@
+/*
+ *
+ * (C) COPYRIGHT 2015 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained
+ * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
+ * Boston, MA 02110-1301, USA.
+ *
+ */
+
+
+
+#define NR_BYTES_PER_CNT 4
+#define NR_CNT_PER_BLOCK 64
+
+#define JM_BASE (0 * NR_CNT_PER_BLOCK * NR_BYTES_PER_CNT)
+#define TILER_BASE (1 * NR_CNT_PER_BLOCK * NR_BYTES_PER_CNT)
+#define MMU_BASE (2 * NR_CNT_PER_BLOCK * NR_BYTES_PER_CNT)
+#define SC0_BASE (3 * NR_CNT_PER_BLOCK * NR_BYTES_PER_CNT)
+
+#define GPU_ACTIVE (JM_BASE + NR_BYTES_PER_CNT * 6)
+#define TILER_ACTIVE (TILER_BASE + NR_BYTES_PER_CNT * 45)
+#define L2_ANY_LOOKUP (MMU_BASE + NR_BYTES_PER_CNT * 25)
+#define FRAG_ACTIVE (SC0_BASE + NR_BYTES_PER_CNT * 4)
+#define EXEC_CORE_ACTIVE (SC0_BASE + NR_BYTES_PER_CNT * 26)
+#define EXEC_INSTR_COUNT (SC0_BASE + NR_BYTES_PER_CNT * 28)
+#define TEX_COORD_ISSUE (SC0_BASE + NR_BYTES_PER_CNT * 40)
+#define VARY_SLOT_32 (SC0_BASE + NR_BYTES_PER_CNT * 50)
+#define VARY_SLOT_16 (SC0_BASE + NR_BYTES_PER_CNT * 51)
+#define BEATS_RD_LSC (SC0_BASE + NR_BYTES_PER_CNT * 56)
+#define BEATS_WR_LSC (SC0_BASE + NR_BYTES_PER_CNT * 61)
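+
+/*
+ * The offsets above index into the vinstr dump buffer, which is assumed to be
+ * laid out as NR_CNT_PER_BLOCK 32-bit counters per block, with the job
+ * manager, tiler and MMU/L2 blocks followed by one block per shader core.
+ */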
+
+static u32 calc_power_group0(struct kbase_ipa_context *ctx,
+ struct ipa_group *group);
+static u32 calc_power_group1(struct kbase_ipa_context *ctx,
+ struct ipa_group *group);
+static u32 calc_power_group2(struct kbase_ipa_context *ctx,
+ struct ipa_group *group);
+static u32 calc_power_group3(struct kbase_ipa_context *ctx,
+ struct ipa_group *group);
+static u32 calc_power_group4(struct kbase_ipa_context *ctx,
+ struct ipa_group *group);
+static u32 calc_power_group5(struct kbase_ipa_context *ctx,
+ struct ipa_group *group);
+static u32 calc_power_group6(struct kbase_ipa_context *ctx,
+ struct ipa_group *group);
+static u32 calc_power_group7(struct kbase_ipa_context *ctx,
+ struct ipa_group *group);
+
+static struct ipa_group ipa_groups_def[] = {
+ /* L2 */
+ {
+ .name = "group0",
+ .capacitance = 687,
+ .calc_power = calc_power_group0,
+ },
+ /* TILER */
+ {
+ .name = "group1",
+ .capacitance = 0,
+ .calc_power = calc_power_group1,
+ },
+ /* FRAG */
+ {
+ .name = "group2",
+ .capacitance = 23,
+ .calc_power = calc_power_group2,
+ },
+ /* VARY */
+ {
+ .name = "group3",
+ .capacitance = 108,
+ .calc_power = calc_power_group3,
+ },
+ /* TEX */
+ {
+ .name = "group4",
+ .capacitance = 128,
+ .calc_power = calc_power_group4,
+ },
+ /* EXEC INSTR */
+ {
+ .name = "group5",
+ .capacitance = 249,
+ .calc_power = calc_power_group5,
+ },
+ /* LSC */
+ {
+ .name = "group6",
+ .capacitance = 0,
+ .calc_power = calc_power_group6,
+ },
+ /* EXEC OVERHEAD */
+ {
+ .name = "group7",
+ .capacitance = 29,
+ .calc_power = calc_power_group7,
+ },
+};
diff --git a/midgard-0.r2p0/mali_kbase_jd.c b/midgard-0.r2p0/mali_kbase_jd.c
new file mode 100755
index 0000000..81952e2
--- /dev/null
+++ b/midgard-0.r2p0/mali_kbase_jd.c
@@ -0,0 +1,1880 @@
+/*
+ *
+ * (C) COPYRIGHT 2010-2016 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained
+ * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
+ * Boston, MA 02110-1301, USA.
+ *
+ */
+
+
+
+
+
+#if defined(CONFIG_DMA_SHARED_BUFFER)
+#include <linux/dma-buf.h>
+#endif /* defined(CONFIG_DMA_SHARED_BUFFER) */
+#ifdef CONFIG_COMPAT
+#include <linux/compat.h>
+#endif
+#include <mali_kbase.h>
+#include <mali_kbase_uku.h>
+#include <linux/random.h>
+#include <linux/version.h>
+#include <linux/ratelimit.h>
+
+#include <mali_kbase_jm.h>
+#include <mali_kbase_hwaccess_jm.h>
+#include <mali_kbase_tlstream.h>
+
+#include "mali_kbase_dma_fence.h"
+
+#define beenthere(kctx, f, a...) dev_dbg(kctx->kbdev->dev, "%s:" f, __func__, ##a)
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 8, 0)
+/* random32 was renamed to prandom_u32 in 3.8 */
+#define prandom_u32 random32
+#endif
+
+/* Return whether katom will run on the GPU or not. Currently only soft jobs and
+ * dependency-only atoms do not run on the GPU */
+#define IS_GPU_ATOM(katom) (!((katom->core_req & BASE_JD_REQ_SOFT_JOB) || \
+ ((katom->core_req & BASE_JD_REQ_ATOM_TYPE) == \
+ BASE_JD_REQ_DEP)))
+/*
+ * This is the kernel side of the API. Only entry points are:
+ * - kbase_jd_submit(): Called from userspace to submit a single bag
+ * - kbase_jd_done(): Called from interrupt context to track the
+ * completion of a job.
+ * Callouts:
+ * - to the job manager (enqueue a job)
+ * - to the event subsystem (signals the completion/failure of bag/job-chains).
+ */
+
+static void __user *
+get_compat_pointer(struct kbase_context *kctx, const union kbase_pointer *p)
+{
+#ifdef CONFIG_COMPAT
+ if (kbase_ctx_flag(kctx, KCTX_COMPAT))
+ return compat_ptr(p->compat_value);
+#endif
+ return p->value;
+}
+
+/* Runs an atom, either by handing it to the JS or by immediately running it in
+ * the case of soft jobs.
+ *
+ * Returns whether the JS needs a reschedule.
+ *
+ * Note that the caller must also check the atom status: if it is
+ * KBASE_JD_ATOM_STATE_COMPLETED then jd_done_nolock must be called.
+ */
+static int jd_run_atom(struct kbase_jd_atom *katom)
+{
+ struct kbase_context *kctx = katom->kctx;
+
+ KBASE_DEBUG_ASSERT(katom->status != KBASE_JD_ATOM_STATE_UNUSED);
+
+ if ((katom->core_req & BASE_JD_REQ_ATOM_TYPE) == BASE_JD_REQ_DEP) {
+ /* Dependency only atom */
+ katom->status = KBASE_JD_ATOM_STATE_COMPLETED;
+ return 0;
+ } else if (katom->core_req & BASE_JD_REQ_SOFT_JOB) {
+ /* Soft-job */
+ if (katom->will_fail_event_code) {
+ katom->status = KBASE_JD_ATOM_STATE_COMPLETED;
+ return 0;
+ }
+ if ((katom->core_req & BASE_JD_REQ_SOFT_JOB_TYPE)
+ == BASE_JD_REQ_SOFT_REPLAY) {
+ if (!kbase_replay_process(katom))
+ katom->status = KBASE_JD_ATOM_STATE_COMPLETED;
+ } else if (kbase_process_soft_job(katom) == 0) {
+ kbase_finish_soft_job(katom);
+ katom->status = KBASE_JD_ATOM_STATE_COMPLETED;
+ }
+ return 0;
+ }
+
+ katom->status = KBASE_JD_ATOM_STATE_IN_JS;
+ /* Queue an action about whether we should try scheduling a context */
+ return kbasep_js_add_job(kctx, katom);
+}
+
+#if defined(CONFIG_KDS) || defined(CONFIG_MALI_DMA_FENCE)
+void kbase_jd_dep_clear_locked(struct kbase_jd_atom *katom)
+{
+ struct kbase_device *kbdev;
+
+ KBASE_DEBUG_ASSERT(katom);
+ kbdev = katom->kctx->kbdev;
+ KBASE_DEBUG_ASSERT(kbdev);
+
+ /* Check whether the atom's other dependencies were already met. If
+ * katom is a GPU atom then the job scheduler may be able to represent
+ * the dependencies, hence we may attempt to submit it before they are
+ * met. Other atoms must have had both dependencies resolved.
+ */
+ if (IS_GPU_ATOM(katom) ||
+ (!kbase_jd_katom_dep_atom(&katom->dep[0]) &&
+ !kbase_jd_katom_dep_atom(&katom->dep[1]))) {
+ /* katom dep complete, attempt to run it */
+ bool resched = false;
+
+ resched = jd_run_atom(katom);
+
+ if (katom->status == KBASE_JD_ATOM_STATE_COMPLETED) {
+ /* The atom has already finished */
+ resched |= jd_done_nolock(katom, NULL);
+ }
+
+ if (resched)
+ kbase_js_sched_all(kbdev);
+ }
+}
+#endif
+
+#ifdef CONFIG_KDS
+
+/* Add the katom to the kds waiting list.
+ * Atoms must be added to the waiting list after a successful call to kds_async_waitall.
+ * The caller must hold the kbase_jd_context.lock */
+
+static void kbase_jd_kds_waiters_add(struct kbase_jd_atom *katom)
+{
+ struct kbase_context *kctx;
+
+ KBASE_DEBUG_ASSERT(katom);
+
+ kctx = katom->kctx;
+
+ list_add_tail(&katom->node, &kctx->waiting_kds_resource);
+}
+
+/* Remove the katom from the kds waiting list.
+ * Atoms must be removed from the waiting list before a call to kds_resource_set_release_sync.
+ * The supplied katom must first have been added to the list with a call to kbase_jd_kds_waiters_add.
+ * The caller must hold the kbase_jd_context.lock */
+
+static void kbase_jd_kds_waiters_remove(struct kbase_jd_atom *katom)
+{
+ KBASE_DEBUG_ASSERT(katom);
+ list_del(&katom->node);
+}
+
+static void kds_dep_clear(void *callback_parameter, void *callback_extra_parameter)
+{
+ struct kbase_jd_atom *katom;
+ struct kbase_jd_context *ctx;
+
+ katom = (struct kbase_jd_atom *)callback_parameter;
+ KBASE_DEBUG_ASSERT(katom);
+
+ ctx = &katom->kctx->jctx;
+
+ /* If KDS resource has already been satisfied (e.g. due to zapping)
+ * do nothing.
+ */
+ mutex_lock(&ctx->lock);
+ if (!katom->kds_dep_satisfied) {
+ katom->kds_dep_satisfied = true;
+ kbase_jd_dep_clear_locked(katom);
+ }
+ mutex_unlock(&ctx->lock);
+}
+
+static void kbase_cancel_kds_wait_job(struct kbase_jd_atom *katom)
+{
+ KBASE_DEBUG_ASSERT(katom);
+
+	/* Prevent jd_done_nolock from being called twice on an atom when
+	 * there is a race between job completion and cancellation */
+
+ if (katom->status == KBASE_JD_ATOM_STATE_QUEUED) {
+ /* Wait was cancelled - zap the atom */
+ katom->event_code = BASE_JD_EVENT_JOB_CANCELLED;
+ if (jd_done_nolock(katom, NULL))
+ kbase_js_sched_all(katom->kctx->kbdev);
+ }
+}
+#endif /* CONFIG_KDS */
+
+void kbase_jd_free_external_resources(struct kbase_jd_atom *katom)
+{
+#ifdef CONFIG_KDS
+ if (katom->kds_rset) {
+ struct kbase_jd_context *jctx = &katom->kctx->jctx;
+
+ /*
+ * As the atom is no longer waiting, remove it from
+ * the waiting list.
+ */
+
+ mutex_lock(&jctx->lock);
+ kbase_jd_kds_waiters_remove(katom);
+ mutex_unlock(&jctx->lock);
+
+ /* Release the kds resource or cancel if zapping */
+ kds_resource_set_release_sync(&katom->kds_rset);
+ }
+#endif /* CONFIG_KDS */
+
+#ifdef CONFIG_MALI_DMA_FENCE
+ /* Flush dma-fence workqueue to ensure that any callbacks that may have
+ * been queued are done before continuing.
+	 * Any successfully completed atom would have had all its callbacks
+ * completed before the atom was run, so only flush for failed atoms.
+ */
+ if (katom->event_code != BASE_JD_EVENT_DONE)
+ flush_workqueue(katom->kctx->dma_fence.wq);
+#endif /* CONFIG_MALI_DMA_FENCE */
+}
+
+static void kbase_jd_post_external_resources(struct kbase_jd_atom *katom)
+{
+ KBASE_DEBUG_ASSERT(katom);
+ KBASE_DEBUG_ASSERT(katom->core_req & BASE_JD_REQ_EXTERNAL_RESOURCES);
+
+#ifdef CONFIG_KDS
+ /* Prevent the KDS resource from triggering the atom in case of zapping */
+ if (katom->kds_rset)
+ katom->kds_dep_satisfied = true;
+#endif /* CONFIG_KDS */
+
+#ifdef CONFIG_MALI_DMA_FENCE
+ kbase_dma_fence_signal(katom);
+#endif /* CONFIG_MALI_DMA_FENCE */
+
+ kbase_gpu_vm_lock(katom->kctx);
+ /* only roll back if extres is non-NULL */
+ if (katom->extres) {
+ u32 res_no;
+
+ res_no = katom->nr_extres;
+ while (res_no-- > 0) {
+ struct kbase_mem_phy_alloc *alloc = katom->extres[res_no].alloc;
+ struct kbase_va_region *reg;
+
+ reg = kbase_region_tracker_find_region_base_address(
+ katom->kctx,
+ katom->extres[res_no].gpu_address);
+ kbase_unmap_external_resource(katom->kctx, reg, alloc);
+ }
+ kfree(katom->extres);
+ katom->extres = NULL;
+ }
+ kbase_gpu_vm_unlock(katom->kctx);
+}
+
+/*
+ * Set up external resources needed by this job.
+ *
+ * jctx.lock must be held when this is called.
+ */
+
+static int kbase_jd_pre_external_resources(struct kbase_jd_atom *katom, const struct base_jd_atom_v2 *user_atom)
+{
+ int err_ret_val = -EINVAL;
+ u32 res_no;
+#ifdef CONFIG_KDS
+ u32 kds_res_count = 0;
+ struct kds_resource **kds_resources = NULL;
+ unsigned long *kds_access_bitmap = NULL;
+#endif /* CONFIG_KDS */
+#ifdef CONFIG_MALI_DMA_FENCE
+ struct kbase_dma_fence_resv_info info = {
+ .dma_fence_resv_count = 0,
+ };
+#endif
+ struct base_external_resource *input_extres;
+
+ KBASE_DEBUG_ASSERT(katom);
+ KBASE_DEBUG_ASSERT(katom->core_req & BASE_JD_REQ_EXTERNAL_RESOURCES);
+
+ /* no resources encoded, early out */
+ if (!katom->nr_extres)
+ return -EINVAL;
+
+ katom->extres = kmalloc_array(katom->nr_extres, sizeof(*katom->extres), GFP_KERNEL);
+ if (NULL == katom->extres) {
+ err_ret_val = -ENOMEM;
+ goto early_err_out;
+ }
+
+ /* copy user buffer to the end of our real buffer.
+ * Make sure the struct sizes haven't changed in a way
+ * we don't support */
+ BUILD_BUG_ON(sizeof(*input_extres) > sizeof(*katom->extres));
+ input_extres = (struct base_external_resource *)
+ (((unsigned char *)katom->extres) +
+ (sizeof(*katom->extres) - sizeof(*input_extres)) *
+ katom->nr_extres);
+
+ if (copy_from_user(input_extres,
+ get_compat_pointer(katom->kctx, &user_atom->extres_list),
+ sizeof(*input_extres) * katom->nr_extres) != 0) {
+ err_ret_val = -EINVAL;
+ goto early_err_out;
+ }
+#ifdef CONFIG_KDS
+ /* assume we have to wait for all */
+ KBASE_DEBUG_ASSERT(0 != katom->nr_extres);
+ kds_resources = kmalloc_array(katom->nr_extres, sizeof(struct kds_resource *), GFP_KERNEL);
+
+ if (!kds_resources) {
+ err_ret_val = -ENOMEM;
+ goto early_err_out;
+ }
+
+ KBASE_DEBUG_ASSERT(0 != katom->nr_extres);
+ kds_access_bitmap = kcalloc(BITS_TO_LONGS(katom->nr_extres),
+ sizeof(unsigned long),
+ GFP_KERNEL);
+ if (!kds_access_bitmap) {
+ err_ret_val = -ENOMEM;
+ goto early_err_out;
+ }
+#endif /* CONFIG_KDS */
+
+#ifdef CONFIG_MALI_DMA_FENCE
+ info.resv_objs = kmalloc_array(katom->nr_extres,
+ sizeof(struct reservation_object *),
+ GFP_KERNEL);
+ if (!info.resv_objs) {
+ err_ret_val = -ENOMEM;
+ goto early_err_out;
+ }
+
+ info.dma_fence_excl_bitmap = kcalloc(BITS_TO_LONGS(katom->nr_extres),
+ sizeof(unsigned long),
+ GFP_KERNEL);
+ if (!info.dma_fence_excl_bitmap) {
+ err_ret_val = -ENOMEM;
+ goto early_err_out;
+ }
+#endif /* CONFIG_MALI_DMA_FENCE */
+
+	/* Take the process's mmap lock */
+ down_read(&current->mm->mmap_sem);
+
+ /* need to keep the GPU VM locked while we set up UMM buffers */
+ kbase_gpu_vm_lock(katom->kctx);
+ for (res_no = 0; res_no < katom->nr_extres; res_no++) {
+ struct base_external_resource *res;
+ struct kbase_va_region *reg;
+ struct kbase_mem_phy_alloc *alloc;
+ bool exclusive;
+
+ res = &input_extres[res_no];
+ exclusive = (res->ext_resource & BASE_EXT_RES_ACCESS_EXCLUSIVE)
+ ? true : false;
+ reg = kbase_region_tracker_find_region_enclosing_address(
+ katom->kctx,
+ res->ext_resource & ~BASE_EXT_RES_ACCESS_EXCLUSIVE);
+ /* did we find a matching region object? */
+ if (NULL == reg || (reg->flags & KBASE_REG_FREE)) {
+ /* roll back */
+ goto failed_loop;
+ }
+
+ if (!(katom->core_req & BASE_JD_REQ_SOFT_JOB) &&
+ (reg->flags & KBASE_REG_SECURE)) {
+ katom->atom_flags |= KBASE_KATOM_FLAG_PROTECTED;
+ }
+
+ alloc = kbase_map_external_resource(katom->kctx, reg,
+ current->mm
+#ifdef CONFIG_KDS
+ , &kds_res_count, kds_resources,
+ kds_access_bitmap, exclusive
+#endif
+ );
+ if (!alloc) {
+ err_ret_val = -EINVAL;
+ goto failed_loop;
+ }
+
+#ifdef CONFIG_MALI_DMA_FENCE
+ if (reg->gpu_alloc->type == KBASE_MEM_TYPE_IMPORTED_UMM) {
+ struct reservation_object *resv;
+
+ resv = reg->gpu_alloc->imported.umm.dma_buf->resv;
+ if (resv)
+ kbase_dma_fence_add_reservation(resv, &info,
+ exclusive);
+ }
+#endif /* CONFIG_MALI_DMA_FENCE */
+
+		/* Finish by updating our array with the data we found. */
+		/* NOTE: It is important that this is the last thing we do (or
+		 * at least not before the first write) as we overwrite elements
+		 * as we loop and could be overwriting ourselves, so no writes
+		 * until the last read for an element.
+		 */
+		/* Save the start_pfn (as an address, not a pfn) for fast lookup later */
+		katom->extres[res_no].gpu_address = reg->start_pfn << PAGE_SHIFT;
+ katom->extres[res_no].alloc = alloc;
+ }
+ /* successfully parsed the extres array */
+ /* drop the vm lock before we call into kds */
+ kbase_gpu_vm_unlock(katom->kctx);
+
+	/* Release the process's mmap lock */
+ up_read(&current->mm->mmap_sem);
+
+#ifdef CONFIG_KDS
+ if (kds_res_count) {
+ int wait_failed;
+
+ /* We have resources to wait for with kds */
+ katom->kds_dep_satisfied = false;
+
+ wait_failed = kds_async_waitall(&katom->kds_rset,
+ &katom->kctx->jctx.kds_cb, katom, NULL,
+ kds_res_count, kds_access_bitmap,
+ kds_resources);
+
+ if (wait_failed)
+ goto failed_kds_setup;
+ else
+ kbase_jd_kds_waiters_add(katom);
+ } else {
+ /* Nothing to wait for, so kds dep met */
+ katom->kds_dep_satisfied = true;
+ }
+ kfree(kds_resources);
+ kfree(kds_access_bitmap);
+#endif /* CONFIG_KDS */
+
+#ifdef CONFIG_MALI_DMA_FENCE
+ if (info.dma_fence_resv_count) {
+ int ret;
+
+ ret = kbase_dma_fence_wait(katom, &info);
+ if (ret < 0)
+ goto failed_dma_fence_setup;
+ }
+
+ kfree(info.resv_objs);
+ kfree(info.dma_fence_excl_bitmap);
+#endif /* CONFIG_MALI_DMA_FENCE */
+
+ /* all done OK */
+ return 0;
+
+/* error handling section */
+
+#ifdef CONFIG_MALI_DMA_FENCE
+failed_dma_fence_setup:
+#ifdef CONFIG_KDS
+ /* If we are here, dma_fence setup failed but KDS didn't.
+ * Revert KDS setup if any.
+ */
+ if (kds_res_count) {
+ mutex_unlock(&katom->kctx->jctx.lock);
+ kds_resource_set_release_sync(&katom->kds_rset);
+ mutex_lock(&katom->kctx->jctx.lock);
+
+ kbase_jd_kds_waiters_remove(katom);
+ katom->kds_dep_satisfied = true;
+ }
+#endif /* CONFIG_KDS */
+#endif /* CONFIG_MALI_DMA_FENCE */
+#ifdef CONFIG_KDS
+failed_kds_setup:
+#endif
+#if defined(CONFIG_KDS) || defined(CONFIG_MALI_DMA_FENCE)
+	/* Take the process's mmap lock */
+ down_read(&current->mm->mmap_sem);
+
+ /* lock before we unmap */
+ kbase_gpu_vm_lock(katom->kctx);
+#endif
+
+ failed_loop:
+ /* undo the loop work */
+ while (res_no-- > 0) {
+ struct kbase_mem_phy_alloc *alloc = katom->extres[res_no].alloc;
+
+ kbase_unmap_external_resource(katom->kctx, NULL, alloc);
+ }
+ kbase_gpu_vm_unlock(katom->kctx);
+
+	/* Release the process's mmap lock */
+ up_read(&current->mm->mmap_sem);
+
+ early_err_out:
+ kfree(katom->extres);
+ katom->extres = NULL;
+#ifdef CONFIG_KDS
+ kfree(kds_resources);
+ kfree(kds_access_bitmap);
+#endif /* CONFIG_KDS */
+#ifdef CONFIG_MALI_DMA_FENCE
+ kfree(info.resv_objs);
+ kfree(info.dma_fence_excl_bitmap);
+#endif
+ return err_ret_val;
+}
+
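+/*
+ * jd_resolve_dep() - resolve dependencies on a completed atom.
+ *
+ * Walks the atoms that depend on @katom through dependency slot @d, clears
+ * that dependency and, unless it was an order-only dependency, propagates
+ * @katom's failure code to the dependent. Dependents whose remaining
+ * dependencies (including any KDS or dma-fence waits) are satisfied are
+ * appended to @out_list so the caller can run them.
+ */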
+static inline void jd_resolve_dep(struct list_head *out_list,
+ struct kbase_jd_atom *katom,
+ u8 d, bool ctx_is_dying)
+{
+ u8 other_d = !d;
+
+ while (!list_empty(&katom->dep_head[d])) {
+ struct kbase_jd_atom *dep_atom;
+ struct kbase_jd_atom *other_dep_atom;
+ u8 dep_type;
+
+ dep_atom = list_entry(katom->dep_head[d].next,
+ struct kbase_jd_atom, dep_item[d]);
+ list_del(katom->dep_head[d].next);
+
+ dep_type = kbase_jd_katom_dep_type(&dep_atom->dep[d]);
+ kbase_jd_katom_dep_clear(&dep_atom->dep[d]);
+
+ if (katom->event_code != BASE_JD_EVENT_DONE &&
+ (dep_type != BASE_JD_DEP_TYPE_ORDER)) {
+#ifdef CONFIG_KDS
+ if (!dep_atom->kds_dep_satisfied) {
+ /* Just set kds_dep_satisfied to true. If the callback happens after this then it will early out and
+ * do nothing. If the callback doesn't happen then kbase_jd_post_external_resources will clean up
+ */
+ dep_atom->kds_dep_satisfied = true;
+ }
+#endif
+
+#ifdef CONFIG_MALI_DMA_FENCE
+ kbase_dma_fence_cancel_callbacks(dep_atom);
+#endif
+
+ dep_atom->event_code = katom->event_code;
+ KBASE_DEBUG_ASSERT(dep_atom->status !=
+ KBASE_JD_ATOM_STATE_UNUSED);
+
+ if ((dep_atom->core_req & BASE_JD_REQ_SOFT_REPLAY)
+ != BASE_JD_REQ_SOFT_REPLAY) {
+ dep_atom->will_fail_event_code =
+ dep_atom->event_code;
+ } else {
+ dep_atom->status =
+ KBASE_JD_ATOM_STATE_COMPLETED;
+ }
+ }
+ other_dep_atom = (struct kbase_jd_atom *)
+ kbase_jd_katom_dep_atom(&dep_atom->dep[other_d]);
+
+ if (!dep_atom->in_jd_list && (!other_dep_atom ||
+ (IS_GPU_ATOM(dep_atom) && !ctx_is_dying &&
+ !dep_atom->will_fail_event_code &&
+ !other_dep_atom->will_fail_event_code))) {
+ bool dep_satisfied = true;
+#ifdef CONFIG_MALI_DMA_FENCE
+ int dep_count;
+
+ dep_count = atomic_read(&dep_atom->dma_fence.dep_count);
+ if (likely(dep_count == -1)) {
+ dep_satisfied = true;
+ } else {
+ /*
+ * There are either still active callbacks, or
+			 * all fences for this @dep_atom have signaled,
+ * but the worker that will queue the atom has
+ * not yet run.
+ *
+ * Wait for the fences to signal and the fence
+ * worker to run and handle @dep_atom. If
+ * @dep_atom was completed due to error on
+ * @katom, then the fence worker will pick up
+ * the complete status and error code set on
+ * @dep_atom above.
+ */
+ dep_satisfied = false;
+ }
+#endif /* CONFIG_MALI_DMA_FENCE */
+
+#ifdef CONFIG_KDS
+ dep_satisfied = dep_satisfied && dep_atom->kds_dep_satisfied;
+#endif
+
+ if (dep_satisfied) {
+ dep_atom->in_jd_list = true;
+ list_add_tail(&dep_atom->jd_item, out_list);
+ }
+ }
+ }
+}
+
+KBASE_EXPORT_TEST_API(jd_resolve_dep);
+
+#if MALI_CUSTOMER_RELEASE == 0
+static void jd_force_failure(struct kbase_device *kbdev, struct kbase_jd_atom *katom)
+{
+ kbdev->force_replay_count++;
+
+ if (kbdev->force_replay_count >= kbdev->force_replay_limit) {
+ kbdev->force_replay_count = 0;
+ katom->event_code = BASE_JD_EVENT_FORCE_REPLAY;
+
+ if (kbdev->force_replay_random)
+ kbdev->force_replay_limit =
+ (prandom_u32() % KBASEP_FORCE_REPLAY_RANDOM_LIMIT) + 1;
+
+ dev_info(kbdev->dev, "force_replay : promoting to error\n");
+ }
+}
+
+/** Test to see if atom should be forced to fail.
+ *
+ * This function will check if an atom has a replay job as a dependent. If so
+ * then it will be considered for forced failure. */
+static void jd_check_force_failure(struct kbase_jd_atom *katom)
+{
+ struct kbase_context *kctx = katom->kctx;
+ struct kbase_device *kbdev = kctx->kbdev;
+ int i;
+
+ if ((kbdev->force_replay_limit == KBASEP_FORCE_REPLAY_DISABLED) ||
+ (katom->core_req & BASEP_JD_REQ_EVENT_NEVER))
+ return;
+
+ for (i = 1; i < BASE_JD_ATOM_COUNT; i++) {
+ if (kbase_jd_katom_dep_atom(&kctx->jctx.atoms[i].dep[0]) == katom ||
+ kbase_jd_katom_dep_atom(&kctx->jctx.atoms[i].dep[1]) == katom) {
+ struct kbase_jd_atom *dep_atom = &kctx->jctx.atoms[i];
+
+ if ((dep_atom->core_req & BASE_JD_REQ_SOFT_JOB_TYPE) ==
+ BASE_JD_REQ_SOFT_REPLAY &&
+ (dep_atom->core_req & kbdev->force_replay_core_req)
+ == kbdev->force_replay_core_req) {
+ jd_force_failure(kbdev, katom);
+ return;
+ }
+ }
+ }
+}
+#endif
+
+/**
+ * is_dep_valid - Validate that a dependency is valid for early dependency
+ * submission
+ * @katom: Dependency atom to validate
+ *
+ * A dependency is valid if any of the following are true:
+ * - It does not exist (a non-existent dependency does not block submission)
+ * - It is in the job scheduler
+ * - It has completed, does not have a failure event code, and has not been
+ * marked to fail in the future
+ *
+ * Return: true if valid, false otherwise
+ */
+static bool is_dep_valid(struct kbase_jd_atom *katom)
+{
+ /* If there's no dependency then this is 'valid' from the perspective of
+ * early dependency submission */
+ if (!katom)
+ return true;
+
+ /* Dependency must have reached the job scheduler */
+ if (katom->status < KBASE_JD_ATOM_STATE_IN_JS)
+ return false;
+
+ /* If dependency has completed and has failed or will fail then it is
+ * not valid */
+ if (katom->status >= KBASE_JD_ATOM_STATE_HW_COMPLETED &&
+ (katom->event_code != BASE_JD_EVENT_DONE ||
+ katom->will_fail_event_code))
+ return false;
+
+ return true;
+}
+
+static void jd_try_submitting_deps(struct list_head *out_list,
+ struct kbase_jd_atom *node)
+{
+ int i;
+
+ for (i = 0; i < 2; i++) {
+ struct list_head *pos;
+
+ list_for_each(pos, &node->dep_head[i]) {
+ struct kbase_jd_atom *dep_atom = list_entry(pos,
+ struct kbase_jd_atom, dep_item[i]);
+
+ if (IS_GPU_ATOM(dep_atom) && !dep_atom->in_jd_list) {
+				/* Check if atom deps look sane */
+ bool dep0_valid = is_dep_valid(
+ dep_atom->dep[0].atom);
+ bool dep1_valid = is_dep_valid(
+ dep_atom->dep[1].atom);
+ bool dep_satisfied = true;
+#ifdef CONFIG_MALI_DMA_FENCE
+ int dep_count;
+
+ dep_count = atomic_read(
+ &dep_atom->dma_fence.dep_count);
+ if (likely(dep_count == -1)) {
+ dep_satisfied = true;
+ } else {
+ /*
+ * There are either still active callbacks, or
+				 * all fences for this @dep_atom have signaled,
+ * but the worker that will queue the atom has
+ * not yet run.
+ *
+ * Wait for the fences to signal and the fence
+ * worker to run and handle @dep_atom. If
+ * @dep_atom was completed due to error on
+ * @katom, then the fence worker will pick up
+ * the complete status and error code set on
+ * @dep_atom above.
+ */
+ dep_satisfied = false;
+ }
+#endif /* CONFIG_MALI_DMA_FENCE */
+#ifdef CONFIG_KDS
+ dep_satisfied = dep_satisfied &&
+ dep_atom->kds_dep_satisfied;
+#endif
+
+ if (dep0_valid && dep1_valid && dep_satisfied) {
+ dep_atom->in_jd_list = true;
+ list_add(&dep_atom->jd_item, out_list);
+ }
+ }
+ }
+ }
+}
+
+/*
+ * Perform the necessary handling of an atom that has finished running
+ * on the GPU.
+ *
+ * Note that if this is a soft-job that has had kbase_prepare_soft_job called on it then the caller
+ * is responsible for calling kbase_finish_soft_job *before* calling this function.
+ *
+ * The caller must hold the kbase_jd_context.lock.
+ */
+bool jd_done_nolock(struct kbase_jd_atom *katom,
+ struct list_head *completed_jobs_ctx)
+{
+ struct kbase_context *kctx = katom->kctx;
+ struct kbase_device *kbdev = kctx->kbdev;
+ struct list_head completed_jobs;
+ struct list_head runnable_jobs;
+ bool need_to_try_schedule_context = false;
+ int i;
+
+ INIT_LIST_HEAD(&completed_jobs);
+ INIT_LIST_HEAD(&runnable_jobs);
+
+ KBASE_DEBUG_ASSERT(katom->status != KBASE_JD_ATOM_STATE_UNUSED);
+
+#if MALI_CUSTOMER_RELEASE == 0
+ jd_check_force_failure(katom);
+#endif
+
+	/* This is needed in case an atom is failed due to being invalid; this
+	 * can happen *before* the jobs that the atom depends on have completed */
+ for (i = 0; i < 2; i++) {
+ if (kbase_jd_katom_dep_atom(&katom->dep[i])) {
+ list_del(&katom->dep_item[i]);
+ kbase_jd_katom_dep_clear(&katom->dep[i]);
+ }
+ }
+
+ /* With PRLAM-10817 or PRLAM-10959 the last tile of a fragment job being soft-stopped can fail with
+ * BASE_JD_EVENT_TILE_RANGE_FAULT.
+ *
+ * So here if the fragment job failed with TILE_RANGE_FAULT and it has been soft-stopped, then we promote the
+ * error code to BASE_JD_EVENT_DONE
+ */
+
+ if ((kbase_hw_has_issue(kbdev, BASE_HW_ISSUE_10817) || kbase_hw_has_issue(kbdev, BASE_HW_ISSUE_10959)) &&
+ katom->event_code == BASE_JD_EVENT_TILE_RANGE_FAULT) {
+ if ((katom->core_req & BASE_JD_REQ_FS) && (katom->atom_flags & KBASE_KATOM_FLAG_BEEN_SOFT_STOPPPED)) {
+ /* Promote the failure to job done */
+ katom->event_code = BASE_JD_EVENT_DONE;
+ katom->atom_flags = katom->atom_flags & (~KBASE_KATOM_FLAG_BEEN_SOFT_STOPPPED);
+ }
+ }
+
+ katom->status = KBASE_JD_ATOM_STATE_COMPLETED;
+ list_add_tail(&katom->jd_item, &completed_jobs);
+
+ while (!list_empty(&completed_jobs)) {
+ katom = list_entry(completed_jobs.prev, struct kbase_jd_atom, jd_item);
+ list_del(completed_jobs.prev);
+ KBASE_DEBUG_ASSERT(katom->status == KBASE_JD_ATOM_STATE_COMPLETED);
+
+ for (i = 0; i < 2; i++)
+ jd_resolve_dep(&runnable_jobs, katom, i,
+ kbase_ctx_flag(kctx, KCTX_DYING));
+
+ if (katom->core_req & BASE_JD_REQ_EXTERNAL_RESOURCES)
+ kbase_jd_post_external_resources(katom);
+
+ while (!list_empty(&runnable_jobs)) {
+ struct kbase_jd_atom *node;
+
+ node = list_entry(runnable_jobs.next,
+ struct kbase_jd_atom, jd_item);
+ list_del(runnable_jobs.next);
+ node->in_jd_list = false;
+
+ KBASE_DEBUG_ASSERT(node->status != KBASE_JD_ATOM_STATE_UNUSED);
+
+ if (node->status != KBASE_JD_ATOM_STATE_COMPLETED &&
+ !kbase_ctx_flag(kctx, KCTX_DYING)) {
+ need_to_try_schedule_context |= jd_run_atom(node);
+ } else {
+ node->event_code = katom->event_code;
+
+ if ((node->core_req &
+ BASE_JD_REQ_SOFT_JOB_TYPE) ==
+ BASE_JD_REQ_SOFT_REPLAY) {
+ if (kbase_replay_process(node))
+ /* Don't complete this atom */
+ continue;
+ } else if (node->core_req &
+ BASE_JD_REQ_SOFT_JOB) {
+ /* If this is a fence wait soft job
+ * then remove it from the list of sync
+ * waiters.
+ */
+ if (BASE_JD_REQ_SOFT_FENCE_WAIT == node->core_req)
+ kbasep_remove_waiting_soft_job(node);
+
+ kbase_finish_soft_job(node);
+ }
+ node->status = KBASE_JD_ATOM_STATE_COMPLETED;
+ }
+
+ if (node->status == KBASE_JD_ATOM_STATE_COMPLETED) {
+ list_add_tail(&node->jd_item, &completed_jobs);
+ } else if (node->status == KBASE_JD_ATOM_STATE_IN_JS &&
+ !node->will_fail_event_code) {
+ /* Node successfully submitted, try submitting
+ * dependencies as they may now be representable
+ * in JS */
+ jd_try_submitting_deps(&runnable_jobs, node);
+ }
+ }
+
+ /* Register a completed job as a disjoint event when the GPU
+ * is in a disjoint state (ie. being reset or replaying jobs).
+ */
+ kbase_disjoint_event_potential(kctx->kbdev);
+ if (completed_jobs_ctx)
+ list_add_tail(&katom->jd_item, completed_jobs_ctx);
+ else
+ kbase_event_post(kctx, katom);
+
+ /* Decrement and check the TOTAL number of jobs. This includes
+ * those not tracked by the scheduler: 'not ready to run' and
+ * 'dependency-only' jobs. */
+ if (--kctx->jctx.job_nr == 0)
+ wake_up(&kctx->jctx.zero_jobs_wait); /* All events are safely queued now, and we can signal any waiter
+ * that we've got no more jobs (so we can be safely terminated) */
+ }
+
+ return need_to_try_schedule_context;
+}
+
+KBASE_EXPORT_TEST_API(jd_done_nolock);
+
+#ifdef CONFIG_GPU_TRACEPOINTS
+enum {
+ CORE_REQ_DEP_ONLY,
+ CORE_REQ_SOFT,
+ CORE_REQ_COMPUTE,
+ CORE_REQ_FRAGMENT,
+ CORE_REQ_VERTEX,
+ CORE_REQ_TILER,
+ CORE_REQ_FRAGMENT_VERTEX,
+ CORE_REQ_FRAGMENT_VERTEX_TILER,
+ CORE_REQ_FRAGMENT_TILER,
+ CORE_REQ_VERTEX_TILER,
+ CORE_REQ_UNKNOWN
+};
+static const char * const core_req_strings[] = {
+ "Dependency Only Job",
+ "Soft Job",
+ "Compute Shader Job",
+ "Fragment Shader Job",
+ "Vertex/Geometry Shader Job",
+ "Tiler Job",
+ "Fragment Shader + Vertex/Geometry Shader Job",
+ "Fragment Shader + Vertex/Geometry Shader Job + Tiler Job",
+ "Fragment Shader + Tiler Job",
+ "Vertex/Geometry Shader Job + Tiler Job",
+ "Unknown Job"
+};
+static const char *kbasep_map_core_reqs_to_string(base_jd_core_req core_req)
+{
+ if (core_req & BASE_JD_REQ_SOFT_JOB)
+ return core_req_strings[CORE_REQ_SOFT];
+ if (core_req & BASE_JD_REQ_ONLY_COMPUTE)
+ return core_req_strings[CORE_REQ_COMPUTE];
+ switch (core_req & (BASE_JD_REQ_FS | BASE_JD_REQ_CS | BASE_JD_REQ_T)) {
+ case BASE_JD_REQ_DEP:
+ return core_req_strings[CORE_REQ_DEP_ONLY];
+ case BASE_JD_REQ_FS:
+ return core_req_strings[CORE_REQ_FRAGMENT];
+ case BASE_JD_REQ_CS:
+ return core_req_strings[CORE_REQ_VERTEX];
+ case BASE_JD_REQ_T:
+ return core_req_strings[CORE_REQ_TILER];
+ case (BASE_JD_REQ_FS | BASE_JD_REQ_CS):
+ return core_req_strings[CORE_REQ_FRAGMENT_VERTEX];
+ case (BASE_JD_REQ_FS | BASE_JD_REQ_T):
+ return core_req_strings[CORE_REQ_FRAGMENT_TILER];
+ case (BASE_JD_REQ_CS | BASE_JD_REQ_T):
+ return core_req_strings[CORE_REQ_VERTEX_TILER];
+ case (BASE_JD_REQ_FS | BASE_JD_REQ_CS | BASE_JD_REQ_T):
+ return core_req_strings[CORE_REQ_FRAGMENT_VERTEX_TILER];
+ }
+ return core_req_strings[CORE_REQ_UNKNOWN];
+}
+#endif
+
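+/*
+ * jd_submit_atom() - set up and submit a single atom from user space.
+ *
+ * Returns true if the caller should kick the job scheduler (i.e. call
+ * kbase_js_sched_all()) once submission is finished, false otherwise.
+ * The caller must hold kctx->jctx.lock.
+ */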
+bool jd_submit_atom(struct kbase_context *kctx, const struct base_jd_atom_v2 *user_atom, struct kbase_jd_atom *katom)
+{
+ struct kbase_jd_context *jctx = &kctx->jctx;
+ int queued = 0;
+ int i;
+ int sched_prio;
+ bool ret;
+ bool will_fail = false;
+
+ /* Update the TOTAL number of jobs. This includes those not tracked by
+ * the scheduler: 'not ready to run' and 'dependency-only' jobs. */
+ jctx->job_nr++;
+
+ katom->start_timestamp.tv64 = 0;
+ katom->time_spent_us = 0;
+ katom->udata = user_atom->udata;
+ katom->kctx = kctx;
+ katom->nr_extres = user_atom->nr_extres;
+ katom->extres = NULL;
+ katom->device_nr = user_atom->device_nr;
+ katom->affinity = 0;
+ katom->jc = user_atom->jc;
+ katom->coreref_state = KBASE_ATOM_COREREF_STATE_NO_CORES_REQUESTED;
+ katom->core_req = user_atom->core_req;
+ katom->atom_flags = 0;
+ katom->retry_count = 0;
+ katom->need_cache_flush_cores_retained = 0;
+ katom->pre_dep = NULL;
+ katom->post_dep = NULL;
+ katom->x_pre_dep = NULL;
+ katom->x_post_dep = NULL;
+ katom->will_fail_event_code = BASE_JD_EVENT_NOT_STARTED;
+
+ /* Implicitly sets katom->protected_state.enter as well. */
+ katom->protected_state.exit = KBASE_ATOM_EXIT_PROTECTED_CHECK;
+
+ katom->age = kctx->age_count++;
+
+ INIT_LIST_HEAD(&katom->jd_item);
+#ifdef CONFIG_KDS
+ /* Start by assuming that the KDS dependencies are satisfied,
+ * kbase_jd_pre_external_resources will correct this if there are dependencies */
+ katom->kds_dep_satisfied = true;
+ katom->kds_rset = NULL;
+#endif /* CONFIG_KDS */
+#ifdef CONFIG_MALI_DMA_FENCE
+ atomic_set(&katom->dma_fence.dep_count, -1);
+#endif
+
+ kbase_tlstream_tl_attrib_atom_state(katom, TL_ATOM_STATE_IDLE);
+
+	/* Don't do anything if there is a mess-up with the dependencies.
+	 * Both dependencies are checked in a separate pass first; otherwise it
+	 * would add extra complexity to undo the 1st dependency (just added to
+	 * the list) if only the 2nd one has an invalid config.
+	 */
+ for (i = 0; i < 2; i++) {
+ int dep_atom_number = user_atom->pre_dep[i].atom_id;
+ base_jd_dep_type dep_atom_type = user_atom->pre_dep[i].dependency_type;
+
+ if (dep_atom_number) {
+ if (dep_atom_type != BASE_JD_DEP_TYPE_ORDER &&
+ dep_atom_type != BASE_JD_DEP_TYPE_DATA) {
+ katom->event_code = BASE_JD_EVENT_JOB_CONFIG_FAULT;
+ katom->status = KBASE_JD_ATOM_STATE_COMPLETED;
+
+ /* Wrong dependency setup. Atom will be sent
+ * back to user space. Do not record any
+ * dependencies. */
+ kbase_tlstream_tl_new_atom(
+ katom,
+ kbase_jd_atom_id(kctx, katom));
+ kbase_tlstream_tl_ret_atom_ctx(
+ katom, kctx);
+
+ ret = jd_done_nolock(katom, NULL);
+ goto out;
+ }
+ }
+ }
+
+ /* Add dependencies */
+ for (i = 0; i < 2; i++) {
+ int dep_atom_number = user_atom->pre_dep[i].atom_id;
+ base_jd_dep_type dep_atom_type;
+ struct kbase_jd_atom *dep_atom = &jctx->atoms[dep_atom_number];
+
+ dep_atom_type = user_atom->pre_dep[i].dependency_type;
+ kbase_jd_katom_dep_clear(&katom->dep[i]);
+
+ if (!dep_atom_number)
+ continue;
+
+ if (dep_atom->status == KBASE_JD_ATOM_STATE_UNUSED ||
+ dep_atom->status == KBASE_JD_ATOM_STATE_COMPLETED) {
+
+ if (dep_atom->event_code == BASE_JD_EVENT_DONE)
+ continue;
+			/* Don't stop this atom if it only has an order
+			 * dependency on the failed one; try to submit it
+			 * through the normal path instead.
+ */
+ if (dep_atom_type == BASE_JD_DEP_TYPE_ORDER &&
+ dep_atom->event_code > BASE_JD_EVENT_ACTIVE) {
+ continue;
+ }
+
+ /* Atom has completed, propagate the error code if any */
+ katom->event_code = dep_atom->event_code;
+ katom->status = KBASE_JD_ATOM_STATE_QUEUED;
+
+ /* This atom is going through soft replay or
+ * will be sent back to user space. Do not record any
+ * dependencies. */
+ kbase_tlstream_tl_new_atom(
+ katom,
+ kbase_jd_atom_id(kctx, katom));
+ kbase_tlstream_tl_ret_atom_ctx(katom, kctx);
+
+ if ((katom->core_req & BASE_JD_REQ_SOFT_JOB_TYPE)
+ == BASE_JD_REQ_SOFT_REPLAY) {
+ if (kbase_replay_process(katom)) {
+ ret = false;
+ goto out;
+ }
+ }
+ will_fail = true;
+
+ } else {
+ /* Atom is in progress, add this atom to the list */
+ list_add_tail(&katom->dep_item[i], &dep_atom->dep_head[i]);
+ kbase_jd_katom_dep_set(&katom->dep[i], dep_atom, dep_atom_type);
+ queued = 1;
+ }
+ }
+
+ if (will_fail) {
+ if (!queued) {
+ ret = jd_done_nolock(katom, NULL);
+
+ goto out;
+ } else {
+ katom->will_fail_event_code = katom->event_code;
+ ret = false;
+
+ goto out;
+ }
+ } else {
+ /* These must occur after the above loop to ensure that an atom
+ * that depends on a previous atom with the same number behaves
+ * as expected */
+ katom->event_code = BASE_JD_EVENT_DONE;
+ katom->status = KBASE_JD_ATOM_STATE_QUEUED;
+ }
+
+ /* For invalid priority, be most lenient and choose the default */
+ sched_prio = kbasep_js_atom_prio_to_sched_prio(user_atom->prio);
+ if (sched_prio == KBASE_JS_ATOM_SCHED_PRIO_INVALID)
+ sched_prio = KBASE_JS_ATOM_SCHED_PRIO_DEFAULT;
+ katom->sched_priority = sched_prio;
+
+ /* Create a new atom recording all dependencies it was set up with. */
+ kbase_tlstream_tl_new_atom(
+ katom,
+ kbase_jd_atom_id(kctx, katom));
+ kbase_tlstream_tl_attrib_atom_priority(katom, katom->sched_priority);
+ kbase_tlstream_tl_ret_atom_ctx(katom, kctx);
+ for (i = 0; i < 2; i++)
+ if (BASE_JD_DEP_TYPE_INVALID != kbase_jd_katom_dep_type(
+ &katom->dep[i])) {
+ kbase_tlstream_tl_dep_atom_atom(
+ (void *)kbase_jd_katom_dep_atom(
+ &katom->dep[i]),
+ (void *)katom);
+ } else if (BASE_JD_DEP_TYPE_INVALID !=
+ user_atom->pre_dep[i].dependency_type) {
+ /* Resolved dependency. */
+ int dep_atom_number =
+ user_atom->pre_dep[i].atom_id;
+ struct kbase_jd_atom *dep_atom =
+ &jctx->atoms[dep_atom_number];
+
+ kbase_tlstream_tl_rdep_atom_atom(
+ (void *)dep_atom,
+ (void *)katom);
+ }
+
+ /* Reject atoms with job chain = NULL, as these cause issues with soft-stop */
+ if (!katom->jc && (katom->core_req & BASE_JD_REQ_ATOM_TYPE) != BASE_JD_REQ_DEP) {
+ dev_warn(kctx->kbdev->dev, "Rejecting atom with jc = NULL");
+ katom->event_code = BASE_JD_EVENT_JOB_INVALID;
+ ret = jd_done_nolock(katom, NULL);
+ goto out;
+ }
+
+ /* Reject atoms with an invalid device_nr */
+ if ((katom->core_req & BASE_JD_REQ_SPECIFIC_COHERENT_GROUP) &&
+ (katom->device_nr >= kctx->kbdev->gpu_props.num_core_groups)) {
+ dev_warn(kctx->kbdev->dev,
+ "Rejecting atom with invalid device_nr %d",
+ katom->device_nr);
+ katom->event_code = BASE_JD_EVENT_JOB_INVALID;
+ ret = jd_done_nolock(katom, NULL);
+ goto out;
+ }
+
+ /* Reject atoms with invalid core requirements */
+ if ((katom->core_req & BASE_JD_REQ_EXTERNAL_RESOURCES) &&
+ (katom->core_req & BASE_JD_REQ_EVENT_COALESCE)) {
+ dev_warn(kctx->kbdev->dev,
+ "Rejecting atom with invalid core requirements");
+ katom->event_code = BASE_JD_EVENT_JOB_INVALID;
+ katom->core_req &= ~BASE_JD_REQ_EVENT_COALESCE;
+ ret = jd_done_nolock(katom, NULL);
+ goto out;
+ }
+
+ if (katom->core_req & BASE_JD_REQ_EXTERNAL_RESOURCES) {
+ /* handle what we need to do to access the external resources */
+ if (kbase_jd_pre_external_resources(katom, user_atom) != 0) {
+ /* setup failed (no access, bad resource, unknown resource types, etc.) */
+ katom->event_code = BASE_JD_EVENT_JOB_INVALID;
+ ret = jd_done_nolock(katom, NULL);
+ goto out;
+ }
+ }
+
+ /* Validate the atom. Function will return error if the atom is
+ * malformed.
+ *
+ * Soft-jobs never enter the job scheduler but have their own initialize method.
+ *
+ * If either fail then we immediately complete the atom with an error.
+ */
+ if ((katom->core_req & BASE_JD_REQ_SOFT_JOB) == 0) {
+ if (!kbase_js_is_atom_valid(kctx->kbdev, katom)) {
+ katom->event_code = BASE_JD_EVENT_JOB_INVALID;
+ ret = jd_done_nolock(katom, NULL);
+ goto out;
+ }
+ } else {
+ /* Soft-job */
+ if (kbase_prepare_soft_job(katom) != 0) {
+ katom->event_code = BASE_JD_EVENT_JOB_INVALID;
+ ret = jd_done_nolock(katom, NULL);
+ goto out;
+ }
+ }
+
+#ifdef CONFIG_GPU_TRACEPOINTS
+ katom->work_id = atomic_inc_return(&jctx->work_id);
+ trace_gpu_job_enqueue((u32)kctx->id, katom->work_id,
+ kbasep_map_core_reqs_to_string(katom->core_req));
+#endif
+
+ if (queued && !IS_GPU_ATOM(katom)) {
+ ret = false;
+ goto out;
+ }
+#ifdef CONFIG_KDS
+ if (!katom->kds_dep_satisfied) {
+ /* Queue atom due to KDS dependency */
+ ret = false;
+ goto out;
+ }
+#endif /* CONFIG_KDS */
+
+
+#ifdef CONFIG_MALI_DMA_FENCE
+ if (atomic_read(&katom->dma_fence.dep_count) != -1) {
+ ret = false;
+ goto out;
+ }
+#endif /* CONFIG_MALI_DMA_FENCE */
+
+ if ((katom->core_req & BASE_JD_REQ_SOFT_JOB_TYPE)
+ == BASE_JD_REQ_SOFT_REPLAY) {
+ if (kbase_replay_process(katom))
+ ret = false;
+ else
+ ret = jd_done_nolock(katom, NULL);
+
+ goto out;
+ } else if (katom->core_req & BASE_JD_REQ_SOFT_JOB) {
+ if (kbase_process_soft_job(katom) == 0) {
+ kbase_finish_soft_job(katom);
+ ret = jd_done_nolock(katom, NULL);
+ goto out;
+ }
+
+ ret = false;
+ } else if ((katom->core_req & BASE_JD_REQ_ATOM_TYPE) != BASE_JD_REQ_DEP) {
+ katom->status = KBASE_JD_ATOM_STATE_IN_JS;
+ ret = kbasep_js_add_job(kctx, katom);
+ /* If job was cancelled then resolve immediately */
+ if (katom->event_code == BASE_JD_EVENT_JOB_CANCELLED)
+ ret = jd_done_nolock(katom, NULL);
+ } else {
+ /* This is a pure dependency. Resolve it immediately */
+ ret = jd_done_nolock(katom, NULL);
+ }
+
+ out:
+ return ret;
+}
+
+#ifdef BASE_LEGACY_UK6_SUPPORT
+int kbase_jd_submit(struct kbase_context *kctx,
+ const struct kbase_uk_job_submit *submit_data,
+ int uk6_atom)
+#else
+int kbase_jd_submit(struct kbase_context *kctx,
+ const struct kbase_uk_job_submit *submit_data)
+#endif /* BASE_LEGACY_UK6_SUPPORT */
+{
+ struct kbase_jd_context *jctx = &kctx->jctx;
+ int err = 0;
+ int i;
+ bool need_to_try_schedule_context = false;
+ struct kbase_device *kbdev;
+ void __user *user_addr;
+ u32 latest_flush;
+
+ /*
+	 * kbase_jd_submit isn't expected to fail, so all errors with the jobs
+	 * are reported by immediately failing them (through the event system).
+ */
+ kbdev = kctx->kbdev;
+
+ beenthere(kctx, "%s", "Enter");
+
+ if (kbase_ctx_flag(kctx, KCTX_SUBMIT_DISABLED)) {
+ dev_err(kbdev->dev, "Attempt to submit to a context that has SUBMIT_DISABLED set on it");
+ return -EINVAL;
+ }
+
+#ifdef BASE_LEGACY_UK6_SUPPORT
+ if ((uk6_atom && submit_data->stride !=
+ sizeof(struct base_jd_atom_v2_uk6)) ||
+ submit_data->stride != sizeof(base_jd_atom_v2)) {
+#else
+ if (submit_data->stride != sizeof(base_jd_atom_v2)) {
+#endif /* BASE_LEGACY_UK6_SUPPORT */
+ dev_err(kbdev->dev, "Stride passed to job_submit doesn't match kernel");
+ return -EINVAL;
+ }
+
+ user_addr = get_compat_pointer(kctx, &submit_data->addr);
+
+ KBASE_TIMELINE_ATOMS_IN_FLIGHT(kctx, atomic_add_return(submit_data->nr_atoms, &kctx->timeline.jd_atoms_in_flight));
+
+ /* All atoms submitted in this call have the same flush ID */
+ latest_flush = kbase_backend_get_current_flush_id(kbdev);
+
+ for (i = 0; i < submit_data->nr_atoms; i++) {
+ struct base_jd_atom_v2 user_atom;
+ struct kbase_jd_atom *katom;
+
+#ifdef BASE_LEGACY_UK6_SUPPORT
+ if (uk6_atom) {
+ struct base_jd_atom_v2_uk6 user_atom_v6;
+ base_jd_dep_type dep_types[2] = {BASE_JD_DEP_TYPE_DATA, BASE_JD_DEP_TYPE_DATA};
+
+ if (copy_from_user(&user_atom_v6, user_addr,
+ sizeof(user_atom_v6))) {
+ err = -EINVAL;
+ KBASE_TIMELINE_ATOMS_IN_FLIGHT(kctx,
+ atomic_sub_return(
+ submit_data->nr_atoms - i,
+ &kctx->timeline.jd_atoms_in_flight));
+ break;
+ }
+ /* Convert from UK6 atom format to UK7 format */
+ user_atom.jc = user_atom_v6.jc;
+ user_atom.udata = user_atom_v6.udata;
+ user_atom.extres_list = user_atom_v6.extres_list;
+ user_atom.nr_extres = user_atom_v6.nr_extres;
+ user_atom.core_req = (u32)(user_atom_v6.core_req & 0x7fff);
+
+ /* atom number 0 is used for no dependency atoms */
+ if (!user_atom_v6.pre_dep[0])
+ dep_types[0] = BASE_JD_DEP_TYPE_INVALID;
+
+ base_jd_atom_dep_set(&user_atom.pre_dep[0],
+ user_atom_v6.pre_dep[0],
+ dep_types[0]);
+
+ /* atom number 0 is used for no dependency atoms */
+ if (!user_atom_v6.pre_dep[1])
+ dep_types[1] = BASE_JD_DEP_TYPE_INVALID;
+
+ base_jd_atom_dep_set(&user_atom.pre_dep[1],
+ user_atom_v6.pre_dep[1],
+ dep_types[1]);
+
+ user_atom.atom_number = user_atom_v6.atom_number;
+ user_atom.prio = user_atom_v6.prio;
+ user_atom.device_nr = user_atom_v6.device_nr;
+ } else {
+#endif /* BASE_LEGACY_UK6_SUPPORT */
+ if (copy_from_user(&user_atom, user_addr, sizeof(user_atom)) != 0) {
+ err = -EINVAL;
+ KBASE_TIMELINE_ATOMS_IN_FLIGHT(kctx, atomic_sub_return(submit_data->nr_atoms - i, &kctx->timeline.jd_atoms_in_flight));
+ break;
+ }
+#ifdef BASE_LEGACY_UK6_SUPPORT
+ }
+#endif /* BASE_LEGACY_UK6_SUPPORT */
+
+#ifdef BASE_LEGACY_UK10_2_SUPPORT
+ if (KBASE_API_VERSION(10, 3) > kctx->api_version)
+ user_atom.core_req = (u32)(user_atom.compat_core_req
+ & 0x7fff);
+#endif /* BASE_LEGACY_UK10_2_SUPPORT */
+
+ user_addr = (void __user *)((uintptr_t) user_addr + submit_data->stride);
+
+ mutex_lock(&jctx->lock);
+#ifndef compiletime_assert
+#define compiletime_assert_defined
+#define compiletime_assert(x, msg) do { switch (0) { case 0: case (x):; } } \
+while (false)
+#endif
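+ /* The fallback compiletime_assert above is the usual switch-based
+ * compile-time assertion trick: a case label must be a unique constant
+ * expression, so if (x) evaluates to 0 the two 'case 0' labels collide
+ * and the build fails. The msg argument is unused by the fallback.
+ */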
+ compiletime_assert((1 << (8*sizeof(user_atom.atom_number))) ==
+ BASE_JD_ATOM_COUNT,
+ "BASE_JD_ATOM_COUNT and base_atom_id type out of sync");
+ compiletime_assert(sizeof(user_atom.pre_dep[0].atom_id) ==
+ sizeof(user_atom.atom_number),
+ "BASE_JD_ATOM_COUNT and base_atom_id type out of sync");
+#ifdef compiletime_assert_defined
+#undef compiletime_assert
+#undef compiletime_assert_defined
+#endif
+ katom = &jctx->atoms[user_atom.atom_number];
+
+ /* Record the flush ID for the cache flush optimisation */
+ katom->flush_id = latest_flush;
+
+ while (katom->status != KBASE_JD_ATOM_STATE_UNUSED) {
+ /* Atom number is already in use, wait for the atom to
+ * complete
+ */
+ mutex_unlock(&jctx->lock);
+
+ /* This thread will wait for the atom to complete. Due
+ * to thread scheduling we are not sure that the other
+ * thread that owns the atom will also schedule the
+ * context, so we force the scheduler to be active and
+ * hence eventually schedule this context at some point
+ * later.
+ */
+ kbase_js_sched_all(kbdev);
+
+ if (wait_event_killable(katom->completed,
+ katom->status ==
+ KBASE_JD_ATOM_STATE_UNUSED) != 0) {
+ /* We're being killed so the result code
+ * doesn't really matter
+ */
+ return 0;
+ }
+ mutex_lock(&jctx->lock);
+ }
+
+ need_to_try_schedule_context |=
+ jd_submit_atom(kctx, &user_atom, katom);
+
+ /* Register a completed job as a disjoint event when the GPU is in a
+ * disjoint state (i.e. being reset or replaying jobs).
+ */
+ kbase_disjoint_event_potential(kbdev);
+
+ mutex_unlock(&jctx->lock);
+ }
+
+ if (need_to_try_schedule_context)
+ kbase_js_sched_all(kbdev);
+
+ return err;
+}
+
+KBASE_EXPORT_TEST_API(kbase_jd_submit);
+
+void kbase_jd_done_worker(struct work_struct *data)
+{
+ struct kbase_jd_atom *katom = container_of(data, struct kbase_jd_atom, work);
+ struct kbase_jd_context *jctx;
+ struct kbase_context *kctx;
+ struct kbasep_js_kctx_info *js_kctx_info;
+ struct kbase_device *kbdev;
+ struct kbasep_js_device_data *js_devdata;
+ u64 cache_jc = katom->jc;
+ struct kbasep_js_atom_retained_state katom_retained_state;
+ bool context_idle;
+ base_jd_core_req core_req = katom->core_req;
+ u64 affinity = katom->affinity;
+ enum kbase_atom_coreref_state coreref_state = katom->coreref_state;
+
+ /* Soft jobs should never reach this function */
+ KBASE_DEBUG_ASSERT((katom->core_req & BASE_JD_REQ_SOFT_JOB) == 0);
+
+ kctx = katom->kctx;
+ jctx = &kctx->jctx;
+ kbdev = kctx->kbdev;
+ js_kctx_info = &kctx->jctx.sched_info;
+ js_devdata = &kbdev->js_data;
+
+ KBASE_TRACE_ADD(kbdev, JD_DONE_WORKER, kctx, katom, katom->jc, 0);
+
+ kbase_backend_complete_wq(kbdev, katom);
+
+ /*
+ * Begin transaction on JD context and JS context
+ */
+ mutex_lock(&jctx->lock);
+ kbase_tlstream_tl_attrib_atom_state(katom, TL_ATOM_STATE_DONE);
+ mutex_lock(&js_devdata->queue_mutex);
+ mutex_lock(&js_kctx_info->ctx.jsctx_mutex);
+
+ /* This worker only gets called on contexts that are scheduled *in*. This is
+ * because it only happens in response to an IRQ from a job that was
+ * running.
+ */
+ KBASE_DEBUG_ASSERT(kbase_ctx_flag(kctx, KCTX_SCHEDULED));
+
+ if (katom->event_code == BASE_JD_EVENT_STOPPED) {
+ /* Atom has been promoted to stopped */
+ unsigned long flags;
+
+ mutex_unlock(&js_kctx_info->ctx.jsctx_mutex);
+ mutex_unlock(&js_devdata->queue_mutex);
+
+ spin_lock_irqsave(&kbdev->hwaccess_lock, flags);
+
+ katom->status = KBASE_JD_ATOM_STATE_IN_JS;
+ kbase_js_unpull(kctx, katom);
+
+ spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);
+ mutex_unlock(&jctx->lock);
+
+ return;
+ }
+
+ if (katom->event_code != BASE_JD_EVENT_DONE)
+ dev_err(kbdev->dev,
+ "t6xx: GPU fault 0x%02lx from job slot %d\n",
+ (unsigned long)katom->event_code,
+ katom->slot_nr);
+
+ if (kbase_hw_has_issue(kbdev, BASE_HW_ISSUE_8316))
+ kbase_as_poking_timer_release_atom(kbdev, kctx, katom);
+
+ /* Retain state before the katom disappears */
+ kbasep_js_atom_retained_state_copy(&katom_retained_state, katom);
+
+ context_idle = kbase_js_complete_atom_wq(kctx, katom);
+
+ KBASE_DEBUG_ASSERT(kbasep_js_has_atom_finished(&katom_retained_state));
+
+ kbasep_js_remove_job(kbdev, kctx, katom);
+ mutex_unlock(&js_kctx_info->ctx.jsctx_mutex);
+ mutex_unlock(&js_devdata->queue_mutex);
+ katom->atom_flags &= ~KBASE_KATOM_FLAG_HOLDING_CTX_REF;
+ /* jd_done_nolock() requires the jsctx_mutex lock to be dropped */
+ jd_done_nolock(katom, &kctx->completed_jobs);
+
+ /* katom may have been freed now, do not use! */
+
+ if (context_idle) {
+ unsigned long flags;
+
+ context_idle = false;
+ mutex_lock(&js_devdata->queue_mutex);
+ spin_lock_irqsave(&kbdev->hwaccess_lock, flags);
+
+ /* If kbase_sched() has scheduled this context back in then
+ * KCTX_ACTIVE will have been set after we marked it as
+ * inactive, and another pm reference will have been taken, so
+ * drop our reference. But do not call kbase_jm_idle_ctx(), as
+ * the context is active and fast-starting is allowed.
+ *
+ * If an atom has been fast-started then kctx->atoms_pulled will
+ * be non-zero but KCTX_ACTIVE will still be false (as the
+ * previous pm reference has been inherited). Do NOT drop our
+ * reference, as it has been re-used, and leave the context as
+ * active.
+ *
+ * If no new atoms have been started then KCTX_ACTIVE will still
+ * be false and atoms_pulled will be zero, so drop the reference
+ * and call kbase_jm_idle_ctx().
+ *
+ * As the checks are done under both the queue_mutex and
+ * hwaccess_lock, it should be impossible for this to race
+ * with the scheduler code.
+ */
+ if (kbase_ctx_flag(kctx, KCTX_ACTIVE) ||
+ !atomic_read(&kctx->atoms_pulled)) {
+ /* Calling kbase_jm_idle_ctx() here will ensure that
+ * atoms are not fast-started when we drop the
+ * hwaccess_lock. This is not performed if
+ * KCTX_ACTIVE is set as in that case another pm
+ * reference has been taken and a fast-start would be
+ * valid.
+ */
+ if (!kbase_ctx_flag(kctx, KCTX_ACTIVE))
+ kbase_jm_idle_ctx(kbdev, kctx);
+ context_idle = true;
+ } else {
+ kbase_ctx_flag_set(kctx, KCTX_ACTIVE);
+ }
+ spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);
+ mutex_unlock(&js_devdata->queue_mutex);
+ }
+
+ /*
+ * Transaction complete
+ */
+ mutex_unlock(&jctx->lock);
+
+ /* Job is now no longer running, so we can now safely release the context
+ * reference, and handle any actions that were logged against the atom's
+ * retained state */
+
+ kbasep_js_runpool_release_ctx_and_katom_retained_state(kbdev, kctx, &katom_retained_state);
+
+ kbase_js_sched_all(kbdev);
+
+ if (!atomic_dec_return(&kctx->work_count)) {
+ /* If worker now idle then post all events that jd_done_nolock()
+ * has queued */
+ mutex_lock(&jctx->lock);
+ while (!list_empty(&kctx->completed_jobs)) {
+ struct kbase_jd_atom *atom = list_entry(
+ kctx->completed_jobs.next,
+ struct kbase_jd_atom, jd_item);
+ list_del(kctx->completed_jobs.next);
+
+ kbase_event_post(kctx, atom);
+ }
+ mutex_unlock(&jctx->lock);
+ }
+
+ kbase_backend_complete_wq_post_sched(kbdev, core_req, affinity,
+ coreref_state);
+
+ if (context_idle)
+ kbase_pm_context_idle(kbdev);
+
+ KBASE_TRACE_ADD(kbdev, JD_DONE_WORKER_END, kctx, NULL, cache_jc, 0);
+}
+
+/**
+ * jd_cancel_worker - Work queue job cancel function.
+ * @data: a &struct work_struct
+ *
+ * Only called as part of 'Zapping' a context (which occurs on termination).
+ * Operates serially with the kbase_jd_done_worker() on the work queue.
+ *
+ * This can only be called on contexts that aren't scheduled.
+ *
+ * We don't need to release most of the resources that kbase_jd_done() or
+ * kbase_jd_done_worker() would release, because the atoms here must not be
+ * running (by virtue of only being called on contexts that aren't
+ * scheduled).
+ */
+static void jd_cancel_worker(struct work_struct *data)
+{
+ struct kbase_jd_atom *katom = container_of(data, struct kbase_jd_atom, work);
+ struct kbase_jd_context *jctx;
+ struct kbase_context *kctx;
+ struct kbasep_js_kctx_info *js_kctx_info;
+ bool need_to_try_schedule_context;
+ bool attr_state_changed;
+ struct kbase_device *kbdev;
+
+ /* Soft jobs should never reach this function */
+ KBASE_DEBUG_ASSERT((katom->core_req & BASE_JD_REQ_SOFT_JOB) == 0);
+
+ kctx = katom->kctx;
+ kbdev = kctx->kbdev;
+ jctx = &kctx->jctx;
+ js_kctx_info = &kctx->jctx.sched_info;
+
+ KBASE_TRACE_ADD(kbdev, JD_CANCEL_WORKER, kctx, katom, katom->jc, 0);
+
+ /* This only gets called on contexts that are scheduled out. Hence, we must
+ * make sure we don't decrement the count of running jobs (there aren't
+ * any), nor must we try to schedule out the context (it's already
+ * scheduled out).
+ */
+ KBASE_DEBUG_ASSERT(!kbase_ctx_flag(kctx, KCTX_SCHEDULED));
+
+ /* Scheduler: Remove the job from the system */
+ mutex_lock(&js_kctx_info->ctx.jsctx_mutex);
+ attr_state_changed = kbasep_js_remove_cancelled_job(kbdev, kctx, katom);
+ mutex_unlock(&js_kctx_info->ctx.jsctx_mutex);
+
+ mutex_lock(&jctx->lock);
+
+ need_to_try_schedule_context = jd_done_nolock(katom, NULL);
+ /* Because we're zapping, we're not adding any more jobs to this ctx, so there is
+ * no need to schedule the context. There's also no need for the jsctx_mutex to
+ * have been taken around this. */
+ KBASE_DEBUG_ASSERT(!need_to_try_schedule_context);
+
+ /* katom may have been freed now, do not use! */
+ mutex_unlock(&jctx->lock);
+
+ if (attr_state_changed)
+ kbase_js_sched_all(kbdev);
+}
+
+/**
+ * kbase_jd_done - Complete a job that has been removed from the Hardware
+ * @katom: atom which has been completed
+ * @slot_nr: slot the atom was on
+ * @end_timestamp: completion time
+ * @done_code: completion code
+ *
+ * This must be used whenever a job has been removed from the Hardware, e.g.:
+ * An IRQ indicates that the job finished (for both error and 'done' codes), or
+ * the job was evicted from the JS_HEAD_NEXT registers during a Soft/Hard stop.
+ *
+ * Some work is carried out immediately, and the rest is deferred onto a
+ * workqueue
+ *
+ * Context:
+ * This can be called safely from atomic context.
+ * The caller must hold kbdev->hwaccess_lock
+ */
+void kbase_jd_done(struct kbase_jd_atom *katom, int slot_nr,
+ ktime_t *end_timestamp, kbasep_js_atom_done_code done_code)
+{
+ struct kbase_context *kctx;
+ struct kbase_device *kbdev;
+
+ KBASE_DEBUG_ASSERT(katom);
+ kctx = katom->kctx;
+ KBASE_DEBUG_ASSERT(kctx);
+ kbdev = kctx->kbdev;
+ KBASE_DEBUG_ASSERT(kbdev);
+
+ if (done_code & KBASE_JS_ATOM_DONE_EVICTED_FROM_NEXT)
+ katom->event_code = BASE_JD_EVENT_REMOVED_FROM_NEXT;
+
+ KBASE_TRACE_ADD(kbdev, JD_DONE, kctx, katom, katom->jc, 0);
+
+ kbase_job_check_leave_disjoint(kbdev, katom);
+
+ katom->slot_nr = slot_nr;
+
+ atomic_inc(&kctx->work_count);
+
+#ifdef CONFIG_DEBUG_FS
+ /* A failed job happened and is waiting for dumping */
+ if (!katom->will_fail_event_code &&
+ kbase_debug_job_fault_process(katom, katom->event_code))
+ return;
+#endif
+
+ WARN_ON(work_pending(&katom->work));
+ KBASE_DEBUG_ASSERT(0 == object_is_on_stack(&katom->work));
+ INIT_WORK(&katom->work, kbase_jd_done_worker);
+ queue_work(kctx->jctx.job_done_wq, &katom->work);
+}
+
+KBASE_EXPORT_TEST_API(kbase_jd_done);
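+
+/*
+ * Illustrative call pattern (a sketch only, not part of the driver): the
+ * hardware backend is expected to report a finished job from its completion
+ * path with kbdev->hwaccess_lock held, for example:
+ *
+ *	spin_lock_irqsave(&kbdev->hwaccess_lock, flags);
+ *	kbase_jd_done(katom, js, &end_timestamp, done_code);
+ *	spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);
+ *
+ * where js, end_timestamp and done_code are whatever the backend recorded
+ * for that job. The remaining work then normally runs later in
+ * kbase_jd_done_worker() on the context's job_done_wq.
+ */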
+
+void kbase_jd_cancel(struct kbase_device *kbdev, struct kbase_jd_atom *katom)
+{
+ struct kbase_context *kctx;
+ struct kbasep_js_kctx_info *js_kctx_info;
+
+ KBASE_DEBUG_ASSERT(NULL != kbdev);
+ KBASE_DEBUG_ASSERT(NULL != katom);
+ kctx = katom->kctx;
+ KBASE_DEBUG_ASSERT(NULL != kctx);
+
+ js_kctx_info = &kctx->jctx.sched_info;
+
+ KBASE_TRACE_ADD(kbdev, JD_CANCEL, kctx, katom, katom->jc, 0);
+
+ /* This should only be done from a context that is not scheduled */
+ KBASE_DEBUG_ASSERT(!kbase_ctx_flag(kctx, KCTX_SCHEDULED));
+
+ WARN_ON(work_pending(&katom->work));
+
+ katom->event_code = BASE_JD_EVENT_JOB_CANCELLED;
+
+ KBASE_DEBUG_ASSERT(0 == object_is_on_stack(&katom->work));
+ INIT_WORK(&katom->work, jd_cancel_worker);
+ queue_work(kctx->jctx.job_done_wq, &katom->work);
+}
+
+
+void kbase_jd_zap_context(struct kbase_context *kctx)
+{
+ struct kbase_jd_atom *katom;
+ struct list_head *entry, *tmp;
+ struct kbase_device *kbdev;
+
+ KBASE_DEBUG_ASSERT(kctx);
+
+ kbdev = kctx->kbdev;
+
+ KBASE_TRACE_ADD(kbdev, JD_ZAP_CONTEXT, kctx, NULL, 0u, 0u);
+
+ kbase_js_zap_context(kctx);
+
+ mutex_lock(&kctx->jctx.lock);
+
+ /*
+ * While holding the struct kbase_jd_context lock, clean up jobs which are
+ * known to kbase but are queued outside the job scheduler.
+ */
+
+ del_timer_sync(&kctx->soft_job_timeout);
+ list_for_each_safe(entry, tmp, &kctx->waiting_soft_jobs) {
+ katom = list_entry(entry, struct kbase_jd_atom, queue);
+ kbase_cancel_soft_job(katom);
+ }
+
+
+#ifdef CONFIG_KDS
+
+ /* For each job waiting on a kds resource, cancel the wait and force the job to
+ * complete early. This is done so that we don't leave jobs outstanding waiting
+ * on kds resources which may never be released when contexts are zapped, resulting
+ * in a hang.
+ *
+ * Note that we can safely iterate over the list as the struct kbase_jd_context lock
+ * is held; this prevents items being removed when calling job_done_nolock in
+ * kbase_cancel_kds_wait_job.
+ */
+
+ list_for_each(entry, &kctx->waiting_kds_resource) {
+ katom = list_entry(entry, struct kbase_jd_atom, node);
+
+ kbase_cancel_kds_wait_job(katom);
+ }
+#endif
+
+#ifdef CONFIG_MALI_DMA_FENCE
+ kbase_dma_fence_cancel_all_atoms(kctx);
+#endif
+
+ mutex_unlock(&kctx->jctx.lock);
+
+#ifdef CONFIG_MALI_DMA_FENCE
+ /* Flush dma-fence workqueue to ensure that any callbacks that may have
+ * been queued are done before continuing.
+ */
+ flush_workqueue(kctx->dma_fence.wq);
+#endif
+
+ kbase_jm_wait_for_zero_jobs(kctx);
+}
+
+KBASE_EXPORT_TEST_API(kbase_jd_zap_context);
+
+int kbase_jd_init(struct kbase_context *kctx)
+{
+ int i;
+ int mali_err = 0;
+#ifdef CONFIG_KDS
+ int err;
+#endif /* CONFIG_KDS */
+
+ KBASE_DEBUG_ASSERT(kctx);
+
+ kctx->jctx.job_done_wq = alloc_workqueue("mali_jd",
+ WQ_HIGHPRI | WQ_UNBOUND, 1);
+ if (NULL == kctx->jctx.job_done_wq) {
+ mali_err = -ENOMEM;
+ goto out1;
+ }
+
+ for (i = 0; i < BASE_JD_ATOM_COUNT; i++) {
+ init_waitqueue_head(&kctx->jctx.atoms[i].completed);
+
+ INIT_LIST_HEAD(&kctx->jctx.atoms[i].dep_head[0]);
+ INIT_LIST_HEAD(&kctx->jctx.atoms[i].dep_head[1]);
+
+ /* Catch userspace attempting to use an atom which doesn't exist as a pre-dependency */
+ kctx->jctx.atoms[i].event_code = BASE_JD_EVENT_JOB_INVALID;
+ kctx->jctx.atoms[i].status = KBASE_JD_ATOM_STATE_UNUSED;
+
+#ifdef CONFIG_MALI_DMA_FENCE
+ kctx->jctx.atoms[i].dma_fence.context = fence_context_alloc(1);
+ atomic_set(&kctx->jctx.atoms[i].dma_fence.seqno, 0);
+ INIT_LIST_HEAD(&kctx->jctx.atoms[i].dma_fence.callbacks);
+#endif
+ }
+
+ mutex_init(&kctx->jctx.lock);
+
+ init_waitqueue_head(&kctx->jctx.zero_jobs_wait);
+
+ spin_lock_init(&kctx->jctx.tb_lock);
+
+#ifdef CONFIG_KDS
+ err = kds_callback_init(&kctx->jctx.kds_cb, 0, kds_dep_clear);
+ if (0 != err) {
+ mali_err = -EINVAL;
+ goto out2;
+ }
+#endif /* CONFIG_KDS */
+
+ kctx->jctx.job_nr = 0;
+ INIT_LIST_HEAD(&kctx->completed_jobs);
+ atomic_set(&kctx->work_count, 0);
+
+ return 0;
+
+#ifdef CONFIG_KDS
+ out2:
+ destroy_workqueue(kctx->jctx.job_done_wq);
+#endif /* CONFIG_KDS */
+ out1:
+ return mali_err;
+}
+
+KBASE_EXPORT_TEST_API(kbase_jd_init);
+
+void kbase_jd_exit(struct kbase_context *kctx)
+{
+ KBASE_DEBUG_ASSERT(kctx);
+
+#ifdef CONFIG_KDS
+ kds_callback_term(&kctx->jctx.kds_cb);
+#endif /* CONFIG_KDS */
+ /* Work queue is emptied by this */
+ destroy_workqueue(kctx->jctx.job_done_wq);
+}
+
+KBASE_EXPORT_TEST_API(kbase_jd_exit);
diff --git a/midgard-0.r2p0/mali_kbase_jd_debugfs.c b/midgard-0.r2p0/mali_kbase_jd_debugfs.c
new file mode 100755
index 0000000..6437e42
--- /dev/null
+++ b/midgard-0.r2p0/mali_kbase_jd_debugfs.c
@@ -0,0 +1,123 @@
+/*
+ *
+ * (C) COPYRIGHT 2014-2016 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained
+ * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
+ * Boston, MA 02110-1301, USA.
+ *
+ */
+
+
+
+#include <linux/seq_file.h>
+
+#include <mali_kbase.h>
+
+#include <mali_kbase_jd_debugfs.h>
+
+#ifdef CONFIG_DEBUG_FS
+
+/**
+ * kbasep_jd_debugfs_atoms_show - Show callback for the JD atoms debugfs file.
+ * @sfile: The debugfs entry
+ * @data: Data associated with the entry
+ *
+ * This function is called to get the contents of the JD atoms debugfs file.
+ * This is a report of all atoms managed by kbase_jd_context.atoms
+ *
+ * Return: 0 if the data was successfully printed to the debugfs entry file,
+ * failure otherwise
+ */
+static int kbasep_jd_debugfs_atoms_show(struct seq_file *sfile, void *data)
+{
+ struct kbase_context *kctx = sfile->private;
+ struct kbase_jd_atom *atoms;
+ unsigned long irq_flags;
+ int i;
+
+ KBASE_DEBUG_ASSERT(kctx != NULL);
+
+ /* Print version */
+ seq_printf(sfile, "v%u\n", MALI_JD_DEBUGFS_VERSION);
+
+ /* Print U/K API version */
+ seq_printf(sfile, "ukv%u.%u\n", BASE_UK_VERSION_MAJOR,
+ BASE_UK_VERSION_MINOR);
+
+ /* Print table heading */
+ seq_puts(sfile, "atom id,core reqs,status,coreref status,predeps,start time,time on gpu\n");
+
+ atoms = kctx->jctx.atoms;
+ /* General atom states */
+ mutex_lock(&kctx->jctx.lock);
+ /* JS-related states */
+ spin_lock_irqsave(&kctx->kbdev->hwaccess_lock, irq_flags);
+ for (i = 0; i != BASE_JD_ATOM_COUNT; ++i) {
+ struct kbase_jd_atom *atom = &atoms[i];
+ s64 start_timestamp = 0;
+
+ if (atom->status == KBASE_JD_ATOM_STATE_UNUSED)
+ continue;
+
+ /* start_timestamp is cleared as soon as the atom leaves UNUSED state
+ * and set before a job is submitted to the h/w; a non-zero value means
+ * it is valid */
+ if (ktime_to_ns(atom->start_timestamp))
+ start_timestamp = ktime_to_ns(
+ ktime_sub(ktime_get(), atom->start_timestamp));
+
+ seq_printf(sfile,
+ "%i,%u,%u,%u,%u %u,%lli,%llu\n",
+ i, atom->core_req, atom->status, atom->coreref_state,
+ (unsigned)(atom->dep[0].atom ?
+ atom->dep[0].atom - atoms : 0),
+ (unsigned)(atom->dep[1].atom ?
+ atom->dep[1].atom - atoms : 0),
+ (signed long long)start_timestamp,
+ (unsigned long long)(atom->time_spent_us ?
+ atom->time_spent_us * 1000 : start_timestamp)
+ );
+ }
+ spin_unlock_irqrestore(&kctx->kbdev->hwaccess_lock, irq_flags);
+ mutex_unlock(&kctx->jctx.lock);
+
+ return 0;
+}
+
+
+/**
+ * kbasep_jd_debugfs_atoms_open - open operation for atom debugfs file
+ * @in: &struct inode pointer
+ * @file: &struct file pointer
+ *
+ * Return: 0 on success, or a negative error code on failure
+ */
+static int kbasep_jd_debugfs_atoms_open(struct inode *in, struct file *file)
+{
+ return single_open(file, kbasep_jd_debugfs_atoms_show, in->i_private);
+}
+
+static const struct file_operations kbasep_jd_debugfs_atoms_fops = {
+ .open = kbasep_jd_debugfs_atoms_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+void kbasep_jd_debugfs_ctx_init(struct kbase_context *kctx)
+{
+ KBASE_DEBUG_ASSERT(kctx != NULL);
+
+ /* Expose all atoms */
+ debugfs_create_file("atoms", S_IRUGO, kctx->kctx_dentry, kctx,
+ &kbasep_jd_debugfs_atoms_fops);
+
+}
+
+#endif /* CONFIG_DEBUG_FS */
diff --git a/midgard-0.r2p0/mali_kbase_jd_debugfs.h b/midgard-0.r2p0/mali_kbase_jd_debugfs.h
new file mode 100755
index 0000000..090f816
--- /dev/null
+++ b/midgard-0.r2p0/mali_kbase_jd_debugfs.h
@@ -0,0 +1,39 @@
+/*
+ *
+ * (C) COPYRIGHT 2014-2015 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained
+ * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
+ * Boston, MA 02110-1301, USA.
+ *
+ */
+
+
+
+/**
+ * @file mali_kbase_jd_debugfs.h
+ * Header file for job dispatcher-related entries in debugfs
+ */
+
+#ifndef _KBASE_JD_DEBUGFS_H
+#define _KBASE_JD_DEBUGFS_H
+
+#include <linux/debugfs.h>
+
+#include <mali_kbase.h>
+
+#define MALI_JD_DEBUGFS_VERSION 1
+
+/**
+ * kbasep_jd_debugfs_ctx_init() - Add debugfs entries for JD system
+ *
+ * @kctx: Pointer to kbase_context
+ */
+void kbasep_jd_debugfs_ctx_init(struct kbase_context *kctx);
+
+#endif /*_KBASE_JD_DEBUGFS_H*/
diff --git a/midgard-0.r2p0/mali_kbase_jm.c b/midgard-0.r2p0/mali_kbase_jm.c
new file mode 100755
index 0000000..0c5c6a6
--- /dev/null
+++ b/midgard-0.r2p0/mali_kbase_jm.c
@@ -0,0 +1,131 @@
+/*
+ *
+ * (C) COPYRIGHT 2014-2016 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained
+ * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
+ * Boston, MA 02110-1301, USA.
+ *
+ */
+
+
+
+
+/*
+ * HW access job manager common APIs
+ */
+
+#include <mali_kbase.h>
+#include "mali_kbase_hwaccess_jm.h"
+#include "mali_kbase_jm.h"
+
+/**
+ * kbase_jm_next_job() - Attempt to run the next @nr_jobs_to_submit jobs on slot
+ * @js on the active context.
+ * @kbdev: Device pointer
+ * @js: Job slot to run on
+ * @nr_jobs_to_submit: Number of jobs to attempt to submit
+ *
+ * Return: true if slot can still be submitted on, false if slot is now full.
+ */
+static bool kbase_jm_next_job(struct kbase_device *kbdev, int js,
+ int nr_jobs_to_submit)
+{
+ struct kbase_context *kctx;
+ int i;
+
+ kctx = kbdev->hwaccess.active_kctx;
+
+ if (!kctx)
+ return true;
+
+ for (i = 0; i < nr_jobs_to_submit; i++) {
+ struct kbase_jd_atom *katom = kbase_js_pull(kctx, js);
+
+ if (!katom)
+ return true; /* Context has no jobs on this slot */
+
+ kbase_backend_run_atom(kbdev, katom);
+ }
+
+ return false; /* Slot ringbuffer should now be full */
+}
+
+u32 kbase_jm_kick(struct kbase_device *kbdev, u32 js_mask)
+{
+ u32 ret_mask = 0;
+
+ lockdep_assert_held(&kbdev->hwaccess_lock);
+
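+ /* Service the slots named in js_mask lowest-numbered first: ffs()
+ * returns the 1-based index of the least significant set bit, so
+ * (ffs(js_mask) - 1) is the slot number, and that bit is cleared once
+ * the slot has been submitted to.
+ */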
+ while (js_mask) {
+ int js = ffs(js_mask) - 1;
+ int nr_jobs_to_submit = kbase_backend_slot_free(kbdev, js);
+
+ if (kbase_jm_next_job(kbdev, js, nr_jobs_to_submit))
+ ret_mask |= (1 << js);
+
+ js_mask &= ~(1 << js);
+ }
+
+ return ret_mask;
+}
+
+void kbase_jm_try_kick(struct kbase_device *kbdev, u32 js_mask)
+{
+ struct kbasep_js_device_data *js_devdata = &kbdev->js_data;
+
+ lockdep_assert_held(&kbdev->hwaccess_lock);
+
+ if (!down_trylock(&js_devdata->schedule_sem)) {
+ kbase_jm_kick(kbdev, js_mask);
+ up(&js_devdata->schedule_sem);
+ }
+}
+
+void kbase_jm_try_kick_all(struct kbase_device *kbdev)
+{
+ struct kbasep_js_device_data *js_devdata = &kbdev->js_data;
+
+ lockdep_assert_held(&kbdev->hwaccess_lock);
+
+ if (!down_trylock(&js_devdata->schedule_sem)) {
+ kbase_jm_kick_all(kbdev);
+ up(&js_devdata->schedule_sem);
+ }
+}
+
+void kbase_jm_idle_ctx(struct kbase_device *kbdev, struct kbase_context *kctx)
+{
+ lockdep_assert_held(&kbdev->hwaccess_lock);
+
+ if (kbdev->hwaccess.active_kctx == kctx)
+ kbdev->hwaccess.active_kctx = NULL;
+}
+
+struct kbase_jd_atom *kbase_jm_return_atom_to_js(struct kbase_device *kbdev,
+ struct kbase_jd_atom *katom)
+{
+ lockdep_assert_held(&kbdev->hwaccess_lock);
+
+ if (katom->event_code != BASE_JD_EVENT_STOPPED &&
+ katom->event_code != BASE_JD_EVENT_REMOVED_FROM_NEXT) {
+ return kbase_js_complete_atom(katom, NULL);
+ } else {
+ kbase_js_unpull(katom->kctx, katom);
+ return NULL;
+ }
+}
+
+struct kbase_jd_atom *kbase_jm_complete(struct kbase_device *kbdev,
+ struct kbase_jd_atom *katom, ktime_t *end_timestamp)
+{
+ lockdep_assert_held(&kbdev->hwaccess_lock);
+
+ return kbase_js_complete_atom(katom, end_timestamp);
+}
+
diff --git a/midgard-0.r2p0/mali_kbase_jm.h b/midgard-0.r2p0/mali_kbase_jm.h
new file mode 100755
index 0000000..a74ee24
--- /dev/null
+++ b/midgard-0.r2p0/mali_kbase_jm.h
@@ -0,0 +1,110 @@
+/*
+ *
+ * (C) COPYRIGHT 2014, 2016 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained
+ * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
+ * Boston, MA 02110-1301, USA.
+ *
+ */
+
+
+
+
+/*
+ * Job manager common APIs
+ */
+
+#ifndef _KBASE_JM_H_
+#define _KBASE_JM_H_
+
+/**
+ * kbase_jm_kick() - Indicate that there are jobs ready to run.
+ * @kbdev: Device pointer
+ * @js_mask: Mask of the job slots that can be pulled from.
+ *
+ * Caller must hold the hwaccess_lock and schedule_sem semaphore
+ *
+ * Return: Mask of the job slots that can still be submitted to.
+ */
+u32 kbase_jm_kick(struct kbase_device *kbdev, u32 js_mask);
+
+/**
+ * kbase_jm_kick_all() - Indicate that there are jobs ready to run on all job
+ * slots.
+ * @kbdev: Device pointer
+ *
+ * Caller must hold the hwaccess_lock and schedule_sem semaphore
+ *
+ * Return: Mask of the job slots that can still be submitted to.
+ */
+static inline u32 kbase_jm_kick_all(struct kbase_device *kbdev)
+{
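+ /* ((1 << num_job_slots) - 1) is a mask with one bit set per job slot,
+ * so this asks kbase_jm_kick() to service every slot the GPU reports.
+ */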
+ return kbase_jm_kick(kbdev, (1 << kbdev->gpu_props.num_job_slots) - 1);
+}
+
+/**
+ * kbase_jm_try_kick - Attempt to call kbase_jm_kick
+ * @kbdev: Device pointer
+ * @js_mask: Mask of the job slots that can be pulled from
+ * Context: Caller must hold hwaccess_lock
+ *
+ * If schedule_sem can be immediately obtained then this function will call
+ * kbase_jm_kick() otherwise it will do nothing.
+ */
+void kbase_jm_try_kick(struct kbase_device *kbdev, u32 js_mask);
+
+/**
+ * kbase_jm_try_kick_all() - Attempt to call kbase_jm_kick_all
+ * @kbdev: Device pointer
+ * Context: Caller must hold hwaccess_lock
+ *
+ * If schedule_sem can be immediately obtained then this function will call
+ * kbase_jm_kick_all() otherwise it will do nothing.
+ */
+void kbase_jm_try_kick_all(struct kbase_device *kbdev);
+
+/**
+ * kbase_jm_idle_ctx() - Mark a context as idle.
+ * @kbdev: Device pointer
+ * @kctx: Context to mark as idle
+ *
+ * No more atoms will be pulled from this context until it is marked as active
+ * by kbase_js_use_ctx().
+ *
+ * The context should have no atoms currently pulled from it
+ * (kctx->atoms_pulled == 0).
+ *
+ * Caller must hold the hwaccess_lock
+ */
+void kbase_jm_idle_ctx(struct kbase_device *kbdev, struct kbase_context *kctx);
+
+/**
+ * kbase_jm_return_atom_to_js() - Return an atom to the job scheduler that has
+ * been soft-stopped or will fail due to a
+ * dependency
+ * @kbdev: Device pointer
+ * @katom: Atom that has been stopped or will be failed
+ *
+ * Return: Atom that has now been unblocked and can now be run, or NULL if none
+ */
+struct kbase_jd_atom *kbase_jm_return_atom_to_js(struct kbase_device *kbdev,
+ struct kbase_jd_atom *katom);
+
+/**
+ * kbase_jm_complete() - Complete an atom
+ * @kbdev: Device pointer
+ * @katom: Atom that has completed
+ * @end_timestamp: Timestamp of atom completion
+ *
+ * Return: Atom that has now been unblocked and can now be run, or NULL if none
+ */
+struct kbase_jd_atom *kbase_jm_complete(struct kbase_device *kbdev,
+ struct kbase_jd_atom *katom, ktime_t *end_timestamp);
+
+#endif /* _KBASE_JM_H_ */
diff --git a/midgard-0.r2p0/mali_kbase_js.c b/midgard-0.r2p0/mali_kbase_js.c
new file mode 100755
index 0000000..7aed324
--- /dev/null
+++ b/midgard-0.r2p0/mali_kbase_js.c
@@ -0,0 +1,2947 @@
+/*
+ *
+ * (C) COPYRIGHT 2011-2016 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained
+ * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
+ * Boston, MA 02110-1301, USA.
+ *
+ */
+
+
+
+
+
+/*
+ * Job Scheduler Implementation
+ */
+#include <mali_kbase.h>
+#include <mali_kbase_js.h>
+#if defined(CONFIG_MALI_GATOR_SUPPORT)
+#include <mali_kbase_gator.h>
+#endif
+#include <mali_kbase_tlstream.h>
+#include <mali_kbase_hw.h>
+
+#include <mali_kbase_defs.h>
+#include <mali_kbase_config_defaults.h>
+
+#include "mali_kbase_jm.h"
+#include "mali_kbase_hwaccess_jm.h"
+
+/*
+ * Private types
+ */
+
+/* Bitpattern indicating the result of releasing a context */
+enum {
+ /* The context was descheduled - caller should try scheduling in a new
+ * one to keep the runpool full */
+ KBASEP_JS_RELEASE_RESULT_WAS_DESCHEDULED = (1u << 0),
+ /* Ctx attributes were changed - caller should try scheduling all
+ * contexts */
+ KBASEP_JS_RELEASE_RESULT_SCHED_ALL = (1u << 1)
+};
+
+typedef u32 kbasep_js_release_result;
+
+const int kbasep_js_atom_priority_to_relative[BASE_JD_NR_PRIO_LEVELS] = {
+ KBASE_JS_ATOM_SCHED_PRIO_MED, /* BASE_JD_PRIO_MEDIUM */
+ KBASE_JS_ATOM_SCHED_PRIO_HIGH, /* BASE_JD_PRIO_HIGH */
+ KBASE_JS_ATOM_SCHED_PRIO_LOW /* BASE_JD_PRIO_LOW */
+};
+
+const base_jd_prio
+kbasep_js_relative_priority_to_atom[KBASE_JS_ATOM_SCHED_PRIO_COUNT] = {
+ BASE_JD_PRIO_HIGH, /* KBASE_JS_ATOM_SCHED_PRIO_HIGH */
+ BASE_JD_PRIO_MEDIUM, /* KBASE_JS_ATOM_SCHED_PRIO_MED */
+ BASE_JD_PRIO_LOW /* KBASE_JS_ATOM_SCHED_PRIO_LOW */
+};
+
+
+/*
+ * Private function prototypes
+ */
+static kbasep_js_release_result kbasep_js_runpool_release_ctx_internal(
+ struct kbase_device *kbdev, struct kbase_context *kctx,
+ struct kbasep_js_atom_retained_state *katom_retained_state);
+
+static int kbase_js_get_slot(struct kbase_device *kbdev,
+ struct kbase_jd_atom *katom);
+
+static void kbase_js_foreach_ctx_job(struct kbase_context *kctx,
+ kbasep_js_policy_ctx_job_cb callback);
+
+/* Helper for trace subcodes */
+#if KBASE_TRACE_ENABLE
+static int kbasep_js_trace_get_refcnt(struct kbase_device *kbdev,
+ struct kbase_context *kctx)
+{
+ unsigned long flags;
+ struct kbasep_js_device_data *js_devdata;
+ int as_nr;
+ int refcnt = 0;
+
+ js_devdata = &kbdev->js_data;
+
+ spin_lock_irqsave(&kbdev->hwaccess_lock, flags);
+ as_nr = kctx->as_nr;
+ if (as_nr != KBASEP_AS_NR_INVALID) {
+ struct kbasep_js_per_as_data *js_per_as_data;
+
+ js_per_as_data = &js_devdata->runpool_irq.per_as_data[as_nr];
+
+ refcnt = js_per_as_data->as_busy_refcount;
+ }
+ spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);
+
+ return refcnt;
+}
+
+static int kbasep_js_trace_get_refcnt_nolock(struct kbase_device *kbdev,
+ struct kbase_context *kctx)
+{
+ struct kbasep_js_device_data *js_devdata;
+ int as_nr;
+ int refcnt = 0;
+
+ js_devdata = &kbdev->js_data;
+
+ as_nr = kctx->as_nr;
+ if (as_nr != KBASEP_AS_NR_INVALID) {
+ struct kbasep_js_per_as_data *js_per_as_data;
+
+ js_per_as_data = &js_devdata->runpool_irq.per_as_data[as_nr];
+
+ refcnt = js_per_as_data->as_busy_refcount;
+ }
+
+ return refcnt;
+}
+#else /* KBASE_TRACE_ENABLE */
+static int kbasep_js_trace_get_refcnt(struct kbase_device *kbdev,
+ struct kbase_context *kctx)
+{
+ CSTD_UNUSED(kbdev);
+ CSTD_UNUSED(kctx);
+ return 0;
+}
+static int kbasep_js_trace_get_refcnt_nolock(struct kbase_device *kbdev,
+ struct kbase_context *kctx)
+{
+ CSTD_UNUSED(kbdev);
+ CSTD_UNUSED(kctx);
+ return 0;
+}
+#endif /* KBASE_TRACE_ENABLE */
+
+/*
+ * Private types
+ */
+enum {
+ JS_DEVDATA_INIT_NONE = 0,
+ JS_DEVDATA_INIT_CONSTANTS = (1 << 0),
+ JS_DEVDATA_INIT_POLICY = (1 << 1),
+ JS_DEVDATA_INIT_ALL = ((1 << 2) - 1)
+};
+
+enum {
+ JS_KCTX_INIT_NONE = 0,
+ JS_KCTX_INIT_CONSTANTS = (1 << 0),
+ JS_KCTX_INIT_POLICY = (1 << 1),
+ JS_KCTX_INIT_ALL = ((1 << 2) - 1)
+};
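+
+/* In both enums above the _ALL value, ((1 << 2) - 1), is simply the OR of the
+ * CONSTANTS and POLICY bits, so comparing init_status against _ALL checks
+ * that every initialisation sub-stage completed.
+ */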
+
+/*
+ * Private functions
+ */
+
+/**
+ * core_reqs_from_jsn_features - Convert JSn_FEATURES to core requirements
+ * @features: JSn_FEATURE register value
+ *
+ * Given a JSn_FEATURE register value returns the core requirements that match
+ *
+ * Return: Core requirement bit mask
+ */
+static base_jd_core_req core_reqs_from_jsn_features(u16 features)
+{
+ base_jd_core_req core_req = 0u;
+
+ if ((features & JS_FEATURE_SET_VALUE_JOB) != 0)
+ core_req |= BASE_JD_REQ_V;
+
+ if ((features & JS_FEATURE_CACHE_FLUSH_JOB) != 0)
+ core_req |= BASE_JD_REQ_CF;
+
+ if ((features & JS_FEATURE_COMPUTE_JOB) != 0)
+ core_req |= BASE_JD_REQ_CS;
+
+ if ((features & JS_FEATURE_TILER_JOB) != 0)
+ core_req |= BASE_JD_REQ_T;
+
+ if ((features & JS_FEATURE_FRAGMENT_JOB) != 0)
+ core_req |= BASE_JD_REQ_FS;
+
+ return core_req;
+}
+
+static void kbase_js_sync_timers(struct kbase_device *kbdev)
+{
+ mutex_lock(&kbdev->js_data.runpool_mutex);
+ kbase_backend_ctx_count_changed(kbdev);
+ mutex_unlock(&kbdev->js_data.runpool_mutex);
+}
+
+/* Hold the hwaccess_lock for this */
+bool kbasep_js_runpool_retain_ctx_nolock(struct kbase_device *kbdev,
+ struct kbase_context *kctx)
+{
+ struct kbasep_js_device_data *js_devdata;
+ struct kbasep_js_per_as_data *js_per_as_data;
+ bool result = false;
+ int as_nr;
+
+ KBASE_DEBUG_ASSERT(kbdev != NULL);
+ KBASE_DEBUG_ASSERT(kctx != NULL);
+ js_devdata = &kbdev->js_data;
+
+ as_nr = kctx->as_nr;
+ if (as_nr != KBASEP_AS_NR_INVALID) {
+ int new_refcnt;
+
+ KBASE_DEBUG_ASSERT(as_nr >= 0);
+ js_per_as_data = &js_devdata->runpool_irq.per_as_data[as_nr];
+
+ KBASE_DEBUG_ASSERT(js_per_as_data->kctx != NULL);
+
+ new_refcnt = ++(js_per_as_data->as_busy_refcount);
+
+ KBASE_TRACE_ADD_REFCOUNT(kbdev, JS_RETAIN_CTX_NOLOCK, kctx,
+ NULL, 0u, new_refcnt);
+ result = true;
+ }
+
+ return result;
+}
+
+/**
+ * jsctx_rb_none_to_pull_prio(): - Check if there are no pullable atoms
+ * @kctx: Pointer to kbase context with ring buffer.
+ * @js: Job slot id to check.
+ * @prio: Priority to check.
+ *
+ * Return true if there are no atoms to pull. There may be running atoms in the
+ * ring buffer even if there are no atoms to pull. It is also possible for the
+ * ring buffer to be full (with running atoms) when this function returns
+ * true.
+ *
+ * Return: true if there are no atoms to pull, false otherwise.
+ */
+static inline bool
+jsctx_rb_none_to_pull_prio(struct kbase_context *kctx, int js, int prio)
+{
+ struct jsctx_queue *rb = &kctx->jsctx_queue[prio][js];
+
+ lockdep_assert_held(&kctx->kbdev->hwaccess_lock);
+
+ return RB_EMPTY_ROOT(&rb->runnable_tree);
+}
+
+/**
+ * jsctx_rb_none_to_pull(): - Check if all priority ring buffers have no
+ * pullable atoms
+ * @kctx: Pointer to kbase context with ring buffer.
+ * @js: Job slot id to check.
+ *
+ * Caller must hold hwaccess_lock
+ *
+ * Return: true if the ring buffers for all priorities have no pullable atoms,
+ * false otherwise.
+ */
+static inline bool
+jsctx_rb_none_to_pull(struct kbase_context *kctx, int js)
+{
+ int prio;
+
+ lockdep_assert_held(&kctx->kbdev->hwaccess_lock);
+
+ for (prio = 0; prio < KBASE_JS_ATOM_SCHED_PRIO_COUNT; prio++) {
+ if (!jsctx_rb_none_to_pull_prio(kctx, js, prio))
+ return false;
+ }
+
+ return true;
+}
+
+/**
+ * jsctx_queue_foreach_prio(): - Execute callback for each entry in the queue.
+ * @kctx: Pointer to kbase context with the queue.
+ * @js: Job slot id to iterate.
+ * @prio: Priority id to iterate.
+ * @callback: Function pointer to callback.
+ *
+ * Iterate over a queue and invoke @callback for each entry in the queue, and
+ * remove the entry from the queue.
+ *
+ * If entries are added to the queue while this is running, those entries may or
+ * may not be covered. To ensure that all entries in the buffer have been
+ * enumerated when this function returns, jsctx->lock must be held when calling
+ * this function.
+ *
+ * The HW access lock must always be held when calling this function.
+ */
+static void
+jsctx_queue_foreach_prio(struct kbase_context *kctx, int js, int prio,
+ kbasep_js_policy_ctx_job_cb callback)
+{
+ struct jsctx_queue *queue = &kctx->jsctx_queue[prio][js];
+
+ lockdep_assert_held(&kctx->kbdev->hwaccess_lock);
+
+ while (!RB_EMPTY_ROOT(&queue->runnable_tree)) {
+ struct rb_node *node = rb_first(&queue->runnable_tree);
+ struct kbase_jd_atom *entry = rb_entry(node,
+ struct kbase_jd_atom, runnable_tree_node);
+
+ rb_erase(node, &queue->runnable_tree);
+ callback(kctx->kbdev, entry);
+ }
+
+ while (!list_empty(&queue->x_dep_head)) {
+ struct kbase_jd_atom *entry = list_entry(queue->x_dep_head.next,
+ struct kbase_jd_atom, queue);
+
+ list_del(queue->x_dep_head.next);
+
+ callback(kctx->kbdev, entry);
+ }
+}
+
+/**
+ * jsctx_queue_foreach(): - Execute callback for each entry in every queue
+ * @kctx: Pointer to kbase context with queue.
+ * @js: Job slot id to iterate.
+ * @callback: Function pointer to callback.
+ *
+ * Iterate over all the different priorities, and for each call
+ * jsctx_queue_foreach_prio() to iterate over the queue and invoke @callback
+ * for each entry, and remove the entry from the queue.
+ */
+static inline void
+jsctx_queue_foreach(struct kbase_context *kctx, int js,
+ kbasep_js_policy_ctx_job_cb callback)
+{
+ int prio;
+
+ for (prio = 0; prio < KBASE_JS_ATOM_SCHED_PRIO_COUNT; prio++)
+ jsctx_queue_foreach_prio(kctx, js, prio, callback);
+}
+
+/**
+ * jsctx_rb_peek_prio(): - Check buffer and get next atom
+ * @kctx: Pointer to kbase context with ring buffer.
+ * @js: Job slot id to check.
+ * @prio: Priority id to check.
+ *
+ * Check the ring buffer for the specified @js and @prio and return a pointer to
+ * the next atom, unless the ring buffer is empty.
+ *
+ * Return: Pointer to next atom in buffer, or NULL if there is no atom.
+ */
+static inline struct kbase_jd_atom *
+jsctx_rb_peek_prio(struct kbase_context *kctx, int js, int prio)
+{
+ struct jsctx_queue *rb = &kctx->jsctx_queue[prio][js];
+ struct rb_node *node;
+
+ lockdep_assert_held(&kctx->kbdev->hwaccess_lock);
+
+ node = rb_first(&rb->runnable_tree);
+ if (!node)
+ return NULL;
+
+ return rb_entry(node, struct kbase_jd_atom, runnable_tree_node);
+}
+
+/**
+ * jsctx_rb_peek(): - Check all priority buffers and get next atom
+ * @kctx: Pointer to kbase context with ring buffer.
+ * @js: Job slot id to check.
+ *
+ * Check the ring buffers for all priorities, starting from
+ * KBASE_JS_ATOM_SCHED_PRIO_HIGH, for the specified @js and return a
+ * pointer to the next atom, unless all the priorities' ring buffers are empty.
+ *
+ * Caller must hold the hwaccess_lock.
+ *
+ * Return: Pointer to next atom in buffer, or NULL if there is no atom.
+ */
+static inline struct kbase_jd_atom *
+jsctx_rb_peek(struct kbase_context *kctx, int js)
+{
+ int prio;
+
+ lockdep_assert_held(&kctx->kbdev->hwaccess_lock);
+
+ for (prio = 0; prio < KBASE_JS_ATOM_SCHED_PRIO_COUNT; prio++) {
+ struct kbase_jd_atom *katom;
+
+ katom = jsctx_rb_peek_prio(kctx, js, prio);
+ if (katom)
+ return katom;
+ }
+
+ return NULL;
+}
+
+/**
+ * jsctx_rb_pull(): - Mark atom in list as running
+ * @kctx: Pointer to kbase context with ring buffer.
+ * @katom: Pointer to katom to pull.
+ *
+ * Mark an atom previously obtained from jsctx_rb_peek() as running.
+ *
+ * @katom must currently be at the head of the ring buffer.
+ */
+static inline void
+jsctx_rb_pull(struct kbase_context *kctx, struct kbase_jd_atom *katom)
+{
+ int prio = katom->sched_priority;
+ int js = katom->slot_nr;
+ struct jsctx_queue *rb = &kctx->jsctx_queue[prio][js];
+
+ lockdep_assert_held(&kctx->kbdev->hwaccess_lock);
+
+ /* Atoms must be pulled in the correct order. */
+ WARN_ON(katom != jsctx_rb_peek_prio(kctx, js, prio));
+
+ rb_erase(&katom->runnable_tree_node, &rb->runnable_tree);
+}
+
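+/* Wrap-safe "a precedes b" test for 32-bit sequence numbers such as atom
+ * ages: the unsigned subtraction wraps modulo 2^32 and the result is then
+ * reinterpreted as signed (the same idiom as the kernel's time_before()).
+ * It only gives the expected ordering while the two values remain within
+ * 2^31 of each other.
+ */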
+#define LESS_THAN_WRAP(a, b) ((s32)(a - b) < 0)
+
+static void
+jsctx_tree_add(struct kbase_context *kctx, struct kbase_jd_atom *katom)
+{
+ int prio = katom->sched_priority;
+ int js = katom->slot_nr;
+ struct jsctx_queue *queue = &kctx->jsctx_queue[prio][js];
+ struct rb_node **new = &(queue->runnable_tree.rb_node), *parent = NULL;
+
+ lockdep_assert_held(&kctx->kbdev->hwaccess_lock);
+
+ while (*new) {
+ struct kbase_jd_atom *entry = container_of(*new,
+ struct kbase_jd_atom, runnable_tree_node);
+
+ parent = *new;
+ if (LESS_THAN_WRAP(katom->age, entry->age))
+ new = &((*new)->rb_left);
+ else
+ new = &((*new)->rb_right);
+ }
+
+ /* Add new node and rebalance tree. */
+ rb_link_node(&katom->runnable_tree_node, parent, new);
+ rb_insert_color(&katom->runnable_tree_node, &queue->runnable_tree);
+}
+
+/**
+ * jsctx_rb_unpull(): - Undo marking of atom in list as running
+ * @kctx: Pointer to kbase context with ring buffer.
+ * @katom: Pointer to katom to unpull.
+ *
+ * Undo jsctx_rb_pull() and put @katom back in the queue.
+ *
+ * jsctx_rb_unpull() must be called on atoms in the same order the atoms were
+ * pulled.
+ */
+static inline void
+jsctx_rb_unpull(struct kbase_context *kctx, struct kbase_jd_atom *katom)
+{
+ lockdep_assert_held(&kctx->kbdev->hwaccess_lock);
+
+ jsctx_tree_add(kctx, katom);
+}
+
+static bool kbase_js_ctx_pullable(struct kbase_context *kctx,
+ int js,
+ bool is_scheduled);
+static bool kbase_js_ctx_list_add_pullable_nolock(struct kbase_device *kbdev,
+ struct kbase_context *kctx,
+ int js);
+static bool kbase_js_ctx_list_add_unpullable_nolock(struct kbase_device *kbdev,
+ struct kbase_context *kctx,
+ int js);
+
+/*
+ * Functions private to KBase ('Protected' functions)
+ */
+int kbasep_js_devdata_init(struct kbase_device * const kbdev)
+{
+ struct kbasep_js_device_data *jsdd;
+ int err;
+ int i;
+ u16 as_present;
+
+ KBASE_DEBUG_ASSERT(kbdev != NULL);
+
+ jsdd = &kbdev->js_data;
+
+ KBASE_DEBUG_ASSERT(jsdd->init_status == JS_DEVDATA_INIT_NONE);
+
+ /* These two must be recalculated if nr_hw_address_spaces changes
+ * (e.g. for HW workarounds) */
+ as_present = (1U << kbdev->nr_hw_address_spaces) - 1;
+ kbdev->nr_user_address_spaces = kbdev->nr_hw_address_spaces;
+ if (kbase_hw_has_issue(kbdev, BASE_HW_ISSUE_8987)) {
+ bool use_workaround;
+
+ use_workaround = DEFAULT_SECURE_BUT_LOSS_OF_PERFORMANCE;
+ if (use_workaround) {
+ dev_dbg(kbdev->dev, "GPU has HW ISSUE 8987, and driver configured for security workaround: 1 address space only");
+ kbdev->nr_user_address_spaces = 1;
+ }
+ }
+#ifdef CONFIG_MALI_DEBUG
+ /* Soft-stop will be disabled on a single context by default unless
+ * softstop_always is set */
+ jsdd->softstop_always = false;
+#endif /* CONFIG_MALI_DEBUG */
+ jsdd->nr_all_contexts_running = 0;
+ jsdd->nr_user_contexts_running = 0;
+ jsdd->nr_contexts_pullable = 0;
+ atomic_set(&jsdd->nr_contexts_runnable, 0);
+ /* All ASs initially free */
+ jsdd->as_free = as_present;
+ /* No ctx allowed to submit */
+ jsdd->runpool_irq.submit_allowed = 0u;
+ memset(jsdd->runpool_irq.ctx_attr_ref_count, 0,
+ sizeof(jsdd->runpool_irq.ctx_attr_ref_count));
+ memset(jsdd->runpool_irq.slot_affinities, 0,
+ sizeof(jsdd->runpool_irq.slot_affinities));
+ memset(jsdd->runpool_irq.slot_affinity_refcount, 0,
+ sizeof(jsdd->runpool_irq.slot_affinity_refcount));
+ INIT_LIST_HEAD(&jsdd->suspended_soft_jobs_list);
+
+ /* Config attributes */
+ jsdd->scheduling_period_ns = DEFAULT_JS_SCHEDULING_PERIOD_NS;
+ jsdd->soft_stop_ticks = DEFAULT_JS_SOFT_STOP_TICKS;
+ jsdd->soft_stop_ticks_cl = DEFAULT_JS_SOFT_STOP_TICKS_CL;
+ if (kbase_hw_has_issue(kbdev, BASE_HW_ISSUE_8408))
+ jsdd->hard_stop_ticks_ss = DEFAULT_JS_HARD_STOP_TICKS_SS_8408;
+ else
+ jsdd->hard_stop_ticks_ss = DEFAULT_JS_HARD_STOP_TICKS_SS;
+ jsdd->hard_stop_ticks_cl = DEFAULT_JS_HARD_STOP_TICKS_CL;
+ jsdd->hard_stop_ticks_dumping = DEFAULT_JS_HARD_STOP_TICKS_DUMPING;
+ if (kbase_hw_has_issue(kbdev, BASE_HW_ISSUE_8408))
+ jsdd->gpu_reset_ticks_ss = DEFAULT_JS_RESET_TICKS_SS_8408;
+ else
+ jsdd->gpu_reset_ticks_ss = DEFAULT_JS_RESET_TICKS_SS;
+ jsdd->gpu_reset_ticks_cl = DEFAULT_JS_RESET_TICKS_CL;
+ jsdd->gpu_reset_ticks_dumping = DEFAULT_JS_RESET_TICKS_DUMPING;
+ jsdd->ctx_timeslice_ns = DEFAULT_JS_CTX_TIMESLICE_NS;
+ jsdd->cfs_ctx_runtime_init_slices =
+ DEFAULT_JS_CFS_CTX_RUNTIME_INIT_SLICES;
+ jsdd->cfs_ctx_runtime_min_slices =
+ DEFAULT_JS_CFS_CTX_RUNTIME_MIN_SLICES;
+ atomic_set(&jsdd->soft_job_timeout_ms, DEFAULT_JS_SOFT_JOB_TIMEOUT);
+
+ dev_dbg(kbdev->dev, "JS Config Attribs: ");
+ dev_dbg(kbdev->dev, "\tscheduling_period_ns:%u",
+ jsdd->scheduling_period_ns);
+ dev_dbg(kbdev->dev, "\tsoft_stop_ticks:%u",
+ jsdd->soft_stop_ticks);
+ dev_dbg(kbdev->dev, "\tsoft_stop_ticks_cl:%u",
+ jsdd->soft_stop_ticks_cl);
+ dev_dbg(kbdev->dev, "\thard_stop_ticks_ss:%u",
+ jsdd->hard_stop_ticks_ss);
+ dev_dbg(kbdev->dev, "\thard_stop_ticks_cl:%u",
+ jsdd->hard_stop_ticks_cl);
+ dev_dbg(kbdev->dev, "\thard_stop_ticks_dumping:%u",
+ jsdd->hard_stop_ticks_dumping);
+ dev_dbg(kbdev->dev, "\tgpu_reset_ticks_ss:%u",
+ jsdd->gpu_reset_ticks_ss);
+ dev_dbg(kbdev->dev, "\tgpu_reset_ticks_cl:%u",
+ jsdd->gpu_reset_ticks_cl);
+ dev_dbg(kbdev->dev, "\tgpu_reset_ticks_dumping:%u",
+ jsdd->gpu_reset_ticks_dumping);
+ dev_dbg(kbdev->dev, "\tctx_timeslice_ns:%u",
+ jsdd->ctx_timeslice_ns);
+ dev_dbg(kbdev->dev, "\tcfs_ctx_runtime_init_slices:%u",
+ jsdd->cfs_ctx_runtime_init_slices);
+ dev_dbg(kbdev->dev, "\tcfs_ctx_runtime_min_slices:%u",
+ jsdd->cfs_ctx_runtime_min_slices);
+ dev_dbg(kbdev->dev, "\tsoft_job_timeout:%i",
+ atomic_read(&jsdd->soft_job_timeout_ms));
+
+ if (!(jsdd->soft_stop_ticks < jsdd->hard_stop_ticks_ss &&
+ jsdd->hard_stop_ticks_ss < jsdd->gpu_reset_ticks_ss &&
+ jsdd->soft_stop_ticks < jsdd->hard_stop_ticks_dumping &&
+ jsdd->hard_stop_ticks_dumping <
+ jsdd->gpu_reset_ticks_dumping)) {
+ dev_err(kbdev->dev, "Job scheduler timeouts invalid; soft/hard/reset tick counts should be in increasing order\n");
+ return -EINVAL;
+ }
+
+#if KBASE_DISABLE_SCHEDULING_SOFT_STOPS
+ dev_dbg(kbdev->dev, "Job Scheduling Policy Soft-stops disabled, ignoring value for soft_stop_ticks==%u at %uns per tick. Other soft-stops may still occur.",
+ jsdd->soft_stop_ticks,
+ jsdd->scheduling_period_ns);
+#endif
+#if KBASE_DISABLE_SCHEDULING_HARD_STOPS
+ dev_dbg(kbdev->dev, "Job Scheduling Policy Hard-stops disabled, ignoring values for hard_stop_ticks_ss==%d and hard_stop_ticks_dumping==%u at %uns per tick. Other hard-stops may still occur.",
+ jsdd->hard_stop_ticks_ss,
+ jsdd->hard_stop_ticks_dumping,
+ jsdd->scheduling_period_ns);
+#endif
+#if KBASE_DISABLE_SCHEDULING_SOFT_STOPS && KBASE_DISABLE_SCHEDULING_HARD_STOPS
+ dev_dbg(kbdev->dev, "Note: The JS policy's tick timer (if coded) will still be run, but do nothing.");
+#endif
+
+ /* set up the number of irq throttle cycles based on the given time */
+ {
+ int time_us = kbdev->gpu_props.irq_throttle_time_us;
+ int cycles = kbasep_js_convert_us_to_gpu_ticks_max_freq(kbdev,
+ time_us);
+
+ atomic_set(&kbdev->irq_throttle_cycles, cycles);
+ }
+
+ /* Clear the AS data, including setting NULL pointers */
+ memset(&jsdd->runpool_irq.per_as_data[0], 0,
+ sizeof(jsdd->runpool_irq.per_as_data));
+
+ for (i = 0; i < kbdev->gpu_props.num_job_slots; ++i)
+ jsdd->js_reqs[i] = core_reqs_from_jsn_features(
+ kbdev->gpu_props.props.raw_props.js_features[i]);
+
+ jsdd->init_status |= JS_DEVDATA_INIT_CONSTANTS;
+
+ /* On error, we could continue on: provided none of the resources below
+ * rely on the ones above */
+
+ mutex_init(&jsdd->runpool_mutex);
+ mutex_init(&jsdd->queue_mutex);
+ spin_lock_init(&kbdev->hwaccess_lock);
+ sema_init(&jsdd->schedule_sem, 1);
+
+ err = kbasep_js_policy_init(kbdev);
+ if (!err)
+ jsdd->init_status |= JS_DEVDATA_INIT_POLICY;
+
+ for (i = 0; i < kbdev->gpu_props.num_job_slots; ++i) {
+ INIT_LIST_HEAD(&jsdd->ctx_list_pullable[i]);
+ INIT_LIST_HEAD(&jsdd->ctx_list_unpullable[i]);
+ }
+
+ /* On error, do no cleanup; this will be handled by the caller(s), since
+ * we've designed this resource to be safe to terminate on init-fail */
+ if (jsdd->init_status != JS_DEVDATA_INIT_ALL)
+ return -EINVAL;
+
+ return 0;
+}
+
+void kbasep_js_devdata_halt(struct kbase_device *kbdev)
+{
+ CSTD_UNUSED(kbdev);
+}
+
+void kbasep_js_devdata_term(struct kbase_device *kbdev)
+{
+ struct kbasep_js_device_data *js_devdata;
+
+ KBASE_DEBUG_ASSERT(kbdev != NULL);
+
+ js_devdata = &kbdev->js_data;
+
+ if ((js_devdata->init_status & JS_DEVDATA_INIT_CONSTANTS)) {
+ s8 zero_ctx_attr_ref_count[KBASEP_JS_CTX_ATTR_COUNT] = { 0, };
+ /* The caller must de-register all contexts before calling this
+ */
+ KBASE_DEBUG_ASSERT(js_devdata->nr_all_contexts_running == 0);
+ KBASE_DEBUG_ASSERT(memcmp(
+ js_devdata->runpool_irq.ctx_attr_ref_count,
+ zero_ctx_attr_ref_count,
+ sizeof(zero_ctx_attr_ref_count)) == 0);
+ CSTD_UNUSED(zero_ctx_attr_ref_count);
+ }
+ if ((js_devdata->init_status & JS_DEVDATA_INIT_POLICY))
+ kbasep_js_policy_term(&js_devdata->policy);
+
+ js_devdata->init_status = JS_DEVDATA_INIT_NONE;
+}
+
+int kbasep_js_kctx_init(struct kbase_context * const kctx)
+{
+ struct kbase_device *kbdev;
+ struct kbasep_js_kctx_info *js_kctx_info;
+ int err;
+ int i, j;
+
+ KBASE_DEBUG_ASSERT(kctx != NULL);
+
+ kbdev = kctx->kbdev;
+ KBASE_DEBUG_ASSERT(kbdev != NULL);
+
+ for (i = 0; i < BASE_JM_MAX_NR_SLOTS; ++i)
+ INIT_LIST_HEAD(&kctx->jctx.sched_info.ctx.ctx_list_entry[i]);
+
+ js_kctx_info = &kctx->jctx.sched_info;
+ KBASE_DEBUG_ASSERT(js_kctx_info->init_status == JS_KCTX_INIT_NONE);
+
+ js_kctx_info->ctx.nr_jobs = 0;
+ kbase_ctx_flag_clear(kctx, KCTX_SCHEDULED);
+ kbase_ctx_flag_clear(kctx, KCTX_DYING);
+ memset(js_kctx_info->ctx.ctx_attr_ref_count, 0,
+ sizeof(js_kctx_info->ctx.ctx_attr_ref_count));
+
+ /* Initially, the context is disabled from submission until the create
+ * flags are set */
+ kbase_ctx_flag_set(kctx, KCTX_SUBMIT_DISABLED);
+
+ js_kctx_info->init_status |= JS_KCTX_INIT_CONSTANTS;
+
+ /* On error, we could continue on: provided none of the resources below
+ * rely on the ones above */
+ mutex_init(&js_kctx_info->ctx.jsctx_mutex);
+
+ init_waitqueue_head(&js_kctx_info->ctx.is_scheduled_wait);
+
+ err = kbasep_js_policy_init_ctx(kbdev, kctx);
+ if (!err)
+ js_kctx_info->init_status |= JS_KCTX_INIT_POLICY;
+
+ /* On error, do no cleanup; this will be handled by the caller(s), since
+ * we've designed this resource to be safe to terminate on init-fail */
+ if (js_kctx_info->init_status != JS_KCTX_INIT_ALL)
+ return -EINVAL;
+
+ for (i = 0; i < KBASE_JS_ATOM_SCHED_PRIO_COUNT; i++) {
+ for (j = 0; j < BASE_JM_MAX_NR_SLOTS; j++) {
+ INIT_LIST_HEAD(&kctx->jsctx_queue[i][j].x_dep_head);
+ kctx->jsctx_queue[i][j].runnable_tree = RB_ROOT;
+ }
+ }
+
+ return 0;
+}
+
+void kbasep_js_kctx_term(struct kbase_context *kctx)
+{
+ struct kbase_device *kbdev;
+ struct kbasep_js_kctx_info *js_kctx_info;
+ union kbasep_js_policy *js_policy;
+ int js;
+ bool update_ctx_count = false;
+
+ KBASE_DEBUG_ASSERT(kctx != NULL);
+
+ kbdev = kctx->kbdev;
+ KBASE_DEBUG_ASSERT(kbdev != NULL);
+
+ js_policy = &kbdev->js_data.policy;
+ js_kctx_info = &kctx->jctx.sched_info;
+
+ if ((js_kctx_info->init_status & JS_KCTX_INIT_CONSTANTS)) {
+ /* The caller must de-register all jobs before calling this */
+ KBASE_DEBUG_ASSERT(!kbase_ctx_flag(kctx, KCTX_SCHEDULED));
+ KBASE_DEBUG_ASSERT(js_kctx_info->ctx.nr_jobs == 0);
+ }
+
+ mutex_lock(&kbdev->js_data.queue_mutex);
+ mutex_lock(&kctx->jctx.sched_info.ctx.jsctx_mutex);
+
+ for (js = 0; js < kbdev->gpu_props.num_job_slots; js++)
+ list_del_init(&kctx->jctx.sched_info.ctx.ctx_list_entry[js]);
+
+ if (kbase_ctx_flag(kctx, KCTX_RUNNABLE_REF)) {
+ WARN_ON(atomic_read(&kbdev->js_data.nr_contexts_runnable) <= 0);
+ atomic_dec(&kbdev->js_data.nr_contexts_runnable);
+ update_ctx_count = true;
+ kbase_ctx_flag_clear(kctx, KCTX_RUNNABLE_REF);
+ }
+
+ mutex_unlock(&kctx->jctx.sched_info.ctx.jsctx_mutex);
+ mutex_unlock(&kbdev->js_data.queue_mutex);
+
+ if ((js_kctx_info->init_status & JS_KCTX_INIT_POLICY))
+ kbasep_js_policy_term_ctx(js_policy, kctx);
+
+ js_kctx_info->init_status = JS_KCTX_INIT_NONE;
+
+ if (update_ctx_count) {
+ mutex_lock(&kbdev->js_data.runpool_mutex);
+ kbase_backend_ctx_count_changed(kbdev);
+ mutex_unlock(&kbdev->js_data.runpool_mutex);
+ }
+}
+
+/**
+ * kbase_js_ctx_list_add_pullable_nolock - Variant of
+ * kbase_js_ctx_list_add_pullable()
+ * where the caller must hold
+ * hwaccess_lock
+ * @kbdev: Device pointer
+ * @kctx: Context to add to queue
+ * @js: Job slot to use
+ *
+ * Caller must hold hwaccess_lock
+ *
+ * Return: true if caller should call kbase_backend_ctx_count_changed()
+ */
+static bool kbase_js_ctx_list_add_pullable_nolock(struct kbase_device *kbdev,
+ struct kbase_context *kctx,
+ int js)
+{
+ bool ret = false;
+
+ lockdep_assert_held(&kbdev->hwaccess_lock);
+
+ if (!list_empty(&kctx->jctx.sched_info.ctx.ctx_list_entry[js]))
+ list_del_init(&kctx->jctx.sched_info.ctx.ctx_list_entry[js]);
+
+ list_add_tail(&kctx->jctx.sched_info.ctx.ctx_list_entry[js],
+ &kbdev->js_data.ctx_list_pullable[js]);
+
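+ /* slots_pullable is a per-context bitmask of job slots this context can
+ * currently be pulled from. When it goes from empty to non-empty the
+ * context becomes runnable, so count it as pullable and, if it has no
+ * atoms currently pulled, take the RUNNABLE_REF and bump the device-wide
+ * runnable-context count.
+ */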
+ if (!kctx->slots_pullable) {
+ kbdev->js_data.nr_contexts_pullable++;
+ ret = true;
+ if (!atomic_read(&kctx->atoms_pulled)) {
+ WARN_ON(kbase_ctx_flag(kctx, KCTX_RUNNABLE_REF));
+ kbase_ctx_flag_set(kctx, KCTX_RUNNABLE_REF);
+ atomic_inc(&kbdev->js_data.nr_contexts_runnable);
+ }
+ }
+ kctx->slots_pullable |= (1 << js);
+
+ return ret;
+}
+
+/**
+ * kbase_js_ctx_list_add_pullable_head_nolock - Variant of
+ * kbase_js_ctx_list_add_pullable_head()
+ * where the caller must hold
+ * hwaccess_lock
+ * @kbdev: Device pointer
+ * @kctx: Context to add to queue
+ * @js: Job slot to use
+ *
+ * Caller must hold hwaccess_lock
+ *
+ * Return: true if caller should call kbase_backend_ctx_count_changed()
+ */
+static bool kbase_js_ctx_list_add_pullable_head_nolock(
+ struct kbase_device *kbdev, struct kbase_context *kctx, int js)
+{
+ bool ret = false;
+
+ lockdep_assert_held(&kbdev->hwaccess_lock);
+
+ if (!list_empty(&kctx->jctx.sched_info.ctx.ctx_list_entry[js]))
+ list_del_init(&kctx->jctx.sched_info.ctx.ctx_list_entry[js]);
+
+ list_add(&kctx->jctx.sched_info.ctx.ctx_list_entry[js],
+ &kbdev->js_data.ctx_list_pullable[js]);
+
+ if (!kctx->slots_pullable) {
+ kbdev->js_data.nr_contexts_pullable++;
+ ret = true;
+ if (!atomic_read(&kctx->atoms_pulled)) {
+ WARN_ON(kbase_ctx_flag(kctx, KCTX_RUNNABLE_REF));
+ kbase_ctx_flag_set(kctx, KCTX_RUNNABLE_REF);
+ atomic_inc(&kbdev->js_data.nr_contexts_runnable);
+ }
+ }
+ kctx->slots_pullable |= (1 << js);
+
+ return ret;
+}
+
+/**
+ * kbase_js_ctx_list_add_pullable_head - Add context to the head of the
+ * per-slot pullable context queue
+ * @kbdev: Device pointer
+ * @kctx: Context to add to queue
+ * @js: Job slot to use
+ *
+ * If the context is on either the pullable or unpullable queues, then it is
+ * removed before being added to the head.
+ *
+ * This function should be used when a context has been scheduled, but no jobs
+ * can currently be pulled from it.
+ *
+ * Return: true if caller should call kbase_backend_ctx_count_changed()
+ */
+static bool kbase_js_ctx_list_add_pullable_head(struct kbase_device *kbdev,
+ struct kbase_context *kctx,
+ int js)
+{
+ bool ret;
+ unsigned long flags;
+
+ spin_lock_irqsave(&kbdev->hwaccess_lock, flags);
+ ret = kbase_js_ctx_list_add_pullable_head_nolock(kbdev, kctx, js);
+ spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);
+
+ return ret;
+}
+
+/**
+ * kbase_js_ctx_list_add_unpullable_nolock - Add context to the tail of the
+ * per-slot unpullable context queue
+ * @kbdev: Device pointer
+ * @kctx: Context to add to queue
+ * @js: Job slot to use
+ *
+ * The context must already be on the per-slot pullable queue. It will be
+ * removed from the pullable queue before being added to the unpullable queue.
+ *
+ * This function should be used when a context has been pulled from, and there
+ * are no jobs remaining on the specified slot.
+ *
+ * Caller must hold hwaccess_lock
+ *
+ * Return: true if caller should call kbase_backend_ctx_count_changed()
+ */
+static bool kbase_js_ctx_list_add_unpullable_nolock(struct kbase_device *kbdev,
+ struct kbase_context *kctx,
+ int js)
+{
+ bool ret = false;
+
+ lockdep_assert_held(&kbdev->hwaccess_lock);
+
+ list_move_tail(&kctx->jctx.sched_info.ctx.ctx_list_entry[js],
+ &kbdev->js_data.ctx_list_unpullable[js]);
+
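+ /* If @js was the only slot this context could be pulled from, the
+ * context is no longer pullable; drop the RUNNABLE_REF again if it also
+ * has no atoms currently pulled.
+ */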
+ if (kctx->slots_pullable == (1 << js)) {
+ kbdev->js_data.nr_contexts_pullable--;
+ ret = true;
+ if (!atomic_read(&kctx->atoms_pulled)) {
+ WARN_ON(!kbase_ctx_flag(kctx, KCTX_RUNNABLE_REF));
+ kbase_ctx_flag_clear(kctx, KCTX_RUNNABLE_REF);
+ atomic_dec(&kbdev->js_data.nr_contexts_runnable);
+ }
+ }
+ kctx->slots_pullable &= ~(1 << js);
+
+ return ret;
+}
+
+/**
+ * kbase_js_ctx_list_remove_nolock - Remove context from the per-slot pullable
+ * or unpullable context queues
+ * @kbdev: Device pointer
+ * @kctx: Context to remove from queue
+ * @js: Job slot to use
+ *
+ * The context must already be on one of the queues.
+ *
+ * This function should be used when a context has no jobs on the GPU, and no
+ * jobs remaining for the specified slot.
+ *
+ * Caller must hold hwaccess_lock
+ *
+ * Return: true if caller should call kbase_backend_ctx_count_changed()
+ */
+static bool kbase_js_ctx_list_remove_nolock(struct kbase_device *kbdev,
+ struct kbase_context *kctx,
+ int js)
+{
+ bool ret = false;
+
+ lockdep_assert_held(&kbdev->hwaccess_lock);
+
+ WARN_ON(list_empty(&kctx->jctx.sched_info.ctx.ctx_list_entry[js]));
+
+ list_del_init(&kctx->jctx.sched_info.ctx.ctx_list_entry[js]);
+
+ if (kctx->slots_pullable == (1 << js)) {
+ kbdev->js_data.nr_contexts_pullable--;
+ ret = true;
+ if (!atomic_read(&kctx->atoms_pulled)) {
+ WARN_ON(!kbase_ctx_flag(kctx, KCTX_RUNNABLE_REF));
+ kbase_ctx_flag_clear(kctx, KCTX_RUNNABLE_REF);
+ atomic_dec(&kbdev->js_data.nr_contexts_runnable);
+ }
+ }
+ kctx->slots_pullable &= ~(1 << js);
+
+ return ret;
+}
+
+/**
+ * kbase_js_ctx_list_pop_head_nolock - Variant of kbase_js_ctx_list_pop_head()
+ * where the caller must hold
+ * hwaccess_lock
+ * @kbdev: Device pointer
+ * @js: Job slot to use
+ *
+ * Caller must hold hwaccess_lock
+ *
+ * Return: Context to use for specified slot.
+ * NULL if no contexts present for specified slot
+ */
+static struct kbase_context *kbase_js_ctx_list_pop_head_nolock(
+ struct kbase_device *kbdev,
+ int js)
+{
+ struct kbase_context *kctx;
+
+ lockdep_assert_held(&kbdev->hwaccess_lock);
+
+ if (list_empty(&kbdev->js_data.ctx_list_pullable[js]))
+ return NULL;
+
+ kctx = list_entry(kbdev->js_data.ctx_list_pullable[js].next,
+ struct kbase_context,
+ jctx.sched_info.ctx.ctx_list_entry[js]);
+
+ list_del_init(&kctx->jctx.sched_info.ctx.ctx_list_entry[js]);
+
+ return kctx;
+}
+
+/**
+ * kbase_js_ctx_list_pop_head - Pop the head context off the per-slot pullable
+ * queue.
+ * @kbdev: Device pointer
+ * @js: Job slot to use
+ *
+ * Return: Context to use for specified slot.
+ * NULL if no contexts present for specified slot
+ */
+static struct kbase_context *kbase_js_ctx_list_pop_head(
+ struct kbase_device *kbdev, int js)
+{
+ struct kbase_context *kctx;
+ unsigned long flags;
+
+ spin_lock_irqsave(&kbdev->hwaccess_lock, flags);
+ kctx = kbase_js_ctx_list_pop_head_nolock(kbdev, js);
+ spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);
+
+ return kctx;
+}
+
+/**
+ * kbase_js_ctx_pullable - Return if a context can be pulled from on the
+ * specified slot
+ * @kctx: Context pointer
+ * @js: Job slot to use
+ * @is_scheduled: true if the context is currently scheduled
+ *
+ * Caller must hold hwaccess_lock
+ *
+ * Return: true if context can be pulled from on specified slot
+ * false otherwise
+ */
+static bool kbase_js_ctx_pullable(struct kbase_context *kctx, int js,
+ bool is_scheduled)
+{
+ struct kbasep_js_device_data *js_devdata;
+ struct kbase_jd_atom *katom;
+
+ lockdep_assert_held(&kctx->kbdev->hwaccess_lock);
+
+ js_devdata = &kctx->kbdev->js_data;
+
+ if (is_scheduled) {
+ if (!kbasep_js_is_submit_allowed(js_devdata, kctx))
+ return false;
+ }
+ katom = jsctx_rb_peek(kctx, js);
+ if (!katom)
+ return false; /* No pullable atoms */
+ if (atomic_read(&katom->blocked))
+ return false; /* next atom blocked */
+ if (katom->atom_flags & KBASE_KATOM_FLAG_X_DEP_BLOCKED) {
+ if (katom->x_pre_dep->gpu_rb_state ==
+ KBASE_ATOM_GPU_RB_NOT_IN_SLOT_RB ||
+ katom->x_pre_dep->will_fail_event_code)
+ return false;
+ if ((katom->atom_flags & KBASE_KATOM_FLAG_FAIL_BLOCKER) &&
+ kbase_backend_nr_atoms_on_slot(kctx->kbdev, js))
+ return false;
+ }
+
+ return true;
+}
+
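+/**
+ * kbase_js_dep_validate - Check whether an atom's dependencies can be
+ *                         represented in the per-context ringbuffers
+ * @kctx:  Context pointer
+ * @katom: Atom whose dependencies are to be checked
+ *
+ * At most one same-slot and one cross-slot dependency can be represented, and
+ * every dependency must have the same scheduling priority as @katom. If the
+ * dependencies can be represented they are cleared from the atom structure
+ * and recorded as pre_dep/x_pre_dep links instead.
+ *
+ * Return: true if the dependencies can be represented, false otherwise
+ */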
+static bool kbase_js_dep_validate(struct kbase_context *kctx,
+ struct kbase_jd_atom *katom)
+{
+ struct kbase_device *kbdev = kctx->kbdev;
+ bool ret = true;
+ bool has_dep = false, has_x_dep = false;
+ int js = kbase_js_get_slot(kbdev, katom);
+ int prio = katom->sched_priority;
+ int i;
+
+ for (i = 0; i < 2; i++) {
+ struct kbase_jd_atom *dep_atom = katom->dep[i].atom;
+
+ if (dep_atom) {
+ int dep_js = kbase_js_get_slot(kbdev, dep_atom);
+ int dep_prio = dep_atom->sched_priority;
+
+ /* Dependent atom must already have been submitted */
+ if (!(dep_atom->atom_flags &
+ KBASE_KATOM_FLAG_JSCTX_IN_TREE)) {
+ ret = false;
+ break;
+ }
+
+ /* Dependencies with different priorities can't
+ be represented in the ringbuffer */
+ if (prio != dep_prio) {
+ ret = false;
+ break;
+ }
+
+ if (js == dep_js) {
+ /* Only one same-slot dependency can be
+ * represented in the ringbuffer */
+ if (has_dep) {
+ ret = false;
+ break;
+ }
+ /* Each dependee atom can only have one
+ * same-slot dependency */
+ if (dep_atom->post_dep) {
+ ret = false;
+ break;
+ }
+ has_dep = true;
+ } else {
+ /* Only one cross-slot dependency can be
+ * represented in the ringbuffer */
+ if (has_x_dep) {
+ ret = false;
+ break;
+ }
+ /* Each dependee atom can only have one
+ * cross-slot dependency */
+ if (dep_atom->x_post_dep) {
+ ret = false;
+ break;
+ }
+ /* The dependee atom can not already be in the
+ * HW access ringbuffer */
+ if (dep_atom->gpu_rb_state !=
+ KBASE_ATOM_GPU_RB_NOT_IN_SLOT_RB) {
+ ret = false;
+ break;
+ }
+ /* The dependee atom can not already have
+ * completed */
+ if (dep_atom->status !=
+ KBASE_JD_ATOM_STATE_IN_JS) {
+ ret = false;
+ break;
+ }
+ /* Cross-slot dependencies must not violate
+ * PRLAM-8987 affinity restrictions */
+ if (kbase_hw_has_issue(kbdev,
+ BASE_HW_ISSUE_8987) &&
+ (js == 2 || dep_js == 2)) {
+ ret = false;
+ break;
+ }
+ has_x_dep = true;
+ }
+
+ /* Dependency can be represented in ringbuffers */
+ }
+ }
+
+ /* If the dependencies can be represented in the ringbuffers then clear
+  * them from the atom structure */
+ if (ret) {
+ for (i = 0; i < 2; i++) {
+ struct kbase_jd_atom *dep_atom = katom->dep[i].atom;
+
+ if (dep_atom) {
+ int dep_js = kbase_js_get_slot(kbdev, dep_atom);
+
+ if ((js != dep_js) &&
+ (dep_atom->status !=
+ KBASE_JD_ATOM_STATE_COMPLETED)
+ && (dep_atom->status !=
+ KBASE_JD_ATOM_STATE_HW_COMPLETED)
+ && (dep_atom->status !=
+ KBASE_JD_ATOM_STATE_UNUSED)) {
+
+ katom->atom_flags |=
+ KBASE_KATOM_FLAG_X_DEP_BLOCKED;
+ katom->x_pre_dep = dep_atom;
+ dep_atom->x_post_dep = katom;
+ if (kbase_jd_katom_dep_type(
+ &katom->dep[i]) ==
+ BASE_JD_DEP_TYPE_DATA)
+ katom->atom_flags |=
+ KBASE_KATOM_FLAG_FAIL_BLOCKER;
+ }
+ if ((kbase_jd_katom_dep_type(&katom->dep[i])
+ == BASE_JD_DEP_TYPE_DATA) &&
+ (js == dep_js)) {
+ katom->pre_dep = dep_atom;
+ dep_atom->post_dep = katom;
+ }
+
+ list_del(&katom->dep_item[i]);
+ kbase_jd_katom_dep_clear(&katom->dep[i]);
+ }
+ }
+ }
+
+ return ret;
+}
+
+bool kbasep_js_add_job(struct kbase_context *kctx,
+ struct kbase_jd_atom *atom)
+{
+ unsigned long flags;
+ struct kbasep_js_kctx_info *js_kctx_info;
+ struct kbase_device *kbdev;
+ struct kbasep_js_device_data *js_devdata;
+ union kbasep_js_policy *js_policy;
+
+ bool enqueue_required = false;
+ bool timer_sync = false;
+
+ KBASE_DEBUG_ASSERT(kctx != NULL);
+ KBASE_DEBUG_ASSERT(atom != NULL);
+ lockdep_assert_held(&kctx->jctx.lock);
+
+ kbdev = kctx->kbdev;
+ js_devdata = &kbdev->js_data;
+ js_policy = &kbdev->js_data.policy;
+ js_kctx_info = &kctx->jctx.sched_info;
+
+ mutex_lock(&js_devdata->queue_mutex);
+ mutex_lock(&js_kctx_info->ctx.jsctx_mutex);
+
+ /*
+ * Begin Runpool transaction
+ */
+ mutex_lock(&js_devdata->runpool_mutex);
+
+ /* Refcount ctx.nr_jobs */
+ KBASE_DEBUG_ASSERT(js_kctx_info->ctx.nr_jobs < U32_MAX);
+ ++(js_kctx_info->ctx.nr_jobs);
+
+ /* Setup any scheduling information */
+ kbasep_js_clear_job_retry_submit(atom);
+
+ /* Lock for state available during IRQ */
+ spin_lock_irqsave(&kbdev->hwaccess_lock, flags);
+
+ if (!kbase_js_dep_validate(kctx, atom)) {
+ /* Dependencies could not be represented */
+ --(js_kctx_info->ctx.nr_jobs);
+
+ /* Setting atom status back to queued as it still has unresolved
+ * dependencies */
+ atom->status = KBASE_JD_ATOM_STATE_QUEUED;
+
+ spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);
+ mutex_unlock(&js_devdata->runpool_mutex);
+
+ goto out_unlock;
+ }
+
+ kbase_tlstream_tl_attrib_atom_state(atom, TL_ATOM_STATE_READY);
+ KBASE_TIMELINE_ATOM_READY(kctx, kbase_jd_atom_id(kctx, atom));
+
+ enqueue_required = kbase_js_dep_resolved_submit(kctx, atom);
+
+ KBASE_TRACE_ADD_REFCOUNT(kbdev, JS_ADD_JOB, kctx, atom, atom->jc,
+ kbasep_js_trace_get_refcnt_nolock(kbdev, kctx));
+
+ /* Context Attribute Refcounting */
+ kbasep_js_ctx_attr_ctx_retain_atom(kbdev, kctx, atom);
+
+ if (enqueue_required) {
+ if (kbase_js_ctx_pullable(kctx, atom->slot_nr, false))
+ timer_sync = kbase_js_ctx_list_add_pullable_nolock(
+ kbdev, kctx, atom->slot_nr);
+ else
+ timer_sync = kbase_js_ctx_list_add_unpullable_nolock(
+ kbdev, kctx, atom->slot_nr);
+ }
+ /* If this context is active and the atom is the first on its slot,
+ * kick the job manager to attempt to fast-start the atom */
+ if (enqueue_required && kctx == kbdev->hwaccess.active_kctx)
+ kbase_jm_try_kick(kbdev, 1 << atom->slot_nr);
+
+ spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);
+ if (timer_sync)
+ kbase_backend_ctx_count_changed(kbdev);
+ mutex_unlock(&js_devdata->runpool_mutex);
+ /* End runpool transaction */
+
+ if (!kbase_ctx_flag(kctx, KCTX_SCHEDULED)) {
+ if (kbase_ctx_flag(kctx, KCTX_DYING)) {
+ /* A job got added while/after kbase_job_zap_context()
+ * was called on a non-scheduled context (e.g. KDS
+ * dependency resolved). Kill that job by killing the
+ * context. */
+ kbasep_js_runpool_requeue_or_kill_ctx(kbdev, kctx,
+ false);
+ } else if (js_kctx_info->ctx.nr_jobs == 1) {
+ /* Handle Refcount going from 0 to 1: schedule the
+ * context on the Policy Queue */
+ KBASE_DEBUG_ASSERT(!kbase_ctx_flag(kctx, KCTX_SCHEDULED));
+ dev_dbg(kbdev->dev, "JS: Enqueue Context %p", kctx);
+
+ /* Policy Queue was updated - caller must try to
+ * schedule the head context */
+ WARN_ON(!enqueue_required);
+ }
+ }
+out_unlock:
+ mutex_unlock(&js_kctx_info->ctx.jsctx_mutex);
+
+ mutex_unlock(&js_devdata->queue_mutex);
+
+ return enqueue_required;
+}
+
+void kbasep_js_remove_job(struct kbase_device *kbdev,
+ struct kbase_context *kctx, struct kbase_jd_atom *atom)
+{
+ struct kbasep_js_kctx_info *js_kctx_info;
+ struct kbasep_js_device_data *js_devdata;
+ union kbasep_js_policy *js_policy;
+
+ KBASE_DEBUG_ASSERT(kbdev != NULL);
+ KBASE_DEBUG_ASSERT(kctx != NULL);
+ KBASE_DEBUG_ASSERT(atom != NULL);
+
+ js_devdata = &kbdev->js_data;
+ js_policy = &kbdev->js_data.policy;
+ js_kctx_info = &kctx->jctx.sched_info;
+
+ KBASE_TRACE_ADD_REFCOUNT(kbdev, JS_REMOVE_JOB, kctx, atom, atom->jc,
+ kbasep_js_trace_get_refcnt(kbdev, kctx));
+
+ /* De-refcount ctx.nr_jobs */
+ KBASE_DEBUG_ASSERT(js_kctx_info->ctx.nr_jobs > 0);
+ --(js_kctx_info->ctx.nr_jobs);
+}
+
+bool kbasep_js_remove_cancelled_job(struct kbase_device *kbdev,
+ struct kbase_context *kctx, struct kbase_jd_atom *katom)
+{
+ unsigned long flags;
+ struct kbasep_js_atom_retained_state katom_retained_state;
+ struct kbasep_js_device_data *js_devdata;
+ bool attr_state_changed;
+
+ KBASE_DEBUG_ASSERT(kbdev != NULL);
+ KBASE_DEBUG_ASSERT(kctx != NULL);
+ KBASE_DEBUG_ASSERT(katom != NULL);
+
+ js_devdata = &kbdev->js_data;
+
+ kbasep_js_atom_retained_state_copy(&katom_retained_state, katom);
+ kbasep_js_remove_job(kbdev, kctx, katom);
+
+ spin_lock_irqsave(&kbdev->hwaccess_lock, flags);
+
+ /* The atom has 'finished' (will not be re-run), so no need to call
+ * kbasep_js_has_atom_finished().
+ *
+ * This is because it returns false for soft-stopped atoms, but we
+ * want to override that, because we're cancelling an atom regardless of
+ * whether it was soft-stopped or not */
+ attr_state_changed = kbasep_js_ctx_attr_ctx_release_atom(kbdev, kctx,
+ &katom_retained_state);
+ spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);
+
+ return attr_state_changed;
+}
+
+bool kbasep_js_runpool_retain_ctx(struct kbase_device *kbdev,
+ struct kbase_context *kctx)
+{
+ unsigned long flags;
+ struct kbasep_js_device_data *js_devdata;
+ bool result;
+
+ KBASE_DEBUG_ASSERT(kbdev != NULL);
+ js_devdata = &kbdev->js_data;
+
+ /* KBASE_TRACE_ADD_REFCOUNT( kbdev, JS_RETAIN_CTX, kctx, NULL, 0,
+ kbasep_js_trace_get_refcnt(kbdev, kctx)); */
+ spin_lock_irqsave(&kbdev->hwaccess_lock, flags);
+ result = kbasep_js_runpool_retain_ctx_nolock(kbdev, kctx);
+ spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);
+
+ return result;
+}
+
+struct kbase_context *kbasep_js_runpool_lookup_ctx(struct kbase_device *kbdev,
+ int as_nr)
+{
+ unsigned long flags;
+ struct kbasep_js_device_data *js_devdata;
+ struct kbase_context *found_kctx = NULL;
+ struct kbasep_js_per_as_data *js_per_as_data;
+
+ KBASE_DEBUG_ASSERT(kbdev != NULL);
+ KBASE_DEBUG_ASSERT(0 <= as_nr && as_nr < BASE_MAX_NR_AS);
+ js_devdata = &kbdev->js_data;
+ js_per_as_data = &js_devdata->runpool_irq.per_as_data[as_nr];
+
+ spin_lock_irqsave(&kbdev->hwaccess_lock, flags);
+
+ found_kctx = js_per_as_data->kctx;
+
+ if (found_kctx != NULL)
+ ++(js_per_as_data->as_busy_refcount);
+
+ spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);
+
+ return found_kctx;
+}
+
+struct kbase_context *kbasep_js_runpool_lookup_ctx_nolock(
+ struct kbase_device *kbdev, int as_nr)
+{
+ struct kbasep_js_device_data *js_devdata;
+ struct kbase_context *found_kctx = NULL;
+ struct kbasep_js_per_as_data *js_per_as_data;
+
+ KBASE_DEBUG_ASSERT(kbdev != NULL);
+ KBASE_DEBUG_ASSERT(0 <= as_nr && as_nr < BASE_MAX_NR_AS);
+
+ lockdep_assert_held(&kbdev->hwaccess_lock);
+
+ js_devdata = &kbdev->js_data;
+ js_per_as_data = &js_devdata->runpool_irq.per_as_data[as_nr];
+
+ found_kctx = js_per_as_data->kctx;
+
+ if (found_kctx != NULL)
+ ++(js_per_as_data->as_busy_refcount);
+
+ return found_kctx;
+}
+
+/**
+ * kbasep_js_run_jobs_after_ctx_and_atom_release - Try running more jobs
+ * after releasing a context and/or atom
+ *
+ * @kbdev: The kbase_device to operate on
+ * @kctx: The kbase_context to operate on
+ * @katom_retained_state: Retained state from the atom
+ * @runpool_ctx_attr_change: True if the runpool context attributes have changed
+ *
+ * This collates a set of actions that must happen whilst hwaccess_lock is held.
+ *
+ * This includes running more jobs when:
+ * - The previously released kctx caused a ctx attribute change,
+ * - The released atom caused a ctx attribute change,
+ * - Slots were previously blocked due to affinity restrictions,
+ * - Submission during IRQ handling failed.
+ *
+ * Return: %KBASEP_JS_RELEASE_RESULT_SCHED_ALL if context attributes were
+ * changed. The caller should try scheduling all contexts
+ */
+static kbasep_js_release_result kbasep_js_run_jobs_after_ctx_and_atom_release(
+ struct kbase_device *kbdev,
+ struct kbase_context *kctx,
+ struct kbasep_js_atom_retained_state *katom_retained_state,
+ bool runpool_ctx_attr_change)
+{
+ struct kbasep_js_device_data *js_devdata;
+ kbasep_js_release_result result = 0;
+
+ KBASE_DEBUG_ASSERT(kbdev != NULL);
+ KBASE_DEBUG_ASSERT(kctx != NULL);
+ KBASE_DEBUG_ASSERT(katom_retained_state != NULL);
+ js_devdata = &kbdev->js_data;
+
+ lockdep_assert_held(&kctx->jctx.sched_info.ctx.jsctx_mutex);
+ lockdep_assert_held(&js_devdata->runpool_mutex);
+ lockdep_assert_held(&kbdev->hwaccess_lock);
+
+ if (js_devdata->nr_user_contexts_running != 0) {
+ bool retry_submit = false;
+ int retry_jobslot = 0;
+
+ if (katom_retained_state)
+ retry_submit = kbasep_js_get_atom_retry_submit_slot(
+ katom_retained_state, &retry_jobslot);
+
+ if (runpool_ctx_attr_change || retry_submit) {
+ /* A change in runpool ctx attributes might mean we can
+ * run more jobs than before */
+ result = KBASEP_JS_RELEASE_RESULT_SCHED_ALL;
+
+ KBASE_TRACE_ADD_SLOT(kbdev, JD_DONE_TRY_RUN_NEXT_JOB,
+ kctx, NULL, 0u, retry_jobslot);
+ }
+ }
+ return result;
+}
+
+/*
+ * Internal function to release the reference on a ctx and an atom's "retained
+ * state", only taking the runpool and as transaction mutexes
+ *
+ * This also starts more jobs running in the case of a ctx-attribute state
+ * change
+ *
+ * This does none of the followup actions for scheduling:
+ * - It does not schedule in a new context
+ * - It does not requeue or handle dying contexts
+ *
+ * For those tasks, just call kbasep_js_runpool_release_ctx() instead
+ *
+ * Requires:
+ * - Context is scheduled in, and kctx->as_nr matches kctx_as_nr
+ * - Context has a non-zero refcount
+ * - Caller holds js_kctx_info->ctx.jsctx_mutex
+ * - Caller holds js_devdata->runpool_mutex
+ */
+static kbasep_js_release_result kbasep_js_runpool_release_ctx_internal(
+ struct kbase_device *kbdev,
+ struct kbase_context *kctx,
+ struct kbasep_js_atom_retained_state *katom_retained_state)
+{
+ unsigned long flags;
+ struct kbasep_js_device_data *js_devdata;
+ struct kbasep_js_kctx_info *js_kctx_info;
+ union kbasep_js_policy *js_policy;
+ struct kbasep_js_per_as_data *js_per_as_data;
+
+ kbasep_js_release_result release_result = 0u;
+ bool runpool_ctx_attr_change = false;
+ int kctx_as_nr;
+ struct kbase_as *current_as;
+ int new_ref_count;
+
+ KBASE_DEBUG_ASSERT(kbdev != NULL);
+ KBASE_DEBUG_ASSERT(kctx != NULL);
+ js_kctx_info = &kctx->jctx.sched_info;
+ js_devdata = &kbdev->js_data;
+ js_policy = &kbdev->js_data.policy;
+
+ /* Ensure context really is scheduled in */
+ KBASE_DEBUG_ASSERT(kbase_ctx_flag(kctx, KCTX_SCHEDULED));
+
+ /* kctx->as_nr and js_per_as_data are only read from here. The caller's
+ * js_ctx_mutex provides a barrier that ensures they are up-to-date.
+ *
+ * They will not change whilst we're reading them, because the refcount
+ * is non-zero (and we ASSERT on that last fact).
+ */
+ kctx_as_nr = kctx->as_nr;
+ KBASE_DEBUG_ASSERT(kctx_as_nr != KBASEP_AS_NR_INVALID);
+ js_per_as_data = &js_devdata->runpool_irq.per_as_data[kctx_as_nr];
+ KBASE_DEBUG_ASSERT(js_per_as_data->as_busy_refcount > 0);
+
+ /*
+ * Transaction begins on AS and runpool_irq
+ *
+ * Assert about our calling contract
+ */
+ current_as = &kbdev->as[kctx_as_nr];
+ mutex_lock(&kbdev->pm.lock);
+ mutex_lock(&kbdev->mmu_hw_mutex);
+ spin_lock_irqsave(&kbdev->hwaccess_lock, flags);
+
+ KBASE_DEBUG_ASSERT(kctx_as_nr == kctx->as_nr);
+ KBASE_DEBUG_ASSERT(js_per_as_data->as_busy_refcount > 0);
+
+ /* Update refcount */
+ new_ref_count = --(js_per_as_data->as_busy_refcount);
+
+ /* Release the atom if it finished (i.e. wasn't soft-stopped) */
+ if (kbasep_js_has_atom_finished(katom_retained_state))
+ runpool_ctx_attr_change |= kbasep_js_ctx_attr_ctx_release_atom(
+ kbdev, kctx, katom_retained_state);
+
+ KBASE_TRACE_ADD_REFCOUNT(kbdev, JS_RELEASE_CTX, kctx, NULL, 0u,
+ new_ref_count);
+
+ if (new_ref_count == 1 && kbase_ctx_flag(kctx, KCTX_PRIVILEGED) &&
+ !kbase_pm_is_suspending(kbdev)) {
+ /* Context is kept scheduled into an address space even when
+ * there are no jobs, in this case we have to handle the
+ * situation where all jobs have been evicted from the GPU and
+ * submission is disabled.
+ *
+ * At this point we re-enable submission to allow further jobs
+ * to be executed
+ */
+ kbasep_js_set_submit_allowed(js_devdata, kctx);
+ }
+
+ /* Make a set of checks to see if the context should be scheduled out */
+ if (new_ref_count == 0 &&
+ (!kbasep_js_is_submit_allowed(js_devdata, kctx) ||
+ kbdev->pm.suspending)) {
+ /* Last reference, and we've been told to remove this context
+ * from the Run Pool */
+ dev_dbg(kbdev->dev, "JS: RunPool Remove Context %p because as_busy_refcount=%d, jobs=%d, allowed=%d",
+ kctx, new_ref_count, js_kctx_info->ctx.nr_jobs,
+ kbasep_js_is_submit_allowed(js_devdata, kctx));
+
+#if defined(CONFIG_MALI_GATOR_SUPPORT)
+ kbase_trace_mali_mmu_as_released(kctx->as_nr);
+#endif
+ kbase_tlstream_tl_nret_as_ctx(&kbdev->as[kctx->as_nr], kctx);
+
+ kbase_backend_release_ctx_irq(kbdev, kctx);
+
+ if (kbdev->hwaccess.active_kctx == kctx)
+ kbdev->hwaccess.active_kctx = NULL;
+
+ /* Ctx Attribute handling
+ *
+ * Releasing atom attributes must either happen before this, or
+ * after the KCTX_SCHEDULED flag is changed, otherwise we
+ * double-decrement the attributes
+ */
+ runpool_ctx_attr_change |=
+ kbasep_js_ctx_attr_runpool_release_ctx(kbdev, kctx);
+
+ /* Releasing the context and katom retained state can allow
+ * more jobs to run */
+ release_result |=
+ kbasep_js_run_jobs_after_ctx_and_atom_release(kbdev,
+ kctx, katom_retained_state,
+ runpool_ctx_attr_change);
+
+ /*
+ * Transaction ends on AS and runpool_irq:
+ *
+ * By this point, the AS-related data is now clear and ready
+ * for re-use.
+ *
+ * Since releases only occur once for each previous successful
+ * retain, and no more retains are allowed on this context, no
+ * other thread will be operating in this
+ * code whilst we are
+ */
+
+ spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);
+
+ kbase_backend_release_ctx_noirq(kbdev, kctx);
+
+ mutex_unlock(&kbdev->mmu_hw_mutex);
+ mutex_unlock(&kbdev->pm.lock);
+
+ /* Note: Don't reuse kctx_as_nr now */
+
+ /* Synchronize with any policy timers */
+ kbase_backend_ctx_count_changed(kbdev);
+
+ /* update book-keeping info */
+ kbase_ctx_flag_clear(kctx, KCTX_SCHEDULED);
+ /* Signal any waiter that the context is not scheduled, so is
+ * safe for termination - once the jsctx_mutex is also dropped,
+ * and jobs have finished. */
+ wake_up(&js_kctx_info->ctx.is_scheduled_wait);
+
+ /* Queue an action to occur after we've dropped the lock */
+ release_result |= KBASEP_JS_RELEASE_RESULT_WAS_DESCHEDULED |
+ KBASEP_JS_RELEASE_RESULT_SCHED_ALL;
+ } else {
+ kbasep_js_run_jobs_after_ctx_and_atom_release(kbdev, kctx,
+ katom_retained_state, runpool_ctx_attr_change);
+
+ spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);
+ mutex_unlock(&kbdev->mmu_hw_mutex);
+ mutex_unlock(&kbdev->pm.lock);
+ }
+
+ return release_result;
+}
+
+void kbasep_js_runpool_release_ctx_nolock(struct kbase_device *kbdev,
+ struct kbase_context *kctx)
+{
+ struct kbasep_js_atom_retained_state katom_retained_state;
+
+ /* Setup a dummy katom_retained_state */
+ kbasep_js_atom_retained_state_init_invalid(&katom_retained_state);
+
+ kbasep_js_runpool_release_ctx_internal(kbdev, kctx,
+ &katom_retained_state);
+}
+
+void kbasep_js_runpool_requeue_or_kill_ctx(struct kbase_device *kbdev,
+ struct kbase_context *kctx, bool has_pm_ref)
+{
+ struct kbasep_js_device_data *js_devdata;
+ union kbasep_js_policy *js_policy;
+ struct kbasep_js_kctx_info *js_kctx_info;
+
+ KBASE_DEBUG_ASSERT(kbdev != NULL);
+ KBASE_DEBUG_ASSERT(kctx != NULL);
+ js_kctx_info = &kctx->jctx.sched_info;
+ js_policy = &kbdev->js_data.policy;
+ js_devdata = &kbdev->js_data;
+
+ /* This is called if and only if you've detached the context from
+ * the Runpool or the Policy Queue, and not added it back to the Runpool
+ */
+ KBASE_DEBUG_ASSERT(!kbase_ctx_flag(kctx, KCTX_SCHEDULED));
+
+ if (kbase_ctx_flag(kctx, KCTX_DYING)) {
+ /* Dying: don't requeue, but kill all jobs on the context. This
+ * happens asynchronously */
+ dev_dbg(kbdev->dev,
+ "JS: ** Killing Context %p on RunPool Remove **", kctx);
+ kbase_js_foreach_ctx_job(kctx, &kbase_jd_cancel);
+ }
+}
+
+void kbasep_js_runpool_release_ctx_and_katom_retained_state(
+ struct kbase_device *kbdev, struct kbase_context *kctx,
+ struct kbasep_js_atom_retained_state *katom_retained_state)
+{
+ struct kbasep_js_device_data *js_devdata;
+ struct kbasep_js_kctx_info *js_kctx_info;
+ kbasep_js_release_result release_result;
+
+ KBASE_DEBUG_ASSERT(kbdev != NULL);
+ KBASE_DEBUG_ASSERT(kctx != NULL);
+ js_kctx_info = &kctx->jctx.sched_info;
+ js_devdata = &kbdev->js_data;
+
+ mutex_lock(&js_devdata->queue_mutex);
+ mutex_lock(&js_kctx_info->ctx.jsctx_mutex);
+ mutex_lock(&js_devdata->runpool_mutex);
+
+ release_result = kbasep_js_runpool_release_ctx_internal(kbdev, kctx,
+ katom_retained_state);
+
+ /* Drop the runpool mutex to allow requeuing kctx */
+ mutex_unlock(&js_devdata->runpool_mutex);
+
+ if ((release_result & KBASEP_JS_RELEASE_RESULT_WAS_DESCHEDULED) != 0u)
+ kbasep_js_runpool_requeue_or_kill_ctx(kbdev, kctx, true);
+
+ /* Drop the jsctx_mutex to allow scheduling in a new context */
+
+ mutex_unlock(&js_kctx_info->ctx.jsctx_mutex);
+ mutex_unlock(&js_devdata->queue_mutex);
+
+ if (release_result & KBASEP_JS_RELEASE_RESULT_SCHED_ALL)
+ kbase_js_sched_all(kbdev);
+}
+
+void kbasep_js_runpool_release_ctx(struct kbase_device *kbdev,
+ struct kbase_context *kctx)
+{
+ struct kbasep_js_atom_retained_state katom_retained_state;
+
+ kbasep_js_atom_retained_state_init_invalid(&katom_retained_state);
+
+ kbasep_js_runpool_release_ctx_and_katom_retained_state(kbdev, kctx,
+ &katom_retained_state);
+}
+
+/* Variant of kbasep_js_runpool_release_ctx() that doesn't call into
+ * kbase_js_sched_all() */
+static void kbasep_js_runpool_release_ctx_no_schedule(
+ struct kbase_device *kbdev, struct kbase_context *kctx)
+{
+ struct kbasep_js_device_data *js_devdata;
+ struct kbasep_js_kctx_info *js_kctx_info;
+ kbasep_js_release_result release_result;
+ struct kbasep_js_atom_retained_state katom_retained_state_struct;
+ struct kbasep_js_atom_retained_state *katom_retained_state =
+ &katom_retained_state_struct;
+
+ KBASE_DEBUG_ASSERT(kbdev != NULL);
+ KBASE_DEBUG_ASSERT(kctx != NULL);
+ js_kctx_info = &kctx->jctx.sched_info;
+ js_devdata = &kbdev->js_data;
+ kbasep_js_atom_retained_state_init_invalid(katom_retained_state);
+
+ mutex_lock(&js_kctx_info->ctx.jsctx_mutex);
+ mutex_lock(&js_devdata->runpool_mutex);
+
+ release_result = kbasep_js_runpool_release_ctx_internal(kbdev, kctx,
+ katom_retained_state);
+
+ /* Drop the runpool mutex to allow requeuing kctx */
+ mutex_unlock(&js_devdata->runpool_mutex);
+ if ((release_result & KBASEP_JS_RELEASE_RESULT_WAS_DESCHEDULED) != 0u)
+ kbasep_js_runpool_requeue_or_kill_ctx(kbdev, kctx, true);
+
+ /* Drop the jsctx_mutex to allow scheduling in a new context */
+ mutex_unlock(&js_kctx_info->ctx.jsctx_mutex);
+
+ /* NOTE: could return release_result if the caller would like to know
+ * whether it should schedule a new context, but currently no callers do
+ */
+}
+
+void kbase_js_set_timeouts(struct kbase_device *kbdev)
+{
+ lockdep_assert_held(&kbdev->hwaccess_lock);
+
+ kbase_backend_timeouts_changed(kbdev);
+}
+
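+/**
+ * kbasep_js_schedule_ctx - Attempt to schedule a context onto the GPU
+ * @kbdev: Device pointer
+ * @kctx:  Context to schedule
+ *
+ * Picks a free address space, assigns the context to it and marks the context
+ * as scheduled and active. The transaction is rolled back if the context is
+ * dying, the address space cannot be programmed, or a suspend is in progress.
+ *
+ * Return: true if the context is now scheduled in, false otherwise
+ */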
+static bool kbasep_js_schedule_ctx(struct kbase_device *kbdev,
+ struct kbase_context *kctx)
+{
+ struct kbasep_js_device_data *js_devdata;
+ struct kbasep_js_kctx_info *js_kctx_info;
+ union kbasep_js_policy *js_policy;
+ struct kbase_as *new_address_space = NULL;
+ unsigned long flags;
+ bool kctx_suspended = false;
+ int as_nr;
+
+ js_devdata = &kbdev->js_data;
+ js_policy = &kbdev->js_data.policy;
+ js_kctx_info = &kctx->jctx.sched_info;
+
+ /* Pick available address space for this context */
+ as_nr = kbase_backend_find_free_address_space(kbdev, kctx);
+
+ if (as_nr == KBASEP_AS_NR_INVALID)
+ return false; /* No address spaces currently available */
+
+ new_address_space = &kbdev->as[as_nr];
+
+ /*
+ * Atomic transaction on the Context and Run Pool begins
+ */
+ mutex_lock(&js_kctx_info->ctx.jsctx_mutex);
+ mutex_lock(&js_devdata->runpool_mutex);
+
+ /* Check to see if context is dying due to kbase_job_zap_context() */
+ if (kbase_ctx_flag(kctx, KCTX_DYING)) {
+ /* Roll back the transaction so far and return */
+ kbase_backend_release_free_address_space(kbdev, as_nr);
+
+ mutex_unlock(&js_devdata->runpool_mutex);
+ mutex_unlock(&js_kctx_info->ctx.jsctx_mutex);
+
+ return false;
+ }
+
+ KBASE_TRACE_ADD_REFCOUNT(kbdev, JS_TRY_SCHEDULE_HEAD_CTX, kctx, NULL,
+ 0u,
+ kbasep_js_trace_get_refcnt(kbdev, kctx));
+
+ kbase_ctx_flag_set(kctx, KCTX_SCHEDULED);
+
+ mutex_lock(&kbdev->mmu_hw_mutex);
+ spin_lock_irqsave(&kbdev->hwaccess_lock, flags);
+
+ /* Assign context to previously chosen address space */
+ if (!kbase_backend_use_ctx(kbdev, kctx, as_nr)) {
+ spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);
+ mutex_unlock(&kbdev->mmu_hw_mutex);
+ /* Roll back the transaction so far and return */
+ kbase_ctx_flag_clear(kctx, KCTX_SCHEDULED);
+
+ kbase_backend_release_free_address_space(kbdev, as_nr);
+
+ mutex_unlock(&js_devdata->runpool_mutex);
+
+ mutex_unlock(&js_kctx_info->ctx.jsctx_mutex);
+ return false;
+ }
+
+ kbdev->hwaccess.active_kctx = kctx;
+
+#if defined(CONFIG_MALI_GATOR_SUPPORT)
+ kbase_trace_mali_mmu_as_in_use(kctx->as_nr);
+#endif
+ kbase_tlstream_tl_ret_as_ctx(&kbdev->as[kctx->as_nr], kctx);
+
+ /* Cause any future waiter-on-termination to wait until the context is
+ * descheduled */
+ wake_up(&js_kctx_info->ctx.is_scheduled_wait);
+
+ /* Re-check for suspending: a suspend could've occurred, and all the
+ * contexts could've been removed from the runpool before we took this
+ * lock. In this case, we don't want to allow this context to run jobs,
+ * we just want it out immediately.
+ *
+ * The DMB required to read the suspend flag was issued recently as part
+ * of the hwaccess_lock locking. If a suspend occurs *after* that lock
+ * was taken (i.e. this condition doesn't execute), then the
+ * kbasep_js_suspend() code will clean up this context instead (it is
+ * called strictly after the suspend flag is set, and will wait for
+ * this lock to drop) */
+ if (kbase_pm_is_suspending(kbdev)) {
+ /* Cause it to leave at some later point */
+ bool retained;
+
+ retained = kbasep_js_runpool_retain_ctx_nolock(kbdev, kctx);
+ KBASE_DEBUG_ASSERT(retained);
+
+ kbasep_js_clear_submit_allowed(js_devdata, kctx);
+ kctx_suspended = true;
+ }
+
+ /* Transaction complete */
+ spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);
+ mutex_unlock(&kbdev->mmu_hw_mutex);
+
+ /* Synchronize with any policy timers */
+ kbase_backend_ctx_count_changed(kbdev);
+
+ mutex_unlock(&js_devdata->runpool_mutex);
+ mutex_unlock(&js_kctx_info->ctx.jsctx_mutex);
+ /* Note: after this point, the context could potentially get scheduled
+ * out immediately */
+
+ if (kctx_suspended) {
+ /* Finishing forcing out the context due to a suspend. Use a
+ * variant of kbasep_js_runpool_release_ctx() that doesn't
+ * schedule a new context, to prevent a risk of recursion back
+ * into this function */
+ kbasep_js_runpool_release_ctx_no_schedule(kbdev, kctx);
+ return false;
+ }
+ return true;
+}
+
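+/**
+ * kbase_js_use_ctx - Make a context available for job submission
+ * @kbdev: Device pointer
+ * @kctx:  Context to use
+ *
+ * If the context already holds an address space it is simply marked as the
+ * active context; otherwise an attempt is made to schedule it in.
+ *
+ * Return: true if the context is scheduled and can be used, false otherwise
+ */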
+static bool kbase_js_use_ctx(struct kbase_device *kbdev,
+ struct kbase_context *kctx)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&kbdev->hwaccess_lock, flags);
+
+ if (kbase_backend_use_ctx_sched(kbdev, kctx)) {
+ /* Context already has ASID - mark as active */
+ kbdev->hwaccess.active_kctx = kctx;
+ spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);
+ return true; /* Context already scheduled */
+ }
+
+ spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);
+
+ return kbasep_js_schedule_ctx(kbdev, kctx);
+}
+
+void kbasep_js_schedule_privileged_ctx(struct kbase_device *kbdev,
+ struct kbase_context *kctx)
+{
+ struct kbasep_js_kctx_info *js_kctx_info;
+ struct kbasep_js_device_data *js_devdata;
+ bool is_scheduled;
+
+ KBASE_DEBUG_ASSERT(kbdev != NULL);
+ KBASE_DEBUG_ASSERT(kctx != NULL);
+
+ js_devdata = &kbdev->js_data;
+ js_kctx_info = &kctx->jctx.sched_info;
+
+ /* This must never be attempted whilst suspending - i.e. it should only
+ * happen in response to a syscall from a user-space thread */
+ BUG_ON(kbase_pm_is_suspending(kbdev));
+
+ mutex_lock(&js_devdata->queue_mutex);
+ mutex_lock(&js_kctx_info->ctx.jsctx_mutex);
+
+ /* Mark the context as privileged */
+ kbase_ctx_flag_set(kctx, KCTX_PRIVILEGED);
+
+ is_scheduled = kbase_ctx_flag(kctx, KCTX_SCHEDULED);
+ if (!is_scheduled) {
+ /* Add the context to the pullable list */
+ if (kbase_js_ctx_list_add_pullable_head(kbdev, kctx, 0))
+ kbase_js_sync_timers(kbdev);
+
+ /* Fast-starting requires the jsctx_mutex to be dropped,
+ * because it works on multiple ctxs */
+ mutex_unlock(&js_kctx_info->ctx.jsctx_mutex);
+ mutex_unlock(&js_devdata->queue_mutex);
+
+ /* Try to schedule the context in */
+ kbase_js_sched_all(kbdev);
+
+ /* Wait for the context to be scheduled in */
+ wait_event(kctx->jctx.sched_info.ctx.is_scheduled_wait,
+ kbase_ctx_flag(kctx, KCTX_SCHEDULED));
+ } else {
+ /* Already scheduled in - We need to retain it to keep the
+ * corresponding address space */
+ kbasep_js_runpool_retain_ctx(kbdev, kctx);
+ mutex_unlock(&js_kctx_info->ctx.jsctx_mutex);
+ mutex_unlock(&js_devdata->queue_mutex);
+ }
+}
+KBASE_EXPORT_TEST_API(kbasep_js_schedule_privileged_ctx);
+
+void kbasep_js_release_privileged_ctx(struct kbase_device *kbdev,
+ struct kbase_context *kctx)
+{
+ struct kbasep_js_kctx_info *js_kctx_info;
+
+ KBASE_DEBUG_ASSERT(kctx != NULL);
+ js_kctx_info = &kctx->jctx.sched_info;
+
+ /* We don't need to use the address space anymore */
+ mutex_lock(&js_kctx_info->ctx.jsctx_mutex);
+ kbase_ctx_flag_clear(kctx, KCTX_PRIVILEGED);
+ mutex_unlock(&js_kctx_info->ctx.jsctx_mutex);
+
+ /* Release the context - it will be scheduled out */
+ kbasep_js_runpool_release_ctx(kbdev, kctx);
+
+ kbase_js_sched_all(kbdev);
+}
+KBASE_EXPORT_TEST_API(kbasep_js_release_privileged_ctx);
+
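+/**
+ * kbasep_js_suspend - Suspend the job scheduler
+ * @kbdev: Device pointer
+ *
+ * Submission is disabled for all contexts, and every context currently
+ * holding an address space is retained and then released, ensuring each one
+ * is eventually pulled out of the runpool.
+ */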
+void kbasep_js_suspend(struct kbase_device *kbdev)
+{
+ unsigned long flags;
+ struct kbasep_js_device_data *js_devdata;
+ int i;
+ u16 retained = 0u;
+ int nr_privileged_ctx = 0;
+
+ KBASE_DEBUG_ASSERT(kbdev);
+ KBASE_DEBUG_ASSERT(kbase_pm_is_suspending(kbdev));
+ js_devdata = &kbdev->js_data;
+
+ spin_lock_irqsave(&kbdev->hwaccess_lock, flags);
+
+ /* Prevent all contexts from submitting */
+ js_devdata->runpool_irq.submit_allowed = 0;
+
+ /* Retain each of the contexts, so we can cause them to leave even if
+  * they had no refcount to begin with */
+ for (i = BASE_MAX_NR_AS - 1; i >= 0; --i) {
+ struct kbasep_js_per_as_data *js_per_as_data =
+ &js_devdata->runpool_irq.per_as_data[i];
+ struct kbase_context *kctx = js_per_as_data->kctx;
+
+ retained = retained << 1;
+
+ if (kctx) {
+ ++(js_per_as_data->as_busy_refcount);
+ retained |= 1u;
+ /* We can only cope with up to 1 privileged context -
+ * the instrumented context. It'll be suspended by
+ * disabling instrumentation */
+ if (kbase_ctx_flag(kctx, KCTX_PRIVILEGED)) {
+ ++nr_privileged_ctx;
+ WARN_ON(nr_privileged_ctx != 1);
+ }
+ }
+ }
+ CSTD_UNUSED(nr_privileged_ctx);
+
+ spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);
+
+ /* De-ref the previous retain to ensure each context gets pulled out
+ * sometime later. */
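+ /* Bit i of 'retained' now corresponds to address space i, because the
+  * retain loop above filled the mask from the highest address space
+  * downwards. */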
+ for (i = 0;
+ i < BASE_MAX_NR_AS;
+ ++i, retained = retained >> 1) {
+ struct kbasep_js_per_as_data *js_per_as_data =
+ &js_devdata->runpool_irq.per_as_data[i];
+ struct kbase_context *kctx = js_per_as_data->kctx;
+
+ if (retained & 1u)
+ kbasep_js_runpool_release_ctx(kbdev, kctx);
+ }
+
+ /* Caller must wait for all Power Manager active references to be
+ * dropped */
+}
+
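+/**
+ * kbasep_js_resume - Resume the job scheduler after a suspend
+ * @kbdev: Device pointer
+ *
+ * Any context on an unpullable list that has become pullable is moved back to
+ * the pullable list, and atom processing is restarted.
+ */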
+void kbasep_js_resume(struct kbase_device *kbdev)
+{
+ struct kbasep_js_device_data *js_devdata;
+ int js;
+
+ KBASE_DEBUG_ASSERT(kbdev);
+ js_devdata = &kbdev->js_data;
+ KBASE_DEBUG_ASSERT(!kbase_pm_is_suspending(kbdev));
+
+ mutex_lock(&js_devdata->queue_mutex);
+ for (js = 0; js < kbdev->gpu_props.num_job_slots; js++) {
+ struct kbase_context *kctx, *n;
+
+ list_for_each_entry_safe(kctx, n,
+ &kbdev->js_data.ctx_list_unpullable[js],
+ jctx.sched_info.ctx.ctx_list_entry[js]) {
+ struct kbasep_js_kctx_info *js_kctx_info;
+ unsigned long flags;
+ bool timer_sync = false;
+
+ js_kctx_info = &kctx->jctx.sched_info;
+
+ mutex_lock(&js_kctx_info->ctx.jsctx_mutex);
+ mutex_lock(&js_devdata->runpool_mutex);
+ spin_lock_irqsave(&kbdev->hwaccess_lock, flags);
+
+ if (!kbase_ctx_flag(kctx, KCTX_SCHEDULED) &&
+ kbase_js_ctx_pullable(kctx, js, false))
+ timer_sync =
+ kbase_js_ctx_list_add_pullable_nolock(
+ kbdev, kctx, js);
+ spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);
+ if (timer_sync)
+ kbase_backend_ctx_count_changed(kbdev);
+ mutex_unlock(&js_devdata->runpool_mutex);
+ mutex_unlock(&js_kctx_info->ctx.jsctx_mutex);
+ }
+ }
+ mutex_unlock(&js_devdata->queue_mutex);
+
+ /* Restart atom processing */
+ kbase_js_sched_all(kbdev);
+
+ /* JS Resume complete */
+}
+
+bool kbase_js_is_atom_valid(struct kbase_device *kbdev,
+ struct kbase_jd_atom *katom)
+{
+ if ((katom->core_req & BASE_JD_REQ_FS) &&
+ (katom->core_req & (BASE_JD_REQ_CS | BASE_JD_REQ_ONLY_COMPUTE |
+ BASE_JD_REQ_T)))
+ return false;
+
+ if (kbase_hw_has_issue(kbdev, BASE_HW_ISSUE_8987) &&
+ (katom->core_req & BASE_JD_REQ_ONLY_COMPUTE) &&
+ (katom->core_req & (BASE_JD_REQ_CS | BASE_JD_REQ_T)))
+ return false;
+
+ return true;
+}
+
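+/**
+ * kbase_js_get_slot - Map an atom's core requirements to a job slot
+ * @kbdev: Device pointer
+ * @katom: Atom to map
+ *
+ * Fragment jobs go to slot 0; compute-only jobs go to slot 2 when targeting
+ * the second core group or when BASE_HW_ISSUE_8987 is present; everything
+ * else goes to slot 1.
+ *
+ * Return: Job slot number for the atom
+ */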
+static int kbase_js_get_slot(struct kbase_device *kbdev,
+ struct kbase_jd_atom *katom)
+{
+ if (katom->core_req & BASE_JD_REQ_FS)
+ return 0;
+
+ if (katom->core_req & BASE_JD_REQ_ONLY_COMPUTE) {
+ if (katom->device_nr == 1 &&
+ kbdev->gpu_props.num_core_groups == 2)
+ return 2;
+ if (kbase_hw_has_issue(kbdev, BASE_HW_ISSUE_8987))
+ return 2;
+ }
+
+ return 1;
+}
+
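+/**
+ * kbase_js_dep_resolved_submit - Add an atom with resolved dependencies to
+ *                                the job scheduler
+ * @kctx:  Context pointer
+ * @katom: Atom to add
+ *
+ * Atoms still blocked on a cross-slot dependency are queued on the x_dep
+ * list; all other atoms are added to the per-context runnable tree.
+ *
+ * Return: true if the slot previously had nothing to pull, i.e. the caller
+ *         should enqueue the context on that slot
+ */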
+bool kbase_js_dep_resolved_submit(struct kbase_context *kctx,
+ struct kbase_jd_atom *katom)
+{
+ bool enqueue_required;
+
+ katom->slot_nr = kbase_js_get_slot(kctx->kbdev, katom);
+
+ lockdep_assert_held(&kctx->kbdev->hwaccess_lock);
+ lockdep_assert_held(&kctx->jctx.lock);
+
+ /* If slot will transition from unpullable to pullable then add to
+ * pullable list */
+ if (jsctx_rb_none_to_pull(kctx, katom->slot_nr)) {
+ enqueue_required = true;
+ } else {
+ enqueue_required = false;
+ }
+ if ((katom->atom_flags & KBASE_KATOM_FLAG_X_DEP_BLOCKED) ||
+ (katom->pre_dep && (katom->pre_dep->atom_flags &
+ KBASE_KATOM_FLAG_JSCTX_IN_X_DEP_LIST))) {
+ int prio = katom->sched_priority;
+ int js = katom->slot_nr;
+ struct jsctx_queue *queue = &kctx->jsctx_queue[prio][js];
+
+ list_add_tail(&katom->queue, &queue->x_dep_head);
+ katom->atom_flags |= KBASE_KATOM_FLAG_JSCTX_IN_X_DEP_LIST;
+ enqueue_required = false;
+ } else {
+ /* Check if there are lower priority jobs to soft stop */
+ kbase_job_slot_ctx_priority_check_locked(kctx, katom);
+
+ /* Add atom to ring buffer. */
+ jsctx_tree_add(kctx, katom);
+ katom->atom_flags |= KBASE_KATOM_FLAG_JSCTX_IN_TREE;
+ }
+
+ return enqueue_required;
+}
+
+/**
+ * kbase_js_move_to_tree - Move atom (and any dependent atoms) to the
+ * runnable_tree, ready for execution
+ * @katom: Atom to submit
+ *
+ * It is assumed that @katom does not have KBASE_KATOM_FLAG_X_DEP_BLOCKED set,
+ * but is still present in the x_dep list. If @katom has a same-slot dependent
+ * atom then that atom (and any dependents) will also be moved.
+ */
+static void kbase_js_move_to_tree(struct kbase_jd_atom *katom)
+{
+ lockdep_assert_held(&katom->kctx->kbdev->hwaccess_lock);
+
+ while (katom) {
+ WARN_ON(!(katom->atom_flags &
+ KBASE_KATOM_FLAG_JSCTX_IN_X_DEP_LIST));
+
+ if (!(katom->atom_flags & KBASE_KATOM_FLAG_X_DEP_BLOCKED)) {
+ list_del(&katom->queue);
+ katom->atom_flags &=
+ ~KBASE_KATOM_FLAG_JSCTX_IN_X_DEP_LIST;
+ jsctx_tree_add(katom->kctx, katom);
+ katom->atom_flags |= KBASE_KATOM_FLAG_JSCTX_IN_TREE;
+ } else {
+ break;
+ }
+
+ katom = katom->post_dep;
+ }
+}
+
+
+/**
+ * kbase_js_evict_deps - Evict dependencies of a failed atom.
+ * @kctx: Context pointer
+ * @katom: Pointer to the atom that has failed.
+ * @js: The job slot the katom was run on.
+ * @prio: Priority of the katom.
+ *
+ * Remove all post dependencies of an atom from the context ringbuffers.
+ *
+ * The original atom's event_code will be propagated to all dependent atoms.
+ *
+ * Context: Caller must hold the HW access lock
+ */
+static void kbase_js_evict_deps(struct kbase_context *kctx,
+ struct kbase_jd_atom *katom, int js, int prio)
+{
+ struct kbase_jd_atom *x_dep = katom->x_post_dep;
+ struct kbase_jd_atom *next_katom = katom->post_dep;
+
+ lockdep_assert_held(&kctx->kbdev->hwaccess_lock);
+
+ if (next_katom) {
+ KBASE_DEBUG_ASSERT(next_katom->status !=
+ KBASE_JD_ATOM_STATE_HW_COMPLETED);
+ next_katom->will_fail_event_code = katom->event_code;
+ }
+
+ /* Has a cross-slot dependency. */
+ if (x_dep && (x_dep->atom_flags & (KBASE_KATOM_FLAG_JSCTX_IN_TREE |
+ KBASE_KATOM_FLAG_JSCTX_IN_X_DEP_LIST))) {
+ /* Remove the dependency. */
+ x_dep->atom_flags &= ~KBASE_KATOM_FLAG_X_DEP_BLOCKED;
+
+ /* Fail if it had a data dependency. */
+ if (x_dep->atom_flags & KBASE_KATOM_FLAG_FAIL_BLOCKER) {
+ x_dep->will_fail_event_code = katom->event_code;
+ }
+ if (x_dep->atom_flags & KBASE_KATOM_FLAG_JSCTX_IN_X_DEP_LIST)
+ kbase_js_move_to_tree(x_dep);
+ }
+}
+
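+/**
+ * kbase_js_pull - Pull the next runnable atom for a slot from a context
+ * @kctx: Context pointer
+ * @js:   Job slot to pull from
+ *
+ * Returns NULL if submission is not allowed, a suspend is in progress, the
+ * next atom is blocked, or its dependencies prevent it from running yet. On
+ * success the context's pull counts and runpool refcount are updated before
+ * the atom is returned.
+ *
+ * Return: Atom to run, or NULL if nothing can be pulled
+ */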
+struct kbase_jd_atom *kbase_js_pull(struct kbase_context *kctx, int js)
+{
+ struct kbase_jd_atom *katom;
+ struct kbasep_js_device_data *js_devdata;
+ int pulled;
+
+ KBASE_DEBUG_ASSERT(kctx);
+
+ js_devdata = &kctx->kbdev->js_data;
+ lockdep_assert_held(&kctx->kbdev->hwaccess_lock);
+
+ if (!kbasep_js_is_submit_allowed(js_devdata, kctx))
+ return NULL;
+ if (kbase_pm_is_suspending(kctx->kbdev))
+ return NULL;
+
+ katom = jsctx_rb_peek(kctx, js);
+ if (!katom)
+ return NULL;
+
+ if (atomic_read(&katom->blocked))
+ return NULL;
+
+ /* Due to ordering restrictions when unpulling atoms on failure, we do
+ * not allow multiple runs of fail-dep atoms from the same context to be
+ * present on the same slot */
+ if (katom->pre_dep && atomic_read(&kctx->atoms_pulled_slot[js])) {
+ struct kbase_jd_atom *prev_atom =
+ kbase_backend_inspect_tail(kctx->kbdev, js);
+
+ if (prev_atom && prev_atom->kctx != kctx)
+ return NULL;
+ }
+
+ if (katom->atom_flags & KBASE_KATOM_FLAG_X_DEP_BLOCKED) {
+ if (katom->x_pre_dep->gpu_rb_state ==
+ KBASE_ATOM_GPU_RB_NOT_IN_SLOT_RB ||
+ katom->x_pre_dep->will_fail_event_code)
+ return NULL;
+ if ((katom->atom_flags & KBASE_KATOM_FLAG_FAIL_BLOCKER) &&
+ kbase_backend_nr_atoms_on_slot(kctx->kbdev, js))
+ return NULL;
+ }
+
+ kbase_ctx_flag_set(kctx, KCTX_PULLED);
+
+ pulled = atomic_inc_return(&kctx->atoms_pulled);
+ if (pulled == 1 && !kctx->slots_pullable) {
+ WARN_ON(kbase_ctx_flag(kctx, KCTX_RUNNABLE_REF));
+ kbase_ctx_flag_set(kctx, KCTX_RUNNABLE_REF);
+ atomic_inc(&kctx->kbdev->js_data.nr_contexts_runnable);
+ }
+ atomic_inc(&kctx->atoms_pulled_slot[katom->slot_nr]);
+ jsctx_rb_pull(kctx, katom);
+
+ kbasep_js_runpool_retain_ctx_nolock(kctx->kbdev, kctx);
+ katom->atom_flags |= KBASE_KATOM_FLAG_HOLDING_CTX_REF;
+
+ katom->sched_info.cfs.ticks = 0;
+
+ return katom;
+}
+
+
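+/**
+ * js_return_worker - Worker run after an atom has been returned to the
+ *                    scheduler (e.g. after a soft-stop)
+ * @data: Work item embedded in the atom
+ *
+ * Drops the context's pull counts, unblocks re-submission of the atom, moves
+ * the context between the pullable/unpullable lists as required, idles the
+ * context if it no longer has pulled atoms, and releases the runpool
+ * reference taken when the atom was pulled.
+ */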
+static void js_return_worker(struct work_struct *data)
+{
+ struct kbase_jd_atom *katom = container_of(data, struct kbase_jd_atom,
+ work);
+ struct kbase_context *kctx = katom->kctx;
+ struct kbase_device *kbdev = kctx->kbdev;
+ struct kbasep_js_device_data *js_devdata = &kbdev->js_data;
+ struct kbasep_js_kctx_info *js_kctx_info = &kctx->jctx.sched_info;
+ struct kbasep_js_atom_retained_state retained_state;
+ int js = katom->slot_nr;
+ bool timer_sync = false;
+ bool context_idle = false;
+ unsigned long flags;
+ base_jd_core_req core_req = katom->core_req;
+ u64 affinity = katom->affinity;
+ enum kbase_atom_coreref_state coreref_state = katom->coreref_state;
+
+ kbase_tlstream_tl_event_atom_softstop_ex(katom);
+
+ kbase_backend_complete_wq(kbdev, katom);
+
+ if (kbase_hw_has_issue(kbdev, BASE_HW_ISSUE_8316))
+ kbase_as_poking_timer_release_atom(kbdev, kctx, katom);
+
+ kbasep_js_atom_retained_state_copy(&retained_state, katom);
+
+ mutex_lock(&js_devdata->queue_mutex);
+ mutex_lock(&js_kctx_info->ctx.jsctx_mutex);
+
+ atomic_dec(&kctx->atoms_pulled);
+ atomic_dec(&kctx->atoms_pulled_slot[js]);
+
+ atomic_dec(&katom->blocked);
+
+ spin_lock_irqsave(&kbdev->hwaccess_lock, flags);
+
+ if (!atomic_read(&kctx->atoms_pulled_slot[js]) &&
+ jsctx_rb_none_to_pull(kctx, js))
+ timer_sync |= kbase_js_ctx_list_remove_nolock(kbdev, kctx, js);
+
+ if (!atomic_read(&kctx->atoms_pulled)) {
+ if (!kctx->slots_pullable) {
+ WARN_ON(!kbase_ctx_flag(kctx, KCTX_RUNNABLE_REF));
+ kbase_ctx_flag_clear(kctx, KCTX_RUNNABLE_REF);
+ atomic_dec(&kbdev->js_data.nr_contexts_runnable);
+ timer_sync = true;
+ }
+
+ if (kctx->as_nr != KBASEP_AS_NR_INVALID &&
+ !kbase_ctx_flag(kctx, KCTX_DYING)) {
+ int num_slots = kbdev->gpu_props.num_job_slots;
+ int slot;
+
+ if (!kbasep_js_is_submit_allowed(js_devdata, kctx))
+ kbasep_js_set_submit_allowed(js_devdata, kctx);
+
+ for (slot = 0; slot < num_slots; slot++) {
+ if (kbase_js_ctx_pullable(kctx, slot, true))
+ timer_sync |=
+ kbase_js_ctx_list_add_pullable_nolock(
+ kbdev, kctx, slot);
+ }
+ }
+
+ kbase_jm_idle_ctx(kbdev, kctx);
+
+ context_idle = true;
+ }
+
+ spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);
+
+ if (context_idle) {
+ WARN_ON(!kbase_ctx_flag(kctx, KCTX_ACTIVE));
+ kbase_ctx_flag_clear(kctx, KCTX_ACTIVE);
+ kbase_pm_context_idle(kbdev);
+ }
+
+ if (timer_sync)
+ kbase_js_sync_timers(kbdev);
+
+ mutex_unlock(&js_kctx_info->ctx.jsctx_mutex);
+ mutex_unlock(&js_devdata->queue_mutex);
+
+ katom->atom_flags &= ~KBASE_KATOM_FLAG_HOLDING_CTX_REF;
+ kbasep_js_runpool_release_ctx_and_katom_retained_state(kbdev, kctx,
+ &retained_state);
+
+ kbase_js_sched_all(kbdev);
+
+ kbase_backend_complete_wq_post_sched(kbdev, core_req, affinity,
+ coreref_state);
+}
+
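+/**
+ * kbase_js_unpull - Return a pulled atom to the context's ringbuffer
+ * @kctx:  Context pointer
+ * @katom: Atom to return
+ *
+ * The remaining clean-up is deferred to js_return_worker() on the context's
+ * job-done workqueue; re-submission of the atom is blocked until that worker
+ * has run.
+ */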
+void kbase_js_unpull(struct kbase_context *kctx, struct kbase_jd_atom *katom)
+{
+ lockdep_assert_held(&kctx->kbdev->hwaccess_lock);
+
+ jsctx_rb_unpull(kctx, katom);
+
+ WARN_ON(work_pending(&katom->work));
+
+ /* Block re-submission until workqueue has run */
+ atomic_inc(&katom->blocked);
+
+ kbase_job_check_leave_disjoint(kctx->kbdev, katom);
+
+ KBASE_DEBUG_ASSERT(0 == object_is_on_stack(&katom->work));
+ INIT_WORK(&katom->work, js_return_worker);
+ queue_work(kctx->jctx.job_done_wq, &katom->work);
+}
+
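+/**
+ * kbase_js_complete_atom_wq - Workqueue-context part of atom completion
+ * @kctx:  Context pointer
+ * @katom: Atom that has completed
+ *
+ * Updates the context's pull counts, moves the context between the per-slot
+ * lists, and re-enables submission if it was disabled and no atoms remain.
+ *
+ * Return: true if this was the last pulled atom for the context, i.e. the
+ *         context is now idle
+ */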
+bool kbase_js_complete_atom_wq(struct kbase_context *kctx,
+ struct kbase_jd_atom *katom)
+{
+ struct kbasep_js_kctx_info *js_kctx_info;
+ struct kbasep_js_device_data *js_devdata;
+ struct kbase_device *kbdev;
+ unsigned long flags;
+ bool timer_sync = false;
+ int atom_slot;
+ bool context_idle = false;
+
+ kbdev = kctx->kbdev;
+ atom_slot = katom->slot_nr;
+
+ js_kctx_info = &kctx->jctx.sched_info;
+ js_devdata = &kbdev->js_data;
+
+ lockdep_assert_held(&js_kctx_info->ctx.jsctx_mutex);
+
+ mutex_lock(&js_devdata->runpool_mutex);
+ spin_lock_irqsave(&kbdev->hwaccess_lock, flags);
+
+ if (katom->atom_flags & KBASE_KATOM_FLAG_JSCTX_IN_TREE) {
+ context_idle = !atomic_dec_return(&kctx->atoms_pulled);
+ atomic_dec(&kctx->atoms_pulled_slot[atom_slot]);
+
+ if (!atomic_read(&kctx->atoms_pulled) &&
+ !kctx->slots_pullable) {
+ WARN_ON(!kbase_ctx_flag(kctx, KCTX_RUNNABLE_REF));
+ kbase_ctx_flag_clear(kctx, KCTX_RUNNABLE_REF);
+ atomic_dec(&kbdev->js_data.nr_contexts_runnable);
+ timer_sync = true;
+ }
+ }
+ WARN_ON(!(katom->atom_flags & KBASE_KATOM_FLAG_JSCTX_IN_TREE));
+
+ if (!atomic_read(&kctx->atoms_pulled_slot[atom_slot]) &&
+ jsctx_rb_none_to_pull(kctx, atom_slot)) {
+ if (!list_empty(
+ &kctx->jctx.sched_info.ctx.ctx_list_entry[atom_slot]))
+ timer_sync |= kbase_js_ctx_list_remove_nolock(
+ kctx->kbdev, kctx, atom_slot);
+ }
+
+ /*
+ * If submission is disabled on this context (most likely due to an
+ * atom failure) and there are now no atoms left in the system then
+ * re-enable submission so that context can be scheduled again.
+ */
+ if (!kbasep_js_is_submit_allowed(js_devdata, kctx) &&
+ !atomic_read(&kctx->atoms_pulled) &&
+ !kbase_ctx_flag(kctx, KCTX_DYING)) {
+ int js;
+
+ kbasep_js_set_submit_allowed(js_devdata, kctx);
+
+ for (js = 0; js < kbdev->gpu_props.num_job_slots; js++) {
+ if (kbase_js_ctx_pullable(kctx, js, true))
+ timer_sync |=
+ kbase_js_ctx_list_add_pullable_nolock(
+ kbdev, kctx, js);
+ }
+ } else if (katom->x_post_dep &&
+ kbasep_js_is_submit_allowed(js_devdata, kctx)) {
+ int js;
+
+ for (js = 0; js < kbdev->gpu_props.num_job_slots; js++) {
+ if (kbase_js_ctx_pullable(kctx, js, true))
+ timer_sync |=
+ kbase_js_ctx_list_add_pullable_nolock(
+ kbdev, kctx, js);
+ }
+ }
+
+ /* Mark context as inactive. The pm reference will be dropped later in
+ * jd_done_worker().
+ */
+ if (context_idle)
+ kbase_ctx_flag_clear(kctx, KCTX_ACTIVE);
+
+ spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);
+ if (timer_sync)
+ kbase_backend_ctx_count_changed(kbdev);
+ mutex_unlock(&js_devdata->runpool_mutex);
+
+ return context_idle;
+}
+
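+/**
+ * kbase_js_complete_atom - Complete an atom in HW-access context
+ * @katom:         Atom that has completed
+ * @end_timestamp: Completion time, or NULL if the atom never ran on the HW
+ *
+ * Records the atom's result and time spent, reports completion to the job
+ * dispatcher, and unblocks any cross-slot dependent atom.
+ *
+ * Return: The dependent atom to submit next, or NULL if there is none
+ */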
+struct kbase_jd_atom *kbase_js_complete_atom(struct kbase_jd_atom *katom,
+ ktime_t *end_timestamp)
+{
+ u64 microseconds_spent = 0;
+ struct kbase_device *kbdev;
+ struct kbase_context *kctx = katom->kctx;
+ union kbasep_js_policy *js_policy;
+ struct kbase_jd_atom *x_dep = katom->x_post_dep;
+
+ kbdev = kctx->kbdev;
+
+ js_policy = &kbdev->js_data.policy;
+
+ lockdep_assert_held(&kctx->kbdev->hwaccess_lock);
+
+ if (katom->will_fail_event_code)
+ katom->event_code = katom->will_fail_event_code;
+
+ katom->status = KBASE_JD_ATOM_STATE_HW_COMPLETED;
+
+ if (katom->event_code != BASE_JD_EVENT_DONE) {
+ kbase_js_evict_deps(kctx, katom, katom->slot_nr,
+ katom->sched_priority);
+ }
+
+#if defined(CONFIG_MALI_GATOR_SUPPORT)
+ kbase_trace_mali_job_slots_event(GATOR_MAKE_EVENT(GATOR_JOB_SLOT_STOP,
+ katom->slot_nr), NULL, 0);
+#endif
+
+ /* Calculate the job's time used */
+ if (end_timestamp != NULL) {
+ /* Only calculate this for jobs that really ran on the HW (e.g. jobs
+  * removed from 'next' never actually ran, so really did take zero
+  * time) */
+ ktime_t tick_diff = ktime_sub(*end_timestamp,
+ katom->start_timestamp);
+
+ microseconds_spent = ktime_to_ns(tick_diff);
+
+ do_div(microseconds_spent, 1000);
+
+ /* Round up time spent to the minimum timer resolution */
+ if (microseconds_spent < KBASEP_JS_TICK_RESOLUTION_US)
+ microseconds_spent = KBASEP_JS_TICK_RESOLUTION_US;
+ }
+
+ /* Log the result of the job (completion status, and time spent). */
+ kbasep_js_policy_log_job_result(js_policy, katom, microseconds_spent);
+
+ kbase_jd_done(katom, katom->slot_nr, end_timestamp, 0);
+
+ /* Unblock cross dependency if present */
+ if (x_dep && (katom->event_code == BASE_JD_EVENT_DONE ||
+ !(x_dep->atom_flags & KBASE_KATOM_FLAG_FAIL_BLOCKER)) &&
+ (x_dep->atom_flags & KBASE_KATOM_FLAG_X_DEP_BLOCKED)) {
+ bool was_pullable = kbase_js_ctx_pullable(kctx, x_dep->slot_nr,
+ false);
+ x_dep->atom_flags &= ~KBASE_KATOM_FLAG_X_DEP_BLOCKED;
+ kbase_js_move_to_tree(x_dep);
+ if (!was_pullable && kbase_js_ctx_pullable(kctx, x_dep->slot_nr,
+ false))
+ kbase_js_ctx_list_add_pullable_nolock(kbdev, kctx,
+ x_dep->slot_nr);
+
+ if (x_dep->atom_flags & KBASE_KATOM_FLAG_JSCTX_IN_TREE)
+ return x_dep;
+ }
+
+ return NULL;
+}
+
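+/**
+ * kbase_js_sched - Main job scheduling loop
+ * @kbdev:   Device pointer
+ * @js_mask: Mask of job slots to schedule on
+ *
+ * For each slot in @js_mask, repeatedly pop the head of the pullable list,
+ * take a PM active reference for the context if needed, bind the context to
+ * an address space and kick the job manager. Contexts that cannot be used or
+ * yield no atoms are pushed back onto the appropriate list, and a slot is
+ * dropped from the mask once no more jobs can be submitted on it.
+ */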
+void kbase_js_sched(struct kbase_device *kbdev, int js_mask)
+{
+ struct kbasep_js_device_data *js_devdata;
+ bool timer_sync = false;
+
+ js_devdata = &kbdev->js_data;
+
+ down(&js_devdata->schedule_sem);
+ mutex_lock(&js_devdata->queue_mutex);
+
+ while (js_mask) {
+ int js;
+
+ js = ffs(js_mask) - 1;
+
+ while (1) {
+ struct kbase_context *kctx;
+ unsigned long flags;
+ bool context_idle = false;
+
+ kctx = kbase_js_ctx_list_pop_head(kbdev, js);
+
+ if (!kctx) {
+ js_mask &= ~(1 << js);
+ break; /* No contexts on pullable list */
+ }
+
+ if (!kbase_ctx_flag(kctx, KCTX_ACTIVE)) {
+ context_idle = true;
+
+ if (kbase_pm_context_active_handle_suspend(
+ kbdev,
+ KBASE_PM_SUSPEND_HANDLER_DONT_INCREASE)) {
+ /* Suspend pending - return context to
+ * queue and stop scheduling */
+ mutex_lock(
+ &kctx->jctx.sched_info.ctx.jsctx_mutex);
+ if (kbase_js_ctx_list_add_pullable_head(
+ kctx->kbdev, kctx, js))
+ kbase_js_sync_timers(kbdev);
+ mutex_unlock(
+ &kctx->jctx.sched_info.ctx.jsctx_mutex);
+ mutex_unlock(&js_devdata->queue_mutex);
+ up(&js_devdata->schedule_sem);
+ return;
+ }
+ kbase_ctx_flag_set(kctx, KCTX_ACTIVE);
+ }
+
+ if (!kbase_js_use_ctx(kbdev, kctx)) {
+ mutex_lock(
+ &kctx->jctx.sched_info.ctx.jsctx_mutex);
+ /* Context can not be used at this time */
+ spin_lock_irqsave(&kbdev->hwaccess_lock, flags);
+ if (kbase_js_ctx_pullable(kctx, js, false)
+ || kbase_ctx_flag(kctx, KCTX_PRIVILEGED))
+ timer_sync |=
+ kbase_js_ctx_list_add_pullable_head_nolock(
+ kctx->kbdev, kctx, js);
+ else
+ timer_sync |=
+ kbase_js_ctx_list_add_unpullable_nolock(
+ kctx->kbdev, kctx, js);
+ spin_unlock_irqrestore(&kbdev->hwaccess_lock,
+ flags);
+ mutex_unlock(
+ &kctx->jctx.sched_info.ctx.jsctx_mutex);
+ if (context_idle) {
+ WARN_ON(!kbase_ctx_flag(kctx, KCTX_ACTIVE));
+ kbase_ctx_flag_clear(kctx, KCTX_ACTIVE);
+ kbase_pm_context_idle(kbdev);
+ }
+
+ /* No more jobs can be submitted on this slot */
+ js_mask &= ~(1 << js);
+ break;
+ }
+ mutex_lock(&kctx->jctx.sched_info.ctx.jsctx_mutex);
+ spin_lock_irqsave(&kbdev->hwaccess_lock, flags);
+
+ kbase_ctx_flag_clear(kctx, KCTX_PULLED);
+
+ if (!kbase_jm_kick(kbdev, 1 << js))
+ /* No more jobs can be submitted on this slot */
+ js_mask &= ~(1 << js);
+
+ if (!kbase_ctx_flag(kctx, KCTX_PULLED)) {
+ /* Failed to pull jobs - push to head of list */
+ if (kbase_js_ctx_pullable(kctx, js, true))
+ timer_sync |=
+ kbase_js_ctx_list_add_pullable_head_nolock(
+ kctx->kbdev,
+ kctx, js);
+ else
+ timer_sync |=
+ kbase_js_ctx_list_add_unpullable_nolock(
+ kctx->kbdev,
+ kctx, js);
+
+ if (context_idle) {
+ kbase_jm_idle_ctx(kbdev, kctx);
+ spin_unlock_irqrestore(
+ &kbdev->hwaccess_lock,
+ flags);
+ WARN_ON(!kbase_ctx_flag(kctx, KCTX_ACTIVE));
+ kbase_ctx_flag_clear(kctx, KCTX_ACTIVE);
+ kbase_pm_context_idle(kbdev);
+ } else {
+ spin_unlock_irqrestore(
+ &kbdev->hwaccess_lock,
+ flags);
+ }
+ mutex_unlock(
+ &kctx->jctx.sched_info.ctx.jsctx_mutex);
+
+ js_mask &= ~(1 << js);
+ break; /* Could not run atoms on this slot */
+ }
+
+ /* Push to back of list */
+ if (kbase_js_ctx_pullable(kctx, js, true))
+ timer_sync |=
+ kbase_js_ctx_list_add_pullable_nolock(
+ kctx->kbdev, kctx, js);
+ else
+ timer_sync |=
+ kbase_js_ctx_list_add_unpullable_nolock(
+ kctx->kbdev, kctx, js);
+
+ spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);
+ mutex_unlock(&kctx->jctx.sched_info.ctx.jsctx_mutex);
+ }
+ }
+
+ if (timer_sync)
+ kbase_js_sync_timers(kbdev);
+
+ mutex_unlock(&js_devdata->queue_mutex);
+ up(&js_devdata->schedule_sem);
+}
+
+void kbase_js_zap_context(struct kbase_context *kctx)
+{
+ struct kbase_device *kbdev = kctx->kbdev;
+ struct kbasep_js_device_data *js_devdata = &kbdev->js_data;
+ struct kbasep_js_kctx_info *js_kctx_info = &kctx->jctx.sched_info;
+ int js;
+
+ /*
+ * Critical assumption: No more submission is possible outside of the
+ * workqueue. This is because the OS *must* prevent U/K calls (IOCTLs)
+ * whilst the struct kbase_context is terminating.
+ */
+
+ /* First, atomically do the following:
+ * - mark the context as dying
+ * - try to evict it from the policy queue */
+ mutex_lock(&kctx->jctx.lock);
+ mutex_lock(&js_devdata->queue_mutex);
+ mutex_lock(&js_kctx_info->ctx.jsctx_mutex);
+ kbase_ctx_flag_set(kctx, KCTX_DYING);
+
+ dev_dbg(kbdev->dev, "Zap: Try Evict Ctx %p", kctx);
+
+ /*
+ * At this point we know:
+ * - If eviction succeeded, it was in the policy queue, but now no
+ * longer is
+ * - We must cancel the jobs here. No Power Manager active reference to
+ * release.
+ * - This happens asynchronously - kbase_jd_zap_context() will wait for
+ * those jobs to be killed.
+ * - If eviction failed, then it wasn't in the policy queue. It is one
+ * of the following:
+ * - a. it didn't have any jobs, and so is not in the Policy Queue or
+ * the Run Pool (not scheduled)
+ * - Hence, no more work required to cancel jobs. No Power Manager
+ * active reference to release.
+ * - b. it was in the middle of a scheduling transaction (and thus must
+ * have at least 1 job). This can happen from a syscall or a
+ * kernel thread. We still hold the jsctx_mutex, and so the thread
+ * must be waiting inside kbasep_js_try_schedule_head_ctx(),
+ * before checking whether the runpool is full. That thread will
+ * continue after we drop the mutex, and will notice the context
+ *    is dying. It will roll back the transaction, killing all jobs at
+ * the same time. kbase_jd_zap_context() will wait for those jobs
+ * to be killed.
+ * - Hence, no more work required to cancel jobs, or to release the
+ * Power Manager active reference.
+ * - c. it is scheduled, and may or may not be running jobs
+ * - We must cause it to leave the runpool by stopping it from
+ * submitting any more jobs. When it finally does leave,
+ * kbasep_js_runpool_requeue_or_kill_ctx() will kill all remaining jobs
+ * (because it is dying), release the Power Manager active reference,
+ * and will not requeue the context in the policy queue.
+ * kbase_jd_zap_context() will wait for those jobs to be killed.
+ * - Hence, work required just to make it leave the runpool. Cancelling
+ * jobs and releasing the Power manager active reference will be
+ * handled when it leaves the runpool.
+ */
+ if (!kbase_ctx_flag(kctx, KCTX_SCHEDULED)) {
+ for (js = 0; js < kbdev->gpu_props.num_job_slots; js++) {
+ if (!list_empty(
+ &kctx->jctx.sched_info.ctx.ctx_list_entry[js]))
+ list_del_init(
+ &kctx->jctx.sched_info.ctx.ctx_list_entry[js]);
+ }
+
+ /* The following events require us to kill off remaining jobs
+ * and update PM book-keeping:
+ * - we evicted it correctly (it must have jobs to be in the
+ * Policy Queue)
+ *
+ * These events need no action, but take this path anyway:
+ * - Case a: it didn't have any jobs, and was never in the Queue
+ *  - Case b: scheduling transaction will be partially rolled
+ *    back (this already cancels the jobs)
+ */
+
+ KBASE_TRACE_ADD(kbdev, JM_ZAP_NON_SCHEDULED, kctx, NULL, 0u,
+ kbase_ctx_flag(kctx, KCTX_SCHEDULED));
+
+ dev_dbg(kbdev->dev, "Zap: Ctx %p scheduled=0", kctx);
+
+ /* Only cancel jobs when we evicted from the policy
+ * queue. No Power Manager active reference was held.
+ *
+ * Having is_dying set ensures that this kills, and
+ * doesn't requeue */
+ kbasep_js_runpool_requeue_or_kill_ctx(kbdev, kctx, false);
+
+ mutex_unlock(&js_kctx_info->ctx.jsctx_mutex);
+ mutex_unlock(&js_devdata->queue_mutex);
+ mutex_unlock(&kctx->jctx.lock);
+ } else {
+ unsigned long flags;
+ bool was_retained;
+
+ /* Case c: didn't evict, but it is scheduled - it's in the Run
+ * Pool */
+ KBASE_TRACE_ADD(kbdev, JM_ZAP_SCHEDULED, kctx, NULL, 0u,
+ kbase_ctx_flag(kctx, KCTX_SCHEDULED));
+ dev_dbg(kbdev->dev, "Zap: Ctx %p is in RunPool", kctx);
+
+ /* Disable the ctx from submitting any more jobs */
+ spin_lock_irqsave(&kbdev->hwaccess_lock, flags);
+
+ kbasep_js_clear_submit_allowed(js_devdata, kctx);
+
+ /* Retain and (later) release the context whilst it is now
+ * disallowed from submitting jobs - ensures that someone
+ * somewhere will be removing the context later on */
+ was_retained = kbasep_js_runpool_retain_ctx_nolock(kbdev, kctx);
+
+ /* Since it's scheduled and we have the jsctx_mutex, it must be
+ * retained successfully */
+ KBASE_DEBUG_ASSERT(was_retained);
+
+ dev_dbg(kbdev->dev, "Zap: Ctx %p Kill Any Running jobs", kctx);
+
+ /* Cancel any remaining running jobs for this kctx - if any.
+ * Submit is disallowed which takes effect immediately, so no
+ * more new jobs will appear after we do this. */
+ for (js = 0; js < kbdev->gpu_props.num_job_slots; js++)
+ kbase_job_slot_hardstop(kctx, js, NULL);
+
+ spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);
+ mutex_unlock(&js_kctx_info->ctx.jsctx_mutex);
+ mutex_unlock(&js_devdata->queue_mutex);
+ mutex_unlock(&kctx->jctx.lock);
+
+ dev_dbg(kbdev->dev, "Zap: Ctx %p Release (may or may not schedule out immediately)",
+ kctx);
+
+ kbasep_js_runpool_release_ctx(kbdev, kctx);
+ }
+
+ KBASE_TRACE_ADD(kbdev, JM_ZAP_DONE, kctx, NULL, 0u, 0u);
+
+ /* After this, you must wait on both the
+ * kbase_jd_context::zero_jobs_wait and the
+ * kbasep_js_kctx_info::ctx::is_scheduled_waitq - to wait for the jobs
+ * to be destroyed, and the context to be de-scheduled (if it was on the
+ * runpool).
+ *
+ * kbase_jd_zap_context() will do this. */
+}
+
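+/* Return the busy refcount of the context's currently assigned address
+ * space, for tracing purposes, or 0 if the context is not assigned an
+ * address space. */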
+static inline int trace_get_refcnt(struct kbase_device *kbdev,
+ struct kbase_context *kctx)
+{
+ struct kbasep_js_device_data *js_devdata;
+ int as_nr;
+ int refcnt = 0;
+
+ js_devdata = &kbdev->js_data;
+
+ as_nr = kctx->as_nr;
+ if (as_nr != KBASEP_AS_NR_INVALID) {
+ struct kbasep_js_per_as_data *js_per_as_data;
+
+ js_per_as_data = &js_devdata->runpool_irq.per_as_data[as_nr];
+
+ refcnt = js_per_as_data->as_busy_refcount;
+ }
+
+ return refcnt;
+}
+
+/**
+ * kbase_js_foreach_ctx_job(): - Call a function on all jobs in context
+ * @kctx: Pointer to context.
+ * @callback: Pointer to function to call for each job.
+ *
+ * Call a function on all jobs belonging to a non-queued, non-running
+ * context, and detach the jobs from the context as it goes.
+ *
+ * Due to the locks that might be held at the time of the call, the callback
+ * may need to defer work on a workqueue to complete its actions (e.g. when
+ * cancelling jobs)
+ *
+ * Atoms will be removed from the queue, so this must only be called when
+ * cancelling jobs (which occurs as part of context destruction).
+ *
+ * The locking conditions on the caller are as follows:
+ * - it will be holding kbasep_js_kctx_info::ctx::jsctx_mutex.
+ */
+static void kbase_js_foreach_ctx_job(struct kbase_context *kctx,
+ kbasep_js_policy_ctx_job_cb callback)
+{
+ struct kbase_device *kbdev;
+ struct kbasep_js_device_data *js_devdata;
+ unsigned long flags;
+ u32 js;
+
+ kbdev = kctx->kbdev;
+ js_devdata = &kbdev->js_data;
+
+ spin_lock_irqsave(&kbdev->hwaccess_lock, flags);
+
+ KBASE_TRACE_ADD_REFCOUNT(kbdev, JS_POLICY_FOREACH_CTX_JOBS, kctx, NULL,
+ 0u, trace_get_refcnt(kbdev, kctx));
+
+ /* Invoke callback on jobs on each slot in turn */
+ for (js = 0; js < kbdev->gpu_props.num_job_slots; js++)
+ jsctx_queue_foreach(kctx, js, callback);
+
+ spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);
+}
diff --git a/midgard-0.r2p0/mali_kbase_js.h b/midgard-0.r2p0/mali_kbase_js.h
new file mode 100755
index 0000000..8969222
--- /dev/null
+++ b/midgard-0.r2p0/mali_kbase_js.h
@@ -0,0 +1,1058 @@
+/*
+ *
+ * (C) COPYRIGHT 2011-2016 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained
+ * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
+ * Boston, MA 02110-1301, USA.
+ *
+ */
+
+
+
+
+
+/**
+ * @file mali_kbase_js.h
+ * Job Scheduler APIs.
+ */
+
+#ifndef _KBASE_JS_H_
+#define _KBASE_JS_H_
+
+#include "mali_kbase_js_defs.h"
+#include "mali_kbase_js_policy.h"
+#include "mali_kbase_context.h"
+#include "mali_kbase_defs.h"
+#include "mali_kbase_debug.h"
+
+#include "mali_kbase_js_ctx_attr.h"
+
+/**
+ * @addtogroup base_api
+ * @{
+ */
+
+/**
+ * @addtogroup base_kbase_api
+ * @{
+ */
+
+/**
+ * @addtogroup kbase_js Job Scheduler Internal APIs
+ * @{
+ *
+ * These APIs are Internal to KBase and are available for use by the
+ * @ref kbase_js_policy "Job Scheduler Policy APIs"
+ */
+
+/**
+ * @brief Initialize the Job Scheduler
+ *
+ * The struct kbasep_js_device_data sub-structure of \a kbdev must be zero
+ * initialized before passing to the kbasep_js_devdata_init() function. This is
+ * to give efficient error path code.
+ */
+int kbasep_js_devdata_init(struct kbase_device * const kbdev);
+
+/**
+ * @brief Halt the Job Scheduler.
+ *
+ * It is safe to call this on \a kbdev even if the kbasep_js_device_data
+ * sub-structure was never initialized/failed initialization, to give efficient
+ * error-path code.
+ *
+ * For this to work, the struct kbasep_js_device_data sub-structure of \a kbdev must
+ * be zero initialized before passing to the kbasep_js_devdata_init()
+ * function. This is to give efficient error path code.
+ *
+ * It is a Programming Error to call this whilst there are still kbase_context
+ * structures registered with this scheduler.
+ *
+ */
+void kbasep_js_devdata_halt(struct kbase_device *kbdev);
+
+/**
+ * @brief Terminate the Job Scheduler
+ *
+ * It is safe to call this on \a kbdev even if the kbasep_js_device_data
+ * sub-structure was never initialized/failed initialization, to give efficient
+ * error-path code.
+ *
+ * For this to work, the struct kbasep_js_device_data sub-structure of \a kbdev must
+ * be zero initialized before passing to the kbasep_js_devdata_init()
+ * function. This is to give efficient error path code.
+ *
+ * It is a Programming Error to call this whilst there are still kbase_context
+ * structures registered with this scheduler.
+ */
+void kbasep_js_devdata_term(struct kbase_device *kbdev);
+
+/**
+ * @brief Initialize the Scheduling Component of a struct kbase_context on the Job Scheduler.
+ *
+ * This effectively registers a struct kbase_context with a Job Scheduler.
+ *
+ * It does not register any jobs owned by the struct kbase_context with the scheduler.
+ * Those must be separately registered by kbasep_js_add_job().
+ *
+ * The struct kbase_context must be zero initialized before passing to the
+ * kbase_js_init() function. This is to give efficient error path code.
+ */
+int kbasep_js_kctx_init(struct kbase_context * const kctx);
+
+/**
+ * @brief Terminate the Scheduling Component of a struct kbase_context on the Job Scheduler
+ *
+ * This effectively de-registers a struct kbase_context from its Job Scheduler
+ *
+ * It is safe to call this on a struct kbase_context that has never had or failed
+ * initialization of its jctx.sched_info member, to give efficient error-path
+ * code.
+ *
+ * For this to work, the struct kbase_context must be zero initialized before passing
+ * to the kbase_js_init() function.
+ *
+ * It is a Programming Error to call this whilst there are still jobs
+ * registered with this context.
+ */
+void kbasep_js_kctx_term(struct kbase_context *kctx);
+
+/**
+ * @brief Add a job chain to the Job Scheduler, and take necessary actions to
+ * schedule the context/run the job.
+ *
+ * This atomically does the following:
+ * - Update the numbers of jobs information
+ * - Add the job to the run pool if necessary (part of init_job)
+ *
+ * Once this is done, then an appropriate action is taken:
+ * - If the ctx is scheduled, it attempts to start the next job (which might be
+ * this added job)
+ * - Otherwise, and if this is the first job on the context, it enqueues it on
+ * the Policy Queue
+ *
+ * The Policy's Queue can be updated by this in the following ways:
+ * - In the above case that this is the first job on the context
+ * - If the context is high priority and the context is not scheduled, then it
+ * could cause the Policy to schedule out a low-priority context, allowing
+ * this context to be scheduled in.
+ *
+ * If the context is already scheduled on the RunPool, then adding a job to it
+ * is guaranteed not to update the Policy Queue. And so, the caller is
+ * guaranteed to not need to try scheduling a context from the Run Pool - it
+ * can safely assert that the result is false.
+ *
+ * It is a programming error to have more than U32_MAX jobs in flight at a time.
+ *
+ * The following locking conditions are made on the caller:
+ * - it must \em not hold kbasep_js_kctx_info::ctx::jsctx_mutex.
+ * - it must \em not hold hwaccess_lock (as this will be obtained internally)
+ * - it must \em not hold kbasep_js_device_data::runpool_mutex (as this will be
+ * obtained internally)
+ * - it must \em not hold kbasep_jd_device_data::queue_mutex (again, it's used internally).
+ *
+ * @return true indicates that the Policy Queue was updated, and so the
+ * caller will need to try scheduling a context onto the Run Pool.
+ * @return false indicates that no updates were made to the Policy Queue,
+ * so no further action is required from the caller. This is \b always returned
+ * when the context is currently scheduled.
+ */
+bool kbasep_js_add_job(struct kbase_context *kctx, struct kbase_jd_atom *atom);
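+
+/*
+ * Illustrative sketch (not part of the driver): a typical caller pattern
+ * implied by the description above. The helper name example_add_and_schedule
+ * and the surrounding locking context are assumptions for illustration only.
+ */
+#if 0
+static void example_add_and_schedule(struct kbase_context *kctx,
+ struct kbase_jd_atom *katom)
+{
+ /* Caller holds none of the JS mutexes nor the hwaccess_lock */
+ if (kbasep_js_add_job(kctx, katom)) {
+ /* The Policy Queue was updated - try to schedule a context in */
+ kbase_js_sched_all(kctx->kbdev);
+ }
+ /* A false return means no queue update (e.g. kctx already scheduled) */
+}
+#endif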
+
+/**
+ * @brief Remove a job chain from the Job Scheduler, except for its 'retained state'.
+ *
+ * Completely removing a job requires several calls:
+ * - kbasep_js_atom_retained_state_copy(), to capture the 'retained state' of
+ * the atom
+ * - kbasep_js_remove_job(), to partially remove the atom from the Job Scheduler
+ * - kbasep_js_runpool_release_ctx_and_katom_retained_state(), to release the
+ * remaining state held as part of the job having been run.
+ *
+ * In the common case of atoms completing normally, this set of actions is
+ * more efficient for spinlock purposes than having kbasep_js_remove_job()
+ * handle all of the actions.
+ *
+ * In the case of cancelling atoms, it is easier to call
+ * kbasep_js_remove_cancelled_job(), which handles all the necessary actions.
+ *
+ * It is a programming error to call this when:
+ * - \a atom is not a job belonging to kctx.
+ * - \a atom has already been removed from the Job Scheduler.
+ * - \a atom is still in the runpool:
+ * - it has not been removed with kbasep_js_policy_dequeue_job()
+ * - or, it has not been removed with kbasep_js_policy_dequeue_job_irq()
+ *
+ * Do not use this for removing jobs being killed by kbase_jd_cancel() - use
+ * kbasep_js_remove_cancelled_job() instead.
+ *
+ * The following locking conditions are made on the caller:
+ * - it must hold kbasep_js_kctx_info::ctx::jsctx_mutex.
+ *
+ */
+void kbasep_js_remove_job(struct kbase_device *kbdev, struct kbase_context *kctx, struct kbase_jd_atom *atom);
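+
+/*
+ * Illustrative sketch (not part of the driver): the three-step removal
+ * sequence described above for a normally-completing atom. The helper name
+ * example_remove_completed_atom and the explicit locking shown are
+ * assumptions for illustration only.
+ */
+#if 0
+static void example_remove_completed_atom(struct kbase_device *kbdev,
+ struct kbase_context *kctx, struct kbase_jd_atom *katom)
+{
+ struct kbasep_js_atom_retained_state retained;
+
+ /* 1. Capture the state that must outlive the atom */
+ kbasep_js_atom_retained_state_copy(&retained, katom);
+
+ /* 2. Partially remove the atom (jsctx_mutex must be held) */
+ mutex_lock(&kctx->jctx.sched_info.ctx.jsctx_mutex);
+ kbasep_js_remove_job(kbdev, kctx, katom);
+ mutex_unlock(&kctx->jctx.sched_info.ctx.jsctx_mutex);
+
+ /* 3. Release the remaining per-run state and the ctx refcount */
+ kbasep_js_runpool_release_ctx_and_katom_retained_state(kbdev, kctx,
+ &retained);
+}
+#endif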
+
+/**
+ * @brief Completely remove a job chain from the Job Scheduler, in the case
+ * where the job chain was cancelled.
+ *
+ * This is a variant of kbasep_js_remove_job() that takes care of removing all
+ * of the retained state too. This is generally useful for cancelled atoms,
+ * which need not be handled in an optimal way.
+ *
+ * It is a programming error to call this when:
+ * - \a atom is not a job belonging to kctx.
+ * - \a atom has already been removed from the Job Scheduler.
+ * - \a atom is still in the runpool:
+ * - it is not being killed with kbasep_jd_cancel()
+ * - or, it has not been removed with kbasep_js_policy_dequeue_job()
+ * - or, it has not been removed with kbasep_js_policy_dequeue_job_irq()
+ *
+ * The following locking conditions are made on the caller:
+ * - it must hold kbasep_js_kctx_info::ctx::jsctx_mutex.
+ * - it must \em not hold the hwaccess_lock, (as this will be obtained
+ * internally)
+ * - it must \em not hold kbasep_js_device_data::runpool_mutex (as this could be
+ * obtained internally)
+ *
+ * @return true indicates that ctx attributes have changed and the caller
+ * should call kbase_js_sched_all() to try to run more jobs
+ * @return false otherwise
+ */
+bool kbasep_js_remove_cancelled_job(struct kbase_device *kbdev,
+ struct kbase_context *kctx,
+ struct kbase_jd_atom *katom);
+
+/**
+ * @brief Refcount a context as being busy, preventing it from being scheduled
+ * out.
+ *
+ * @note This function can safely be called from IRQ context.
+ *
+ * The following locking conditions are made on the caller:
+ * - it must \em not hold the hwaccess_lock, because it will be used internally.
+ *
+ * @return value != false if the retain succeeded, and the context will not be scheduled out.
+ * @return false if the retain failed (because the context is being/has been scheduled out).
+ */
+bool kbasep_js_runpool_retain_ctx(struct kbase_device *kbdev, struct kbase_context *kctx);
+
+/**
+ * @brief Refcount a context as being busy, preventing it from being scheduled
+ * out.
+ *
+ * @note This function can safely be called from IRQ context.
+ *
+ * The following locks must be held by the caller:
+ * - hwaccess_lock
+ *
+ * @return value != false if the retain succeeded, and the context will not be scheduled out.
+ * @return false if the retain failed (because the context is being/has been scheduled out).
+ */
+bool kbasep_js_runpool_retain_ctx_nolock(struct kbase_device *kbdev, struct kbase_context *kctx);
+
+/**
+ * @brief Lookup a context in the Run Pool based upon its current address space
+ * and ensure that it stays scheduled in.
+ *
+ * The context is refcounted as being busy to prevent it from scheduling
+ * out. It must be released with kbasep_js_runpool_release_ctx() when it is no
+ * longer required to stay scheduled in.
+ *
+ * @note This function can safely be called from IRQ context.
+ *
+ * The following locking conditions are made on the caller:
+ * - it must \em not hold the hwaccess_lock, because it will be used internally.
+ * If the hwaccess_lock is already held, then the caller should use
+ * kbasep_js_runpool_lookup_ctx_nolock() instead.
+ *
+ * @return a valid struct kbase_context on success, which has been refcounted as being busy.
+ * @return NULL on failure, indicating that no context was found in \a as_nr
+ */
+struct kbase_context *kbasep_js_runpool_lookup_ctx(struct kbase_device *kbdev, int as_nr);
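+
+/*
+ * Illustrative sketch (not part of the driver): keeping a looked-up context
+ * resident while inspecting it. The helper name example_peek_ctx_in_as is an
+ * assumption for illustration only.
+ */
+#if 0
+static void example_peek_ctx_in_as(struct kbase_device *kbdev, int as_nr)
+{
+ struct kbase_context *kctx;
+
+ /* The hwaccess_lock must not be held here */
+ kctx = kbasep_js_runpool_lookup_ctx(kbdev, as_nr);
+ if (!kctx)
+ return; /* no context scheduled in this address space */
+
+ /* ... kctx cannot be scheduled out while the refcount is held ... */
+
+ kbasep_js_runpool_release_ctx(kbdev, kctx);
+}
+#endif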
+
+/**
+ * kbasep_js_runpool_lookup_ctx_nolock - Lookup a context in the Run Pool based
+ * upon its current address space and ensure that it stays scheduled in.
+ * @kbdev: Device pointer
+ * @as_nr: Address space to lookup
+ *
+ * The context is refcounted as being busy to prevent it from scheduling
+ * out. It must be released with kbasep_js_runpool_release_ctx() when it is no
+ * longer required to stay scheduled in.
+ *
+ * Note: This function can safely be called from IRQ context.
+ *
+ * The following locking conditions are made on the caller:
+ * - it must hold the hwaccess_lock
+ *
+ * Return: a valid struct kbase_context on success, which has been refcounted as
+ * being busy.
+ * NULL on failure, indicating that no context was found in \a as_nr
+ */
+struct kbase_context *kbasep_js_runpool_lookup_ctx_nolock(
+ struct kbase_device *kbdev, int as_nr);
+
+/**
+ * @brief Handling the requeuing/killing of a context that was evicted from the
+ * policy queue or runpool.
+ *
+ * This should be used whenever handing off a context that has been evicted
+ * from the policy queue or the runpool:
+ * - If the context is not dying and has jobs, it gets re-added to the policy
+ * queue
+ * - Otherwise, it is not added
+ *
+ * In addition, if the context is dying the jobs are killed asynchronously.
+ *
+ * In all cases, the Power Manager active reference is released
+ * (kbase_pm_context_idle()) whenever the has_pm_ref parameter is true. \a
+ * has_pm_ref must be set to false whenever the context was not previously in
+ * the runpool and does not hold a Power Manager active refcount. Note that
+ * contexts in a rollback of kbasep_js_try_schedule_head_ctx() might have an
+ * active refcount even though they weren't in the runpool.
+ *
+ * The following locking conditions are made on the caller:
+ * - it must hold kbasep_js_kctx_info::ctx::jsctx_mutex.
+ * - it must \em not hold kbasep_jd_device_data::queue_mutex (as this will be
+ * obtained internally)
+ */
+void kbasep_js_runpool_requeue_or_kill_ctx(struct kbase_device *kbdev, struct kbase_context *kctx, bool has_pm_ref);
+
+/**
+ * @brief Release a refcount of a context being busy, allowing it to be
+ * scheduled out.
+ *
+ * When the refcount reaches zero, the context \em might be scheduled out
+ * (depending on whether the Scheduling Policy has deemed it so, or if it has
+ * run out of jobs).
+ *
+ * If the context does get scheduled out, then the following actions will be
+ * taken as part of descheduling a context:
+ * - For the context being descheduled:
+ * - If the context is in the process of dying (all the jobs are being
+ * removed from it), then descheduling also kills off any jobs remaining in the
+ * context.
+ * - If the context is not dying, and any jobs remain after descheduling the
+ * context then it is re-enqueued to the Policy's Queue.
+ * - Otherwise, the context is still known to the scheduler, but remains absent
+ * from the Policy Queue until a job is next added to it.
+ * - In all descheduling cases, the Power Manager active reference (obtained
+ * during kbasep_js_try_schedule_head_ctx()) is released (kbase_pm_context_idle()).
+ *
+ * Whilst the context is being descheduled, this also handles actions that
+ * cause more atoms to be run:
+ * - Attempt submitting atoms when the Context Attributes on the Runpool have
+ * changed. This is because the context being scheduled out could mean that
+ * there are more opportunities to run atoms.
+ * - Attempt submitting to a slot that was previously blocked due to affinity
+ * restrictions. This is usually only necessary when releasing a context
+ * happens as part of completing a previous job, but is harmless nonetheless.
+ * - Attempt scheduling in a new context (if one is available), and if necessary,
+ * running a job from that new context.
+ *
+ * Unlike retaining a context in the runpool, this function \b cannot be called
+ * from IRQ context.
+ *
+ * It is a programming error to call this on a \a kctx that is not currently
+ * scheduled, or that already has a zero refcount.
+ *
+ * The following locking conditions are made on the caller:
+ * - it must \em not hold the hwaccess_lock, because it will be used internally.
+ * - it must \em not hold kbasep_js_kctx_info::ctx::jsctx_mutex.
+ * - it must \em not hold kbasep_js_device_data::runpool_mutex (as this will be
+ * obtained internally)
+ * - it must \em not hold the kbase_device::mmu_hw_mutex (as this will be
+ * obtained internally)
+ * - it must \em not hold kbasep_jd_device_data::queue_mutex (as this will be
+ * obtained internally)
+ *
+ */
+void kbasep_js_runpool_release_ctx(struct kbase_device *kbdev, struct kbase_context *kctx);
+
+/**
+ * @brief Variant of kbasep_js_runpool_release_ctx() that handles additional
+ * actions from completing an atom.
+ *
+ * This is usually called as part of completing an atom and releasing the
+ * refcount on the context held by the atom.
+ *
+ * Therefore, the extra actions carried out are part of handling actions queued
+ * on a completed atom, namely:
+ * - Releasing the atom's context attributes
+ * - Retrying the submission on a particular slot, because we couldn't submit
+ * on that slot from an IRQ handler.
+ *
+ * The locking conditions of this function are the same as those for
+ * kbasep_js_runpool_release_ctx()
+ */
+void kbasep_js_runpool_release_ctx_and_katom_retained_state(struct kbase_device *kbdev, struct kbase_context *kctx, struct kbasep_js_atom_retained_state *katom_retained_state);
+
+/**
+ * @brief Variant of kbase_js_runpool_release_ctx() that assumes that
+ * kbasep_js_device_data::runpool_mutex and
+ * kbasep_js_kctx_info::ctx::jsctx_mutex are held by the caller, and does not
+ * attempt to schedule new contexts.
+ */
+void kbasep_js_runpool_release_ctx_nolock(struct kbase_device *kbdev,
+ struct kbase_context *kctx);
+
+/**
+ * @brief Schedule in a privileged context
+ *
+ * This schedules a context in regardless of the context priority.
+ * If the runpool is full, a context will be forced out of the runpool and the function will wait
+ * for the new context to be scheduled in.
+ * The context will be kept scheduled in (and the corresponding address space reserved) until
+ * kbasep_js_release_privileged_ctx is called).
+ *
+ * The following locking conditions are made on the caller:
+ * - it must \em not hold the hwaccess_lock, because it will be used internally.
+ * - it must \em not hold kbasep_js_device_data::runpool_mutex (as this will be
+ * obtained internally)
+ * - it must \em not hold the kbase_device::mmu_hw_mutex (as this will be
+ * obtained internally)
+ * - it must \em not hold kbasep_jd_device_data::queue_mutex (again, it's used internally).
+ * - it must \em not hold kbasep_js_kctx_info::ctx::jsctx_mutex, because it will
+ * be used internally.
+ *
+ */
+void kbasep_js_schedule_privileged_ctx(struct kbase_device *kbdev, struct kbase_context *kctx);
+
+/**
+ * @brief Release a privileged context, allowing it to be scheduled out.
+ *
+ * See kbasep_js_runpool_release_ctx for potential side effects.
+ *
+ * The following locking conditions are made on the caller:
+ * - it must \em not hold the hwaccess_lock, because it will be used internally.
+ * - it must \em not hold kbasep_js_kctx_info::ctx::jsctx_mutex.
+ * - it must \em not hold kbasep_js_device_data::runpool_mutex (as this will be
+ * obtained internally)
+ * - it must \em not hold the kbase_device::mmu_hw_mutex (as this will be
+ * obtained internally)
+ *
+ */
+void kbasep_js_release_privileged_ctx(struct kbase_device *kbdev, struct kbase_context *kctx);
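+
+/*
+ * Illustrative sketch (not part of the driver): the schedule/release pairing
+ * for privileged contexts described above. The helper name and the
+ * placeholder work are assumptions for illustration only.
+ */
+#if 0
+static void example_privileged_access(struct kbase_device *kbdev,
+ struct kbase_context *kctx)
+{
+ /* Force kctx into the runpool and keep its address space reserved */
+ kbasep_js_schedule_privileged_ctx(kbdev, kctx);
+
+ /* ... do work that requires kctx to remain scheduled in ... */
+
+ /* Allow kctx to be scheduled out again */
+ kbasep_js_release_privileged_ctx(kbdev, kctx);
+}
+#endif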
+
+/**
+ * @brief Try to submit the next job on each slot
+ *
+ * The following locks may be used:
+ * - kbasep_js_device_data::runpool_mutex
+ * - hwaccess_lock
+ */
+void kbase_js_try_run_jobs(struct kbase_device *kbdev);
+
+/**
+ * @brief Suspend the job scheduler during a Power Management Suspend event.
+ *
+ * Causes all contexts to be removed from the runpool, and prevents any
+ * contexts from (re)entering the runpool.
+ *
+ * This does not handle suspending the one privileged context: the caller must
+ * instead do this by suspending the GPU HW Counter Instrumentation.
+ *
+ * This will eventually cause all Power Management active references held by
+ * contexts on the runpool to be released, without running any more atoms.
+ *
+ * The caller must then wait for all Power Management active refcounts to
+ * become zero before completing the suspend.
+ *
+ * The emptying mechanism may take some time to complete, since it can wait for
+ * jobs to complete naturally instead of forcing them to end quickly. However,
+ * this is bounded by the Job Scheduling Policy's Job Timeouts. Hence, this
+ * function is guaranteed to complete in a finite time whenever the Job
+ * Scheduling Policy implements Job Timeouts (such as those done by CFS).
+ */
+void kbasep_js_suspend(struct kbase_device *kbdev);
+
+/**
+ * @brief Resume the Job Scheduler after a Power Management Resume event.
+ *
+ * This restores the actions from kbasep_js_suspend():
+ * - Schedules contexts back into the runpool
+ * - Resumes running atoms on the GPU
+ */
+void kbasep_js_resume(struct kbase_device *kbdev);
+
+/**
+ * @brief Submit an atom to the job scheduler.
+ *
+ * The atom is enqueued on the context's ringbuffer. The caller must have
+ * ensured that all dependencies can be represented in the ringbuffer.
+ *
+ * Caller must hold jctx->lock
+ *
+ * @param[in] kctx Context pointer
+ * @param[in] atom Pointer to the atom to submit
+ *
+ * @return Whether the context needs to be enqueued. */
+bool kbase_js_dep_resolved_submit(struct kbase_context *kctx,
+ struct kbase_jd_atom *katom);
+
+/**
+ * jsctx_ll_flush_to_rb() - Pushes atoms from the linked list to ringbuffer.
+ * @kctx: Context Pointer
+ * @prio: Priority (specifies the queue together with js).
+ * @js: Job slot (specifies the queue together with prio).
+ *
+ * Pushes all possible atoms from the linked list to the ringbuffer.
+ * The number of atoms moved is limited by the free space in the ringbuffer
+ * and the number of available atoms in the linked list.
+ *
+ */
+void jsctx_ll_flush_to_rb(struct kbase_context *kctx, int prio, int js);
+/**
+ * @brief Pull an atom from a context in the job scheduler for execution.
+ *
+ * The atom will not be removed from the ringbuffer at this stage.
+ *
+ * The HW access lock must be held when calling this function.
+ *
+ * @param[in] kctx Context to pull from
+ * @param[in] js Job slot to pull from
+ * @return Pointer to an atom, or NULL if there are no atoms for this
+ * slot that can be currently run.
+ */
+struct kbase_jd_atom *kbase_js_pull(struct kbase_context *kctx, int js);
+
+/**
+ * @brief Return an atom to the job scheduler ringbuffer.
+ *
+ * An atom is 'unpulled' if execution is stopped but intended to be returned to
+ * later. The most common reason for this is that the atom has been
+ * soft-stopped.
+ *
+ * Note that if multiple atoms are to be 'unpulled', they must be returned in
+ * the reverse order to which they were originally pulled. It is a programming
+ * error to return atoms in any other order.
+ *
+ * The HW access lock must be held when calling this function.
+ *
+ * @param[in] kctx Context pointer
+ * @param[in] atom Pointer to the atom to unpull
+ */
+void kbase_js_unpull(struct kbase_context *kctx, struct kbase_jd_atom *katom);
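+
+/*
+ * Illustrative sketch (not part of the driver): when several pulled atoms
+ * cannot be submitted they must be unpulled in the reverse order of pulling,
+ * as noted above. The helper name and locking context are assumptions for
+ * illustration only.
+ */
+#if 0
+static void example_pull_two_then_back_off(struct kbase_context *kctx, int js)
+{
+ struct kbase_jd_atom *first, *second;
+
+ /* Caller holds the HW access lock */
+ first = kbase_js_pull(kctx, js);
+ second = kbase_js_pull(kctx, js);
+
+ /* Back off: return the atoms in reverse order */
+ if (second)
+ kbase_js_unpull(kctx, second);
+ if (first)
+ kbase_js_unpull(kctx, first);
+}
+#endif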
+
+/**
+ * @brief Complete an atom from jd_done_worker(), removing it from the job
+ * scheduler ringbuffer.
+ *
+ * If the atom failed then all dependee atoms marked for failure propagation
+ * will also fail.
+ *
+ * @param[in] kctx Context pointer
+ * @param[in] katom Pointer to the atom to complete
+ * @return true if the context is now idle (no jobs pulled)
+ * false otherwise
+ */
+bool kbase_js_complete_atom_wq(struct kbase_context *kctx,
+ struct kbase_jd_atom *katom);
+
+/**
+ * @brief Complete an atom.
+ *
+ * Most of the work required to complete an atom will be performed by
+ * jd_done_worker().
+ *
+ * The HW access lock must be held when calling this function.
+ *
+ * @param[in] katom Pointer to the atom to complete
+ * @param[in] end_timestamp The time that the atom completed (may be NULL)
+ *
+ * Return: Atom that has now been unblocked and can now be run, or NULL if none
+ */
+struct kbase_jd_atom *kbase_js_complete_atom(struct kbase_jd_atom *katom,
+ ktime_t *end_timestamp);
+
+/**
+ * @brief Submit atoms from all available contexts.
+ *
+ * This will attempt to submit as many jobs as possible to the provided job
+ * slots. It will exit when either all job slots are full, or all contexts have
+ * been used.
+ *
+ * @param[in] kbdev Device pointer
+ * @param[in] js_mask Mask of job slots to submit to
+ */
+void kbase_js_sched(struct kbase_device *kbdev, int js_mask);
+
+/**
+ * kbase_js_zap_context - Attempt to deschedule a context that is being
+ * destroyed
+ * @kctx: Context pointer
+ *
+ * This will attempt to remove a context from any internal job scheduler queues
+ * and perform any other actions to ensure that no further jobs will be
+ * submitted from the context.
+ *
+ * If the context is currently scheduled, then the caller must wait for all
+ * pending jobs to complete before taking any further action.
+ */
+void kbase_js_zap_context(struct kbase_context *kctx);
+
+/**
+ * @brief Validate an atom
+ *
+ * This will determine whether the atom can be scheduled onto the GPU. Atoms
+ * with invalid combinations of core requirements will be rejected.
+ *
+ * @param[in] kbdev Device pointer
+ * @param[in] katom Atom to validate
+ * @return true if atom is valid
+ * false otherwise
+ */
+bool kbase_js_is_atom_valid(struct kbase_device *kbdev,
+ struct kbase_jd_atom *katom);
+
+/**
+ * kbase_js_set_timeouts - update all JS timeouts with user specified data
+ * @kbdev: Device pointer
+ *
+ * Timeouts are specified through the 'js_timeouts' sysfs file. If a timeout is
+ * set to a positive number then that becomes the new value used, if a timeout
+ * is negative then the default is set.
+ */
+void kbase_js_set_timeouts(struct kbase_device *kbdev);
+
+/*
+ * Helpers follow
+ */
+
+/**
+ * @brief Check that a context is allowed to submit jobs on this policy
+ *
+ * The purpose of this abstraction is to hide the underlying data size, and wrap up
+ * the long repeated line of code.
+ *
+ * As with any bool, never compare the return value against true.
+ *
+ * The caller must hold hwaccess_lock.
+ */
+static inline bool kbasep_js_is_submit_allowed(struct kbasep_js_device_data *js_devdata, struct kbase_context *kctx)
+{
+ u16 test_bit;
+
+ /* Ensure context really is scheduled in */
+ KBASE_DEBUG_ASSERT(kctx->as_nr != KBASEP_AS_NR_INVALID);
+ KBASE_DEBUG_ASSERT(kbase_ctx_flag(kctx, KCTX_SCHEDULED));
+
+ test_bit = (u16) (1u << kctx->as_nr);
+
+ return (bool) (js_devdata->runpool_irq.submit_allowed & test_bit);
+}
+
+/**
+ * @brief Allow a context to submit jobs on this policy
+ *
+ * The purpose of this abstraction is to hide the underlying data size, and wrap up
+ * the long repeated line of code.
+ *
+ * The caller must hold hwaccess_lock.
+ */
+static inline void kbasep_js_set_submit_allowed(struct kbasep_js_device_data *js_devdata, struct kbase_context *kctx)
+{
+ u16 set_bit;
+
+ /* Ensure context really is scheduled in */
+ KBASE_DEBUG_ASSERT(kctx->as_nr != KBASEP_AS_NR_INVALID);
+ KBASE_DEBUG_ASSERT(kbase_ctx_flag(kctx, KCTX_SCHEDULED));
+
+ set_bit = (u16) (1u << kctx->as_nr);
+
+ dev_dbg(kctx->kbdev->dev, "JS: Setting Submit Allowed on %p (as=%d)", kctx, kctx->as_nr);
+
+ js_devdata->runpool_irq.submit_allowed |= set_bit;
+}
+
+/**
+ * @brief Prevent a context from submitting more jobs on this policy
+ *
+ * The purpose of this abstraction is to hide the underlying data size, and wrap up
+ * the long repeated line of code.
+ *
+ * The caller must hold hwaccess_lock.
+ */
+static inline void kbasep_js_clear_submit_allowed(struct kbasep_js_device_data *js_devdata, struct kbase_context *kctx)
+{
+ u16 clear_bit;
+ u16 clear_mask;
+
+ /* Ensure context really is scheduled in */
+ KBASE_DEBUG_ASSERT(kctx->as_nr != KBASEP_AS_NR_INVALID);
+ KBASE_DEBUG_ASSERT(kbase_ctx_flag(kctx, KCTX_SCHEDULED));
+
+ clear_bit = (u16) (1u << kctx->as_nr);
+ clear_mask = ~clear_bit;
+
+ dev_dbg(kctx->kbdev->dev, "JS: Clearing Submit Allowed on %p (as=%d)", kctx, kctx->as_nr);
+
+ js_devdata->runpool_irq.submit_allowed &= clear_mask;
+}
+
+/**
+ * @brief Manage the 'retry_submit_on_slot' part of a kbase_jd_atom
+ */
+static inline void kbasep_js_clear_job_retry_submit(struct kbase_jd_atom *atom)
+{
+ atom->retry_submit_on_slot = KBASEP_JS_RETRY_SUBMIT_SLOT_INVALID;
+}
+
+/**
+ * Mark a slot as requiring resubmission by carrying that information on a
+ * completing atom.
+ *
+ * @note This can ASSERT in debug builds if the submit slot has been set to
+ * something other than the current value for @a js. This is because you might
+ * be unintentionally stopping more jobs being submitted on the old submit
+ * slot, and that might cause a scheduling-hang.
+ *
+ * @note If you can guarantee that the atoms for the original slot will be
+ * submitted on some other slot, then call kbasep_js_clear_job_retry_submit()
+ * first to silence the ASSERT.
+ */
+static inline void kbasep_js_set_job_retry_submit_slot(struct kbase_jd_atom *atom, int js)
+{
+ KBASE_DEBUG_ASSERT(0 <= js && js <= BASE_JM_MAX_NR_SLOTS);
+ KBASE_DEBUG_ASSERT((atom->retry_submit_on_slot ==
+ KBASEP_JS_RETRY_SUBMIT_SLOT_INVALID)
+ || (atom->retry_submit_on_slot == js));
+
+ atom->retry_submit_on_slot = js;
+}
+
+/**
+ * Create an initial 'invalid' atom retained state, that requires no
+ * atom-related work to be done on releasing with
+ * kbasep_js_runpool_release_ctx_and_katom_retained_state()
+ */
+static inline void kbasep_js_atom_retained_state_init_invalid(struct kbasep_js_atom_retained_state *retained_state)
+{
+ retained_state->event_code = BASE_JD_EVENT_NOT_STARTED;
+ retained_state->core_req = KBASEP_JS_ATOM_RETAINED_STATE_CORE_REQ_INVALID;
+ retained_state->retry_submit_on_slot = KBASEP_JS_RETRY_SUBMIT_SLOT_INVALID;
+}
+
+/**
+ * Copy atom state that can be made available after jd_done_nolock() is called
+ * on that atom.
+ */
+static inline void kbasep_js_atom_retained_state_copy(struct kbasep_js_atom_retained_state *retained_state, const struct kbase_jd_atom *katom)
+{
+ retained_state->event_code = katom->event_code;
+ retained_state->core_req = katom->core_req;
+ retained_state->retry_submit_on_slot = katom->retry_submit_on_slot;
+ retained_state->sched_priority = katom->sched_priority;
+ retained_state->device_nr = katom->device_nr;
+}
+
+/**
+ * @brief Determine whether an atom has finished (given its retained state),
+ * and so should be given back to userspace/removed from the system.
+ *
+ * Reasons for an atom not finishing include:
+ * - Being soft-stopped (and so, the atom should be resubmitted sometime later)
+ *
+ * @param[in] katom_retained_state the retained state of the atom to check
+ * @return false if the atom has not finished
+ * @return !=false if the atom has finished
+ */
+static inline bool kbasep_js_has_atom_finished(const struct kbasep_js_atom_retained_state *katom_retained_state)
+{
+ return (bool) (katom_retained_state->event_code != BASE_JD_EVENT_STOPPED && katom_retained_state->event_code != BASE_JD_EVENT_REMOVED_FROM_NEXT);
+}
+
+/**
+ * @brief Determine whether a struct kbasep_js_atom_retained_state is valid
+ *
+ * An invalid struct kbasep_js_atom_retained_state is allowed, and indicates that the
+ * code should just ignore it.
+ *
+ * @param[in] katom_retained_state the atom's retained state to check
+ * @return false if the retained state is invalid, and can be ignored
+ * @return !=false if the retained state is valid
+ */
+static inline bool kbasep_js_atom_retained_state_is_valid(const struct kbasep_js_atom_retained_state *katom_retained_state)
+{
+ return (bool) (katom_retained_state->core_req != KBASEP_JS_ATOM_RETAINED_STATE_CORE_REQ_INVALID);
+}
+
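+/**
+ * Retrieve the retry-submit slot recorded in an atom's retained state.
+ *
+ * @param[in] katom_retained_state the retained state to query
+ * @param[out] res set to the recorded slot number (may be the invalid marker)
+ * @return true if a valid retry-submit slot was recorded
+ * @return false otherwise
+ */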
+static inline bool kbasep_js_get_atom_retry_submit_slot(const struct kbasep_js_atom_retained_state *katom_retained_state, int *res)
+{
+ int js = katom_retained_state->retry_submit_on_slot;
+
+ *res = js;
+ return (bool) (js >= 0);
+}
+
+#if KBASE_DEBUG_DISABLE_ASSERTS == 0
+/**
+ * Debug Check the refcount of a context. Only use within ASSERTs
+ *
+ * Obtains hwaccess_lock
+ *
+ * @return negative value if the context is not scheduled in
+ * @return current refcount of the context if it is scheduled in. The refcount
+ * is not guaranteed to be kept constant.
+ */
+static inline int kbasep_js_debug_check_ctx_refcount(struct kbase_device *kbdev, struct kbase_context *kctx)
+{
+ unsigned long flags;
+ struct kbasep_js_device_data *js_devdata;
+ int result = -1;
+ int as_nr;
+
+ KBASE_DEBUG_ASSERT(kbdev != NULL);
+ KBASE_DEBUG_ASSERT(kctx != NULL);
+ js_devdata = &kbdev->js_data;
+
+ spin_lock_irqsave(&kbdev->hwaccess_lock, flags);
+ as_nr = kctx->as_nr;
+ if (as_nr != KBASEP_AS_NR_INVALID)
+ result = js_devdata->runpool_irq.per_as_data[as_nr].as_busy_refcount;
+
+ spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);
+
+ return result;
+}
+#endif /* KBASE_DEBUG_DISABLE_ASSERTS == 0 */
+
+/**
+ * @brief Variant of kbasep_js_runpool_lookup_ctx() that can be used when the
+ * context is guaranteed to have been previously retained.
+ *
+ * It is a programming error to supply the \a as_nr of a context that has not
+ * been previously retained/has a busy refcount of zero. The only exception is
+ * when there is no ctx in \a as_nr (NULL returned).
+ *
+ * The following locking conditions are made on the caller:
+ * - it must \em not hold the hwaccess_lock, because it will be used internally.
+ *
+ * @return a valid struct kbase_context on success, with a refcount that is guaranteed
+ * to be non-zero and unmodified by this function.
+ * @return NULL on failure, indicating that no context was found in \a as_nr
+ */
+static inline struct kbase_context *kbasep_js_runpool_lookup_ctx_noretain(struct kbase_device *kbdev, int as_nr)
+{
+ unsigned long flags;
+ struct kbasep_js_device_data *js_devdata;
+ struct kbase_context *found_kctx;
+ struct kbasep_js_per_as_data *js_per_as_data;
+
+ KBASE_DEBUG_ASSERT(kbdev != NULL);
+ KBASE_DEBUG_ASSERT(0 <= as_nr && as_nr < BASE_MAX_NR_AS);
+ js_devdata = &kbdev->js_data;
+ js_per_as_data = &js_devdata->runpool_irq.per_as_data[as_nr];
+
+ spin_lock_irqsave(&kbdev->hwaccess_lock, flags);
+
+ found_kctx = js_per_as_data->kctx;
+ KBASE_DEBUG_ASSERT(found_kctx == NULL || js_per_as_data->as_busy_refcount > 0);
+
+ spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);
+
+ return found_kctx;
+}
+
+/**
+ * This will provide a conversion from time (us) to ticks of the gpu clock
+ * based on the minimum available gpu frequency.
+ * This is usually good to compute best/worst case (where the use of current
+ * frequency is not valid due to DVFS).
+ * e.g.: when you need the number of cycles to guarantee you won't wait for
+ * longer than 'us' time (you might have a shorter wait).
+ */
+static inline u32 kbasep_js_convert_us_to_gpu_ticks_min_freq(struct kbase_device *kbdev, u32 us)
+{
+ u32 gpu_freq = kbdev->gpu_props.props.core_props.gpu_freq_khz_min;
+
+ KBASE_DEBUG_ASSERT(0 != gpu_freq);
+ return us * (gpu_freq / 1000);
+}
+
+/**
+ * This will provide a conversion from time (us) to ticks of the gpu clock
+ * based on the maximum available gpu frequency.
+ * This is usually good to compute best/worst case (where the use of current
+ * frequency is not valid due to DVFS).
+ * e.g.: When you need the number of cycles to guarantee you'll wait at least
+ * 'us' amount of time (but you might wait longer).
+ */
+static inline u32 kbasep_js_convert_us_to_gpu_ticks_max_freq(struct kbase_device *kbdev, u32 us)
+{
+ u32 gpu_freq = kbdev->gpu_props.props.core_props.gpu_freq_khz_max;
+
+ KBASE_DEBUG_ASSERT(0 != gpu_freq);
+ return us * (u32) (gpu_freq / 1000);
+}
+
+/**
+ * This will provide a conversion from ticks of the gpu clock to time (us)
+ * based on the minimum available gpu frequency.
+ * This is usually good to compute best/worst case (where the use of current
+ * frequency is not valid due to DVFS).
+ * e.g.: When you need to know the worst-case wait that 'ticks' cycles will
+ * take (you guarantee that you won't wait any longer than this, but it may
+ * be shorter).
+ */
+static inline u32 kbasep_js_convert_gpu_ticks_to_us_min_freq(struct kbase_device *kbdev, u32 ticks)
+{
+ u32 gpu_freq = kbdev->gpu_props.props.core_props.gpu_freq_khz_min;
+
+ KBASE_DEBUG_ASSERT(0 != gpu_freq);
+ return ticks / gpu_freq * 1000;
+}
+
+/**
+ * This will provide a conversion from ticks of the gpu clock to time (us)
+ * based on the maximum available gpu frequency.
+ * This is usually good to compute best/worst case (where the use of current
+ * frequency is not valid due to DVFS).
+ * e.g.: When you need to know the best-case wait for 'tick' cycles (you
+ * guarantee to be waiting for at least this long, but it may be longer).
+ */
+static inline u32 kbasep_js_convert_gpu_ticks_to_us_max_freq(struct kbase_device *kbdev, u32 ticks)
+{
+ u32 gpu_freq = kbdev->gpu_props.props.core_props.gpu_freq_khz_max;
+
+ KBASE_DEBUG_ASSERT(0 != gpu_freq);
+ return ticks / gpu_freq * 1000;
+}
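+
+/*
+ * Illustrative worked example (assumed figures, not from the driver): with
+ * gpu_freq_khz_min = 100000 (i.e. 100 MHz), converting 50000 us gives
+ * 50000 * (100000 / 1000) = 5000000 ticks, and converting 5000000 ticks back
+ * gives (5000000 / 100000) * 1000 = 50000 us. Note that the integer
+ * arithmetic in the ticks-to-us direction truncates to whole milliseconds.
+ */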
+
+/*
+ * The following locking conditions are made on the caller:
+ * - The caller must hold the kbasep_js_kctx_info::ctx::jsctx_mutex.
+ * - The caller must hold the kbasep_js_device_data::runpool_mutex
+ */
+static inline void kbase_js_runpool_inc_context_count(
+ struct kbase_device *kbdev,
+ struct kbase_context *kctx)
+{
+ struct kbasep_js_device_data *js_devdata;
+ struct kbasep_js_kctx_info *js_kctx_info;
+
+ KBASE_DEBUG_ASSERT(kbdev != NULL);
+ KBASE_DEBUG_ASSERT(kctx != NULL);
+
+ js_devdata = &kbdev->js_data;
+ js_kctx_info = &kctx->jctx.sched_info;
+
+ lockdep_assert_held(&js_kctx_info->ctx.jsctx_mutex);
+ lockdep_assert_held(&js_devdata->runpool_mutex);
+
+ /* Track total contexts */
+ KBASE_DEBUG_ASSERT(js_devdata->nr_all_contexts_running < S8_MAX);
+ ++(js_devdata->nr_all_contexts_running);
+
+ if (!kbase_ctx_flag(kctx, KCTX_SUBMIT_DISABLED)) {
+ /* Track contexts that can submit jobs */
+ KBASE_DEBUG_ASSERT(js_devdata->nr_user_contexts_running <
+ S8_MAX);
+ ++(js_devdata->nr_user_contexts_running);
+ }
+}
+
+/*
+ * The following locking conditions are made on the caller:
+ * - The caller must hold the kbasep_js_kctx_info::ctx::jsctx_mutex.
+ * - The caller must hold the kbasep_js_device_data::runpool_mutex
+ */
+static inline void kbase_js_runpool_dec_context_count(
+ struct kbase_device *kbdev,
+ struct kbase_context *kctx)
+{
+ struct kbasep_js_device_data *js_devdata;
+ struct kbasep_js_kctx_info *js_kctx_info;
+
+ KBASE_DEBUG_ASSERT(kbdev != NULL);
+ KBASE_DEBUG_ASSERT(kctx != NULL);
+
+ js_devdata = &kbdev->js_data;
+ js_kctx_info = &kctx->jctx.sched_info;
+
+ lockdep_assert_held(&js_kctx_info->ctx.jsctx_mutex);
+ lockdep_assert_held(&js_devdata->runpool_mutex);
+
+ /* Track total contexts */
+ --(js_devdata->nr_all_contexts_running);
+ KBASE_DEBUG_ASSERT(js_devdata->nr_all_contexts_running >= 0);
+
+ if (!kbase_ctx_flag(kctx, KCTX_SUBMIT_DISABLED)) {
+ /* Track contexts that can submit jobs */
+ --(js_devdata->nr_user_contexts_running);
+ KBASE_DEBUG_ASSERT(js_devdata->nr_user_contexts_running >= 0);
+ }
+}
+
+
+/**
+ * @brief Submit atoms from all available contexts to all job slots.
+ *
+ * This will attempt to submit as many jobs as possible. It will exit when
+ * either all job slots are full, or all contexts have been used.
+ *
+ * @param[in] kbdev Device pointer
+ */
+static inline void kbase_js_sched_all(struct kbase_device *kbdev)
+{
+ kbase_js_sched(kbdev, (1 << kbdev->gpu_props.num_job_slots) - 1);
+}
+
+extern const int
+kbasep_js_atom_priority_to_relative[BASE_JD_NR_PRIO_LEVELS];
+
+extern const base_jd_prio
+kbasep_js_relative_priority_to_atom[KBASE_JS_ATOM_SCHED_PRIO_COUNT];
+
+/**
+ * kbasep_js_atom_prio_to_sched_prio(): - Convert atom priority (base_jd_prio)
+ * to relative ordering
+ * @atom_prio: Priority ID to translate.
+ *
+ * Atom priority values for @ref base_jd_prio cannot be compared directly to
+ * find out which are higher or lower.
+ *
+ * This function will convert base_jd_prio values for successively lower
+ * priorities into a monotonically increasing sequence. That is, the lower the
+ * base_jd_prio priority, the higher the value produced by this function. This
+ * is in accordance with how the rest of the kernel treats priority.
+ *
+ * The mapping is 1:1 and the size of the valid input range is the same as the
+ * size of the valid output range, i.e.
+ * KBASE_JS_ATOM_SCHED_PRIO_COUNT == BASE_JD_NR_PRIO_LEVELS
+ *
+ * Note This must be kept in sync with BASE_JD_PRIO_<...> definitions
+ *
+ * Return: On success: a value in the inclusive range
+ * 0..KBASE_JS_ATOM_SCHED_PRIO_COUNT-1. On failure:
+ * KBASE_JS_ATOM_SCHED_PRIO_INVALID
+ */
+static inline int kbasep_js_atom_prio_to_sched_prio(base_jd_prio atom_prio)
+{
+ if (atom_prio >= BASE_JD_NR_PRIO_LEVELS)
+ return KBASE_JS_ATOM_SCHED_PRIO_INVALID;
+
+ return kbasep_js_atom_priority_to_relative[atom_prio];
+}
+
+static inline base_jd_prio kbasep_js_sched_prio_to_atom_prio(int sched_prio)
+{
+ unsigned int prio_idx;
+
+ KBASE_DEBUG_ASSERT(0 <= sched_prio
+ && sched_prio < KBASE_JS_ATOM_SCHED_PRIO_COUNT);
+
+ prio_idx = (unsigned int)sched_prio;
+
+ return kbasep_js_relative_priority_to_atom[prio_idx];
+}
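+
+/*
+ * Illustrative note (an observation, not from the driver sources): since the
+ * mapping is 1:1 over ranges of equal size, the two conversions above are
+ * expected to be inverses, i.e. for any atom_prio < BASE_JD_NR_PRIO_LEVELS,
+ * kbasep_js_sched_prio_to_atom_prio(kbasep_js_atom_prio_to_sched_prio(atom_prio))
+ * yields atom_prio again.
+ */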
+
+ /** @} *//* end group kbase_js */
+ /** @} *//* end group base_kbase_api */
+ /** @} *//* end group base_api */
+
+#endif /* _KBASE_JS_H_ */
diff --git a/midgard-0.r2p0/mali_kbase_js_ctx_attr.c b/midgard-0.r2p0/mali_kbase_js_ctx_attr.c
new file mode 100755
index 0000000..455b661
--- /dev/null
+++ b/midgard-0.r2p0/mali_kbase_js_ctx_attr.c
@@ -0,0 +1,303 @@
+/*
+ *
+ * (C) COPYRIGHT 2012-2016 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained
+ * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
+ * Boston, MA 02110-1301, USA.
+ *
+ */
+
+
+
+
+#include <mali_kbase.h>
+#include <mali_kbase_config.h>
+
+/*
+ * Private functions follow
+ */
+
+/**
+ * @brief Check whether a ctx has a certain attribute, and if so, retain that
+ * attribute on the runpool.
+ *
+ * Requires:
+ * - jsctx mutex
+ * - runpool_irq spinlock
+ * - ctx is scheduled on the runpool
+ *
+ * @return true indicates a change in ctx attributes state of the runpool.
+ * In this state, the scheduler might be able to submit more jobs than
+ * previously, and so the caller should ensure kbasep_js_try_run_next_job_nolock()
+ * or similar is called sometime later.
+ * @return false indicates no change in ctx attributes state of the runpool.
+ */
+static bool kbasep_js_ctx_attr_runpool_retain_attr(struct kbase_device *kbdev, struct kbase_context *kctx, enum kbasep_js_ctx_attr attribute)
+{
+ struct kbasep_js_device_data *js_devdata;
+ struct kbasep_js_kctx_info *js_kctx_info;
+ bool runpool_state_changed = false;
+
+ KBASE_DEBUG_ASSERT(kbdev != NULL);
+ KBASE_DEBUG_ASSERT(kctx != NULL);
+ KBASE_DEBUG_ASSERT(attribute < KBASEP_JS_CTX_ATTR_COUNT);
+ js_devdata = &kbdev->js_data;
+ js_kctx_info = &kctx->jctx.sched_info;
+
+ lockdep_assert_held(&js_kctx_info->ctx.jsctx_mutex);
+ lockdep_assert_held(&kbdev->hwaccess_lock);
+
+ KBASE_DEBUG_ASSERT(kbase_ctx_flag(kctx, KCTX_SCHEDULED));
+
+ if (kbasep_js_ctx_attr_is_attr_on_ctx(kctx, attribute) != false) {
+ KBASE_DEBUG_ASSERT(js_devdata->runpool_irq.ctx_attr_ref_count[attribute] < S8_MAX);
+ ++(js_devdata->runpool_irq.ctx_attr_ref_count[attribute]);
+
+ if (js_devdata->runpool_irq.ctx_attr_ref_count[attribute] == 1) {
+ /* First refcount indicates a state change */
+ runpool_state_changed = true;
+ KBASE_TRACE_ADD(kbdev, JS_CTX_ATTR_NOW_ON_RUNPOOL, kctx, NULL, 0u, attribute);
+ }
+ }
+
+ return runpool_state_changed;
+}
+
+/**
+ * @brief Check whether a ctx has a certain attribute, and if so, release that
+ * attribute on the runpool.
+ *
+ * Requires:
+ * - jsctx mutex
+ * - runpool_irq spinlock
+ * - ctx is scheduled on the runpool
+ *
+ * @return true indicates a change in ctx attributes state of the runpool.
+ * In this state, the scheduler might be able to submit more jobs than
+ * previously, and so the caller should ensure kbasep_js_try_run_next_job_nolock()
+ * or similar is called sometime later.
+ * @return false indicates no change in ctx attributes state of the runpool.
+ */
+static bool kbasep_js_ctx_attr_runpool_release_attr(struct kbase_device *kbdev, struct kbase_context *kctx, enum kbasep_js_ctx_attr attribute)
+{
+ struct kbasep_js_device_data *js_devdata;
+ struct kbasep_js_kctx_info *js_kctx_info;
+ bool runpool_state_changed = false;
+
+ KBASE_DEBUG_ASSERT(kbdev != NULL);
+ KBASE_DEBUG_ASSERT(kctx != NULL);
+ KBASE_DEBUG_ASSERT(attribute < KBASEP_JS_CTX_ATTR_COUNT);
+ js_devdata = &kbdev->js_data;
+ js_kctx_info = &kctx->jctx.sched_info;
+
+ lockdep_assert_held(&js_kctx_info->ctx.jsctx_mutex);
+ lockdep_assert_held(&kbdev->hwaccess_lock);
+ KBASE_DEBUG_ASSERT(kbase_ctx_flag(kctx, KCTX_SCHEDULED));
+
+ if (kbasep_js_ctx_attr_is_attr_on_ctx(kctx, attribute) != false) {
+ KBASE_DEBUG_ASSERT(js_devdata->runpool_irq.ctx_attr_ref_count[attribute] > 0);
+ --(js_devdata->runpool_irq.ctx_attr_ref_count[attribute]);
+
+ if (js_devdata->runpool_irq.ctx_attr_ref_count[attribute] == 0) {
+ /* Last de-refcount indicates a state change */
+ runpool_state_changed = true;
+ KBASE_TRACE_ADD(kbdev, JS_CTX_ATTR_NOW_OFF_RUNPOOL, kctx, NULL, 0u, attribute);
+ }
+ }
+
+ return runpool_state_changed;
+}
+
+/**
+ * @brief Retain a certain attribute on a ctx, also retaining it on the runpool
+ * if the context is scheduled.
+ *
+ * Requires:
+ * - jsctx mutex
+ * - If the context is scheduled, then runpool_irq spinlock must also be held
+ *
+ * @return true indicates a change in ctx attributes state of the runpool.
+ * This may allow the scheduler to submit more jobs than previously.
+ * @return false indicates no change in ctx attributes state of the runpool.
+ */
+static bool kbasep_js_ctx_attr_ctx_retain_attr(struct kbase_device *kbdev, struct kbase_context *kctx, enum kbasep_js_ctx_attr attribute)
+{
+ struct kbasep_js_kctx_info *js_kctx_info;
+ bool runpool_state_changed = false;
+
+ KBASE_DEBUG_ASSERT(kbdev != NULL);
+ KBASE_DEBUG_ASSERT(kctx != NULL);
+ KBASE_DEBUG_ASSERT(attribute < KBASEP_JS_CTX_ATTR_COUNT);
+ js_kctx_info = &kctx->jctx.sched_info;
+
+ lockdep_assert_held(&kbdev->hwaccess_lock);
+ lockdep_assert_held(&js_kctx_info->ctx.jsctx_mutex);
+ KBASE_DEBUG_ASSERT(js_kctx_info->ctx.ctx_attr_ref_count[attribute] < U32_MAX);
+
+ ++(js_kctx_info->ctx.ctx_attr_ref_count[attribute]);
+
+ if (kbase_ctx_flag(kctx, KCTX_SCHEDULED) && js_kctx_info->ctx.ctx_attr_ref_count[attribute] == 1) {
+ /* Only ref-count the attribute on the runpool for the first time this context sees this attribute */
+ KBASE_TRACE_ADD(kbdev, JS_CTX_ATTR_NOW_ON_CTX, kctx, NULL, 0u, attribute);
+ runpool_state_changed = kbasep_js_ctx_attr_runpool_retain_attr(kbdev, kctx, attribute);
+ }
+
+ return runpool_state_changed;
+}
+
+/*
+ * @brief Release a certain attribute on a ctx, also releasing it from the runpool
+ * if the context is scheduled.
+ *
+ * Requires:
+ * - jsctx mutex
+ * - If the context is scheduled, then runpool_irq spinlock must also be held
+ *
+ * @return true indicates a change in ctx attributes state of the runpool.
+ * This may allow the scheduler to submit more jobs than previously.
+ * @return false indicates no change in ctx attributes state of the runpool.
+ */
+static bool kbasep_js_ctx_attr_ctx_release_attr(struct kbase_device *kbdev, struct kbase_context *kctx, enum kbasep_js_ctx_attr attribute)
+{
+ struct kbasep_js_kctx_info *js_kctx_info;
+ bool runpool_state_changed = false;
+
+ KBASE_DEBUG_ASSERT(kbdev != NULL);
+ KBASE_DEBUG_ASSERT(kctx != NULL);
+ KBASE_DEBUG_ASSERT(attribute < KBASEP_JS_CTX_ATTR_COUNT);
+ js_kctx_info = &kctx->jctx.sched_info;
+
+ lockdep_assert_held(&js_kctx_info->ctx.jsctx_mutex);
+ KBASE_DEBUG_ASSERT(js_kctx_info->ctx.ctx_attr_ref_count[attribute] > 0);
+
+ if (kbase_ctx_flag(kctx, KCTX_SCHEDULED) && js_kctx_info->ctx.ctx_attr_ref_count[attribute] == 1) {
+ lockdep_assert_held(&kbdev->hwaccess_lock);
+ /* Only de-ref-count the attribute on the runpool when this is the last ctx-reference to it */
+ runpool_state_changed = kbasep_js_ctx_attr_runpool_release_attr(kbdev, kctx, attribute);
+ KBASE_TRACE_ADD(kbdev, JS_CTX_ATTR_NOW_OFF_CTX, kctx, NULL, 0u, attribute);
+ }
+
+ /* De-ref must happen afterwards, because kbasep_js_ctx_attr_runpool_release_attr() needs to check it too */
+ --(js_kctx_info->ctx.ctx_attr_ref_count[attribute]);
+
+ return runpool_state_changed;
+}
+
+/*
+ * More commonly used public functions
+ */
+
+void kbasep_js_ctx_attr_set_initial_attrs(struct kbase_device *kbdev, struct kbase_context *kctx)
+{
+ struct kbasep_js_kctx_info *js_kctx_info;
+ bool runpool_state_changed = false;
+
+ KBASE_DEBUG_ASSERT(kbdev != NULL);
+ KBASE_DEBUG_ASSERT(kctx != NULL);
+ js_kctx_info = &kctx->jctx.sched_info;
+
+ if (kbase_ctx_flag(kctx, KCTX_SUBMIT_DISABLED)) {
+ /* This context never submits, so don't track any scheduling attributes */
+ return;
+ }
+
+ /* Transfer attributes held in the context flags for contexts that have submit enabled */
+
+ /* ... More attributes can be added here ... */
+
+ /* The context should not have been scheduled yet, so ASSERT if this caused
+ * runpool state changes (note that other threads *can't* affect the value
+ * of runpool_state_changed, due to how it's calculated) */
+ KBASE_DEBUG_ASSERT(runpool_state_changed == false);
+ CSTD_UNUSED(runpool_state_changed);
+}
+
+void kbasep_js_ctx_attr_runpool_retain_ctx(struct kbase_device *kbdev, struct kbase_context *kctx)
+{
+ bool runpool_state_changed;
+ int i;
+
+ /* Retain any existing attributes */
+ for (i = 0; i < KBASEP_JS_CTX_ATTR_COUNT; ++i) {
+ if (kbasep_js_ctx_attr_is_attr_on_ctx(kctx, (enum kbasep_js_ctx_attr) i) != false) {
+ /* The context is being scheduled in, so update the runpool with the new attributes */
+ runpool_state_changed = kbasep_js_ctx_attr_runpool_retain_attr(kbdev, kctx, (enum kbasep_js_ctx_attr) i);
+
+ /* We don't need to know about state changed, because retaining a
+ * context occurs on scheduling it, and that itself will also try
+ * to run new atoms */
+ CSTD_UNUSED(runpool_state_changed);
+ }
+ }
+}
+
+bool kbasep_js_ctx_attr_runpool_release_ctx(struct kbase_device *kbdev, struct kbase_context *kctx)
+{
+ bool runpool_state_changed = false;
+ int i;
+
+ /* Release any existing attributes */
+ for (i = 0; i < KBASEP_JS_CTX_ATTR_COUNT; ++i) {
+ if (kbasep_js_ctx_attr_is_attr_on_ctx(kctx, (enum kbasep_js_ctx_attr) i) != false) {
+ /* The context is being scheduled out, so update the runpool on the removed attributes */
+ runpool_state_changed |= kbasep_js_ctx_attr_runpool_release_attr(kbdev, kctx, (enum kbasep_js_ctx_attr) i);
+ }
+ }
+
+ return runpool_state_changed;
+}
+
+void kbasep_js_ctx_attr_ctx_retain_atom(struct kbase_device *kbdev, struct kbase_context *kctx, struct kbase_jd_atom *katom)
+{
+ bool runpool_state_changed = false;
+ base_jd_core_req core_req;
+
+ KBASE_DEBUG_ASSERT(katom);
+ core_req = katom->core_req;
+
+ if (core_req & BASE_JD_REQ_ONLY_COMPUTE)
+ runpool_state_changed |= kbasep_js_ctx_attr_ctx_retain_attr(kbdev, kctx, KBASEP_JS_CTX_ATTR_COMPUTE);
+ else
+ runpool_state_changed |= kbasep_js_ctx_attr_ctx_retain_attr(kbdev, kctx, KBASEP_JS_CTX_ATTR_NON_COMPUTE);
+
+ if ((core_req & (BASE_JD_REQ_CS | BASE_JD_REQ_ONLY_COMPUTE | BASE_JD_REQ_T)) != 0 && (core_req & (BASE_JD_REQ_COHERENT_GROUP | BASE_JD_REQ_SPECIFIC_COHERENT_GROUP)) == 0) {
+ /* Atom that can run on slot1 or slot2, and can use all cores */
+ runpool_state_changed |= kbasep_js_ctx_attr_ctx_retain_attr(kbdev, kctx, KBASEP_JS_CTX_ATTR_COMPUTE_ALL_CORES);
+ }
+
+ /* We don't need to know about state changed, because retaining an
+ * atom occurs on adding it, and that itself will also try to run
+ * new atoms */
+ CSTD_UNUSED(runpool_state_changed);
+}
+
+bool kbasep_js_ctx_attr_ctx_release_atom(struct kbase_device *kbdev, struct kbase_context *kctx, struct kbasep_js_atom_retained_state *katom_retained_state)
+{
+ bool runpool_state_changed = false;
+ base_jd_core_req core_req;
+
+ KBASE_DEBUG_ASSERT(katom_retained_state);
+ core_req = katom_retained_state->core_req;
+
+ /* No-op for invalid atoms */
+ if (kbasep_js_atom_retained_state_is_valid(katom_retained_state) == false)
+ return false;
+
+ if (core_req & BASE_JD_REQ_ONLY_COMPUTE)
+ runpool_state_changed |= kbasep_js_ctx_attr_ctx_release_attr(kbdev, kctx, KBASEP_JS_CTX_ATTR_COMPUTE);
+ else
+ runpool_state_changed |= kbasep_js_ctx_attr_ctx_release_attr(kbdev, kctx, KBASEP_JS_CTX_ATTR_NON_COMPUTE);
+
+ if ((core_req & (BASE_JD_REQ_CS | BASE_JD_REQ_ONLY_COMPUTE | BASE_JD_REQ_T)) != 0 && (core_req & (BASE_JD_REQ_COHERENT_GROUP | BASE_JD_REQ_SPECIFIC_COHERENT_GROUP)) == 0) {
+ /* Atom that can run on slot1 or slot2, and can use all cores */
+ runpool_state_changed |= kbasep_js_ctx_attr_ctx_release_attr(kbdev, kctx, KBASEP_JS_CTX_ATTR_COMPUTE_ALL_CORES);
+ }
+
+ return runpool_state_changed;
+}
diff --git a/midgard-0.r2p0/mali_kbase_js_ctx_attr.h b/midgard-0.r2p0/mali_kbase_js_ctx_attr.h
new file mode 100755
index 0000000..ce91833
--- /dev/null
+++ b/midgard-0.r2p0/mali_kbase_js_ctx_attr.h
@@ -0,0 +1,158 @@
+/*
+ *
+ * (C) COPYRIGHT 2012-2015 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained
+ * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
+ * Boston, MA 02110-1301, USA.
+ *
+ */
+
+
+
+
+
+/**
+ * @file mali_kbase_js_ctx_attr.h
+ * Job Scheduler Context Attribute APIs
+ */
+
+#ifndef _KBASE_JS_CTX_ATTR_H_
+#define _KBASE_JS_CTX_ATTR_H_
+
+/**
+ * @addtogroup base_api
+ * @{
+ */
+
+/**
+ * @addtogroup base_kbase_api
+ * @{
+ */
+
+/**
+ * @addtogroup kbase_js
+ * @{
+ */
+
+/**
+ * Set the initial attributes of a context (when context create flags are set)
+ *
+ * Requires:
+ * - Hold the jsctx_mutex
+ */
+void kbasep_js_ctx_attr_set_initial_attrs(struct kbase_device *kbdev, struct kbase_context *kctx);
+
+/**
+ * Retain all attributes of a context
+ *
+ * This occurs on scheduling in the context on the runpool (but after
+ * is_scheduled is set)
+ *
+ * Requires:
+ * - jsctx mutex
+ * - runpool_irq spinlock
+ * - ctx->is_scheduled is true
+ */
+void kbasep_js_ctx_attr_runpool_retain_ctx(struct kbase_device *kbdev, struct kbase_context *kctx);
+
+/**
+ * Release all attributes of a context
+ *
+ * This occurs on scheduling out the context from the runpool (but before
+ * is_scheduled is cleared)
+ *
+ * Requires:
+ * - jsctx mutex
+ * - runpool_irq spinlock
+ * - ctx->is_scheduled is true
+ *
+ * @return true indicates a change in ctx attributes state of the runpool.
+ * In this state, the scheduler might be able to submit more jobs than
+ * previously, and so the caller should ensure kbasep_js_try_run_next_job_nolock()
+ * or similar is called sometime later.
+ * @return false indicates no change in ctx attributes state of the runpool.
+ */
+bool kbasep_js_ctx_attr_runpool_release_ctx(struct kbase_device *kbdev, struct kbase_context *kctx);
+
+/**
+ * Retain all attributes of an atom
+ *
+ * This occurs on adding an atom to a context
+ *
+ * Requires:
+ * - jsctx mutex
+ * - If the context is scheduled, then runpool_irq spinlock must also be held
+ */
+void kbasep_js_ctx_attr_ctx_retain_atom(struct kbase_device *kbdev, struct kbase_context *kctx, struct kbase_jd_atom *katom);
+
+/**
+ * Release all attributes of an atom, given its retained state.
+ *
+ * This occurs after (permanently) removing an atom from a context
+ *
+ * Requires:
+ * - jsctx mutex
+ * - If the context is scheduled, then runpool_irq spinlock must also be held
+ *
+ * This is a no-op when \a katom_retained_state is invalid.
+ *
+ * @return true indicates a change in ctx attributes state of the runpool.
+ * In this state, the scheduler might be able to submit more jobs than
+ * previously, and so the caller should ensure kbasep_js_try_run_next_job_nolock()
+ * or similar is called sometime later.
+ * @return false indicates no change in ctx attributes state of the runpool.
+ */
+bool kbasep_js_ctx_attr_ctx_release_atom(struct kbase_device *kbdev, struct kbase_context *kctx, struct kbasep_js_atom_retained_state *katom_retained_state);
+
+/**
+ * Requires:
+ * - runpool_irq spinlock
+ */
+static inline s8 kbasep_js_ctx_attr_count_on_runpool(struct kbase_device *kbdev, enum kbasep_js_ctx_attr attribute)
+{
+ struct kbasep_js_device_data *js_devdata;
+
+ KBASE_DEBUG_ASSERT(kbdev != NULL);
+ KBASE_DEBUG_ASSERT(attribute < KBASEP_JS_CTX_ATTR_COUNT);
+ js_devdata = &kbdev->js_data;
+
+ return js_devdata->runpool_irq.ctx_attr_ref_count[attribute];
+}
+
+/**
+ * Requires:
+ * - runpool_irq spinlock
+ */
+static inline bool kbasep_js_ctx_attr_is_attr_on_runpool(struct kbase_device *kbdev, enum kbasep_js_ctx_attr attribute)
+{
+ /* In general, attributes are 'on' when they have a non-zero refcount (note: the refcount will never be < 0) */
+ return (bool) kbasep_js_ctx_attr_count_on_runpool(kbdev, attribute);
+}
+
+/**
+ * Requires:
+ * - jsctx mutex
+ */
+static inline bool kbasep_js_ctx_attr_is_attr_on_ctx(struct kbase_context *kctx, enum kbasep_js_ctx_attr attribute)
+{
+ struct kbasep_js_kctx_info *js_kctx_info;
+
+ KBASE_DEBUG_ASSERT(kctx != NULL);
+ KBASE_DEBUG_ASSERT(attribute < KBASEP_JS_CTX_ATTR_COUNT);
+ js_kctx_info = &kctx->jctx.sched_info;
+
+ /* In general, attributes are 'on' when they have a refcount (which should never be < 0) */
+ return (bool) (js_kctx_info->ctx.ctx_attr_ref_count[attribute]);
+}
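+
+/*
+ * A minimal, non-authoritative sketch of how a caller might query a runpool
+ * attribute while satisfying the locking rule documented above. It assumes
+ * the runpool_irq spinlock is the hwaccess_lock member referred to in
+ * mali_kbase_js_defs.h; adjust the lock name to whatever the surrounding
+ * code actually uses.
+ *
+ * @code
+ * static bool example_runpool_has_compute(struct kbase_device *kbdev)
+ * {
+ *	unsigned long flags;
+ *	bool on;
+ *
+ *	spin_lock_irqsave(&kbdev->hwaccess_lock, flags);
+ *	on = kbasep_js_ctx_attr_is_attr_on_runpool(kbdev,
+ *			KBASEP_JS_CTX_ATTR_COMPUTE);
+ *	spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);
+ *
+ *	return on;
+ * }
+ * @endcode
+ */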
+
+ /** @} *//* end group kbase_js */
+ /** @} *//* end group base_kbase_api */
+ /** @} *//* end group base_api */
+
+#endif /* _KBASE_JS_CTX_ATTR_H_ */
diff --git a/midgard-0.r2p0/mali_kbase_js_defs.h b/midgard-0.r2p0/mali_kbase_js_defs.h
new file mode 100755
index 0000000..e6a9d41
--- /dev/null
+++ b/midgard-0.r2p0/mali_kbase_js_defs.h
@@ -0,0 +1,466 @@
+/*
+ *
+ * (C) COPYRIGHT 2011-2016 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained
+ * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
+ * Boston, MA 02110-1301, USA.
+ *
+ */
+
+
+
+
+
+/**
+ * @file mali_kbase_js_defs.h
+ * Job Scheduler Type Definitions
+ */
+
+#ifndef _KBASE_JS_DEFS_H_
+#define _KBASE_JS_DEFS_H_
+
+/**
+ * @addtogroup base_api
+ * @{
+ */
+
+/**
+ * @addtogroup base_kbase_api
+ * @{
+ */
+
+/**
+ * @addtogroup kbase_js
+ * @{
+ */
+/* Forward decls */
+struct kbase_device;
+struct kbase_jd_atom;
+
+
+typedef u32 kbase_context_flags;
+
+struct kbasep_atom_req {
+ base_jd_core_req core_req;
+ kbase_context_flags ctx_req;
+ u32 device_nr;
+};
+
+#include "mali_kbase_js_policy_cfs.h"
+
+/* Wrapper Interface - doxygen is elsewhere */
+union kbasep_js_policy {
+ struct kbasep_js_policy_cfs cfs;
+};
+
+/* Wrapper Interface - doxygen is elsewhere */
+union kbasep_js_policy_ctx_info {
+ struct kbasep_js_policy_cfs_ctx cfs;
+};
+
+/* Wrapper Interface - doxygen is elsewhere */
+union kbasep_js_policy_job_info {
+ struct kbasep_js_policy_cfs_job cfs;
+};
+
+
+/** Callback function run on all of a context's jobs registered with the Job
+ * Scheduler */
+typedef void (*kbasep_js_policy_ctx_job_cb)(struct kbase_device *kbdev, struct kbase_jd_atom *katom);
+
+/**
+ * @brief Maximum number of jobs that can be submitted to a job slot whilst
+ * inside the IRQ handler.
+ *
+ * This is important because GPU NULL jobs can complete whilst the IRQ handler
+ * is running. Without a limit, this potentially allows an unlimited number of GPU NULL
+ * jobs to be submitted inside the IRQ handler, which increases IRQ latency.
+ */
+#define KBASE_JS_MAX_JOB_SUBMIT_PER_SLOT_PER_IRQ 2
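+
+/*
+ * A rough sketch of the bounded submission loop that the limit above implies.
+ * The helpers example_slot_has_space() and example_submit_to_slot() are
+ * hypothetical stand-ins for the real job-slot backend calls; only the
+ * bounding by KBASE_JS_MAX_JOB_SUBMIT_PER_SLOT_PER_IRQ is the point here.
+ *
+ * @code
+ * static void example_submit_after_irq(struct kbase_device *kbdev, int js)
+ * {
+ *	struct kbase_jd_atom *katom;
+ *	int submitted = 0;
+ *
+ *	while (submitted < KBASE_JS_MAX_JOB_SUBMIT_PER_SLOT_PER_IRQ &&
+ *	       example_slot_has_space(kbdev, js) &&
+ *	       kbasep_js_policy_dequeue_job(kbdev, js, &katom)) {
+ *		example_submit_to_slot(kbdev, js, katom);
+ *		submitted++;
+ *	}
+ * }
+ * @endcode
+ */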
+
+/**
+ * @brief the IRQ_THROTTLE time in microseconds
+ *
+ * This will be converted via the GPU's clock frequency into a cycle-count.
+ *
+ * @note we can make an estimate of the GPU's frequency by periodically
+ * sampling its CYCLE_COUNT register
+ */
+#define KBASE_JS_IRQ_THROTTLE_TIME_US 20
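+
+/*
+ * A minimal sketch of the microsecond-to-cycle conversion described above,
+ * assuming the GPU clock frequency is available in kHz (gpu_freq_khz is an
+ * assumed parameter, not a field defined in this file):
+ *
+ * @code
+ * static u32 example_irq_throttle_cycles(u32 gpu_freq_khz)
+ * {
+ *	return (u32)(((u64)KBASE_JS_IRQ_THROTTLE_TIME_US * gpu_freq_khz) / 1000);
+ * }
+ * @endcode
+ *
+ * For example, at 500 MHz (500000 kHz) the 20 us throttle time corresponds to
+ * 10000 cycles.
+ */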
+
+/**
+ * @brief Context attributes
+ *
+ * Each context attribute can be thought of as a boolean value that caches some
+ * state information about either the runpool, or the context:
+ * - In the case of the runpool, it is a cache of "Do any contexts owned by
+ * the runpool have attribute X?"
+ * - In the case of a context, it is a cache of "Do any atoms owned by the
+ * context have attribute X?"
+ *
+ * The boolean value of the context attributes often affect scheduling
+ * decisions, such as affinities to use and job slots to use.
+ *
+ * To accommodate changes of state in the context, each attribute is refcounted
+ * in the context, and in the runpool for all running contexts. Specifically:
+ * - The runpool holds a refcount of how many contexts in the runpool have this
+ * attribute.
+ * - The context holds a refcount of how many atoms have this attribute.
+ */
+enum kbasep_js_ctx_attr {
+ /** Attribute indicating a context that contains Compute jobs. That is,
+ * the context has jobs of type @ref BASE_JD_REQ_ONLY_COMPUTE
+ *
+ * @note A context can be both 'Compute' and 'Non Compute' if it contains
+ * both types of jobs.
+ */
+ KBASEP_JS_CTX_ATTR_COMPUTE,
+
+ /** Attribute indicating a context that contains Non-Compute jobs. That is,
+ * the context has some jobs that are \b not of type @ref
+ * BASE_JD_REQ_ONLY_COMPUTE. The context usually has
+ * BASE_CONTEXT_HINT_COMPUTE \b clear, but this depends on the HW
+ * workarounds in use in the Job Scheduling Policy.
+ *
+ * @note A context can be both 'Compute' and 'Non Compute' if it contains
+ * both types of jobs.
+ */
+ KBASEP_JS_CTX_ATTR_NON_COMPUTE,
+
+ /** Attribute indicating that a context contains compute-job atoms that
+ * aren't restricted to a coherent group, and can run on all cores.
+ *
+ * Specifically, this is when the atom's \a core_req satisfy:
+ * - (\a core_req & (BASE_JD_REQ_CS | BASE_JD_REQ_ONLY_COMPUTE | BASE_JD_REQ_T)) // uses slot 1 or slot 2
+ * - && !(\a core_req & (BASE_JD_REQ_COHERENT_GROUP | BASE_JD_REQ_SPECIFIC_COHERENT_GROUP)) // not restricted to coherent groups
+ *
+ * Such atoms could be blocked from running if one of the coherent groups
+ * is being used by another job slot, so tracking this context attribute
+ * allows us to prevent such situations.
+ *
+ * @note This doesn't take into account the 1-coregroup case, where all
+ * compute atoms would effectively be able to run on 'all cores', but
+ * contexts will still not always get marked with this attribute. Instead,
+ * it is the caller's responsibility to take into account the number of
+ * coregroups when interpreting this attribute.
+ *
+ * @note Whilst Tiler atoms are normally combined with
+ * BASE_JD_REQ_COHERENT_GROUP, it is possible to send such atoms without
+ * BASE_JD_REQ_COHERENT_GROUP set. This is an unlikely case, but it's easy
+ * enough to handle anyway.
+ */
+ KBASEP_JS_CTX_ATTR_COMPUTE_ALL_CORES,
+
+ /** Must be the last in the enum */
+ KBASEP_JS_CTX_ATTR_COUNT
+};
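+
+/*
+ * Worked example of the refcounting described above: a scheduled context
+ * holding three BASE_JD_REQ_ONLY_COMPUTE atoms has a per-context refcount of
+ * 3 for KBASEP_JS_CTX_ATTR_COMPUTE, while the runpool's refcount for that
+ * attribute counts this context once (it is incremented by one per scheduled
+ * context holding the attribute, however many such atoms that context has).
+ */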
+
+enum {
+ /** Bit indicating that new atom should be started because this atom completed */
+ KBASE_JS_ATOM_DONE_START_NEW_ATOMS = (1u << 0),
+ /** Bit indicating that the atom was evicted from the JS_NEXT registers */
+ KBASE_JS_ATOM_DONE_EVICTED_FROM_NEXT = (1u << 1)
+};
+
+/** Combination of KBASE_JS_ATOM_DONE_<...> bits */
+typedef u32 kbasep_js_atom_done_code;
+
+/**
+ * Data used by the scheduler that is unique for each Address Space.
+ *
+ * This is used in IRQ context and hwaccess_lock must be held whilst accessing
+ * this data (including reads and atomic decisions based on the read).
+ */
+struct kbasep_js_per_as_data {
+ /**
+ * Ref count of whether this AS is busy, and must not be scheduled out
+ *
+ * When jobs are running this is always positive. However, it can still be
+ * positive when no jobs are running. If all you need is a heuristic to
+ * tell you whether jobs might be running, this should be sufficient.
+ */
+ int as_busy_refcount;
+
+ /** Pointer to the current context on this address space, or NULL for no context */
+ struct kbase_context *kctx;
+};
+
+/**
+ * @brief KBase Device Data Job Scheduler sub-structure
+ *
+ * This encapsulates the current context of the Job Scheduler on a particular
+ * device. This context is global to the device, and is not tied to any
+ * particular struct kbase_context running on the device.
+ *
+ * nr_contexts_running and as_free are optimized for packing together (by making
+ * them smaller types than u32). The operations on them should rarely involve
+ * masking. The use of signed types for arithmetic indicates to the compiler that
+ * the value will not rollover (which would be undefined behavior), and so under
+ * the 'as if' rule, the compiler is free to make optimizations based on that
+ * (i.e. to remove masking).
+ */
+struct kbasep_js_device_data {
+ /* Sub-structure to collect together Job Scheduling data used in IRQ
+ * context. The hwaccess_lock must be held when accessing. */
+ struct runpool_irq {
+ /** Bitvector indicating whether a currently scheduled context is allowed to submit jobs.
+ * When bit 'N' is set in this, it indicates whether the context bound to address space
+ * 'N' (per_as_data[N].kctx) is allowed to submit jobs.
+ *
+ * It is placed here because it's much more memory efficient than having a u8 in
+ * struct kbasep_js_per_as_data to store this flag */
+ u16 submit_allowed;
+
+ /** Context Attributes:
+ * Each is large enough to hold a refcount of the number of contexts
+ * that can fit into the runpool. This is currently BASE_MAX_NR_AS
+ *
+ * Note that when BASE_MAX_NR_AS==16 we need 5 bits (not 4) to store
+ * the refcount. Hence, it's not worthwhile reducing this to
+ * bit-manipulation on u32s to save space (where in contrast, 4 bit
+ * sub-fields would be easy to do and would save space).
+ *
+ * Whilst this must not become negative, the sign bit is used for:
+ * - error detection in debug builds
+ * - Optimization: it is undefined for a signed int to overflow, and so
+ * the compiler can optimize for that never happening (thus, no masking
+ * is required on updating the variable) */
+ s8 ctx_attr_ref_count[KBASEP_JS_CTX_ATTR_COUNT];
+
+ /** Data that is unique for each AS */
+ struct kbasep_js_per_as_data per_as_data[BASE_MAX_NR_AS];
+
+ /*
+ * Affinity management and tracking
+ */
+ /** Bitvector to aid affinity checking. Element 'n' bit 'i' indicates
+ * that slot 'n' is using core i (i.e. slot_affinity_refcount[n][i] > 0) */
+ u64 slot_affinities[BASE_JM_MAX_NR_SLOTS];
+ /** Refcount for each core owned by each slot. Used to generate the
+ * slot_affinities array of bitvectors
+ *
+ * The value of the refcount will not exceed BASE_JM_SUBMIT_SLOTS,
+ * because it is refcounted only when a job is definitely about to be
+ * submitted to a slot, and is de-refcounted immediately after a job
+ * finishes */
+ s8 slot_affinity_refcount[BASE_JM_MAX_NR_SLOTS][64];
+ } runpool_irq;
+
+ /**
+ * Run Pool mutex, for managing contexts within the runpool.
+ * Unless otherwise specified, you must hold this lock whilst accessing any
+ * members that follow
+ *
+ * In addition, this is used to access:
+ * - the kbasep_js_kctx_info::runpool substructure
+ */
+ struct mutex runpool_mutex;
+
+ /**
+ * Queue Lock, used to access the Policy's queue of contexts independently
+ * of the Run Pool.
+ *
+ * Of course, you don't need the Run Pool lock to access this.
+ */
+ struct mutex queue_mutex;
+
+ /**
+ * Scheduling semaphore. This must be held when calling
+ * kbase_jm_kick()
+ */
+ struct semaphore schedule_sem;
+
+ /**
+ * List of contexts that can currently be pulled from
+ */
+ struct list_head ctx_list_pullable[BASE_JM_MAX_NR_SLOTS];
+ /**
+ * List of contexts that can not currently be pulled from, but have
+ * jobs currently running.
+ */
+ struct list_head ctx_list_unpullable[BASE_JM_MAX_NR_SLOTS];
+
+ u16 as_free; /**< Bitpattern of free Address Spaces */
+
+ /** Number of currently scheduled user contexts (excluding ones that are not submitting jobs) */
+ s8 nr_user_contexts_running;
+ /** Number of currently scheduled contexts (including ones that are not submitting jobs) */
+ s8 nr_all_contexts_running;
+
+ /**
+ * Policy-specific information.
+ *
+ * Refer to the structure defined by the current policy to determine which
+ * locks must be held when accessing this.
+ */
+ union kbasep_js_policy policy;
+
+	/** Core Requirements to match up with base_js_atom's core_req member
+ * @note This is a write-once member, and so no locking is required to read */
+ base_jd_core_req js_reqs[BASE_JM_MAX_NR_SLOTS];
+
+	u32 scheduling_period_ns; /**< Value for JS_SCHEDULING_PERIOD_NS */
+	u32 soft_stop_ticks; /**< Value for JS_SOFT_STOP_TICKS */
+	u32 soft_stop_ticks_cl; /**< Value for JS_SOFT_STOP_TICKS_CL */
+	u32 hard_stop_ticks_ss; /**< Value for JS_HARD_STOP_TICKS_SS */
+	u32 hard_stop_ticks_cl; /**< Value for JS_HARD_STOP_TICKS_CL */
+	u32 hard_stop_ticks_dumping; /**< Value for JS_HARD_STOP_TICKS_DUMPING */
+	u32 gpu_reset_ticks_ss; /**< Value for JS_RESET_TICKS_SS */
+	u32 gpu_reset_ticks_cl; /**< Value for JS_RESET_TICKS_CL */
+	u32 gpu_reset_ticks_dumping; /**< Value for JS_RESET_TICKS_DUMPING */
+ u32 ctx_timeslice_ns; /**< Value for JS_CTX_TIMESLICE_NS */
+ u32 cfs_ctx_runtime_init_slices; /**< Value for DEFAULT_JS_CFS_CTX_RUNTIME_INIT_SLICES */
+ u32 cfs_ctx_runtime_min_slices; /**< Value for DEFAULT_JS_CFS_CTX_RUNTIME_MIN_SLICES */
+
+	/** Value for JS_SOFT_JOB_TIMEOUT */
+ atomic_t soft_job_timeout_ms;
+
+ /** List of suspended soft jobs */
+ struct list_head suspended_soft_jobs_list;
+
+#ifdef CONFIG_MALI_DEBUG
+ /* Support soft-stop on a single context */
+ bool softstop_always;
+#endif /* CONFIG_MALI_DEBUG */
+
+	/** The initialized-flag is placed at the end, to avoid cache-pollution (we should
+ * only be using this during init/term paths).
+ * @note This is a write-once member, and so no locking is required to read */
+ int init_status;
+
+ /* Number of contexts that can currently be pulled from */
+ u32 nr_contexts_pullable;
+
+ /* Number of contexts that can either be pulled from or are currently
+ * running */
+ atomic_t nr_contexts_runnable;
+};
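+
+/*
+ * Illustrative sketch of reading the submit_allowed bitvector documented in
+ * struct kbasep_js_device_data above: bit N corresponds to the context bound
+ * to address space N (per_as_data[N].kctx). As for all runpool_irq members,
+ * the hwaccess_lock must be held. This helper is a sketch, not a function
+ * provided by the driver.
+ *
+ * @code
+ * static bool example_ctx_may_submit(struct kbasep_js_device_data *js_devdata,
+ *				       int as_nr)
+ * {
+ *	return (js_devdata->runpool_irq.submit_allowed & (1u << as_nr)) != 0;
+ * }
+ * @endcode
+ */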
+
+/**
+ * @brief KBase Context Job Scheduling information structure
+ *
+ * This is a substructure in the struct kbase_context that encapsulates all the
+ * scheduling information.
+ */
+struct kbasep_js_kctx_info {
+ /**
+ * Runpool substructure. This must only be accessed whilst the Run Pool
+ * mutex ( kbasep_js_device_data::runpool_mutex ) is held.
+ *
+ * In addition, the hwaccess_lock may need to be held for certain
+ * sub-members.
+ *
+ * @note some of the members could be moved into struct kbasep_js_device_data for
+ * improved d-cache/tlb efficiency.
+ */
+ struct {
+ union kbasep_js_policy_ctx_info policy_ctx; /**< Policy-specific context */
+ } runpool;
+
+ /**
+ * Job Scheduler Context information sub-structure. These members are
+ * accessed regardless of whether the context is:
+ * - In the Policy's Run Pool
+ * - In the Policy's Queue
+ * - Not queued nor in the Run Pool.
+ *
+ * You must obtain the jsctx_mutex before accessing any other members of
+ * this substructure.
+ *
+ * You may not access any of these members from IRQ context.
+ */
+ struct kbase_jsctx {
+ struct mutex jsctx_mutex; /**< Job Scheduler Context lock */
+
+ /** Number of jobs <b>ready to run</b> - does \em not include the jobs waiting in
+ * the dispatcher, and dependency-only jobs. See kbase_jd_context::job_nr
+		 * for such jobs */
+ u32 nr_jobs;
+
+ /** Context Attributes:
+ * Each is large enough to hold a refcount of the number of atoms on
+		 * the context. */
+ u32 ctx_attr_ref_count[KBASEP_JS_CTX_ATTR_COUNT];
+
+ /**
+		 * Wait queue to wait for KCTX_SCHEDULED flag state changes.
+		 */
+ wait_queue_head_t is_scheduled_wait;
+
+ /** Link implementing JS queues. Context can be present on one
+ * list per job slot
+ */
+ struct list_head ctx_list_entry[BASE_JM_MAX_NR_SLOTS];
+ } ctx;
+
+	/* The initialized-flag is placed at the end, to avoid cache-pollution (we should
+ * only be using this during init/term paths) */
+ int init_status;
+};
+
+/** Subset of atom state that can be available after jd_done_nolock() is called
+ * on that atom. A copy must be taken via kbasep_js_atom_retained_state_copy(),
+ * because the original atom could disappear. */
+struct kbasep_js_atom_retained_state {
+ /** Event code - to determine whether the atom has finished */
+ enum base_jd_event_code event_code;
+ /** core requirements */
+ base_jd_core_req core_req;
+ /* priority */
+ int sched_priority;
+ /** Job Slot to retry submitting to if submission from IRQ handler failed */
+ int retry_submit_on_slot;
+ /* Core group atom was executed on */
+ u32 device_nr;
+
+};
+
+/**
+ * Value signifying 'no retry on a slot required' for:
+ * - kbase_js_atom_retained_state::retry_submit_on_slot
+ * - kbase_jd_atom::retry_submit_on_slot
+ */
+#define KBASEP_JS_RETRY_SUBMIT_SLOT_INVALID (-1)
+
+/**
+ * base_jd_core_req value signifying 'invalid' for a kbase_jd_atom_retained_state.
+ *
+ * @see kbase_atom_retained_state_is_valid()
+ */
+#define KBASEP_JS_ATOM_RETAINED_STATE_CORE_REQ_INVALID BASE_JD_REQ_DEP
+
+/**
+ * @brief The JS timer resolution, in microseconds
+ *
+ * Any non-zero difference in time will be at least this size.
+ */
+#define KBASEP_JS_TICK_RESOLUTION_US 1
+
+/*
+ * Internal atom priority defines for kbase_jd_atom::sched_prio
+ */
+enum {
+ KBASE_JS_ATOM_SCHED_PRIO_HIGH = 0,
+ KBASE_JS_ATOM_SCHED_PRIO_MED,
+ KBASE_JS_ATOM_SCHED_PRIO_LOW,
+ KBASE_JS_ATOM_SCHED_PRIO_COUNT,
+};
+
+/* Invalid priority for kbase_jd_atom::sched_prio */
+#define KBASE_JS_ATOM_SCHED_PRIO_INVALID -1
+
+/* Default priority in the case of contexts with no atoms, or being lenient
+ * about invalid priorities from userspace */
+#define KBASE_JS_ATOM_SCHED_PRIO_DEFAULT KBASE_JS_ATOM_SCHED_PRIO_MED
+
+ /** @} *//* end group kbase_js */
+ /** @} *//* end group base_kbase_api */
+ /** @} *//* end group base_api */
+
+#endif /* _KBASE_JS_DEFS_H_ */
diff --git a/midgard-0.r2p0/mali_kbase_js_policy.h b/midgard-0.r2p0/mali_kbase_js_policy.h
new file mode 100755
index 0000000..d1f3a0a
--- /dev/null
+++ b/midgard-0.r2p0/mali_kbase_js_policy.h
@@ -0,0 +1,763 @@
+/*
+ *
+ * (C) COPYRIGHT 2011-2016 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained
+ * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
+ * Boston, MA 02110-1301, USA.
+ *
+ */
+
+
+
+
+
+/**
+ * @file mali_kbase_js_policy.h
+ * Job Scheduler Policy APIs.
+ */
+
+#ifndef _KBASE_JS_POLICY_H_
+#define _KBASE_JS_POLICY_H_
+
+/**
+ * @page page_kbase_js_policy Job Scheduling Policies
+ * The Job Scheduling system is described in the following:
+ * - @subpage page_kbase_js_policy_overview
+ * - @subpage page_kbase_js_policy_operation
+ *
+ * The API details are as follows:
+ * - @ref kbase_jm
+ * - @ref kbase_js
+ * - @ref kbase_js_policy
+ */
+
+/**
+ * @page page_kbase_js_policy_overview Overview of the Policy System
+ *
+ * The Job Scheduler Policy manages:
+ * - The assigning of KBase Contexts to GPU Address Spaces (\em ASs)
+ * - The choosing of Job Chains (\em Jobs) from a KBase context, to run on the
+ * GPU's Job Slots (\em JSs).
+ * - The amount of \em time a context is assigned to (<em>scheduled on</em>) an
+ * Address Space
+ * - The amount of \em time a Job spends running on the GPU
+ *
+ * The Policy implements this management via 2 components:
+ * - A Policy Queue, which manages a set of contexts that are ready to run,
+ * but not currently running.
+ * - A Policy Run Pool, which manages the currently running contexts (one per Address
+ * Space) and the jobs to run on the Job Slots.
+ *
+ * Each Graphics Process in the system has at least one KBase Context. Therefore,
+ * the Policy Queue can be seen as a queue of Processes waiting to run Jobs on
+ * the GPU.
+ *
+ * <!-- The following needs to be all on one line, due to doxygen's parser -->
+ * @dotfile policy_overview.dot "Diagram showing a very simplified overview of the Policy System. IRQ handling, soft/hard-stopping, contexts re-entering the system and Policy details are omitted"
+ *
+ * The main operations on the queue are:
+ * - Enqueuing a Context to it
+ * - Dequeuing a Context from it, to run it.
+ * - Note: requeuing a context is much the same as enqueuing a context, but
+ * occurs when a context is scheduled out of the system to allow other contexts
+ * to run.
+ *
+ * These operations have much the same meaning for the Run Pool - Jobs are
+ * dequeued to run on a Jobslot, and requeued when they are scheduled out of
+ * the GPU.
+ *
+ * @note This is an over-simplification of the Policy APIs - there are more
+ * operations than 'Enqueue'/'Dequeue', and a Dequeue from the Policy Queue
+ * takes at least two function calls: one to Dequeue from the Queue, one to add
+ * to the Run Pool.
+ *
+ * As indicated on the diagram, Jobs permanently leave the scheduling system
+ * when they are completed, otherwise they get dequeued/requeued until this
+ * happens. Similarly, Contexts leave the scheduling system when their jobs
+ * have all completed. However, Contexts may later return to the scheduling
+ * system (not shown on the diagram) if more Bags of Jobs are submitted to
+ * them.
+ */
+
+/**
+ * @page page_kbase_js_policy_operation Policy Operation
+ *
+ * We describe the actions that the Job Scheduler Core takes on the Policy in
+ * the following cases:
+ * - The IRQ Path
+ * - The Job Submission Path
+ * - The High Priority Job Submission Path
+ *
+ * This shows how the Policy APIs will be used by the Job Scheduler core.
+ *
+ * The following diagram shows an example Policy that contains a Low Priority
+ * queue, and a Real-time (High Priority) Queue. The RT queue is examined
+ * before the LowP one on dequeuing from the head. The Low Priority Queue is
+ * ordered by time, and the RT queue is ordered by time weighted by
+ * RT-priority. In addition, it shows that the Job Scheduler Core will start a
+ * Soft-Stop Timer (SS-Timer) when it dequeues and submits a job. The
+ * Soft-Stop time is set by a global configuration value, and must be a value
+ * appropriate for the policy. For example, this could include "don't run a
+ * soft-stop timer" for a First-Come-First-Served (FCFS) policy.
+ *
+ * <!-- The following needs to be all on one line, due to doxygen's parser -->
+ * @dotfile policy_operation_diagram.dot "Diagram showing the objects managed by an Example Policy, and the operations made upon these objects by the Job Scheduler Core."
+ *
+ * @section sec_kbase_js_policy_operation_prio Dealing with Priority
+ *
+ * Priority applies separately to a context as a whole, and to the jobs within
+ * a context. The jobs specify a priority in the base_jd_atom::prio member, but
+ * it is independent of the context priority. That is, it only affects
+ * scheduling of atoms within a context. Refer to @ref base_jd_prio for more
+ * details. The meaning of the context's priority value is up to the policy
+ * itself, and could be a logarithmic scale instead of a linear scale (e.g. the
+ * policy could specify that an increase/decrease in priority by 1 results in an
+ * increase/decrease in \em proportion of time spent scheduled in by 25%, an
+ * effective change in timeslice by 11%).
+ *
+ * It is up to the policy whether a boost in priority boosts the priority of
+ * the entire context (e.g. to such an extent where it may pre-empt other
+ * running contexts). If it chooses to do this, the Policy must make sure that
+ * only jobs from high-priority contexts are run, and that the context is
+ * scheduled out once only jobs from low priority contexts remain. This ensures
+ * that the low priority contexts do not gain from the priority boost, yet they
+ * still get scheduled correctly with respect to other low priority contexts.
+ *
+ *
+ * @section sec_kbase_js_policy_operation_irq IRQ Path
+ *
+ * The following happens on the IRQ path from the Job Scheduler Core:
+ * - Note the slot that completed (for later)
+ * - Log the time spent by the job (and implicitly, the time spent by the
+ * context)
+ * - call kbasep_js_policy_log_job_result() <em>in the context of the irq
+ * handler.</em>
+ * - This must happen regardless of whether the job completed successfully or
+ * not (otherwise the context gets away with DoS'ing the system with faulty jobs)
+ * - What was the result of the job?
+ * - If Completed: job is just removed from the system
+ * - If Hard-stop or failure: job is removed from the system
+ * - If Soft-stop: queue the book-keeping work onto a work-queue: have a
+ * work-queue call kbasep_js_policy_enqueue_job()
+ * - Check the timeslice used by the owning context
+ * - call kbasep_js_policy_should_remove_ctx() <em>in the context of the irq
+ * handler.</em>
+ * - If this returns true, clear the "allowed" flag.
+ * - Check the ctx's flags for "allowed", "has jobs to run" and "is running
+ * jobs"
+ * - And so, should the context stay scheduled in?
+ * - If No, push onto a work-queue the work of scheduling out the old context,
+ * and getting a new one. That is:
+ * - kbasep_js_policy_runpool_remove_ctx() on old_ctx
+ * - kbasep_js_policy_enqueue_ctx() on old_ctx
+ * - kbasep_js_policy_dequeue_head_ctx() to get new_ctx
+ * - kbasep_js_policy_runpool_add_ctx() on new_ctx
+ * - (all of this work is deferred on a work-queue to keep the IRQ handler quick)
+ * - If there is space in the completed job slots' HEAD/NEXT registers, run the next job:
+ * - kbasep_js_policy_dequeue_job() <em>in the context of the irq
+ * handler</em> with core_req set to that of the completing slot
+ * - if this returned true, submit the job to the completed slot.
+ * - This is repeated until kbasep_js_policy_dequeue_job() returns
+ * false, or the job slot has a job queued on both the HEAD and NEXT registers.
+ * - If kbasep_js_policy_dequeue_job() returned false, submit some work to
+ * the work-queue to retry from outside of IRQ context (calling
+ * kbasep_js_policy_dequeue_job() from a work-queue).
+ *
+ * Since the IRQ handler submits new jobs \em and re-checks the IRQ_RAWSTAT,
+ * this sequence could loop a large number of times: this could happen if
+ * the jobs submitted completed on the GPU very quickly (in a few cycles), such
+ * as GPU NULL jobs. Then, the HEAD/NEXT registers will always be free to take
+ * more jobs, causing us to loop until we run out of jobs.
+ *
+ * To mitigate this, we must limit the number of jobs submitted per slot during
+ * the IRQ handler - for example, no more than 2 jobs per slot per IRQ should
+ * be sufficient (to fill up the HEAD + NEXT registers in normal cases). For
+ * Mali-T600 with 3 job slots, this means that up to 6 jobs in total could be
+ * submitted per IRQ. Note that IRQ Throttling can make this situation commonplace: 6 jobs
+ * could complete but the IRQ for each of them is delayed by the throttling. By
+ * the time you get the IRQ, all 6 jobs could've completed, meaning you can
+ * submit jobs to fill all 6 HEAD+NEXT registers again.
+ *
+ * @note As much work is deferred as possible, which includes the scheduling
+ * out of a context and scheduling in a new context. However, we can still make
+ * starting a single high-priority context quick despite this:
+ * - On Mali-T600 family, there is one more AS than JSs.
+ * - This means we can very quickly schedule out one AS, no matter what the
+ * situation (because there will always be one AS that's not currently running
+ * on the job slot - it can only have a job in the NEXT register).
+ * - Even with this scheduling out, fair-share can still be guaranteed e.g. by
+ * a timeline-based Completely Fair Scheduler.
+ * - When our high-priority context comes in, we can do this quick-scheduling
+ * out immediately, and then schedule in the high-priority context without having to block.
+ * - This all assumes that the context to schedule out is of lower
+ * priority. Otherwise, we will have to block waiting for some other low
+ * priority context to finish its jobs. Note that it's likely (but not
+ * impossible) that the high-priority context \b is running jobs, by virtue of
+ * it being high priority.
+ * - Therefore, there is a high likelihood that on Mali-T600 at least one
+ * high-priority context can be started very quickly. For the general case, we
+ * can guarantee starting (no. ASs) - (no. JSs) high priority contexts
+ * quickly. In any case, there is a high likelihood that we're able to start
+ * more than one high priority context quickly.
+ *
+ * In terms of the functions used in the IRQ handler directly, these are the
+ * performance considerations:
+ * - kbasep_js_policy_log_job_result():
+ * - This is just adding to a 64-bit value (possibly even a 32-bit value if we
+ * only store the time the job's recently spent - see below on 'priority weighting')
+ * - For priority weighting, a divide operation ('div') could happen, but
+ * this can happen in a deferred context (outside of IRQ) when scheduling out
+ * the ctx; as per our Engineering Specification, the contexts of different
+ * priority still stay scheduled in for the same timeslice, but higher priority
+ * ones scheduled back in more often.
+ * - That is, the weighted and unweighted times must be stored separately, and
+ * the weighted time is only updated \em outside of IRQ context.
+ * - Of course, this divide is more likely to be a 'multiply by inverse of the
+ * weight', assuming that the weight (priority) doesn't change.
+ * - kbasep_js_policy_should_remove_ctx():
+ * - This is usually just a comparison of the stored time value against some
+ * maximum value.
+ *
+ * @note all deferred work can be wrapped up into one call - we usually need to
+ * indicate that a job/bag is done outside of IRQ context anyway.
+ *
+ *
+ *
+ * @section sec_kbase_js_policy_operation_submit Submission path
+ *
+ * Start with a Context with no jobs present, and assume equal priority of all
+ * contexts in the system. The following work all happens outside of IRQ
+ * Context :
+ * - As soon as a job is made 'ready to run', it must be registered with the Job
+ * Scheduler Policy:
+ * - 'Ready to run' means the job has satisfied its dependencies in the
+ * Kernel-side Job Dispatch system.
+ * - Call kbasep_js_policy_enqueue_job()
+ * - This indicates that the job should be scheduled (it is ready to run).
+ * - As soon as a ctx changes from having 0 jobs 'ready to run' to >0 jobs
+ * 'ready to run', we enqueue the context on the policy queue:
+ * - Call kbasep_js_policy_enqueue_ctx()
+ * - This indicates that the \em ctx should be scheduled (it is ready to run)
+ *
+ * Next, we need to handle adding a context to the Run Pool - if it's sensible
+ * to do so. This can happen due to two reasons:
+ * -# A context is enqueued as above, and there are ASs free for it to run on
+ * (e.g. it is the first context to be run, in which case it can be added to
+ * the Run Pool immediately after enqueuing on the Policy Queue)
+ * -# A previous IRQ caused another ctx to be scheduled out, requiring that the
+ * context at the head of the queue be scheduled in. Such steps would happen in
+ * a work queue (work deferred from the IRQ context).
+ *
+ * In both cases, we'd handle it as follows:
+ * - Get the context at the Head of the Policy Queue:
+ * - Call kbasep_js_policy_dequeue_head_ctx()
+ * - Assign the Context an Address Space (Assert that there will be one free,
+ * given the above two reasons)
+ * - Add this context to the Run Pool:
+ * - Call kbasep_js_policy_runpool_add_ctx()
+ * - Now see if a job should be run:
+ * - Mostly, this will be done in the IRQ handler at the completion of a
+ * previous job.
+ * - However, there are two cases where this cannot be done: a) The first job
+ * enqueued to the system (there is no previous IRQ to act upon) b) When jobs
+ * are submitted at a low enough rate to not fill up all Job Slots (or, not to
+ * fill both the 'HEAD' and 'NEXT' registers in the job-slots)
+ * - Hence, on each ctx <b>and job</b> submission we should try to see if we
+ * can run a job:
+ * - For each job slot that has free space (in NEXT or HEAD+NEXT registers):
+ * - Call kbasep_js_policy_dequeue_job() with core_req set to that of the
+ * slot
+ * - if we got one, submit it to the job slot.
+ * - This is repeated until kbasep_js_policy_dequeue_job() returns
+ * false, or the job slot has a job queued on both the HEAD and NEXT registers.
+ *
+ * The above case shows that we should attempt to run jobs in cases where a) a ctx
+ * has been added to the Run Pool, and b) new jobs have been added to a context
+ * in the Run Pool:
+ * - In the latter case, the context is in the runpool because it's got a job
+ * ready to run, or is already running a job
+ * - We could just wait until the IRQ handler fires, but for certain types of
+ * jobs this can take a comparatively long time to complete, e.g. GLES FS jobs
+ * generally take much longer to run than GLES CS jobs, which are vertex shader
+ * jobs.
+ * - Therefore, when a new job appears in the ctx, we must check the job-slots
+ * to see if they're free, and run the jobs as before.
+ *
+ *
+ *
+ * @section sec_kbase_js_policy_operation_submit_hipri Submission path for High Priority Contexts
+ *
+ * For High Priority Contexts on Mali-T600, we can make sure that at least 1 of
+ * them can be scheduled in immediately to start high priority jobs. In general,
+ * (no. ASs) - (no. JSs) high priority contexts may be started immediately. The
+ * following describes how this happens:
+ *
+ * Similar to the previous section, consider what happens with a high-priority
+ * context (a context with a priority higher than that of any in the Run Pool)
+ * that starts out with no jobs:
+ * - A job becomes ready to run on the context, and so we enqueue the context
+ * on the Policy's Queue.
+ * - However, we'd like to schedule in this context immediately, instead of
+ * waiting for one of the Run Pool contexts' timeslice to expire
+ * - The policy's Enqueue function must detect this (because it is the policy
+ * that embodies the concept of priority), and take appropriate action
+ * - That is, kbasep_js_policy_enqueue_ctx() should check the Policy's Run
+ * Pool to see if a lower priority context should be scheduled out, and then
+ * schedule in the High Priority context.
+ * - For Mali-T600, we can always pick a context to schedule out immediately
+ * (because there are more ASs than JSs), and so scheduling out a victim context
+ * and scheduling in the high priority context can happen immediately.
+ * - If a policy implements fair-sharing, then this can still ensure the
+ * victim later on gets a fair share of the GPU.
+ * - As a note, consider whether the victim can be of equal/higher priority
+ * than the incoming context:
+ * - Usually, higher priority contexts will be the ones currently running
+ * jobs, and so the context with the lowest priority is usually not running
+ * jobs.
+ * - This makes it likely that the victim context is low priority, but
+ * it's not impossible for it to be a high priority one:
+ * - Suppose 3 high priority contexts are submitting only FS jobs, and one low
+ * priority context submitting CS jobs. Then, the context not running jobs will
+ * be one of the high priority contexts (because only 2 FS jobs can be
+ * queued/running on the GPU HW for Mali-T600).
+ * - The problem can be mitigated by extra action, but it's questionable
+ * whether we need to: we already have a high likelihood that there's at least
+ * one high priority context - that should be good enough.
+ * - And so, this method makes sure that at least one high priority context
+ * can be started very quickly, but more than one high priority context could be
+ * delayed (up to one timeslice).
+ * - To improve this, use a GPU with a higher number of Address Spaces vs Job
+ * Slots.
+ * - At this point, let's assume this high priority context has been scheduled
+ * in immediately. The next step is to ensure it can start some jobs quickly.
+ * - It must do this by Soft-Stopping jobs on any of the Job Slots that it can
+ * submit to.
+ * - The rest of the logic for starting the jobs is taken care of by the IRQ
+ * handler. All the policy needs to do is ensure that
+ * kbasep_js_policy_dequeue_job() will return the jobs from the high priority
+ * context.
+ *
+ * @note in SS state, we currently only use 2 job-slots (even for T608, but
+ * this might change in future). In this case, it's always possible to schedule
+ * out 2 ASs quickly (their jobs won't be in the HEAD registers). At the same
+ * time, this maximizes usage of the job-slots (only 2 are in use), because
+ * starting jobs from the High Priority contexts can still be guaranteed immediately.
+ *
+ *
+ *
+ * @section sec_kbase_js_policy_operation_notes Notes
+ *
+ * - In this design, a separate 'init' is needed from dequeue/requeue, so that
+ * information can be retained between the dequeue/requeue calls. For example,
+ * the total time spent for a context/job could be logged between
+ * dequeue/requeuing, to implement Fair Sharing. In this case, 'init' just
+ * initializes that information to some known state.
+ *
+ *
+ *
+ */
+
+/**
+ * @addtogroup base_api
+ * @{
+ */
+
+/**
+ * @addtogroup base_kbase_api
+ * @{
+ */
+
+/**
+ * @addtogroup kbase_js_policy Job Scheduler Policy APIs
+ * @{
+ *
+ * <b>Refer to @ref page_kbase_js_policy for an overview and detailed operation of
+ * the Job Scheduler Policy and its use from the Job Scheduler Core</b>.
+ */
+
+/**
+ * @brief Job Scheduler Policy structure
+ */
+union kbasep_js_policy;
+
+/**
+ * @brief Initialize the Job Scheduler Policy
+ */
+int kbasep_js_policy_init(struct kbase_device *kbdev);
+
+/**
+ * @brief Terminate the Job Scheduler Policy
+ */
+void kbasep_js_policy_term(union kbasep_js_policy *js_policy);
+
+/**
+ * @addtogroup kbase_js_policy_ctx Job Scheduler Policy, Context Management API
+ * @{
+ *
+ * <b>Refer to @ref page_kbase_js_policy for an overview and detailed operation of
+ * the Job Scheduler Policy and its use from the Job Scheduler Core</b>.
+ */
+
+/**
+ * @brief Job Scheduler Policy Ctx Info structure
+ *
+ * This structure is embedded in the struct kbase_context structure. It is used to:
+ * - track information needed for the policy to schedule the context (e.g. time
+ * used, OS priority etc.)
+ * - link together kbase_contexts into a queue, so that a struct kbase_context can be
+ * obtained as the container of the policy ctx info. This allows the API to
+ * return what "the next context" should be.
+ * - obtain other information already stored in the struct kbase_context for
+ * scheduling purposes (e.g process ID to get the priority of the originating
+ * process)
+ */
+union kbasep_js_policy_ctx_info;
+
+/**
+ * @brief Initialize a ctx for use with the Job Scheduler Policy
+ *
+ * This effectively initializes the union kbasep_js_policy_ctx_info structure within
+ * the struct kbase_context (itself located within the kctx->jctx.sched_info structure).
+ */
+int kbasep_js_policy_init_ctx(struct kbase_device *kbdev, struct kbase_context *kctx);
+
+/**
+ * @brief Terminate resources associated with using a ctx in the Job Scheduler
+ * Policy.
+ */
+void kbasep_js_policy_term_ctx(union kbasep_js_policy *js_policy, struct kbase_context *kctx);
+
+/**
+ * @brief Enqueue a context onto the Job Scheduler Policy Queue
+ *
+ * If the context enqueued has a priority higher than any in the Run Pool, then
+ * it is the Policy's responsibility to decide whether to schedule out a low
+ * priority context from the Run Pool to allow the high priority context to be
+ * scheduled in.
+ *
+ * If the context has the privileged flag set, it will always be kept at the
+ * head of the queue.
+ *
+ * The caller will be holding kbasep_js_kctx_info::ctx::jsctx_mutex.
+ * The caller will be holding kbasep_js_device_data::queue_mutex.
+ */
+void kbasep_js_policy_enqueue_ctx(union kbasep_js_policy *js_policy, struct kbase_context *kctx);
+
+/**
+ * @brief Dequeue a context from the Head of the Job Scheduler Policy Queue
+ *
+ * The caller will be holding kbasep_js_device_data::queue_mutex.
+ *
+ * @return true if a context was available, and *kctx_ptr points to
+ * the kctx dequeued.
+ * @return false if no contexts were available.
+ */
+bool kbasep_js_policy_dequeue_head_ctx(union kbasep_js_policy *js_policy, struct kbase_context ** const kctx_ptr);
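+
+/*
+ * Non-authoritative sketch of the "schedule in the head of the queue"
+ * sequence outlined in the policy operation notes: dequeue the head context,
+ * give it an address space, then add it to the Run Pool. Address-space
+ * assignment is shown only as a hypothetical helper (example_assign_free_as());
+ * the real steps, error handling and locking (queue_mutex for the dequeue,
+ * then jsctx_mutex/runpool_mutex/hwaccess_lock for the add) are owned by the
+ * Job Scheduler core and are omitted here.
+ *
+ * @code
+ * static void example_schedule_in_head(struct kbase_device *kbdev,
+ *					 union kbasep_js_policy *js_policy)
+ * {
+ *	struct kbase_context *new_kctx;
+ *
+ *	if (kbasep_js_policy_dequeue_head_ctx(js_policy, &new_kctx)) {
+ *		example_assign_free_as(kbdev, new_kctx);
+ *		kbasep_js_policy_runpool_add_ctx(js_policy, new_kctx);
+ *	}
+ * }
+ * @endcode
+ */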
+
+/**
+ * @brief Evict a context from the Job Scheduler Policy Queue
+ *
+ * This is only called as part of destroying a kbase_context.
+ *
+ * There are many reasons why this might fail during the lifetime of a
+ * context. For example, the context is in the process of being scheduled. In
+ * that case a thread doing the scheduling might have a pointer to it, but the
+ * context is neither in the Policy Queue, nor is it in the Run
+ * Pool. Crucially, neither the Policy Queue, Run Pool, or the Context itself
+ * are locked.
+ *
+ * Hence to find out where in the system the context is, it is important to do
+ * more than just check the kbasep_js_kctx_info::ctx::is_scheduled member.
+ *
+ * The caller will be holding kbasep_js_device_data::queue_mutex.
+ *
+ * @return true if the context was evicted from the Policy Queue
+ * @return false if the context was not found in the Policy Queue
+ */
+bool kbasep_js_policy_try_evict_ctx(union kbasep_js_policy *js_policy, struct kbase_context *kctx);
+
+/**
+ * @brief Call a function on all jobs belonging to a non-queued, non-running
+ * context, optionally detaching the jobs from the context as it goes.
+ *
+ * At the time of the call, the context is guaranteed to be not currently
+ * scheduled on the Run Pool (is_scheduled == false), and not present in
+ * the Policy Queue. This is because one of the following functions was used
+ * recently on the context:
+ * - kbasep_js_policy_try_evict_ctx()
+ * - kbasep_js_policy_runpool_remove_ctx()
+ *
+ * In both cases, no subsequent call was made on the context to any of:
+ * - kbasep_js_policy_runpool_add_ctx()
+ * - kbasep_js_policy_enqueue_ctx()
+ *
+ * Due to the locks that might be held at the time of the call, the callback
+ * may need to defer work on a workqueue to complete its actions (e.g. when
+ * cancelling jobs)
+ *
+ * \a detach_jobs must only be set when cancelling jobs (which occurs as part
+ * of context destruction).
+ *
+ * The locking conditions on the caller are as follows:
+ * - it will be holding kbasep_js_kctx_info::ctx::jsctx_mutex.
+ */
+void kbasep_js_policy_foreach_ctx_job(union kbasep_js_policy *js_policy, struct kbase_context *kctx,
+ kbasep_js_policy_ctx_job_cb callback, bool detach_jobs);
+
+/**
+ * @brief Add a context to the Job Scheduler Policy's Run Pool
+ *
+ * If the context enqueued has a priority higher than any in the Run Pool, then
+ * it is the Policy's responsibility to decide whether to schedule out low
+ * priority jobs that are currently running on the GPU.
+ *
+ * The number of contexts present in the Run Pool will never be more than the
+ * number of Address Spaces.
+ *
+ * The following guarantees are made about the state of the system when this
+ * is called:
+ * - kctx->as_nr member is valid
+ * - the context has its submit_allowed flag set
+ * - kbasep_js_device_data::runpool_irq::per_as_data[kctx->as_nr] is valid
+ * - The refcount of the context is guaranteed to be zero.
+ * - kbasep_js_kctx_info::ctx::is_scheduled will be true.
+ *
+ * The locking conditions on the caller are as follows:
+ * - it will be holding kbasep_js_kctx_info::ctx::jsctx_mutex.
+ * - it will be holding kbasep_js_device_data::runpool_mutex.
+ * - it will be holding hwaccess_lock (a spinlock)
+ *
+ * Due to a spinlock being held, this function must not call any APIs that sleep.
+ */
+void kbasep_js_policy_runpool_add_ctx(union kbasep_js_policy *js_policy, struct kbase_context *kctx);
+
+/**
+ * @brief Remove a context from the Job Scheduler Policy's Run Pool
+ *
+ * The kctx->as_nr member is valid and the context has its submit_allowed flag
+ * set when this is called. The state of
+ * kbasep_js_device_data::runpool_irq::per_as_data[kctx->as_nr] is also
+ * valid. The refcount of the context is guaranteed to be zero.
+ *
+ * The locking conditions on the caller are as follows:
+ * - it will be holding kbasep_js_kctx_info::ctx::jsctx_mutex.
+ * - it will be holding kbasep_js_device_data::runpool_mutex.
+ * - it will be holding hwaccess_lock (a spinlock)
+ *
+ * Due to a spinlock being held, this function must not call any APIs that sleep.
+ */
+void kbasep_js_policy_runpool_remove_ctx(union kbasep_js_policy *js_policy, struct kbase_context *kctx);
+
+/**
+ * @brief Indicate whether a context should be removed from the Run Pool
+ * (should be scheduled out).
+ *
+ * The hwaccess_lock will be held by the caller.
+ *
+ * @note This API is called from IRQ context.
+ */
+bool kbasep_js_policy_should_remove_ctx(union kbasep_js_policy *js_policy, struct kbase_context *kctx);
+
+/**
+ * @brief Synchronize with any timers acting upon the runpool
+ *
+ * The policy should check whether any timers it owns should be running. If
+ * they should not, the policy must cancel such timers and ensure they are not
+ * re-run by the time this function finishes.
+ *
+ * In particular, the timers must not be running when there are no more contexts
+ * on the runpool, because the GPU could be powered off soon after this call.
+ *
+ * The locking conditions on the caller are as follows:
+ * - it will be holding kbasep_js_kctx_info::ctx::jsctx_mutex.
+ * - it will be holding kbasep_js_device_data::runpool_mutex.
+ */
+void kbasep_js_policy_runpool_timers_sync(union kbasep_js_policy *js_policy);
+
+
+/**
+ * @brief Indicate whether a new context has a higher priority than the current context.
+ *
+ *
+ * The caller has the following conditions on locking:
+ * - kbasep_js_kctx_info::ctx::jsctx_mutex will be held for \a new_ctx
+ *
+ * This function must not sleep, because an IRQ spinlock might be held whilst
+ * this is called.
+ *
+ * @note There is nothing to stop the priority of \a current_ctx changing
+ * during or immediately after this function is called (because its jsctx_mutex
+ * cannot be held). Therefore, this function should only be seen as a heuristic
+ * guide as to whether \a new_ctx is higher priority than \a current_ctx
+ */
+bool kbasep_js_policy_ctx_has_priority(union kbasep_js_policy *js_policy, struct kbase_context *current_ctx, struct kbase_context *new_ctx);
+
+ /** @} *//* end group kbase_js_policy_ctx */
+
+/**
+ * @addtogroup kbase_js_policy_job Job Scheduler Policy, Job Chain Management API
+ * @{
+ *
+ * <b>Refer to @ref page_kbase_js_policy for an overview and detailed operation of
+ * the Job Scheduler Policy and its use from the Job Scheduler Core</b>.
+ */
+
+/**
+ * @brief Job Scheduler Policy Job Info structure
+ *
+ * This structure is embedded in the struct kbase_jd_atom structure. It is used to:
+ * - track information needed for the policy to schedule the job (e.g. time
+ * used, etc.)
+ * - link together jobs into a queue/buffer, so that a struct kbase_jd_atom can be
+ * obtained as the container of the policy job info. This allows the API to
+ * return what "the next job" should be.
+ */
+union kbasep_js_policy_job_info;
+
+/**
+ * @brief Initialize a job for use with the Job Scheduler Policy
+ *
+ * This function initializes the union kbasep_js_policy_job_info structure within the
+ * kbase_jd_atom. It will only initialize/allocate resources that are specific
+ * to the job.
+ *
+ * That is, this function makes \b no attempt to:
+ * - initialize any context/policy-wide information
+ * - enqueue the job on the policy.
+ *
+ * At some later point, the following functions must be called on the job, in this order:
+ * - kbasep_js_policy_register_job() to register the job and initialize policy/context wide data.
+ * - kbasep_js_policy_enqueue_job() to enqueue the job
+ *
+ * A job must only ever be initialized on the Policy once, and must be
+ * terminated on the Policy before the job is freed.
+ *
+ * The caller will not be holding any locks, and so this function will not
+ * modify any information in \a kctx or \a js_policy.
+ *
+ * @return 0 if initialization was correct.
+ */
+int kbasep_js_policy_init_job(const union kbasep_js_policy *js_policy, const struct kbase_context *kctx, struct kbase_jd_atom *katom);
+
+/**
+ * @brief Register context/policy-wide information for a job on the Job Scheduler Policy.
+ *
+ * Registers the job with the policy. This is used to track the job before it
+ * has been enqueued/requeued by kbasep_js_policy_enqueue_job(). Specifically,
+ * it is used to update information under a lock that could not be updated at
+ * kbasep_js_policy_init_job() time (such as context/policy-wide data).
+ *
+ * @note This function will not fail, and hence does not allocate any
+ * resources. Any failures that could occur on registration will be caught
+ * during kbasep_js_policy_init_job() instead.
+ *
+ * A job must only ever be registered on the Policy once, and must be
+ * deregistered on the Policy on completion (whether or not that completion was
+ * success/failure).
+ *
+ * The caller has the following conditions on locking:
+ * - kbasep_js_kctx_info::ctx::jsctx_mutex will be held.
+ */
+void kbasep_js_policy_register_job(union kbasep_js_policy *js_policy, struct kbase_context *kctx, struct kbase_jd_atom *katom);
+
+/**
+ * @brief De-register context/policy-wide information for a job on the Job Scheduler Policy.
+ *
+ * This must be used before terminating the resources associated with using a
+ * job in the Job Scheduler Policy. This function does not itself terminate any
+ * resources, at most it just updates information in the policy and context.
+ *
+ * The caller has the following conditions on locking:
+ * - kbasep_js_kctx_info::ctx::jsctx_mutex will be held.
+ */
+void kbasep_js_policy_deregister_job(union kbasep_js_policy *js_policy, struct kbase_context *kctx, struct kbase_jd_atom *katom);
+
+/**
+ * @brief Dequeue a Job for a job slot from the Job Scheduler Policy Run Pool
+ *
+ * The job returned by the policy will match at least one of the bits in the
+ * job slot's core requirements (but it may match more than one, or all @ref
+ * base_jd_core_req bits supported by the job slot).
+ *
+ * In addition, the requirements of the job returned will be a subset of those
+ * requested - the job returned will not have requirements that \a job_slot_idx
+ * cannot satisfy.
+ *
+ * The caller will submit the job to the GPU as soon as the GPU's NEXT register
+ * for the corresponding slot is empty. Of course, the GPU will then only run
+ * this new job when the currently executing job (in the jobslot's HEAD
+ * register) has completed.
+ *
+ * @return true if a job was available, and *katom_ptr points to
+ * the atom dequeued.
+ * @return false if no jobs were available among all ctxs in the Run Pool.
+ *
+ * @note base_jd_core_req is currently a u8 - beware of type conversion.
+ *
+ * The caller has the following conditions on locking:
+ * - kbasep_js_device_data::runpool_lock::irq will be held.
+ * - kbasep_js_device_data::runpool_mutex will be held.
+ * - kbasep_js_kctx_info::ctx::jsctx_mutex. will be held
+ */
+bool kbasep_js_policy_dequeue_job(struct kbase_device *kbdev, int job_slot_idx, struct kbase_jd_atom ** const katom_ptr);
+
+/**
+ * @brief Requeue a Job back into the Job Scheduler Policy Run Pool
+ *
+ * This will be used to enqueue a job after its creation and also to requeue
+ * a job into the Run Pool that was previously dequeued (running). It notifies
+ * the policy that the job should be run again at some point later.
+ *
+ * The caller has the following conditions on locking:
+ * - hwaccess_lock (a spinlock) will be held.
+ * - kbasep_js_device_data::runpool_mutex will be held.
+ * - kbasep_js_kctx_info::ctx::jsctx_mutex will be held.
+ */
+void kbasep_js_policy_enqueue_job(union kbasep_js_policy *js_policy, struct kbase_jd_atom *katom);
+
+/**
+ * @brief Log the result of a job: the time spent on a job/context, and whether
+ * the job failed or not.
+ *
+ * Since a struct kbase_jd_atom contains a pointer to the struct kbase_context owning it,
+ * this can also be used to log time on either/both the job and the
+ * containing context.
+ *
+ * The completion state of the job can be found by examining \a katom->event.event_code
+ *
+ * If the Job failed and the policy is implementing fair-sharing, then the
+ * policy must penalize the failing job/context:
+ * - At the very least, it should penalize the time taken by the amount of
+ * time spent processing the IRQ in SW. This is because a job in the NEXT slot
+ * waiting to run will be delayed until the failing job has had the IRQ
+ * cleared.
+ * - \b Optionally, the policy could apply other penalties. For example, based
+ * on a threshold of a number of failing jobs, after which a large penalty is
+ * applied.
+ *
+ * The kbasep_js_device_data::runpool_mutex will be held by the caller.
+ *
+ * @note This API is called from IRQ context.
+ *
+ * The caller has the following conditions on locking:
+ * - hwaccess_lock will be held.
+ *
+ * @param js_policy job scheduler policy
+ * @param katom job dispatch atom
+ * @param time_spent_us the time spent by the job, in microseconds (10^-6 seconds).
+ */
+void kbasep_js_policy_log_job_result(union kbasep_js_policy *js_policy, struct kbase_jd_atom *katom, u64 time_spent_us);
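+
+/*
+ * Illustrative sketch (not the driver's implementation): one way a
+ * fair-share policy could apply the minimum failure penalty described
+ * above. The helper name and the fixed 500us estimate of IRQ-processing
+ * time are hypothetical.
+ *
+ *   static u64 penalise_failed_job(u64 time_spent_us, bool failed)
+ *   {
+ *           return failed ? time_spent_us + 500 : time_spent_us;
+ *   }
+ */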
+
+ /** @} *//* end group kbase_js_policy_job */
+
+ /** @} *//* end group kbase_js_policy */
+ /** @} *//* end group base_kbase_api */
+ /** @} *//* end group base_api */
+
+#endif /* _KBASE_JS_POLICY_H_ */
diff --git a/midgard-0.r2p0/mali_kbase_js_policy_cfs.c b/midgard-0.r2p0/mali_kbase_js_policy_cfs.c
new file mode 100755
index 0000000..1ac0569
--- /dev/null
+++ b/midgard-0.r2p0/mali_kbase_js_policy_cfs.c
@@ -0,0 +1,292 @@
+/*
+ *
+ * (C) COPYRIGHT 2011-2016 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained
+ * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
+ * Boston, MA 02110-1301, USA.
+ *
+ */
+
+
+
+
+
+/*
+ * Job Scheduler: Completely Fair Policy Implementation
+ */
+
+#include <mali_kbase.h>
+#include <mali_kbase_js.h>
+#include <mali_kbase_js_policy_cfs.h>
+#include <linux/version.h>
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 9, 0)
+#include <linux/sched/rt.h>
+#endif
+
+/**
+ * Define for when dumping is enabled.
+ * This should not be based on the instrumentation level as whether dumping is enabled for a particular level is down to the integrator.
+ * However, this is being used for now as otherwise the cinstr headers would be needed.
+ */
+#define CINSTR_DUMPING_ENABLED (2 == MALI_INSTRUMENTATION_LEVEL)
+
+/* Fixed point constants used for runtime weight calculations */
+#define WEIGHT_FIXEDPOINT_SHIFT 10
+#define WEIGHT_TABLE_SIZE 40
+#define WEIGHT_0_NICE (WEIGHT_TABLE_SIZE/2)
+#define WEIGHT_0_VAL (1 << WEIGHT_FIXEDPOINT_SHIFT)
+
+#define PROCESS_PRIORITY_MIN (-20)
+#define PROCESS_PRIORITY_MAX (19)
+
+/* Defines for easy asserts 'is scheduled'/'is queued'/'is neither queued nor scheduled' */
+#define KBASEP_JS_CHECKFLAG_QUEUED (1u << 0) /**< Check the queued state */
+#define KBASEP_JS_CHECKFLAG_SCHEDULED (1u << 1) /**< Check the scheduled state */
+#define KBASEP_JS_CHECKFLAG_IS_QUEUED (1u << 2) /**< Expect queued state to be set */
+#define KBASEP_JS_CHECKFLAG_IS_SCHEDULED (1u << 3) /**< Expect scheduled state to be set */
+
+enum {
+ KBASEP_JS_CHECK_NOTQUEUED = KBASEP_JS_CHECKFLAG_QUEUED,
+ KBASEP_JS_CHECK_NOTSCHEDULED = KBASEP_JS_CHECKFLAG_SCHEDULED,
+ KBASEP_JS_CHECK_QUEUED = KBASEP_JS_CHECKFLAG_QUEUED | KBASEP_JS_CHECKFLAG_IS_QUEUED,
+ KBASEP_JS_CHECK_SCHEDULED = KBASEP_JS_CHECKFLAG_SCHEDULED | KBASEP_JS_CHECKFLAG_IS_SCHEDULED
+};
+
+typedef u32 kbasep_js_check;
+
+/*
+ * Private Functions
+ */
+
+/* Table autogenerated using util built from: base/tools/gen_cfs_weight_of_prio/ */
+
+/* weight = 1.25 */
+static const int weight_of_priority[] = {
+ /* -20 */ 11, 14, 18, 23,
+ /* -16 */ 29, 36, 45, 56,
+ /* -12 */ 70, 88, 110, 137,
+ /* -8 */ 171, 214, 268, 335,
+ /* -4 */ 419, 524, 655, 819,
+ /* 0 */ 1024, 1280, 1600, 2000,
+ /* 4 */ 2500, 3125, 3906, 4883,
+ /* 8 */ 6104, 7630, 9538, 11923,
+ /* 12 */ 14904, 18630, 23288, 29110,
+ /* 16 */ 36388, 45485, 56856, 71070
+};
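+
+/*
+ * Worked example of the table above: each entry is the previous one
+ * multiplied by 1.25, centred on nice 0 in Q10 fixed point
+ * (WEIGHT_FIXEDPOINT_SHIFT):
+ *   nice  0 -> 1024 (1.0  * 2^10)
+ *   nice +1 -> 1280 (1.25 * 2^10)
+ *   nice -1 ->  819 (approx. 1024 / 1.25)
+ * so a context at nice N is charged roughly 1.25^N times its raw runtime.
+ */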
+
+/*
+ * Note: There is nothing to stop the priority of the ctx containing
+ * ctx_info changing during or immediately after this function is called
+ * (because its jsctx_mutex cannot be held during IRQ). Therefore, this
+ * function should only be seen as a heuristic guide as to the priority weight
+ * of the context.
+ */
+static u64 priority_weight(struct kbasep_js_policy_cfs_ctx *ctx_info, u64 time_us)
+{
+ u64 time_delta_us;
+ int priority;
+
+ priority = ctx_info->process_priority;
+
+ /* Adjust runtime_us using priority weight if required */
+ if (priority != 0 && time_us != 0) {
+ int clamped_priority;
+
+ /* Clamp values to min..max weights */
+ if (priority > PROCESS_PRIORITY_MAX)
+ clamped_priority = PROCESS_PRIORITY_MAX;
+ else if (priority < PROCESS_PRIORITY_MIN)
+ clamped_priority = PROCESS_PRIORITY_MIN;
+ else
+ clamped_priority = priority;
+
+ /* Fixed point multiplication */
+ time_delta_us = (time_us * weight_of_priority[WEIGHT_0_NICE + clamped_priority]);
+ /* Remove fraction */
+ time_delta_us = time_delta_us >> WEIGHT_FIXEDPOINT_SHIFT;
+ /* Make sure the time always increases */
+ if (0 == time_delta_us)
+ time_delta_us++;
+ } else {
+ time_delta_us = time_us;
+ }
+
+ return time_delta_us;
+}
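+
+/*
+ * Worked example (illustrative numbers only): for time_us = 1000 and a
+ * clamped priority of +1, weight_of_priority[WEIGHT_0_NICE + 1] is 1280, so
+ * the charged time is (1000 * 1280) >> 10 = 1250 microseconds.
+ */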
+
+#if KBASE_TRACE_ENABLE
+static int kbasep_js_policy_trace_get_refcnt_nolock(struct kbase_device *kbdev, struct kbase_context *kctx)
+{
+ struct kbasep_js_device_data *js_devdata;
+ int as_nr;
+ int refcnt = 0;
+
+ js_devdata = &kbdev->js_data;
+
+ as_nr = kctx->as_nr;
+ if (as_nr != KBASEP_AS_NR_INVALID) {
+ struct kbasep_js_per_as_data *js_per_as_data;
+
+ js_per_as_data = &js_devdata->runpool_irq.per_as_data[as_nr];
+
+ refcnt = js_per_as_data->as_busy_refcount;
+ }
+
+ return refcnt;
+}
+
+static inline int kbasep_js_policy_trace_get_refcnt(struct kbase_device *kbdev, struct kbase_context *kctx)
+{
+ unsigned long flags;
+ struct kbasep_js_device_data *js_devdata;
+ int refcnt = 0;
+
+ js_devdata = &kbdev->js_data;
+
+ spin_lock_irqsave(&kbdev->hwaccess_lock, flags);
+ refcnt = kbasep_js_policy_trace_get_refcnt_nolock(kbdev, kctx);
+ spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);
+
+ return refcnt;
+}
+#else /* KBASE_TRACE_ENABLE */
+static inline int kbasep_js_policy_trace_get_refcnt(struct kbase_device *kbdev, struct kbase_context *kctx)
+{
+ CSTD_UNUSED(kbdev);
+ CSTD_UNUSED(kctx);
+ return 0;
+}
+#endif /* KBASE_TRACE_ENABLE */
+
+
+/*
+ * Non-private functions
+ */
+
+int kbasep_js_policy_init(struct kbase_device *kbdev)
+{
+ struct kbasep_js_device_data *js_devdata;
+ struct kbasep_js_policy_cfs *policy_info;
+
+ KBASE_DEBUG_ASSERT(kbdev != NULL);
+ js_devdata = &kbdev->js_data;
+ policy_info = &js_devdata->policy.cfs;
+
+ atomic64_set(&policy_info->least_runtime_us, KBASEP_JS_RUNTIME_EMPTY);
+ atomic64_set(&policy_info->rt_least_runtime_us, KBASEP_JS_RUNTIME_EMPTY);
+
+ policy_info->head_runtime_us = 0;
+
+ return 0;
+}
+
+void kbasep_js_policy_term(union kbasep_js_policy *js_policy)
+{
+ CSTD_UNUSED(js_policy);
+}
+
+int kbasep_js_policy_init_ctx(struct kbase_device *kbdev, struct kbase_context *kctx)
+{
+ struct kbasep_js_device_data *js_devdata;
+ struct kbasep_js_policy_cfs_ctx *ctx_info;
+ struct kbasep_js_policy_cfs *policy_info;
+ int policy;
+
+ KBASE_DEBUG_ASSERT(kbdev != NULL);
+ KBASE_DEBUG_ASSERT(kctx != NULL);
+
+ js_devdata = &kbdev->js_data;
+ policy_info = &kbdev->js_data.policy.cfs;
+ ctx_info = &kctx->jctx.sched_info.runpool.policy_ctx.cfs;
+
+ KBASE_TRACE_ADD_REFCOUNT(kbdev, JS_POLICY_INIT_CTX, kctx, NULL, 0u, kbasep_js_policy_trace_get_refcnt(kbdev, kctx));
+
+ policy = current->policy;
+ if (policy == SCHED_FIFO || policy == SCHED_RR) {
+ ctx_info->process_rt_policy = true;
+ ctx_info->process_priority = (((MAX_RT_PRIO - 1) - current->rt_priority) / 5) - 20;
+ } else {
+ ctx_info->process_rt_policy = false;
+ ctx_info->process_priority = (current->static_prio - MAX_RT_PRIO) - 20;
+ }
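+
+	/*
+	 * Worked example (assuming MAX_RT_PRIO == 100): a SCHED_FIFO task with
+	 * rt_priority 99 maps to ((99 - 99) / 5) - 20 = -20, while a normal
+	 * task at nice 0 (static_prio 120) maps to (120 - 100) - 20 = 0.
+	 */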
+
+ /* Initial runtime (relative to least-run context runtime)
+ *
+ * This uses the Policy Queue's most up-to-date head_runtime_us by using the
+ * queue mutex to issue memory barriers; this also ensures that future updates
+ * to head_runtime_us occur strictly after this context is initialized */
+ mutex_lock(&js_devdata->queue_mutex);
+
+ /* No need to hold the hwaccess_lock here, because we're initializing
+ * the value, and the context is definitely not being updated in the
+ * runpool at this point. The queue_mutex ensures the memory barrier. */
+ ctx_info->runtime_us = policy_info->head_runtime_us + priority_weight(ctx_info, (u64) js_devdata->cfs_ctx_runtime_init_slices * (u64) (js_devdata->ctx_timeslice_ns / 1000u));
+
+ mutex_unlock(&js_devdata->queue_mutex);
+
+ return 0;
+}
+
+void kbasep_js_policy_term_ctx(union kbasep_js_policy *js_policy, struct kbase_context *kctx)
+{
+ struct kbase_device *kbdev;
+
+ KBASE_DEBUG_ASSERT(js_policy != NULL);
+ KBASE_DEBUG_ASSERT(kctx != NULL);
+
+ kbdev = container_of(js_policy, struct kbase_device, js_data.policy);
+ KBASE_TRACE_ADD_REFCOUNT(kbdev, JS_POLICY_TERM_CTX, kctx, NULL, 0u, kbasep_js_policy_trace_get_refcnt(kbdev, kctx));
+
+ /* No work to do */
+}
+
+/*
+ * Job Chain Management
+ */
+
+void kbasep_js_policy_log_job_result(union kbasep_js_policy *js_policy, struct kbase_jd_atom *katom, u64 time_spent_us)
+{
+ struct kbasep_js_policy_cfs_ctx *ctx_info;
+ struct kbase_context *parent_ctx;
+
+ KBASE_DEBUG_ASSERT(js_policy != NULL);
+ KBASE_DEBUG_ASSERT(katom != NULL);
+ CSTD_UNUSED(js_policy);
+
+ parent_ctx = katom->kctx;
+ KBASE_DEBUG_ASSERT(parent_ctx != NULL);
+
+ ctx_info = &parent_ctx->jctx.sched_info.runpool.policy_ctx.cfs;
+
+ ctx_info->runtime_us += priority_weight(ctx_info, time_spent_us);
+
+ katom->time_spent_us += time_spent_us;
+}
+
+bool kbasep_js_policy_ctx_has_priority(union kbasep_js_policy *js_policy, struct kbase_context *current_ctx, struct kbase_context *new_ctx)
+{
+ struct kbasep_js_policy_cfs_ctx *current_ctx_info;
+ struct kbasep_js_policy_cfs_ctx *new_ctx_info;
+
+ KBASE_DEBUG_ASSERT(current_ctx != NULL);
+ KBASE_DEBUG_ASSERT(new_ctx != NULL);
+ CSTD_UNUSED(js_policy);
+
+ current_ctx_info = &current_ctx->jctx.sched_info.runpool.policy_ctx.cfs;
+ new_ctx_info = &new_ctx->jctx.sched_info.runpool.policy_ctx.cfs;
+
+ if (!current_ctx_info->process_rt_policy && new_ctx_info->process_rt_policy)
+ return true;
+
+ if (current_ctx_info->process_rt_policy ==
+ new_ctx_info->process_rt_policy)
+ return true;
+
+ return false;
+}
diff --git a/midgard-0.r2p0/mali_kbase_js_policy_cfs.h b/midgard-0.r2p0/mali_kbase_js_policy_cfs.h
new file mode 100755
index 0000000..0a8454c
--- /dev/null
+++ b/midgard-0.r2p0/mali_kbase_js_policy_cfs.h
@@ -0,0 +1,81 @@
+/*
+ *
+ * (C) COPYRIGHT 2011-2016 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained
+ * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
+ * Boston, MA 02110-1301, USA.
+ *
+ */
+
+
+
+
+
+/*
+ * Completely Fair Job Scheduler Policy structure definitions
+ */
+
+#ifndef _KBASE_JS_POLICY_CFS_H_
+#define _KBASE_JS_POLICY_CFS_H_
+
+#define KBASEP_JS_RUNTIME_EMPTY ((u64)-1)
+
+/**
+ * struct kbasep_js_policy_cfs - Data for the CFS policy
+ * @head_runtime_us: Number of microseconds the least-run context has been
+ * running for. The kbasep_js_device_data.queue_mutex must
+ * be held whilst updating this.
+ * Reads are possible without this mutex, but an older value
+ * might be read if no memory barriers are issued beforehand.
+ * @least_runtime_us: Number of microseconds the least-run context in the
+ * context queue has been running for.
+ * -1 if context queue is empty.
+ * @rt_least_runtime_us: Number of microseconds the least-run context in the
+ * realtime (priority) context queue has been running for.
+ * -1 if realtime context queue is empty
+ */
+struct kbasep_js_policy_cfs {
+ u64 head_runtime_us;
+ atomic64_t least_runtime_us;
+ atomic64_t rt_least_runtime_us;
+};
+
+/**
+ * struct kbasep_js_policy_cfs_ctx - per-context scheduling data for the CFS policy
+ * @runtime_us: microseconds this context has been running for
+ * @process_rt_policy: set if calling process policy scheme is a realtime
+ * scheduler and will use the priority queue. Non-mutable
+ * after ctx init
+ * @process_priority: calling process NICE priority, in the range -20..19
+ *
+ * hwaccess_lock must be held when updating @runtime_us. Initializing will occur
+ * on context init and context enqueue (which can only occur in one thread at a
+ * time), but multi-thread access only occurs while the context is in the
+ * runpool.
+ *
+ * Reads are possible without the spinlock, but an older value might be read if
+ * no memory barriers are issued beforehand.
+ */
+struct kbasep_js_policy_cfs_ctx {
+ u64 runtime_us;
+ bool process_rt_policy;
+ int process_priority;
+};
+
+/**
+ * struct kbasep_js_policy_cfs_job - per job information for CFS
+ * @ticks: number of ticks that this job has been executing for
+ *
+ * hwaccess_lock must be held when accessing @ticks.
+ */
+struct kbasep_js_policy_cfs_job {
+ u32 ticks;
+};
+
+#endif
diff --git a/midgard-0.r2p0/mali_kbase_linux.h b/midgard-0.r2p0/mali_kbase_linux.h
new file mode 100755
index 0000000..6d1e61f
--- /dev/null
+++ b/midgard-0.r2p0/mali_kbase_linux.h
@@ -0,0 +1,43 @@
+/*
+ *
+ * (C) COPYRIGHT 2010-2014 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained
+ * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
+ * Boston, MA 02110-1301, USA.
+ *
+ */
+
+
+
+
+
+/**
+ * @file mali_kbase_linux.h
+ * Base kernel APIs, Linux implementation.
+ */
+
+#ifndef _KBASE_LINUX_H_
+#define _KBASE_LINUX_H_
+
+/* All things that are needed for the Linux port. */
+#include <linux/platform_device.h>
+#include <linux/miscdevice.h>
+#include <linux/list.h>
+#include <linux/module.h>
+#include <linux/atomic.h>
+
+#if (defined(MALI_KERNEL_TEST_API) && (1 == MALI_KERNEL_TEST_API))
+ #define KBASE_EXPORT_TEST_API(func) EXPORT_SYMBOL(func)
+#else
+ #define KBASE_EXPORT_TEST_API(func)
+#endif
+
+#define KBASE_EXPORT_SYMBOL(func) EXPORT_SYMBOL(func)
+
+#endif /* _KBASE_LINUX_H_ */
diff --git a/midgard-0.r2p0/mali_kbase_mem.c b/midgard-0.r2p0/mali_kbase_mem.c
new file mode 100755
index 0000000..60d31ac
--- /dev/null
+++ b/midgard-0.r2p0/mali_kbase_mem.c
@@ -0,0 +1,2490 @@
+/*
+ *
+ * (C) COPYRIGHT 2010-2016 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained
+ * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
+ * Boston, MA 02110-1301, USA.
+ *
+ */
+
+
+
+
+
+/**
+ * @file mali_kbase_mem.c
+ * Base kernel memory APIs
+ */
+#ifdef CONFIG_DMA_SHARED_BUFFER
+#include <linux/dma-buf.h>
+#endif /* CONFIG_DMA_SHARED_BUFFER */
+#ifdef CONFIG_UMP
+#include <linux/ump.h>
+#endif /* CONFIG_UMP */
+#include <linux/kernel.h>
+#include <linux/bug.h>
+#include <linux/compat.h>
+#include <linux/version.h>
+
+#include <mali_kbase_config.h>
+#include <mali_kbase.h>
+#include <mali_midg_regmap.h>
+#include <mali_kbase_cache_policy.h>
+#include <mali_kbase_hw.h>
+#include <mali_kbase_hwaccess_time.h>
+#include <mali_kbase_tlstream.h>
+
+/**
+ * @brief Check the zone compatibility of two regions.
+ */
+static int kbase_region_tracker_match_zone(struct kbase_va_region *reg1,
+ struct kbase_va_region *reg2)
+{
+ return ((reg1->flags & KBASE_REG_ZONE_MASK) ==
+ (reg2->flags & KBASE_REG_ZONE_MASK));
+}
+
+KBASE_EXPORT_TEST_API(kbase_region_tracker_match_zone);
+
+/* This function inserts a region into the tree. */
+static void kbase_region_tracker_insert(struct kbase_context *kctx, struct kbase_va_region *new_reg)
+{
+ u64 start_pfn = new_reg->start_pfn;
+ struct rb_node **link = &(kctx->reg_rbtree.rb_node);
+ struct rb_node *parent = NULL;
+
+ /* Find the right place in the tree using tree search */
+ while (*link) {
+ struct kbase_va_region *old_reg;
+
+ parent = *link;
+ old_reg = rb_entry(parent, struct kbase_va_region, rblink);
+
+ /* RBTree requires no duplicate entries. */
+ KBASE_DEBUG_ASSERT(old_reg->start_pfn != start_pfn);
+
+ if (old_reg->start_pfn > start_pfn)
+ link = &(*link)->rb_left;
+ else
+ link = &(*link)->rb_right;
+ }
+
+ /* Put the new node there, and rebalance tree */
+ rb_link_node(&(new_reg->rblink), parent, link);
+ rb_insert_color(&(new_reg->rblink), &(kctx->reg_rbtree));
+}
+
+/* Find allocated region enclosing free range. */
+static struct kbase_va_region *kbase_region_tracker_find_region_enclosing_range_free(
+ struct kbase_context *kctx, u64 start_pfn, size_t nr_pages)
+{
+ struct rb_node *rbnode;
+ struct kbase_va_region *reg;
+
+ u64 end_pfn = start_pfn + nr_pages;
+
+ rbnode = kctx->reg_rbtree.rb_node;
+ while (rbnode) {
+ u64 tmp_start_pfn, tmp_end_pfn;
+
+ reg = rb_entry(rbnode, struct kbase_va_region, rblink);
+ tmp_start_pfn = reg->start_pfn;
+ tmp_end_pfn = reg->start_pfn + reg->nr_pages;
+
+ /* If start is lower than this, go left. */
+ if (start_pfn < tmp_start_pfn)
+ rbnode = rbnode->rb_left;
+ /* If end is higher than this, then go right. */
+ else if (end_pfn > tmp_end_pfn)
+ rbnode = rbnode->rb_right;
+ else /* Enclosing */
+ return reg;
+ }
+
+ return NULL;
+}
+
+/* Find region enclosing given address. */
+struct kbase_va_region *kbase_region_tracker_find_region_enclosing_address(struct kbase_context *kctx, u64 gpu_addr)
+{
+ struct rb_node *rbnode;
+ struct kbase_va_region *reg;
+ u64 gpu_pfn = gpu_addr >> PAGE_SHIFT;
+
+ KBASE_DEBUG_ASSERT(NULL != kctx);
+
+ lockdep_assert_held(&kctx->reg_lock);
+
+ rbnode = kctx->reg_rbtree.rb_node;
+ while (rbnode) {
+ u64 tmp_start_pfn, tmp_end_pfn;
+
+ reg = rb_entry(rbnode, struct kbase_va_region, rblink);
+ tmp_start_pfn = reg->start_pfn;
+ tmp_end_pfn = reg->start_pfn + reg->nr_pages;
+
+ /* If start is lower than this, go left. */
+ if (gpu_pfn < tmp_start_pfn)
+ rbnode = rbnode->rb_left;
+ /* If end is higher than this, then go right. */
+ else if (gpu_pfn >= tmp_end_pfn)
+ rbnode = rbnode->rb_right;
+ else /* Enclosing */
+ return reg;
+ }
+
+ return NULL;
+}
+
+KBASE_EXPORT_TEST_API(kbase_region_tracker_find_region_enclosing_address);
+
+/* Find region with given base address */
+struct kbase_va_region *kbase_region_tracker_find_region_base_address(struct kbase_context *kctx, u64 gpu_addr)
+{
+ u64 gpu_pfn = gpu_addr >> PAGE_SHIFT;
+ struct rb_node *rbnode;
+ struct kbase_va_region *reg;
+
+ KBASE_DEBUG_ASSERT(NULL != kctx);
+
+ lockdep_assert_held(&kctx->reg_lock);
+
+ rbnode = kctx->reg_rbtree.rb_node;
+ while (rbnode) {
+ reg = rb_entry(rbnode, struct kbase_va_region, rblink);
+ if (reg->start_pfn > gpu_pfn)
+ rbnode = rbnode->rb_left;
+ else if (reg->start_pfn < gpu_pfn)
+ rbnode = rbnode->rb_right;
+ else
+ return reg;
+
+ }
+
+ return NULL;
+}
+
+KBASE_EXPORT_TEST_API(kbase_region_tracker_find_region_base_address);
+
+/* Find region meeting given requirements */
+static struct kbase_va_region *kbase_region_tracker_find_region_meeting_reqs(struct kbase_context *kctx, struct kbase_va_region *reg_reqs, size_t nr_pages, size_t align)
+{
+ struct rb_node *rbnode;
+ struct kbase_va_region *reg;
+
+ /* Note that this search is a linear search, as we do not have a target
+ address in mind, so it does not benefit from the rbtree search */
+ rbnode = rb_first(&(kctx->reg_rbtree));
+ while (rbnode) {
+ reg = rb_entry(rbnode, struct kbase_va_region, rblink);
+ if ((reg->nr_pages >= nr_pages) &&
+ (reg->flags & KBASE_REG_FREE) &&
+ kbase_region_tracker_match_zone(reg, reg_reqs)) {
+ /* Check alignment */
+ u64 start_pfn = (reg->start_pfn + align - 1) & ~(align - 1);
+
+ if ((start_pfn >= reg->start_pfn) &&
+ (start_pfn <= (reg->start_pfn + reg->nr_pages - 1)) &&
+ ((start_pfn + nr_pages - 1) <= (reg->start_pfn + reg->nr_pages - 1)))
+ return reg;
+ }
+ rbnode = rb_next(rbnode);
+ }
+
+ return NULL;
+}
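+
+/*
+ * Worked example of the alignment rounding above (illustrative values): for
+ * reg->start_pfn = 5 and align = 4, the candidate start_pfn becomes
+ * (5 + 3) & ~3 = 8, which must still leave room for nr_pages within reg.
+ */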
+
+/**
+ * @brief Remove a region object from the global list.
+ *
+ * The region reg is removed, possibly by merging with other free and
+ * compatible adjacent regions. It must be called with the context
+ * region lock held. The associated memory is not released (see
+ * kbase_free_alloced_region). Internal use only.
+ */
+static int kbase_remove_va_region(struct kbase_context *kctx, struct kbase_va_region *reg)
+{
+ struct rb_node *rbprev;
+ struct kbase_va_region *prev = NULL;
+ struct rb_node *rbnext;
+ struct kbase_va_region *next = NULL;
+
+ int merged_front = 0;
+ int merged_back = 0;
+ int err = 0;
+
+ /* Try to merge with the previous block first */
+ rbprev = rb_prev(&(reg->rblink));
+ if (rbprev) {
+ prev = rb_entry(rbprev, struct kbase_va_region, rblink);
+ if ((prev->flags & KBASE_REG_FREE) && kbase_region_tracker_match_zone(prev, reg)) {
+ /* We're compatible with the previous VMA, merge with it */
+ prev->nr_pages += reg->nr_pages;
+ rb_erase(&(reg->rblink), &kctx->reg_rbtree);
+ reg = prev;
+ merged_front = 1;
+ }
+ }
+
+ /* Try to merge with the next block second */
+ /* Note we do the lookup here as the tree may have been rebalanced. */
+ rbnext = rb_next(&(reg->rblink));
+ if (rbnext) {
+ /* We're compatible with the next VMA, merge with it */
+ next = rb_entry(rbnext, struct kbase_va_region, rblink);
+ if ((next->flags & KBASE_REG_FREE) && kbase_region_tracker_match_zone(next, reg)) {
+ next->start_pfn = reg->start_pfn;
+ next->nr_pages += reg->nr_pages;
+ rb_erase(&(reg->rblink), &kctx->reg_rbtree);
+ merged_back = 1;
+ if (merged_front) {
+ /* We already merged with prev, free it */
+ kbase_free_alloced_region(reg);
+ }
+ }
+ }
+
+ /* If we failed to merge then we need to add a new block */
+ if (!(merged_front || merged_back)) {
+ /*
+ * We didn't merge anything. Add a new free
+ * placeholder and remove the original one.
+ */
+ struct kbase_va_region *free_reg;
+
+ free_reg = kbase_alloc_free_region(kctx, reg->start_pfn, reg->nr_pages, reg->flags & KBASE_REG_ZONE_MASK);
+ if (!free_reg) {
+ err = -ENOMEM;
+ goto out;
+ }
+
+ rb_replace_node(&(reg->rblink), &(free_reg->rblink), &(kctx->reg_rbtree));
+ }
+
+ out:
+ return err;
+}
+
+KBASE_EXPORT_TEST_API(kbase_remove_va_region);
+
+/**
+ * @brief Insert a VA region to the list, replacing the current at_reg.
+ */
+static int kbase_insert_va_region_nolock(struct kbase_context *kctx, struct kbase_va_region *new_reg, struct kbase_va_region *at_reg, u64 start_pfn, size_t nr_pages)
+{
+ int err = 0;
+
+ /* Must be a free region */
+ KBASE_DEBUG_ASSERT((at_reg->flags & KBASE_REG_FREE) != 0);
+ /* start_pfn should be contained within at_reg */
+ KBASE_DEBUG_ASSERT((start_pfn >= at_reg->start_pfn) && (start_pfn < at_reg->start_pfn + at_reg->nr_pages));
+ /* at least nr_pages from start_pfn should be contained within at_reg */
+ KBASE_DEBUG_ASSERT(start_pfn + nr_pages <= at_reg->start_pfn + at_reg->nr_pages);
+
+ new_reg->start_pfn = start_pfn;
+ new_reg->nr_pages = nr_pages;
+
+ /* The new region exactly covers the old one, so swap and delete the old one. */
+ if (at_reg->start_pfn == start_pfn && at_reg->nr_pages == nr_pages) {
+ rb_replace_node(&(at_reg->rblink), &(new_reg->rblink), &(kctx->reg_rbtree));
+ kbase_free_alloced_region(at_reg);
+ }
+ /* New region replaces the start of the old one, so insert before. */
+ else if (at_reg->start_pfn == start_pfn) {
+ at_reg->start_pfn += nr_pages;
+ KBASE_DEBUG_ASSERT(at_reg->nr_pages >= nr_pages);
+ at_reg->nr_pages -= nr_pages;
+
+ kbase_region_tracker_insert(kctx, new_reg);
+ }
+ /* New region replaces the end of the old one, so insert after. */
+ else if ((at_reg->start_pfn + at_reg->nr_pages) == (start_pfn + nr_pages)) {
+ at_reg->nr_pages -= nr_pages;
+
+ kbase_region_tracker_insert(kctx, new_reg);
+ }
+ /* New region splits the old one, so insert and create new */
+ else {
+ struct kbase_va_region *new_front_reg;
+
+ new_front_reg = kbase_alloc_free_region(kctx,
+ at_reg->start_pfn,
+ start_pfn - at_reg->start_pfn,
+ at_reg->flags & KBASE_REG_ZONE_MASK);
+
+ if (new_front_reg) {
+ at_reg->nr_pages -= nr_pages + new_front_reg->nr_pages;
+ at_reg->start_pfn = start_pfn + nr_pages;
+
+ kbase_region_tracker_insert(kctx, new_front_reg);
+ kbase_region_tracker_insert(kctx, new_reg);
+ } else {
+ err = -ENOMEM;
+ }
+ }
+
+ return err;
+}
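+
+/*
+ * The split case above, illustrated with example pfn values: inserting a
+ * 20-page region at pfn 40 into a free region covering pfn 0..99 yields
+ *
+ *   [ free 0..39 ][ new_reg 40..59 ][ free (at_reg) 60..99 ]
+ *
+ * where the leading free block is the newly allocated new_front_reg.
+ */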
+
+/**
+ * @brief Add a VA region to the list.
+ */
+int kbase_add_va_region(struct kbase_context *kctx,
+ struct kbase_va_region *reg, u64 addr,
+ size_t nr_pages, size_t align)
+{
+ struct kbase_va_region *tmp;
+ u64 gpu_pfn = addr >> PAGE_SHIFT;
+ int err = 0;
+
+ KBASE_DEBUG_ASSERT(NULL != kctx);
+ KBASE_DEBUG_ASSERT(NULL != reg);
+
+ lockdep_assert_held(&kctx->reg_lock);
+
+ if (!align)
+ align = 1;
+
+ /* must be a power of 2 */
+ KBASE_DEBUG_ASSERT((align & (align - 1)) == 0);
+ KBASE_DEBUG_ASSERT(nr_pages > 0);
+
+ /* Path 1: Map a specific address. Find the enclosing region, which *must* be free. */
+ if (gpu_pfn) {
+ struct device *dev = kctx->kbdev->dev;
+
+ KBASE_DEBUG_ASSERT(!(gpu_pfn & (align - 1)));
+
+ tmp = kbase_region_tracker_find_region_enclosing_range_free(kctx, gpu_pfn, nr_pages);
+ if (!tmp) {
+ dev_warn(dev, "Enclosing region not found: 0x%08llx gpu_pfn, %zu nr_pages", gpu_pfn, nr_pages);
+ err = -ENOMEM;
+ goto exit;
+ }
+
+ if ((!kbase_region_tracker_match_zone(tmp, reg)) ||
+ (!(tmp->flags & KBASE_REG_FREE))) {
+ dev_warn(dev, "Zone mismatch: %lu != %lu", tmp->flags & KBASE_REG_ZONE_MASK, reg->flags & KBASE_REG_ZONE_MASK);
+ dev_warn(dev, "!(tmp->flags & KBASE_REG_FREE): tmp->start_pfn=0x%llx tmp->flags=0x%lx tmp->nr_pages=0x%zx gpu_pfn=0x%llx nr_pages=0x%zx\n", tmp->start_pfn, tmp->flags, tmp->nr_pages, gpu_pfn, nr_pages);
+ dev_warn(dev, "in function %s (%p, %p, 0x%llx, 0x%zx, 0x%zx)\n", __func__, kctx, reg, addr, nr_pages, align);
+ err = -ENOMEM;
+ goto exit;
+ }
+
+ err = kbase_insert_va_region_nolock(kctx, reg, tmp, gpu_pfn, nr_pages);
+ if (err) {
+ dev_warn(dev, "Failed to insert va region");
+ err = -ENOMEM;
+ goto exit;
+ }
+
+ goto exit;
+ }
+
+ /* Path 2: Map any free address which meets the requirements. */
+ {
+ u64 start_pfn;
+
+ /*
+ * Depending on the zone the allocation request is for
+ * we might need to retry it.
+ */
+ do {
+ tmp = kbase_region_tracker_find_region_meeting_reqs(
+ kctx, reg, nr_pages, align);
+ if (tmp) {
+ start_pfn = (tmp->start_pfn + align - 1) &
+ ~(align - 1);
+ err = kbase_insert_va_region_nolock(kctx, reg,
+ tmp, start_pfn, nr_pages);
+ break;
+ }
+
+ /*
+ * If the allocation is not from the same zone as JIT
+ * then don't retry, we're out of VA and there is
+ * nothing which can be done about it.
+ */
+ if ((reg->flags & KBASE_REG_ZONE_MASK) !=
+ KBASE_REG_ZONE_CUSTOM_VA)
+ break;
+ } while (kbase_jit_evict(kctx));
+
+ if (!tmp)
+ err = -ENOMEM;
+ }
+
+ exit:
+ return err;
+}
+
+KBASE_EXPORT_TEST_API(kbase_add_va_region);
+
+/**
+ * @brief Initialize the internal region tracker data structure.
+ */
+static void kbase_region_tracker_ds_init(struct kbase_context *kctx,
+ struct kbase_va_region *same_va_reg,
+ struct kbase_va_region *exec_reg,
+ struct kbase_va_region *custom_va_reg)
+{
+ kctx->reg_rbtree = RB_ROOT;
+ kbase_region_tracker_insert(kctx, same_va_reg);
+
+ /* exec and custom_va_reg don't always exist */
+ if (exec_reg && custom_va_reg) {
+ kbase_region_tracker_insert(kctx, exec_reg);
+ kbase_region_tracker_insert(kctx, custom_va_reg);
+ }
+}
+
+void kbase_region_tracker_term(struct kbase_context *kctx)
+{
+ struct rb_node *rbnode;
+ struct kbase_va_region *reg;
+
+ do {
+ rbnode = rb_first(&(kctx->reg_rbtree));
+ if (rbnode) {
+ rb_erase(rbnode, &(kctx->reg_rbtree));
+ reg = rb_entry(rbnode, struct kbase_va_region, rblink);
+ kbase_free_alloced_region(reg);
+ }
+ } while (rbnode);
+}
+
+/**
+ * Initialize the region tracker data structure.
+ */
+int kbase_region_tracker_init(struct kbase_context *kctx)
+{
+ struct kbase_va_region *same_va_reg;
+ struct kbase_va_region *exec_reg = NULL;
+ struct kbase_va_region *custom_va_reg = NULL;
+ size_t same_va_bits = sizeof(void *) * BITS_PER_BYTE;
+ u64 custom_va_size = KBASE_REG_ZONE_CUSTOM_VA_SIZE;
+ u64 gpu_va_limit = (1ULL << kctx->kbdev->gpu_props.mmu.va_bits) >> PAGE_SHIFT;
+ u64 same_va_pages;
+ int err;
+
+ /* Take the lock as kbase_free_alloced_region requires it */
+ kbase_gpu_vm_lock(kctx);
+
+#if defined(CONFIG_ARM64)
+ same_va_bits = VA_BITS;
+#elif defined(CONFIG_X86_64)
+ same_va_bits = 47;
+#elif defined(CONFIG_64BIT)
+#error Unsupported 64-bit architecture
+#endif
+
+#ifdef CONFIG_64BIT
+ if (kbase_ctx_flag(kctx, KCTX_COMPAT))
+ same_va_bits = 32;
+ else if (kbase_hw_has_feature(kctx->kbdev, BASE_HW_FEATURE_33BIT_VA))
+ same_va_bits = 33;
+#endif
+
+ if (kctx->kbdev->gpu_props.mmu.va_bits < same_va_bits) {
+ err = -EINVAL;
+ goto fail_unlock;
+ }
+
+ same_va_pages = (1ULL << (same_va_bits - PAGE_SHIFT)) - 1;
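+	/*
+	 * Worked example (assuming same_va_bits == 39, a typical arm64 VA_BITS,
+	 * and 4K pages): same_va_pages is (1 << 27) - 1, i.e. a SAME_VA zone of
+	 * just under 512GB.
+	 */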
+ /* all have SAME_VA */
+ same_va_reg = kbase_alloc_free_region(kctx, 1,
+ same_va_pages,
+ KBASE_REG_ZONE_SAME_VA);
+
+ if (!same_va_reg) {
+ err = -ENOMEM;
+ goto fail_unlock;
+ }
+
+#ifdef CONFIG_64BIT
+ /* 32-bit clients have exec and custom VA zones */
+ if (kbase_ctx_flag(kctx, KCTX_COMPAT)) {
+#endif
+ if (gpu_va_limit <= KBASE_REG_ZONE_CUSTOM_VA_BASE) {
+ err = -EINVAL;
+ goto fail_free_same_va;
+ }
+ /* If the current size of TMEM is out of range of the
+ * virtual address space addressable by the MMU then
+ * we should shrink it to fit
+ */
+ if ((KBASE_REG_ZONE_CUSTOM_VA_BASE + KBASE_REG_ZONE_CUSTOM_VA_SIZE) >= gpu_va_limit)
+ custom_va_size = gpu_va_limit - KBASE_REG_ZONE_CUSTOM_VA_BASE;
+
+ exec_reg = kbase_alloc_free_region(kctx,
+ KBASE_REG_ZONE_EXEC_BASE,
+ KBASE_REG_ZONE_EXEC_SIZE,
+ KBASE_REG_ZONE_EXEC);
+
+ if (!exec_reg) {
+ err = -ENOMEM;
+ goto fail_free_same_va;
+ }
+
+ custom_va_reg = kbase_alloc_free_region(kctx,
+ KBASE_REG_ZONE_CUSTOM_VA_BASE,
+ custom_va_size, KBASE_REG_ZONE_CUSTOM_VA);
+
+ if (!custom_va_reg) {
+ err = -ENOMEM;
+ goto fail_free_exec;
+ }
+#ifdef CONFIG_64BIT
+ }
+#endif
+
+ kbase_region_tracker_ds_init(kctx, same_va_reg, exec_reg, custom_va_reg);
+
+ kctx->same_va_end = same_va_pages + 1;
+
+ kbase_gpu_vm_unlock(kctx);
+ return 0;
+
+fail_free_exec:
+ kbase_free_alloced_region(exec_reg);
+fail_free_same_va:
+ kbase_free_alloced_region(same_va_reg);
+fail_unlock:
+ kbase_gpu_vm_unlock(kctx);
+ return err;
+}
+
+int kbase_region_tracker_init_jit(struct kbase_context *kctx, u64 jit_va_pages)
+{
+#ifdef CONFIG_64BIT
+ struct kbase_va_region *same_va;
+ struct kbase_va_region *custom_va_reg;
+ u64 same_va_bits;
+ u64 total_va_size;
+ int err;
+
+ /*
+ * Nothing to do for 32-bit clients, JIT uses the existing
+ * custom VA zone.
+ */
+ if (kbase_ctx_flag(kctx, KCTX_COMPAT))
+ return 0;
+
+#if defined(CONFIG_ARM64)
+ same_va_bits = VA_BITS;
+#elif defined(CONFIG_X86_64)
+ same_va_bits = 47;
+#elif defined(CONFIG_64BIT)
+#error Unsupported 64-bit architecture
+#endif
+
+ if (kbase_hw_has_feature(kctx->kbdev, BASE_HW_FEATURE_33BIT_VA))
+ same_va_bits = 33;
+
+ total_va_size = (1ULL << (same_va_bits - PAGE_SHIFT)) - 1;
+
+ kbase_gpu_vm_lock(kctx);
+
+ /*
+ * Modify the same VA free region after creation. Be careful to ensure
+ * that allocations haven't been made as they could cause an overlap
+ * to happen with existing same VA allocations and the custom VA zone.
+ */
+ same_va = kbase_region_tracker_find_region_base_address(kctx,
+ PAGE_SIZE);
+ if (!same_va) {
+ err = -ENOMEM;
+ goto fail_unlock;
+ }
+
+ /* The region flag or region size has changed since creation so bail. */
+ if ((!(same_va->flags & KBASE_REG_FREE)) ||
+ (same_va->nr_pages != total_va_size)) {
+ err = -ENOMEM;
+ goto fail_unlock;
+ }
+
+ if (same_va->nr_pages < jit_va_pages ||
+ kctx->same_va_end < jit_va_pages) {
+ err = -ENOMEM;
+ goto fail_unlock;
+ }
+
+ /* It's safe to adjust the same VA zone now */
+ same_va->nr_pages -= jit_va_pages;
+ kctx->same_va_end -= jit_va_pages;
+
+ /*
+ * Create a custom VA zone at the end of the VA for allocations which
+ * JIT can use so it doesn't have to allocate VA from the kernel.
+ */
+ custom_va_reg = kbase_alloc_free_region(kctx,
+ kctx->same_va_end,
+ jit_va_pages,
+ KBASE_REG_ZONE_CUSTOM_VA);
+ if (!custom_va_reg) {
+ /*
+ * The context will be destroyed if we fail here so no point
+ * reverting the change we made to same_va.
+ */
+ err = -ENOMEM;
+ goto fail_unlock;
+ }
+
+ kbase_region_tracker_insert(kctx, custom_va_reg);
+
+ kbase_gpu_vm_unlock(kctx);
+ return 0;
+
+fail_unlock:
+ kbase_gpu_vm_unlock(kctx);
+ return err;
+#else
+ return 0;
+#endif
+}
+
+int kbase_mem_init(struct kbase_device *kbdev)
+{
+ struct kbasep_mem_device *memdev;
+
+ KBASE_DEBUG_ASSERT(kbdev);
+
+ memdev = &kbdev->memdev;
+ kbdev->mem_pool_max_size_default = KBASE_MEM_POOL_MAX_SIZE_KCTX;
+
+ /* Initialize memory usage */
+ atomic_set(&memdev->used_pages, 0);
+
+ return kbase_mem_pool_init(&kbdev->mem_pool,
+ KBASE_MEM_POOL_MAX_SIZE_KBDEV, kbdev, NULL);
+}
+
+void kbase_mem_halt(struct kbase_device *kbdev)
+{
+ CSTD_UNUSED(kbdev);
+}
+
+void kbase_mem_term(struct kbase_device *kbdev)
+{
+ struct kbasep_mem_device *memdev;
+ int pages;
+
+ KBASE_DEBUG_ASSERT(kbdev);
+
+ memdev = &kbdev->memdev;
+
+ pages = atomic_read(&memdev->used_pages);
+ if (pages != 0)
+ dev_warn(kbdev->dev, "%s: %d pages in use!\n", __func__, pages);
+
+ kbase_mem_pool_term(&kbdev->mem_pool);
+}
+
+KBASE_EXPORT_TEST_API(kbase_mem_term);
+
+
+
+
+/**
+ * @brief Allocate a free region object.
+ *
+ * The allocated object is not part of any list yet, and is flagged as
+ * KBASE_REG_FREE. No mapping is allocated yet.
+ *
+ * zone is KBASE_REG_ZONE_CUSTOM_VA, KBASE_REG_ZONE_SAME_VA, or KBASE_REG_ZONE_EXEC
+ *
+ */
+struct kbase_va_region *kbase_alloc_free_region(struct kbase_context *kctx, u64 start_pfn, size_t nr_pages, int zone)
+{
+ struct kbase_va_region *new_reg;
+
+ KBASE_DEBUG_ASSERT(kctx != NULL);
+
+ /* zone argument should only contain zone related region flags */
+ KBASE_DEBUG_ASSERT((zone & ~KBASE_REG_ZONE_MASK) == 0);
+ KBASE_DEBUG_ASSERT(nr_pages > 0);
+ /* 64-bit address range is the max */
+ KBASE_DEBUG_ASSERT(start_pfn + nr_pages <= (U64_MAX / PAGE_SIZE));
+
+ new_reg = kzalloc(sizeof(*new_reg), GFP_KERNEL);
+
+ if (!new_reg)
+ return NULL;
+
+ new_reg->cpu_alloc = NULL; /* no alloc bound yet */
+ new_reg->gpu_alloc = NULL; /* no alloc bound yet */
+ new_reg->kctx = kctx;
+ new_reg->flags = zone | KBASE_REG_FREE;
+
+ new_reg->flags |= KBASE_REG_GROWABLE;
+
+ new_reg->start_pfn = start_pfn;
+ new_reg->nr_pages = nr_pages;
+
+ return new_reg;
+}
+
+KBASE_EXPORT_TEST_API(kbase_alloc_free_region);
+
+/**
+ * @brief Free a region object.
+ *
+ * The described region must be freed of any mapping.
+ *
+ * If the region is not flagged as KBASE_REG_FREE, the region's
+ * alloc object will be released.
+ * It is a bug if no alloc object exists for non-free regions.
+ *
+ */
+void kbase_free_alloced_region(struct kbase_va_region *reg)
+{
+ if (!(reg->flags & KBASE_REG_FREE)) {
+ /*
+ * The physical allocation should have been removed from the
+ * eviction list before this function is called. However, in the
+ * case of abnormal process termination or the app leaking the
+ * memory kbase_mem_free_region is not called so it can still be
+ * on the list at termination time of the region tracker.
+ */
+ if (!list_empty(&reg->gpu_alloc->evict_node)) {
+ /*
+ * Unlink the physical allocation before unmaking it
+ * evictable so that the allocation isn't grown back to
+ * its last backed size as we're going to unmap it
+ * anyway.
+ */
+ reg->cpu_alloc->reg = NULL;
+ if (reg->cpu_alloc != reg->gpu_alloc)
+ reg->gpu_alloc->reg = NULL;
+
+ /*
+ * If a region has been made evictable then we must
+ * unmake it before trying to free it.
+ * If the memory hasn't been reclaimed it will be
+ * unmapped and freed below; if it has been reclaimed
+ * then the operations below are no-ops.
+ */
+ if (reg->flags & KBASE_REG_DONT_NEED) {
+ KBASE_DEBUG_ASSERT(reg->cpu_alloc->type ==
+ KBASE_MEM_TYPE_NATIVE);
+ kbase_mem_evictable_unmake(reg->gpu_alloc);
+ }
+ }
+
+ /*
+ * Remove the region from the sticky resource metadata
+ * list should it be there.
+ */
+ kbase_sticky_resource_release(reg->kctx, NULL,
+ reg->start_pfn << PAGE_SHIFT);
+
+ kbase_mem_phy_alloc_put(reg->cpu_alloc);
+ kbase_mem_phy_alloc_put(reg->gpu_alloc);
+ /* To detect use-after-free in debug builds */
+ KBASE_DEBUG_CODE(reg->flags |= KBASE_REG_FREE);
+ }
+ kfree(reg);
+}
+
+KBASE_EXPORT_TEST_API(kbase_free_alloced_region);
+
+int kbase_gpu_mmap(struct kbase_context *kctx, struct kbase_va_region *reg, u64 addr, size_t nr_pages, size_t align)
+{
+ int err;
+ size_t i = 0;
+ unsigned long attr;
+ unsigned long mask = ~KBASE_REG_MEMATTR_MASK;
+
+ if ((kctx->kbdev->system_coherency == COHERENCY_ACE) &&
+ (reg->flags & KBASE_REG_SHARE_BOTH))
+ attr = KBASE_REG_MEMATTR_INDEX(AS_MEMATTR_INDEX_OUTER_WA);
+ else
+ attr = KBASE_REG_MEMATTR_INDEX(AS_MEMATTR_INDEX_WRITE_ALLOC);
+
+ KBASE_DEBUG_ASSERT(NULL != kctx);
+ KBASE_DEBUG_ASSERT(NULL != reg);
+
+ err = kbase_add_va_region(kctx, reg, addr, nr_pages, align);
+ if (err)
+ return err;
+
+ if (reg->gpu_alloc->type == KBASE_MEM_TYPE_ALIAS) {
+ u64 stride;
+ struct kbase_mem_phy_alloc *alloc;
+
+ alloc = reg->gpu_alloc;
+ stride = alloc->imported.alias.stride;
+ KBASE_DEBUG_ASSERT(alloc->imported.alias.aliased);
+ for (i = 0; i < alloc->imported.alias.nents; i++) {
+ if (alloc->imported.alias.aliased[i].alloc) {
+ err = kbase_mmu_insert_pages(kctx,
+ reg->start_pfn + (i * stride),
+ alloc->imported.alias.aliased[i].alloc->pages + alloc->imported.alias.aliased[i].offset,
+ alloc->imported.alias.aliased[i].length,
+ reg->flags);
+ if (err)
+ goto bad_insert;
+
+ kbase_mem_phy_alloc_gpu_mapped(alloc->imported.alias.aliased[i].alloc);
+ } else {
+ err = kbase_mmu_insert_single_page(kctx,
+ reg->start_pfn + i * stride,
+ page_to_phys(kctx->aliasing_sink_page),
+ alloc->imported.alias.aliased[i].length,
+ (reg->flags & mask) | attr);
+
+ if (err)
+ goto bad_insert;
+ }
+ }
+ } else {
+ err = kbase_mmu_insert_pages(kctx, reg->start_pfn,
+ kbase_get_gpu_phy_pages(reg),
+ kbase_reg_current_backed_size(reg),
+ reg->flags);
+ if (err)
+ goto bad_insert;
+ kbase_mem_phy_alloc_gpu_mapped(reg->gpu_alloc);
+ }
+
+ return err;
+
+bad_insert:
+ if (reg->gpu_alloc->type == KBASE_MEM_TYPE_ALIAS) {
+ u64 stride;
+
+ stride = reg->gpu_alloc->imported.alias.stride;
+ KBASE_DEBUG_ASSERT(reg->gpu_alloc->imported.alias.aliased);
+ while (i--)
+ if (reg->gpu_alloc->imported.alias.aliased[i].alloc) {
+ kbase_mmu_teardown_pages(kctx, reg->start_pfn + (i * stride), reg->gpu_alloc->imported.alias.aliased[i].length);
+ kbase_mem_phy_alloc_gpu_unmapped(reg->gpu_alloc->imported.alias.aliased[i].alloc);
+ }
+ }
+
+ kbase_remove_va_region(kctx, reg);
+
+ return err;
+}
+
+KBASE_EXPORT_TEST_API(kbase_gpu_mmap);
+
+int kbase_gpu_munmap(struct kbase_context *kctx, struct kbase_va_region *reg)
+{
+ int err;
+
+ if (reg->start_pfn == 0)
+ return 0;
+
+ if (reg->gpu_alloc && reg->gpu_alloc->type == KBASE_MEM_TYPE_ALIAS) {
+ size_t i;
+
+ err = kbase_mmu_teardown_pages(kctx, reg->start_pfn, reg->nr_pages);
+ KBASE_DEBUG_ASSERT(reg->gpu_alloc->imported.alias.aliased);
+ for (i = 0; i < reg->gpu_alloc->imported.alias.nents; i++)
+ if (reg->gpu_alloc->imported.alias.aliased[i].alloc)
+ kbase_mem_phy_alloc_gpu_unmapped(reg->gpu_alloc->imported.alias.aliased[i].alloc);
+ } else {
+ err = kbase_mmu_teardown_pages(kctx, reg->start_pfn, kbase_reg_current_backed_size(reg));
+ kbase_mem_phy_alloc_gpu_unmapped(reg->gpu_alloc);
+ }
+
+ if (err)
+ return err;
+
+ err = kbase_remove_va_region(kctx, reg);
+ return err;
+}
+
+static struct kbase_cpu_mapping *kbasep_find_enclosing_cpu_mapping_of_region(const struct kbase_va_region *reg, unsigned long uaddr, size_t size)
+{
+ struct kbase_cpu_mapping *map;
+ struct list_head *pos;
+
+ KBASE_DEBUG_ASSERT(NULL != reg);
+ KBASE_DEBUG_ASSERT(reg->cpu_alloc);
+
+ if ((uintptr_t) uaddr + size < (uintptr_t) uaddr) /* overflow check */
+ return NULL;
+
+ list_for_each(pos, &reg->cpu_alloc->mappings) {
+ map = list_entry(pos, struct kbase_cpu_mapping, mappings_list);
+ if (map->vm_start <= uaddr && map->vm_end >= uaddr + size)
+ return map;
+ }
+
+ return NULL;
+}
+
+KBASE_EXPORT_TEST_API(kbasep_find_enclosing_cpu_mapping_of_region);
+
+int kbasep_find_enclosing_cpu_mapping_offset(
+ struct kbase_context *kctx, u64 gpu_addr,
+ unsigned long uaddr, size_t size, u64 * offset)
+{
+ struct kbase_cpu_mapping *map = NULL;
+ const struct kbase_va_region *reg;
+ int err = -EINVAL;
+
+ KBASE_DEBUG_ASSERT(kctx != NULL);
+
+ kbase_gpu_vm_lock(kctx);
+
+ reg = kbase_region_tracker_find_region_enclosing_address(kctx, gpu_addr);
+ if (reg && !(reg->flags & KBASE_REG_FREE)) {
+ map = kbasep_find_enclosing_cpu_mapping_of_region(reg, uaddr,
+ size);
+ if (map) {
+ *offset = (uaddr - PTR_TO_U64(map->vm_start)) +
+ (map->page_off << PAGE_SHIFT);
+ err = 0;
+ }
+ }
+
+ kbase_gpu_vm_unlock(kctx);
+
+ return err;
+}
+
+KBASE_EXPORT_TEST_API(kbasep_find_enclosing_cpu_mapping_offset);
+
+void kbase_sync_single(struct kbase_context *kctx,
+ phys_addr_t cpu_pa, phys_addr_t gpu_pa,
+ off_t offset, size_t size, enum kbase_sync_type sync_fn)
+{
+ struct page *cpu_page;
+
+ cpu_page = pfn_to_page(PFN_DOWN(cpu_pa));
+
+ if (likely(cpu_pa == gpu_pa)) {
+ dma_addr_t dma_addr;
+
+ BUG_ON(!cpu_page);
+ BUG_ON(offset + size > PAGE_SIZE);
+
+ dma_addr = kbase_dma_addr(cpu_page) + offset;
+ if (sync_fn == KBASE_SYNC_TO_CPU)
+ dma_sync_single_for_cpu(kctx->kbdev->dev, dma_addr,
+ size, DMA_BIDIRECTIONAL);
+ else if (sync_fn == KBASE_SYNC_TO_DEVICE)
+ dma_sync_single_for_device(kctx->kbdev->dev, dma_addr,
+ size, DMA_BIDIRECTIONAL);
+ } else {
+ void *src = NULL;
+ void *dst = NULL;
+ struct page *gpu_page;
+
+ if (WARN(!gpu_pa, "No GPU PA found for infinite cache op"))
+ return;
+
+ gpu_page = pfn_to_page(PFN_DOWN(gpu_pa));
+
+ if (sync_fn == KBASE_SYNC_TO_DEVICE) {
+ src = ((unsigned char *)kmap(cpu_page)) + offset;
+ dst = ((unsigned char *)kmap(gpu_page)) + offset;
+ } else if (sync_fn == KBASE_SYNC_TO_CPU) {
+ dma_sync_single_for_cpu(kctx->kbdev->dev,
+ kbase_dma_addr(gpu_page) + offset,
+ size, DMA_BIDIRECTIONAL);
+ src = ((unsigned char *)kmap(gpu_page)) + offset;
+ dst = ((unsigned char *)kmap(cpu_page)) + offset;
+ }
+ memcpy(dst, src, size);
+ kunmap(gpu_page);
+ kunmap(cpu_page);
+ if (sync_fn == KBASE_SYNC_TO_DEVICE)
+ dma_sync_single_for_device(kctx->kbdev->dev,
+ kbase_dma_addr(gpu_page) + offset,
+ size, DMA_BIDIRECTIONAL);
+ }
+}
+
+static int kbase_do_syncset(struct kbase_context *kctx,
+ struct base_syncset *set, enum kbase_sync_type sync_fn)
+{
+ int err = 0;
+ struct basep_syncset *sset = &set->basep_sset;
+ struct kbase_va_region *reg;
+ struct kbase_cpu_mapping *map;
+ unsigned long start;
+ size_t size;
+ phys_addr_t *cpu_pa;
+ phys_addr_t *gpu_pa;
+ u64 page_off, page_count;
+ u64 i;
+ off_t offset;
+
+ kbase_os_mem_map_lock(kctx);
+ kbase_gpu_vm_lock(kctx);
+
+ /* find the region where the virtual address is contained */
+ reg = kbase_region_tracker_find_region_enclosing_address(kctx,
+ sset->mem_handle.basep.handle);
+ if (!reg) {
+ dev_warn(kctx->kbdev->dev, "Can't find region at VA 0x%016llX",
+ sset->mem_handle.basep.handle);
+ err = -EINVAL;
+ goto out_unlock;
+ }
+
+ if (!(reg->flags & KBASE_REG_CPU_CACHED))
+ goto out_unlock;
+
+ start = (uintptr_t)sset->user_addr;
+ size = (size_t)sset->size;
+
+ map = kbasep_find_enclosing_cpu_mapping_of_region(reg, start, size);
+ if (!map) {
+ dev_warn(kctx->kbdev->dev, "Can't find CPU mapping 0x%016lX for VA 0x%016llX",
+ start, sset->mem_handle.basep.handle);
+ err = -EINVAL;
+ goto out_unlock;
+ }
+
+ offset = start & (PAGE_SIZE - 1);
+ page_off = map->page_off + ((start - map->vm_start) >> PAGE_SHIFT);
+ page_count = (size + offset + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
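+	/*
+	 * Worked example (assuming 4K pages, illustrative addresses): for
+	 * start = map->vm_start + 0x1234 and size = 0x3000, offset is 0x234,
+	 * page_off advances by one page and page_count is
+	 * (0x3000 + 0x234 + 0xfff) >> 12 = 4: a partial first page, two full
+	 * middle pages and a partial last page of 0x234 bytes.
+	 */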
+ cpu_pa = kbase_get_cpu_phy_pages(reg);
+ gpu_pa = kbase_get_gpu_phy_pages(reg);
+
+ /* Sync first page */
+ if (cpu_pa[page_off]) {
+ size_t sz = MIN(((size_t) PAGE_SIZE - offset), size);
+
+ kbase_sync_single(kctx, cpu_pa[page_off], gpu_pa[page_off],
+ offset, sz, sync_fn);
+ }
+
+ /* Sync middle pages (if any) */
+ for (i = 1; page_count > 2 && i < page_count - 1; i++) {
+ /* we grow upwards, so bail on first non-present page */
+ if (!cpu_pa[page_off + i])
+ break;
+
+ kbase_sync_single(kctx, cpu_pa[page_off + i],
+ gpu_pa[page_off + i], 0, PAGE_SIZE, sync_fn);
+ }
+
+ /* Sync last page (if any) */
+ if (page_count > 1 && cpu_pa[page_off + page_count - 1]) {
+ size_t sz = ((start + size - 1) & ~PAGE_MASK) + 1;
+
+ kbase_sync_single(kctx, cpu_pa[page_off + page_count - 1],
+ gpu_pa[page_off + page_count - 1], 0, sz,
+ sync_fn);
+ }
+
+out_unlock:
+ kbase_gpu_vm_unlock(kctx);
+ kbase_os_mem_map_unlock(kctx);
+ return err;
+}
+
+int kbase_sync_now(struct kbase_context *kctx, struct base_syncset *syncset)
+{
+ int err = -EINVAL;
+ struct basep_syncset *sset;
+
+ KBASE_DEBUG_ASSERT(NULL != kctx);
+ KBASE_DEBUG_ASSERT(NULL != syncset);
+
+ sset = &syncset->basep_sset;
+
+ switch (sset->type) {
+ case BASE_SYNCSET_OP_MSYNC:
+ err = kbase_do_syncset(kctx, syncset, KBASE_SYNC_TO_DEVICE);
+ break;
+
+ case BASE_SYNCSET_OP_CSYNC:
+ err = kbase_do_syncset(kctx, syncset, KBASE_SYNC_TO_CPU);
+ break;
+
+ default:
+ dev_warn(kctx->kbdev->dev, "Unknown msync op %d\n", sset->type);
+ break;
+ }
+
+ return err;
+}
+
+KBASE_EXPORT_TEST_API(kbase_sync_now);
+
+/* vm lock must be held */
+int kbase_mem_free_region(struct kbase_context *kctx, struct kbase_va_region *reg)
+{
+ int err;
+
+ KBASE_DEBUG_ASSERT(NULL != kctx);
+ KBASE_DEBUG_ASSERT(NULL != reg);
+ lockdep_assert_held(&kctx->reg_lock);
+
+ /*
+ * Unlink the physical allocation before unmaking it evictable so
+ * that the allocation isn't grown back to its last backed size
+ * as we're going to unmap it anyway.
+ */
+ reg->cpu_alloc->reg = NULL;
+ if (reg->cpu_alloc != reg->gpu_alloc)
+ reg->gpu_alloc->reg = NULL;
+
+ /*
+ * If a region has been made evictable then we must unmake it
+ * before trying to free it.
+ * If the memory hasn't been reclaimed it will be unmapped and freed
+ * below; if it has been reclaimed then the operations below are no-ops.
+ */
+ if (reg->flags & KBASE_REG_DONT_NEED) {
+ KBASE_DEBUG_ASSERT(reg->cpu_alloc->type ==
+ KBASE_MEM_TYPE_NATIVE);
+ kbase_mem_evictable_unmake(reg->gpu_alloc);
+ }
+
+ err = kbase_gpu_munmap(kctx, reg);
+ if (err) {
+ dev_warn(reg->kctx->kbdev->dev, "Could not unmap from the GPU...\n");
+ goto out;
+ }
+
+ /* This will also free the physical pages */
+ kbase_free_alloced_region(reg);
+
+ out:
+ return err;
+}
+
+KBASE_EXPORT_TEST_API(kbase_mem_free_region);
+
+/**
+ * @brief Free the region from the GPU and unregister it.
+ *
+ * This function implements the free operation on a memory segment.
+ * It will loudly fail if called with outstanding mappings.
+ */
+int kbase_mem_free(struct kbase_context *kctx, u64 gpu_addr)
+{
+ int err = 0;
+ struct kbase_va_region *reg;
+
+ KBASE_DEBUG_ASSERT(kctx != NULL);
+
+ if (0 == gpu_addr) {
+ dev_warn(kctx->kbdev->dev, "gpu_addr 0 is reserved for the ringbuffer and it's an error to try to free it using kbase_mem_free\n");
+ return -EINVAL;
+ }
+ kbase_gpu_vm_lock(kctx);
+
+ if (gpu_addr >= BASE_MEM_COOKIE_BASE &&
+ gpu_addr < BASE_MEM_FIRST_FREE_ADDRESS) {
+ int cookie = PFN_DOWN(gpu_addr - BASE_MEM_COOKIE_BASE);
+
+ reg = kctx->pending_regions[cookie];
+ if (!reg) {
+ err = -EINVAL;
+ goto out_unlock;
+ }
+
+ /* ask to unlink the cookie as we'll free it */
+
+ kctx->pending_regions[cookie] = NULL;
+ kctx->cookies |= (1UL << cookie);
+
+ kbase_free_alloced_region(reg);
+ } else {
+ /* A real GPU va */
+ /* Validate the region */
+ reg = kbase_region_tracker_find_region_base_address(kctx, gpu_addr);
+ if (!reg || (reg->flags & KBASE_REG_FREE)) {
+ dev_warn(kctx->kbdev->dev, "kbase_mem_free called with nonexistent gpu_addr 0x%llX",
+ gpu_addr);
+ err = -EINVAL;
+ goto out_unlock;
+ }
+
+ if ((reg->flags & KBASE_REG_ZONE_MASK) == KBASE_REG_ZONE_SAME_VA) {
+ /* SAME_VA must be freed through munmap */
+ dev_warn(kctx->kbdev->dev, "%s called on SAME_VA memory 0x%llX", __func__,
+ gpu_addr);
+ err = -EINVAL;
+ goto out_unlock;
+ }
+ err = kbase_mem_free_region(kctx, reg);
+ }
+
+ out_unlock:
+ kbase_gpu_vm_unlock(kctx);
+ return err;
+}
+
+KBASE_EXPORT_TEST_API(kbase_mem_free);
+
+void kbase_update_region_flags(struct kbase_context *kctx,
+ struct kbase_va_region *reg, unsigned long flags)
+{
+ KBASE_DEBUG_ASSERT(NULL != reg);
+ KBASE_DEBUG_ASSERT((flags & ~((1ul << BASE_MEM_FLAGS_NR_BITS) - 1)) == 0);
+
+ reg->flags |= kbase_cache_enabled(flags, reg->nr_pages);
+ /* all memory is now growable */
+ reg->flags |= KBASE_REG_GROWABLE;
+
+ if (flags & BASE_MEM_GROW_ON_GPF)
+ reg->flags |= KBASE_REG_PF_GROW;
+
+ if (flags & BASE_MEM_PROT_CPU_WR)
+ reg->flags |= KBASE_REG_CPU_WR;
+
+ if (flags & BASE_MEM_PROT_CPU_RD)
+ reg->flags |= KBASE_REG_CPU_RD;
+
+ if (flags & BASE_MEM_PROT_GPU_WR)
+ reg->flags |= KBASE_REG_GPU_WR;
+
+ if (flags & BASE_MEM_PROT_GPU_RD)
+ reg->flags |= KBASE_REG_GPU_RD;
+
+ if (0 == (flags & BASE_MEM_PROT_GPU_EX))
+ reg->flags |= KBASE_REG_GPU_NX;
+
+ if (flags & BASE_MEM_COHERENT_SYSTEM ||
+ flags & BASE_MEM_COHERENT_SYSTEM_REQUIRED)
+ reg->flags |= KBASE_REG_SHARE_BOTH;
+ else if (flags & BASE_MEM_COHERENT_LOCAL)
+ reg->flags |= KBASE_REG_SHARE_IN;
+
+ /* Set up default MEMATTR usage */
+ if (kctx->kbdev->system_coherency == COHERENCY_ACE &&
+ (reg->flags & KBASE_REG_SHARE_BOTH)) {
+ reg->flags |=
+ KBASE_REG_MEMATTR_INDEX(AS_MEMATTR_INDEX_DEFAULT_ACE);
+ } else {
+ reg->flags |=
+ KBASE_REG_MEMATTR_INDEX(AS_MEMATTR_INDEX_DEFAULT);
+ }
+}
+KBASE_EXPORT_TEST_API(kbase_update_region_flags);
+
+int kbase_alloc_phy_pages_helper(
+ struct kbase_mem_phy_alloc *alloc,
+ size_t nr_pages_requested)
+{
+ int new_page_count __maybe_unused;
+ size_t old_page_count = alloc->nents;
+
+ KBASE_DEBUG_ASSERT(alloc->type == KBASE_MEM_TYPE_NATIVE);
+ KBASE_DEBUG_ASSERT(alloc->imported.kctx);
+
+ if (nr_pages_requested == 0)
+ goto done; /*nothing to do*/
+
+ new_page_count = kbase_atomic_add_pages(
+ nr_pages_requested, &alloc->imported.kctx->used_pages);
+ kbase_atomic_add_pages(nr_pages_requested, &alloc->imported.kctx->kbdev->memdev.used_pages);
+
+ /* Increase mm counters before we allocate pages so that this
+ * allocation is visible to the OOM killer */
+ kbase_process_page_usage_inc(alloc->imported.kctx, nr_pages_requested);
+
+ if (kbase_mem_pool_alloc_pages(&alloc->imported.kctx->mem_pool,
+ nr_pages_requested, alloc->pages + old_page_count) != 0)
+ goto no_alloc;
+
+ /*
+ * Request a zone cache update; this scans only the new pages and
+ * appends their information to the zone cache. If the update
+ * fails then clear the cache so we fall back to doing things
+ * page by page.
+ */
+ if (kbase_zone_cache_update(alloc, old_page_count) != 0)
+ kbase_zone_cache_clear(alloc);
+
+ kbase_tlstream_aux_pagesalloc(
+ (u32)alloc->imported.kctx->id,
+ (u64)new_page_count);
+
+ alloc->nents += nr_pages_requested;
+done:
+ return 0;
+
+no_alloc:
+ kbase_process_page_usage_dec(alloc->imported.kctx, nr_pages_requested);
+ kbase_atomic_sub_pages(nr_pages_requested, &alloc->imported.kctx->used_pages);
+ kbase_atomic_sub_pages(nr_pages_requested, &alloc->imported.kctx->kbdev->memdev.used_pages);
+
+ return -ENOMEM;
+}
+
+int kbase_free_phy_pages_helper(
+ struct kbase_mem_phy_alloc *alloc,
+ size_t nr_pages_to_free)
+{
+ struct kbase_context *kctx = alloc->imported.kctx;
+ bool syncback;
+ bool reclaimed = (alloc->evicted != 0);
+ phys_addr_t *start_free;
+ int new_page_count __maybe_unused;
+
+ KBASE_DEBUG_ASSERT(alloc->type == KBASE_MEM_TYPE_NATIVE);
+ KBASE_DEBUG_ASSERT(alloc->imported.kctx);
+ KBASE_DEBUG_ASSERT(alloc->nents >= nr_pages_to_free);
+
+ /* early out if nothing to do */
+ if (0 == nr_pages_to_free)
+ return 0;
+
+ start_free = alloc->pages + alloc->nents - nr_pages_to_free;
+
+ syncback = alloc->properties & KBASE_MEM_PHY_ALLOC_ACCESSED_CACHED;
+
+ /*
+ * Clear the zone cache, we don't expect JIT allocations to be
+ * shrunk in parts so there is no point trying to optimize for that
+ * by scanning for the changes caused by freeing this memory and
+ * updating the existing cache entries.
+ */
+ kbase_zone_cache_clear(alloc);
+
+ kbase_mem_pool_free_pages(&kctx->mem_pool,
+ nr_pages_to_free,
+ start_free,
+ syncback,
+ reclaimed);
+
+ alloc->nents -= nr_pages_to_free;
+
+ /*
+ * If the allocation was not evicted (i.e. evicted == 0) then
+ * the page accounting needs to be done.
+ */
+ if (!reclaimed) {
+ kbase_process_page_usage_dec(kctx, nr_pages_to_free);
+ new_page_count = kbase_atomic_sub_pages(nr_pages_to_free,
+ &kctx->used_pages);
+ kbase_atomic_sub_pages(nr_pages_to_free,
+ &kctx->kbdev->memdev.used_pages);
+
+ kbase_tlstream_aux_pagesalloc(
+ (u32)kctx->id,
+ (u64)new_page_count);
+ }
+
+ return 0;
+}
+
+void kbase_mem_kref_free(struct kref *kref)
+{
+ struct kbase_mem_phy_alloc *alloc;
+
+ alloc = container_of(kref, struct kbase_mem_phy_alloc, kref);
+
+ switch (alloc->type) {
+ case KBASE_MEM_TYPE_NATIVE: {
+ WARN_ON(!alloc->imported.kctx);
+ /*
+ * The physical allocation must have been removed from the
+ * eviction list before trying to free it.
+ */
+ WARN_ON(!list_empty(&alloc->evict_node));
+ kbase_free_phy_pages_helper(alloc, alloc->nents);
+ break;
+ }
+ case KBASE_MEM_TYPE_ALIAS: {
+ /* just call put on the underlying phy allocs */
+ size_t i;
+ struct kbase_aliased *aliased;
+
+ aliased = alloc->imported.alias.aliased;
+ if (aliased) {
+ for (i = 0; i < alloc->imported.alias.nents; i++)
+ if (aliased[i].alloc)
+ kbase_mem_phy_alloc_put(aliased[i].alloc);
+ vfree(aliased);
+ }
+ break;
+ }
+ case KBASE_MEM_TYPE_RAW:
+ /* raw pages, external cleanup */
+ break;
+#ifdef CONFIG_UMP
+ case KBASE_MEM_TYPE_IMPORTED_UMP:
+ ump_dd_release(alloc->imported.ump_handle);
+ break;
+#endif
+#ifdef CONFIG_DMA_SHARED_BUFFER
+ case KBASE_MEM_TYPE_IMPORTED_UMM:
+ dma_buf_detach(alloc->imported.umm.dma_buf,
+ alloc->imported.umm.dma_attachment);
+ dma_buf_put(alloc->imported.umm.dma_buf);
+ break;
+#endif
+ case KBASE_MEM_TYPE_IMPORTED_USER_BUF:
+ if (alloc->imported.user_buf.mm)
+ mmdrop(alloc->imported.user_buf.mm);
+ kfree(alloc->imported.user_buf.pages);
+ break;
+ case KBASE_MEM_TYPE_TB:{
+ void *tb;
+
+ tb = alloc->imported.kctx->jctx.tb;
+ kbase_device_trace_buffer_uninstall(alloc->imported.kctx);
+ vfree(tb);
+ break;
+ }
+ default:
+ WARN(1, "Unexecpted free of type %d\n", alloc->type);
+ break;
+ }
+
+ /* Free based on allocation type */
+ if (alloc->properties & KBASE_MEM_PHY_ALLOC_LARGE)
+ vfree(alloc);
+ else
+ kfree(alloc);
+}
+
+KBASE_EXPORT_TEST_API(kbase_mem_kref_free);
+
+int kbase_alloc_phy_pages(struct kbase_va_region *reg, size_t vsize, size_t size)
+{
+ KBASE_DEBUG_ASSERT(NULL != reg);
+ KBASE_DEBUG_ASSERT(vsize > 0);
+
+ /* validate user provided arguments */
+ if (size > vsize || vsize > reg->nr_pages)
+ goto out_term;
+
+ /* Prevent vsize*sizeof from wrapping around.
+ * For instance, if vsize is 2**29+1, we'll allocate 1 byte and the alloc won't fail.
+ */
+ if ((size_t) vsize > ((size_t) -1 / sizeof(*reg->cpu_alloc->pages)))
+ goto out_term;
+
+ KBASE_DEBUG_ASSERT(0 != vsize);
+
+ if (kbase_alloc_phy_pages_helper(reg->cpu_alloc, size) != 0)
+ goto out_term;
+
+ reg->cpu_alloc->reg = reg;
+ if (reg->cpu_alloc != reg->gpu_alloc) {
+ if (kbase_alloc_phy_pages_helper(reg->gpu_alloc, size) != 0)
+ goto out_rollback;
+ reg->gpu_alloc->reg = reg;
+ }
+
+ return 0;
+
+out_rollback:
+ kbase_free_phy_pages_helper(reg->cpu_alloc, size);
+out_term:
+ return -1;
+}
+
+KBASE_EXPORT_TEST_API(kbase_alloc_phy_pages);
+
+bool kbase_check_alloc_flags(unsigned long flags)
+{
+ /* Only known input flags should be set. */
+ if (flags & ~BASE_MEM_FLAGS_INPUT_MASK)
+ return false;
+
+ /* At least one flag should be set */
+ if (flags == 0)
+ return false;
+
+ /* Either the GPU or CPU must be reading from the allocated memory */
+ if ((flags & (BASE_MEM_PROT_CPU_RD | BASE_MEM_PROT_GPU_RD)) == 0)
+ return false;
+
+ /* Either the GPU or CPU must be writing to the allocated memory */
+ if ((flags & (BASE_MEM_PROT_CPU_WR | BASE_MEM_PROT_GPU_WR)) == 0)
+ return false;
+
+ /* GPU cannot be writing to GPU executable memory and cannot grow the memory on page fault. */
+ if ((flags & BASE_MEM_PROT_GPU_EX) && (flags & (BASE_MEM_PROT_GPU_WR | BASE_MEM_GROW_ON_GPF)))
+ return false;
+
+ /* GPU should have at least read or write access otherwise there is no
+ reason for allocating. */
+ if ((flags & (BASE_MEM_PROT_GPU_RD | BASE_MEM_PROT_GPU_WR)) == 0)
+ return false;
+
+ /* BASE_MEM_IMPORT_SHARED is only valid for imported memory */
+ if ((flags & BASE_MEM_IMPORT_SHARED) == BASE_MEM_IMPORT_SHARED)
+ return false;
+
+ return true;
+}
+
+bool kbase_check_import_flags(unsigned long flags)
+{
+ /* Only known input flags should be set. */
+ if (flags & ~BASE_MEM_FLAGS_INPUT_MASK)
+ return false;
+
+ /* At least one flag should be set */
+ if (flags == 0)
+ return false;
+
+ /* Imported memory cannot be GPU executable */
+ if (flags & BASE_MEM_PROT_GPU_EX)
+ return false;
+
+ /* Imported memory cannot grow on page fault */
+ if (flags & BASE_MEM_GROW_ON_GPF)
+ return false;
+
+ /* GPU should have at least read or write access otherwise there is no
+ reason for importing. */
+ if ((flags & (BASE_MEM_PROT_GPU_RD | BASE_MEM_PROT_GPU_WR)) == 0)
+ return false;
+
+ /* Secure memory cannot be read by the CPU */
+ if ((flags & BASE_MEM_SECURE) && (flags & BASE_MEM_PROT_CPU_RD))
+ return false;
+
+ return true;
+}
+
+/**
+ * @brief Acquire the per-context region list lock
+ */
+void kbase_gpu_vm_lock(struct kbase_context *kctx)
+{
+ KBASE_DEBUG_ASSERT(kctx != NULL);
+ mutex_lock(&kctx->reg_lock);
+}
+
+KBASE_EXPORT_TEST_API(kbase_gpu_vm_lock);
+
+/**
+ * @brief Release the per-context region list lock
+ */
+void kbase_gpu_vm_unlock(struct kbase_context *kctx)
+{
+ KBASE_DEBUG_ASSERT(kctx != NULL);
+ mutex_unlock(&kctx->reg_lock);
+}
+
+KBASE_EXPORT_TEST_API(kbase_gpu_vm_unlock);
+
+#ifdef CONFIG_DEBUG_FS
+struct kbase_jit_debugfs_data {
+ int (*func)(struct kbase_jit_debugfs_data *);
+ struct mutex lock;
+ struct kbase_context *kctx;
+ u64 active_value;
+ u64 pool_value;
+ u64 destroy_value;
+ char buffer[50];
+};
+
+static int kbase_jit_debugfs_common_open(struct inode *inode,
+ struct file *file, int (*func)(struct kbase_jit_debugfs_data *))
+{
+ struct kbase_jit_debugfs_data *data;
+
+ data = kzalloc(sizeof(*data), GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+
+ data->func = func;
+ mutex_init(&data->lock);
+ data->kctx = (struct kbase_context *) inode->i_private;
+
+ file->private_data = data;
+
+ return nonseekable_open(inode, file);
+}
+
+static ssize_t kbase_jit_debugfs_common_read(struct file *file,
+ char __user *buf, size_t len, loff_t *ppos)
+{
+ struct kbase_jit_debugfs_data *data;
+ size_t size;
+ int ret;
+
+ data = (struct kbase_jit_debugfs_data *) file->private_data;
+ mutex_lock(&data->lock);
+
+ if (*ppos) {
+ size = strnlen(data->buffer, sizeof(data->buffer));
+ } else {
+ if (!data->func) {
+ ret = -EACCES;
+ goto out_unlock;
+ }
+
+ if (data->func(data)) {
+ ret = -EACCES;
+ goto out_unlock;
+ }
+
+ size = scnprintf(data->buffer, sizeof(data->buffer),
+ "%llu,%llu,%llu", data->active_value,
+ data->pool_value, data->destroy_value);
+ }
+
+ ret = simple_read_from_buffer(buf, len, ppos, data->buffer, size);
+
+out_unlock:
+ mutex_unlock(&data->lock);
+ return ret;
+}
+
+static int kbase_jit_debugfs_common_release(struct inode *inode,
+ struct file *file)
+{
+ kfree(file->private_data);
+ return 0;
+}
+
+#define KBASE_JIT_DEBUGFS_DECLARE(__fops, __func) \
+static int __fops ## _open(struct inode *inode, struct file *file) \
+{ \
+ return kbase_jit_debugfs_common_open(inode, file, __func); \
+} \
+static const struct file_operations __fops = { \
+ .owner = THIS_MODULE, \
+ .open = __fops ## _open, \
+ .release = kbase_jit_debugfs_common_release, \
+ .read = kbase_jit_debugfs_common_read, \
+ .write = NULL, \
+ .llseek = generic_file_llseek, \
+}
+
+static int kbase_jit_debugfs_count_get(struct kbase_jit_debugfs_data *data)
+{
+ struct kbase_context *kctx = data->kctx;
+ struct list_head *tmp;
+
+ mutex_lock(&kctx->jit_lock);
+ list_for_each(tmp, &kctx->jit_active_head) {
+ data->active_value++;
+ }
+
+ list_for_each(tmp, &kctx->jit_pool_head) {
+ data->pool_value++;
+ }
+
+ list_for_each(tmp, &kctx->jit_destroy_head) {
+ data->destroy_value++;
+ }
+ mutex_unlock(&kctx->jit_lock);
+
+ return 0;
+}
+KBASE_JIT_DEBUGFS_DECLARE(kbase_jit_debugfs_count_fops,
+ kbase_jit_debugfs_count_get);
+
+static int kbase_jit_debugfs_vm_get(struct kbase_jit_debugfs_data *data)
+{
+ struct kbase_context *kctx = data->kctx;
+ struct kbase_va_region *reg;
+
+ mutex_lock(&kctx->jit_lock);
+ list_for_each_entry(reg, &kctx->jit_active_head, jit_node) {
+ data->active_value += reg->nr_pages;
+ }
+
+ list_for_each_entry(reg, &kctx->jit_pool_head, jit_node) {
+ data->pool_value += reg->nr_pages;
+ }
+
+ list_for_each_entry(reg, &kctx->jit_destroy_head, jit_node) {
+ data->destroy_value += reg->nr_pages;
+ }
+ mutex_unlock(&kctx->jit_lock);
+
+ return 0;
+}
+KBASE_JIT_DEBUGFS_DECLARE(kbase_jit_debugfs_vm_fops,
+ kbase_jit_debugfs_vm_get);
+
+static int kbase_jit_debugfs_phys_get(struct kbase_jit_debugfs_data *data)
+{
+ struct kbase_context *kctx = data->kctx;
+ struct kbase_va_region *reg;
+
+ mutex_lock(&kctx->jit_lock);
+ list_for_each_entry(reg, &kctx->jit_active_head, jit_node) {
+ data->active_value += reg->gpu_alloc->nents;
+ }
+
+ list_for_each_entry(reg, &kctx->jit_pool_head, jit_node) {
+ data->pool_value += reg->gpu_alloc->nents;
+ }
+
+ list_for_each_entry(reg, &kctx->jit_destroy_head, jit_node) {
+ data->destroy_value += reg->gpu_alloc->nents;
+ }
+ mutex_unlock(&kctx->jit_lock);
+
+ return 0;
+}
+KBASE_JIT_DEBUGFS_DECLARE(kbase_jit_debugfs_phys_fops,
+ kbase_jit_debugfs_phys_get);
+
+void kbase_jit_debugfs_init(struct kbase_context *kctx)
+{
+ /* Debugfs entry for getting the number of JIT allocations. */
+ debugfs_create_file("mem_jit_count", S_IRUGO, kctx->kctx_dentry,
+ kctx, &kbase_jit_debugfs_count_fops);
+
+ /*
+ * Debugfs entry for getting the total number of virtual pages
+ * used by JIT allocations.
+ */
+ debugfs_create_file("mem_jit_vm", S_IRUGO, kctx->kctx_dentry,
+ kctx, &kbase_jit_debugfs_vm_fops);
+
+ /*
+ * Debugfs entry for getting the number of physical pages used
+ * by JIT allocations.
+ */
+ debugfs_create_file("mem_jit_phys", S_IRUGO, kctx->kctx_dentry,
+ kctx, &kbase_jit_debugfs_phys_fops);
+}
+#endif /* CONFIG_DEBUG_FS */
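+
+/*
+ * Usage sketch from user space (the debugfs path is illustrative and
+ * depends on where kctx_dentry is created):
+ *
+ *   $ cat /sys/kernel/debug/.../mem_jit_count
+ *   <active>,<pool>,<destroy>
+ *
+ * Each file prints the three values in the "active,pool,destroy" format
+ * produced by kbase_jit_debugfs_common_read() above: allocation counts for
+ * mem_jit_count, virtual pages for mem_jit_vm and physical pages for
+ * mem_jit_phys.
+ */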
+
+/**
+ * kbase_jit_destroy_worker - Deferred worker which frees JIT allocations
+ * @work: Work item
+ *
+ * This function does the work of freeing JIT allocations whose physical
+ * backing has been released.
+ */
+static void kbase_jit_destroy_worker(struct work_struct *work)
+{
+ struct kbase_context *kctx;
+ struct kbase_va_region *reg;
+
+ kctx = container_of(work, struct kbase_context, jit_work);
+ do {
+ mutex_lock(&kctx->jit_lock);
+ if (list_empty(&kctx->jit_destroy_head))
+ reg = NULL;
+ else
+ reg = list_first_entry(&kctx->jit_destroy_head,
+ struct kbase_va_region, jit_node);
+
+ if (reg) {
+ list_del(&reg->jit_node);
+ mutex_unlock(&kctx->jit_lock);
+
+ kbase_gpu_vm_lock(kctx);
+ kbase_mem_free_region(kctx, reg);
+ kbase_gpu_vm_unlock(kctx);
+ } else
+ mutex_unlock(&kctx->jit_lock);
+ } while (reg);
+}
+
+int kbase_jit_init(struct kbase_context *kctx)
+{
+ INIT_LIST_HEAD(&kctx->jit_active_head);
+ INIT_LIST_HEAD(&kctx->jit_pool_head);
+ INIT_LIST_HEAD(&kctx->jit_destroy_head);
+ mutex_init(&kctx->jit_lock);
+ INIT_WORK(&kctx->jit_work, kbase_jit_destroy_worker);
+
+ return 0;
+}
+
+struct kbase_va_region *kbase_jit_allocate(struct kbase_context *kctx,
+ struct base_jit_alloc_info *info)
+{
+ struct kbase_va_region *reg = NULL;
+ struct kbase_va_region *walker;
+ struct kbase_va_region *temp;
+ size_t current_diff = SIZE_MAX;
+
+ int ret;
+
+ mutex_lock(&kctx->jit_lock);
+ /*
+ * Scan the pool for an existing allocation which meets our
+ * requirements and remove it.
+ */
+ list_for_each_entry_safe(walker, temp, &kctx->jit_pool_head, jit_node) {
+
+ if (walker->nr_pages >= info->va_pages) {
+ size_t min_size, max_size, diff;
+
+ /*
+ * The JIT allocation's VA requirements have been
+ * met; it's suitable, but other allocations
+ * might be a better fit.
+ */
+ min_size = min_t(size_t, walker->gpu_alloc->nents,
+ info->commit_pages);
+ max_size = max_t(size_t, walker->gpu_alloc->nents,
+ info->commit_pages);
+ diff = max_size - min_size;
+
+ if (current_diff > diff) {
+ current_diff = diff;
+ reg = walker;
+ }
+
+ /* The allocation is an exact match, stop looking */
+ if (current_diff == 0)
+ break;
+ }
+ }
+
+ if (reg) {
+ /*
+ * Remove the found region from the pool and add it to the
+ * active list.
+ */
+ list_del_init(&reg->jit_node);
+ list_add(&reg->jit_node, &kctx->jit_active_head);
+
+ /* Release the jit lock before modifying the allocation */
+ mutex_unlock(&kctx->jit_lock);
+
+ kbase_gpu_vm_lock(kctx);
+
+ /* Make the physical backing no longer reclaimable */
+ if (!kbase_mem_evictable_unmake(reg->gpu_alloc))
+ goto update_failed;
+
+ /* Grow the backing if required */
+ if (reg->gpu_alloc->nents < info->commit_pages) {
+ size_t delta;
+ size_t old_size = reg->gpu_alloc->nents;
+
+ /* Allocate some more pages */
+ delta = info->commit_pages - reg->gpu_alloc->nents;
+ if (kbase_alloc_phy_pages_helper(reg->gpu_alloc, delta)
+ != 0)
+ goto update_failed;
+
+ if (reg->cpu_alloc != reg->gpu_alloc) {
+ if (kbase_alloc_phy_pages_helper(
+ reg->cpu_alloc, delta) != 0) {
+ kbase_free_phy_pages_helper(
+ reg->gpu_alloc, delta);
+ goto update_failed;
+ }
+ }
+
+ ret = kbase_mem_grow_gpu_mapping(kctx, reg,
+ info->commit_pages, old_size);
+ /*
+ * The grow failed so put the allocation back in the
+ * pool and return failure.
+ */
+ if (ret)
+ goto update_failed;
+ }
+ kbase_gpu_vm_unlock(kctx);
+ } else {
+ /* No suitable JIT allocation was found so create a new one */
+ u64 flags = BASE_MEM_PROT_CPU_RD | BASE_MEM_PROT_GPU_RD |
+ BASE_MEM_PROT_GPU_WR | BASE_MEM_GROW_ON_GPF |
+ BASE_MEM_COHERENT_LOCAL;
+ u64 gpu_addr;
+ u16 alignment;
+
+ mutex_unlock(&kctx->jit_lock);
+
+ reg = kbase_mem_alloc(kctx, info->va_pages, info->commit_pages,
+ info->extent, &flags, &gpu_addr, &alignment);
+ if (!reg)
+ goto out_unlocked;
+
+ mutex_lock(&kctx->jit_lock);
+ list_add(&reg->jit_node, &kctx->jit_active_head);
+ mutex_unlock(&kctx->jit_lock);
+ }
+
+ return reg;
+
+update_failed:
+ /*
+ * An update to an allocation from the pool failed; the chances
+ * are slim that a new allocation would fare any better, so return
+ * the allocation to the pool and report failure.
+ */
+ kbase_gpu_vm_unlock(kctx);
+ mutex_lock(&kctx->jit_lock);
+ list_del_init(&reg->jit_node);
+ list_add(&reg->jit_node, &kctx->jit_pool_head);
+ mutex_unlock(&kctx->jit_lock);
+out_unlocked:
+ return NULL;
+}
+
+void kbase_jit_free(struct kbase_context *kctx, struct kbase_va_region *reg)
+{
+ /* The physical backing of memory in the pool is always reclaimable */
+ down_read(&kctx->process_mm->mmap_sem);
+ kbase_gpu_vm_lock(kctx);
+ kbase_mem_evictable_make(reg->gpu_alloc);
+ kbase_gpu_vm_unlock(kctx);
+ up_read(&kctx->process_mm->mmap_sem);
+
+ mutex_lock(&kctx->jit_lock);
+ list_del_init(&reg->jit_node);
+ list_add(&reg->jit_node, &kctx->jit_pool_head);
+ mutex_unlock(&kctx->jit_lock);
+}
+
+void kbase_jit_backing_lost(struct kbase_va_region *reg)
+{
+ struct kbase_context *kctx = reg->kctx;
+
+ /*
+ * JIT allocations will always be on a list; if the region
+ * is not on a list then it's not a JIT allocation.
+ */
+ if (list_empty(&reg->jit_node))
+ return;
+
+ /*
+ * Freeing the allocation requires locks we might not be able
+ * to take now, so move the allocation to the free list and kick
+ * the worker which will do the freeing.
+ */
+ mutex_lock(&kctx->jit_lock);
+ list_del_init(&reg->jit_node);
+ list_add(&reg->jit_node, &kctx->jit_destroy_head);
+ mutex_unlock(&kctx->jit_lock);
+
+ schedule_work(&kctx->jit_work);
+}
+
+bool kbase_jit_evict(struct kbase_context *kctx)
+{
+ struct kbase_va_region *reg = NULL;
+
+ lockdep_assert_held(&kctx->reg_lock);
+
+ /* Free the oldest allocation from the pool */
+ mutex_lock(&kctx->jit_lock);
+ if (!list_empty(&kctx->jit_pool_head)) {
+ reg = list_entry(kctx->jit_pool_head.prev,
+ struct kbase_va_region, jit_node);
+ list_del(&reg->jit_node);
+ }
+ mutex_unlock(&kctx->jit_lock);
+
+ if (reg)
+ kbase_mem_free_region(kctx, reg);
+
+ return (reg != NULL);
+}
+
+void kbase_jit_term(struct kbase_context *kctx)
+{
+ struct kbase_va_region *walker;
+
+ /* Free all allocations for this context */
+
+ /*
+ * Flush the freeing of allocations whose backing has been freed
+ * (i.e. everything in jit_destroy_head).
+ */
+ cancel_work_sync(&kctx->jit_work);
+
+ kbase_gpu_vm_lock(kctx);
+ /* Free all allocations from the pool */
+ while (!list_empty(&kctx->jit_pool_head)) {
+ walker = list_first_entry(&kctx->jit_pool_head,
+ struct kbase_va_region, jit_node);
+ list_del(&walker->jit_node);
+ kbase_mem_free_region(kctx, walker);
+ }
+
+ /* Free all allocations from active list */
+ while (!list_empty(&kctx->jit_active_head)) {
+ walker = list_first_entry(&kctx->jit_active_head,
+ struct kbase_va_region, jit_node);
+ list_del(&walker->jit_node);
+ kbase_mem_free_region(kctx, walker);
+ }
+ kbase_gpu_vm_unlock(kctx);
+}
+
+static int kbase_jd_user_buf_map(struct kbase_context *kctx,
+ struct kbase_va_region *reg)
+{
+ long pinned_pages;
+ struct kbase_mem_phy_alloc *alloc;
+ struct page **pages;
+ phys_addr_t *pa;
+ long i;
+ int err = -ENOMEM;
+ unsigned long address;
+ struct mm_struct *mm;
+ struct device *dev;
+ unsigned long offset;
+ unsigned long local_size;
+
+ alloc = reg->gpu_alloc;
+ pa = kbase_get_gpu_phy_pages(reg);
+ address = alloc->imported.user_buf.address;
+ mm = alloc->imported.user_buf.mm;
+
+ KBASE_DEBUG_ASSERT(alloc->type == KBASE_MEM_TYPE_IMPORTED_USER_BUF);
+
+ pages = alloc->imported.user_buf.pages;
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 6, 0)
+ pinned_pages = get_user_pages(NULL, mm,
+ address,
+ alloc->imported.user_buf.nr_pages,
+ reg->flags & KBASE_REG_GPU_WR,
+ 0, pages, NULL);
+#else
+ pinned_pages = get_user_pages_remote(NULL, mm,
+ address,
+ alloc->imported.user_buf.nr_pages,
+ reg->flags & KBASE_REG_GPU_WR,
+ 0, pages, NULL);
+#endif
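+
+ /*
+ * Note: kernels before 4.6 pass the target task/mm directly to
+ * get_user_pages(); from 4.6 onwards the remote form lives in
+ * get_user_pages_remote() with the same argument order (assumption
+ * based on mainline history, not verified against every stable tree).
+ */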
+
+ if (pinned_pages <= 0)
+ return pinned_pages;
+
+ if (pinned_pages != alloc->imported.user_buf.nr_pages) {
+ for (i = 0; i < pinned_pages; i++)
+ put_page(pages[i]);
+ return -ENOMEM;
+ }
+
+ dev = kctx->kbdev->dev;
+ offset = address & ~PAGE_MASK;
+ local_size = alloc->imported.user_buf.size;
+
+ for (i = 0; i < pinned_pages; i++) {
+ dma_addr_t dma_addr;
+ unsigned long min;
+
+ min = MIN(PAGE_SIZE - offset, local_size);
+ dma_addr = dma_map_page(dev, pages[i],
+ offset, min,
+ DMA_BIDIRECTIONAL);
+ if (dma_mapping_error(dev, dma_addr))
+ goto unwind;
+
+ alloc->imported.user_buf.dma_addrs[i] = dma_addr;
+ pa[i] = page_to_phys(pages[i]);
+
+ local_size -= min;
+ offset = 0;
+ }
+
+ alloc->nents = pinned_pages;
+
+ err = kbase_mmu_insert_pages(kctx, reg->start_pfn, pa,
+ kbase_reg_current_backed_size(reg),
+ reg->flags);
+ if (err == 0)
+ return 0;
+
+ alloc->nents = 0;
+ /* fall through to the unwind path */
+unwind:
+ while (i--) {
+ dma_unmap_page(kctx->kbdev->dev,
+ alloc->imported.user_buf.dma_addrs[i],
+ PAGE_SIZE, DMA_BIDIRECTIONAL);
+ put_page(pages[i]);
+ pages[i] = NULL;
+ }
+
+ return err;
+}
+
+static void kbase_jd_user_buf_unmap(struct kbase_context *kctx,
+ struct kbase_mem_phy_alloc *alloc, bool writeable)
+{
+ long i;
+ struct page **pages;
+ unsigned long size = alloc->imported.user_buf.size;
+
+ KBASE_DEBUG_ASSERT(alloc->type == KBASE_MEM_TYPE_IMPORTED_USER_BUF);
+ pages = alloc->imported.user_buf.pages;
+ for (i = 0; i < alloc->imported.user_buf.nr_pages; i++) {
+ unsigned long local_size;
+ dma_addr_t dma_addr = alloc->imported.user_buf.dma_addrs[i];
+
+ local_size = MIN(size, PAGE_SIZE - (dma_addr & ~PAGE_MASK));
+ dma_unmap_page(kctx->kbdev->dev, dma_addr, local_size,
+ DMA_BIDIRECTIONAL);
+ if (writeable)
+ set_page_dirty_lock(pages[i]);
+ put_page(pages[i]);
+ pages[i] = NULL;
+
+ size -= local_size;
+ }
+ alloc->nents = 0;
+}
+
+#ifdef CONFIG_DMA_SHARED_BUFFER
+static int kbase_jd_umm_map(struct kbase_context *kctx,
+ struct kbase_va_region *reg)
+{
+ struct sg_table *sgt;
+ struct scatterlist *s;
+ int i;
+ phys_addr_t *pa;
+ int err;
+ size_t count = 0;
+ struct kbase_mem_phy_alloc *alloc;
+
+ alloc = reg->gpu_alloc;
+
+ KBASE_DEBUG_ASSERT(alloc->type == KBASE_MEM_TYPE_IMPORTED_UMM);
+ KBASE_DEBUG_ASSERT(NULL == alloc->imported.umm.sgt);
+ sgt = dma_buf_map_attachment(alloc->imported.umm.dma_attachment,
+ DMA_BIDIRECTIONAL);
+
+ if (IS_ERR_OR_NULL(sgt))
+ return -EINVAL;
+
+ /* save for later */
+ alloc->imported.umm.sgt = sgt;
+
+ pa = kbase_get_gpu_phy_pages(reg);
+ KBASE_DEBUG_ASSERT(pa);
+
+ for_each_sg(sgt->sgl, s, sgt->nents, i) {
+ int j;
+ size_t pages = PFN_UP(sg_dma_len(s));
+
+ WARN_ONCE(sg_dma_len(s) & (PAGE_SIZE-1),
+ "sg_dma_len(s)=%u is not a multiple of PAGE_SIZE\n",
+ sg_dma_len(s));
+
+ WARN_ONCE(sg_dma_address(s) & (PAGE_SIZE-1),
+ "sg_dma_address(s)=%llx is not aligned to PAGE_SIZE\n",
+ (unsigned long long) sg_dma_address(s));
+
+ for (j = 0; (j < pages) && (count < reg->nr_pages); j++,
+ count++)
+ *pa++ = sg_dma_address(s) + (j << PAGE_SHIFT);
+ WARN_ONCE(j < pages,
+ "sg list from dma_buf_map_attachment > dma_buf->size=%zu\n",
+ alloc->imported.umm.dma_buf->size);
+ }
+
+ if (WARN_ONCE(count < reg->nr_pages,
+ "sg list from dma_buf_map_attachment < dma_buf->size=%zu\n",
+ alloc->imported.umm.dma_buf->size)) {
+ err = -EINVAL;
+ goto out;
+ }
+
+ /* Update nents as we now have pages to map */
+ alloc->nents = count;
+
+ err = kbase_mmu_insert_pages(kctx, reg->start_pfn,
+ kbase_get_gpu_phy_pages(reg),
+ kbase_reg_current_backed_size(reg),
+ reg->flags | KBASE_REG_GPU_WR | KBASE_REG_GPU_RD);
+
+out:
+ if (err) {
+ dma_buf_unmap_attachment(alloc->imported.umm.dma_attachment,
+ alloc->imported.umm.sgt, DMA_BIDIRECTIONAL);
+ alloc->imported.umm.sgt = NULL;
+ }
+
+ return err;
+}
+
+static void kbase_jd_umm_unmap(struct kbase_context *kctx,
+ struct kbase_mem_phy_alloc *alloc)
+{
+ KBASE_DEBUG_ASSERT(kctx);
+ KBASE_DEBUG_ASSERT(alloc);
+ KBASE_DEBUG_ASSERT(alloc->imported.umm.dma_attachment);
+ KBASE_DEBUG_ASSERT(alloc->imported.umm.sgt);
+ dma_buf_unmap_attachment(alloc->imported.umm.dma_attachment,
+ alloc->imported.umm.sgt, DMA_BIDIRECTIONAL);
+ alloc->imported.umm.sgt = NULL;
+ alloc->nents = 0;
+}
+#endif /* CONFIG_DMA_SHARED_BUFFER */
+
+#if (defined(CONFIG_KDS) && defined(CONFIG_UMP)) \
+ || defined(CONFIG_DMA_SHARED_BUFFER_USES_KDS)
+static void add_kds_resource(struct kds_resource *kds_res,
+ struct kds_resource **kds_resources, u32 *kds_res_count,
+ unsigned long *kds_access_bitmap, bool exclusive)
+{
+ u32 i;
+
+ for (i = 0; i < *kds_res_count; i++) {
+ /* Duplicate resource, ignore */
+ if (kds_resources[i] == kds_res)
+ return;
+ }
+
+ kds_resources[*kds_res_count] = kds_res;
+ if (exclusive)
+ set_bit(*kds_res_count, kds_access_bitmap);
+ (*kds_res_count)++;
+}
+#endif
+
+struct kbase_mem_phy_alloc *kbase_map_external_resource(
+ struct kbase_context *kctx, struct kbase_va_region *reg,
+ struct mm_struct *locked_mm
+#ifdef CONFIG_KDS
+ , u32 *kds_res_count, struct kds_resource **kds_resources,
+ unsigned long *kds_access_bitmap, bool exclusive
+#endif
+ )
+{
+ int err;
+
+ /* decide what needs to happen for this resource */
+ switch (reg->gpu_alloc->type) {
+ case KBASE_MEM_TYPE_IMPORTED_USER_BUF: {
+ if (reg->gpu_alloc->imported.user_buf.mm != locked_mm)
+ goto exit;
+
+ reg->gpu_alloc->imported.user_buf.current_mapping_usage_count++;
+ if (1 == reg->gpu_alloc->imported.user_buf.current_mapping_usage_count) {
+ err = kbase_jd_user_buf_map(kctx, reg);
+ if (err) {
+ reg->gpu_alloc->imported.user_buf.current_mapping_usage_count--;
+ goto exit;
+ }
+ }
+ }
+ break;
+ case KBASE_MEM_TYPE_IMPORTED_UMP: {
+#if defined(CONFIG_KDS) && defined(CONFIG_UMP)
+ if (kds_res_count) {
+ struct kds_resource *kds_res;
+
+ kds_res = ump_dd_kds_resource_get(
+ reg->gpu_alloc->imported.ump_handle);
+ if (kds_res)
+ add_kds_resource(kds_res, kds_resources,
+ kds_res_count,
+ kds_access_bitmap, exclusive);
+ }
+#endif /*defined(CONFIG_KDS) && defined(CONFIG_UMP) */
+ break;
+ }
+#ifdef CONFIG_DMA_SHARED_BUFFER
+ case KBASE_MEM_TYPE_IMPORTED_UMM: {
+#ifdef CONFIG_DMA_SHARED_BUFFER_USES_KDS
+ if (kds_res_count) {
+ struct kds_resource *kds_res;
+
+ kds_res = get_dma_buf_kds_resource(
+ reg->gpu_alloc->imported.umm.dma_buf);
+ if (kds_res)
+ add_kds_resource(kds_res, kds_resources,
+ kds_res_count,
+ kds_access_bitmap, exclusive);
+ }
+#endif
+ reg->gpu_alloc->imported.umm.current_mapping_usage_count++;
+ if (1 == reg->gpu_alloc->imported.umm.current_mapping_usage_count) {
+ err = kbase_jd_umm_map(kctx, reg);
+ if (err) {
+ reg->gpu_alloc->imported.umm.current_mapping_usage_count--;
+ goto exit;
+ }
+ }
+ break;
+ }
+#endif
+ default:
+ goto exit;
+ }
+
+ return kbase_mem_phy_alloc_get(reg->gpu_alloc);
+exit:
+ return NULL;
+}
+
+void kbase_unmap_external_resource(struct kbase_context *kctx,
+ struct kbase_va_region *reg, struct kbase_mem_phy_alloc *alloc)
+{
+ switch (alloc->type) {
+#ifdef CONFIG_DMA_SHARED_BUFFER
+ case KBASE_MEM_TYPE_IMPORTED_UMM: {
+ alloc->imported.umm.current_mapping_usage_count--;
+
+ if (0 == alloc->imported.umm.current_mapping_usage_count) {
+ if (reg && reg->gpu_alloc == alloc)
+ kbase_mmu_teardown_pages(
+ kctx,
+ reg->start_pfn,
+ kbase_reg_current_backed_size(reg));
+
+ kbase_jd_umm_unmap(kctx, alloc);
+ }
+ }
+ break;
+#endif /* CONFIG_DMA_SHARED_BUFFER */
+ case KBASE_MEM_TYPE_IMPORTED_USER_BUF: {
+ alloc->imported.user_buf.current_mapping_usage_count--;
+
+ if (0 == alloc->imported.user_buf.current_mapping_usage_count) {
+ bool writeable = true;
+
+ if (reg && reg->gpu_alloc == alloc)
+ kbase_mmu_teardown_pages(
+ kctx,
+ reg->start_pfn,
+ kbase_reg_current_backed_size(reg));
+
+ if (reg && ((reg->flags & KBASE_REG_GPU_WR) == 0))
+ writeable = false;
+
+ kbase_jd_user_buf_unmap(kctx, alloc, writeable);
+ }
+ }
+ break;
+ default:
+ break;
+ }
+ kbase_mem_phy_alloc_put(alloc);
+}
+
+struct kbase_ctx_ext_res_meta *kbase_sticky_resource_acquire(
+ struct kbase_context *kctx, u64 gpu_addr)
+{
+ struct kbase_ctx_ext_res_meta *meta = NULL;
+ struct kbase_ctx_ext_res_meta *walker;
+
+ lockdep_assert_held(&kctx->reg_lock);
+
+ /*
+ * Walk the per context external resource metadata list for the
+ * metadata which matches the region which is being acquired.
+ */
+ list_for_each_entry(walker, &kctx->ext_res_meta_head, ext_res_node) {
+ if (walker->gpu_addr == gpu_addr) {
+ meta = walker;
+ break;
+ }
+ }
+
+ /* No metadata exists so create one. */
+ if (!meta) {
+ struct kbase_va_region *reg;
+
+ /* Find the region */
+ reg = kbase_region_tracker_find_region_enclosing_address(
+ kctx, gpu_addr);
+ if (NULL == reg || (reg->flags & KBASE_REG_FREE))
+ goto failed;
+
+ /* Allocate the metadata object */
+ meta = kzalloc(sizeof(*meta), GFP_KERNEL);
+ if (!meta)
+ goto failed;
+
+ /*
+ * Fill in the metadata object and acquire a reference
+ * for the physical resource.
+ */
+ meta->alloc = kbase_map_external_resource(kctx, reg, NULL
+#ifdef CONFIG_KDS
+ , NULL, NULL,
+ NULL, false
+#endif
+ );
+
+ if (!meta->alloc)
+ goto fail_map;
+
+ meta->gpu_addr = reg->start_pfn << PAGE_SHIFT;
+
+ list_add(&meta->ext_res_node, &kctx->ext_res_meta_head);
+ }
+
+ return meta;
+
+fail_map:
+ kfree(meta);
+failed:
+ return NULL;
+}
+
+bool kbase_sticky_resource_release(struct kbase_context *kctx,
+ struct kbase_ctx_ext_res_meta *meta, u64 gpu_addr)
+{
+ struct kbase_ctx_ext_res_meta *walker;
+ struct kbase_va_region *reg;
+
+ lockdep_assert_held(&kctx->reg_lock);
+
+ /* Search for the metadata if one isn't provided. */
+ if (!meta) {
+ /*
+ * Walk the per context external resource metadata list for the
+ * metadata which matches the region which is being released.
+ */
+ list_for_each_entry(walker, &kctx->ext_res_meta_head,
+ ext_res_node) {
+ if (walker->gpu_addr == gpu_addr) {
+ meta = walker;
+ break;
+ }
+ }
+ }
+
+ /* No metadata so just return. */
+ if (!meta)
+ return false;
+
+ /* Drop the physical memory reference and free the metadata. */
+ reg = kbase_region_tracker_find_region_enclosing_address(
+ kctx,
+ meta->gpu_addr);
+
+ kbase_unmap_external_resource(kctx, reg, meta->alloc);
+ list_del(&meta->ext_res_node);
+ kfree(meta);
+
+ return true;
+}
+
+int kbase_sticky_resource_init(struct kbase_context *kctx)
+{
+ INIT_LIST_HEAD(&kctx->ext_res_meta_head);
+
+ return 0;
+}
+
+void kbase_sticky_resource_term(struct kbase_context *kctx)
+{
+ struct kbase_ctx_ext_res_meta *walker;
+
+ lockdep_assert_held(&kctx->reg_lock);
+
+ /*
+ * Free any sticky resources which haven't been unmapped.
+ *
+ * Note:
+ * We don't care about refcounts at this point as no future
+ * references to the meta data will be made.
+ * Region termination would find these if we didn't free them
+ * here, but it's more efficient if we do the clean up here.
+ */
+ while (!list_empty(&kctx->ext_res_meta_head)) {
+ walker = list_first_entry(&kctx->ext_res_meta_head,
+ struct kbase_ctx_ext_res_meta, ext_res_node);
+
+ kbase_sticky_resource_release(kctx, walker, 0);
+ }
+}
diff --git a/midgard-0.r2p0/mali_kbase_mem.h b/midgard-0.r2p0/mali_kbase_mem.h
new file mode 100755
index 0000000..8953c85
--- /dev/null
+++ b/midgard-0.r2p0/mali_kbase_mem.h
@@ -0,0 +1,1078 @@
+/*
+ *
+ * (C) COPYRIGHT 2010-2016 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained
+ * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
+ * Boston, MA 02110-1301, USA.
+ *
+ */
+
+
+
+
+
+/**
+ * @file mali_kbase_mem.h
+ * Base kernel memory APIs
+ */
+
+#ifndef _KBASE_MEM_H_
+#define _KBASE_MEM_H_
+
+#ifndef _KBASE_H_
+#error "Don't include this file directly, use mali_kbase.h instead"
+#endif
+
+#include <linux/kref.h>
+#ifdef CONFIG_KDS
+#include <linux/kds.h>
+#endif /* CONFIG_KDS */
+#ifdef CONFIG_UMP
+#include <linux/ump.h>
+#endif /* CONFIG_UMP */
+#include "mali_base_kernel.h"
+#include <mali_kbase_hw.h>
+#include "mali_kbase_pm.h"
+#include "mali_kbase_defs.h"
+#if defined(CONFIG_MALI_GATOR_SUPPORT)
+#include "mali_kbase_gator.h"
+#endif
+/* Required for kbase_mem_evictable_unmake */
+#include "mali_kbase_mem_linux.h"
+
+/* Part of the workaround for uTLB invalid pages is to ensure we grow/shrink tmem by 4 pages at a time */
+#define KBASEP_TMEM_GROWABLE_BLOCKSIZE_PAGES_LOG2_HW_ISSUE_8316 (2) /* round to 4 pages */
+
+/* Part of the workaround for PRLAM-9630 requires us to grow/shrink memory by 8 pages.
+The MMU reads in 8 page table entries from memory at a time; if we have more than one page fault within the same 8 pages and
+the page tables are updated accordingly, the MMU does not re-read the page table entries from memory for the subsequent page table
+updates, and generates duplicate page faults because the page table information used by the MMU is not valid. */
+#define KBASEP_TMEM_GROWABLE_BLOCKSIZE_PAGES_LOG2_HW_ISSUE_9630 (3) /* round to 8 pages */
+
+#define KBASEP_TMEM_GROWABLE_BLOCKSIZE_PAGES_LOG2 (0) /* round to 1 page */
+
+/* This must always be a power of 2 */
+#define KBASEP_TMEM_GROWABLE_BLOCKSIZE_PAGES (1u << KBASEP_TMEM_GROWABLE_BLOCKSIZE_PAGES_LOG2)
+#define KBASEP_TMEM_GROWABLE_BLOCKSIZE_PAGES_HW_ISSUE_8316 (1u << KBASEP_TMEM_GROWABLE_BLOCKSIZE_PAGES_LOG2_HW_ISSUE_8316)
+#define KBASEP_TMEM_GROWABLE_BLOCKSIZE_PAGES_HW_ISSUE_9630 (1u << KBASEP_TMEM_GROWABLE_BLOCKSIZE_PAGES_LOG2_HW_ISSUE_9630)
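+
+/*
+ * Illustrative sketch only (not part of the driver): a hypothetical helper
+ * showing how a growth request would be rounded up to one of the workaround
+ * block sizes above, all of which are powers of two.
+ *
+ *   static inline size_t round_up_to_blocksize(size_t nr_pages, size_t blk)
+ *   {
+ *           return (nr_pages + blk - 1) & ~(blk - 1);
+ *   }
+ *
+ *   round_up_to_blocksize(5, KBASEP_TMEM_GROWABLE_BLOCKSIZE_PAGES_HW_ISSUE_9630) == 8
+ */
+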
+/**
+ * A CPU mapping
+ */
+struct kbase_cpu_mapping {
+ struct list_head mappings_list;
+ struct kbase_mem_phy_alloc *alloc;
+ struct kbase_context *kctx;
+ struct kbase_va_region *region;
+ pgoff_t page_off;
+ int count;
+ unsigned long vm_start;
+ unsigned long vm_end;
+};
+
+enum kbase_memory_type {
+ KBASE_MEM_TYPE_NATIVE,
+ KBASE_MEM_TYPE_IMPORTED_UMP,
+ KBASE_MEM_TYPE_IMPORTED_UMM,
+ KBASE_MEM_TYPE_IMPORTED_USER_BUF,
+ KBASE_MEM_TYPE_ALIAS,
+ KBASE_MEM_TYPE_TB,
+ KBASE_MEM_TYPE_RAW
+};
+
+/* internal structure, mirroring base_mem_aliasing_info,
+ * but with alloc instead of a gpu va (handle) */
+struct kbase_aliased {
+ struct kbase_mem_phy_alloc *alloc; /* NULL for special, non-NULL for native */
+ u64 offset; /* in pages */
+ u64 length; /* in pages */
+};
+
+/**
+ * @brief Physical pages tracking object properties
+ */
+#define KBASE_MEM_PHY_ALLOC_ACCESSED_CACHED (1ul << 0)
+#define KBASE_MEM_PHY_ALLOC_LARGE (1ul << 1)
+
+/* physical pages tracking object.
+ * Set up to track N pages.
+ * N not stored here, the creator holds that info.
+ * This object only tracks how many elements are actually valid (present).
+ * Changing of nents or *pages should only happen if the kbase_mem_phy_alloc is not
+ * shared with another region or client. CPU mappings are OK to exist when changing, as
+ * long as the tracked mappings objects are updated as part of the change.
+ */
+struct kbase_mem_phy_alloc {
+ struct kref kref; /* number of users of this alloc */
+ atomic_t gpu_mappings;
+ size_t nents; /* 0..N */
+ phys_addr_t *pages; /* N elements, only 0..nents are valid */
+
+ /* kbase_cpu_mappings */
+ struct list_head mappings;
+
+ /* Node used to store this allocation on the eviction list */
+ struct list_head evict_node;
+ /* Physical backing size when the pages were evicted */
+ size_t evicted;
+ /*
+ * Back reference to the region structure which created this
+ * allocation, or NULL if it has been freed.
+ */
+ struct kbase_va_region *reg;
+
+ /* type of buffer */
+ enum kbase_memory_type type;
+
+ unsigned long properties;
+
+ struct list_head zone_cache;
+
+ /* member in union valid based on @a type */
+ union {
+#ifdef CONFIG_UMP
+ ump_dd_handle ump_handle;
+#endif /* CONFIG_UMP */
+#if defined(CONFIG_DMA_SHARED_BUFFER)
+ struct {
+ struct dma_buf *dma_buf;
+ struct dma_buf_attachment *dma_attachment;
+ unsigned int current_mapping_usage_count;
+ struct sg_table *sgt;
+ } umm;
+#endif /* defined(CONFIG_DMA_SHARED_BUFFER) */
+ struct {
+ u64 stride;
+ size_t nents;
+ struct kbase_aliased *aliased;
+ } alias;
+ /* Used by type = (KBASE_MEM_TYPE_NATIVE, KBASE_MEM_TYPE_TB) */
+ struct kbase_context *kctx;
+ struct {
+ unsigned long address;
+ unsigned long size;
+ unsigned long nr_pages;
+ struct page **pages;
+ unsigned int current_mapping_usage_count;
+ struct mm_struct *mm;
+ dma_addr_t *dma_addrs;
+ } user_buf;
+ } imported;
+};
+
+static inline void kbase_mem_phy_alloc_gpu_mapped(struct kbase_mem_phy_alloc *alloc)
+{
+ KBASE_DEBUG_ASSERT(alloc);
+ /* we only track mappings of NATIVE buffers */
+ if (alloc->type == KBASE_MEM_TYPE_NATIVE)
+ atomic_inc(&alloc->gpu_mappings);
+}
+
+static inline void kbase_mem_phy_alloc_gpu_unmapped(struct kbase_mem_phy_alloc *alloc)
+{
+ KBASE_DEBUG_ASSERT(alloc);
+ /* we only track mappings of NATIVE buffers */
+ if (alloc->type == KBASE_MEM_TYPE_NATIVE)
+ if (0 > atomic_dec_return(&alloc->gpu_mappings)) {
+ pr_err("Mismatched %s:\n", __func__);
+ dump_stack();
+ }
+}
+
+void kbase_mem_kref_free(struct kref *kref);
+
+int kbase_mem_init(struct kbase_device *kbdev);
+void kbase_mem_halt(struct kbase_device *kbdev);
+void kbase_mem_term(struct kbase_device *kbdev);
+
+static inline struct kbase_mem_phy_alloc *kbase_mem_phy_alloc_get(struct kbase_mem_phy_alloc *alloc)
+{
+ kref_get(&alloc->kref);
+ return alloc;
+}
+
+static inline struct kbase_mem_phy_alloc *kbase_mem_phy_alloc_put(struct kbase_mem_phy_alloc *alloc)
+{
+ kref_put(&alloc->kref, kbase_mem_kref_free);
+ return NULL;
+}
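+
+/*
+ * Reference counting sketch (hypothetical caller): every user of a
+ * kbase_mem_phy_alloc takes a reference with kbase_mem_phy_alloc_get() and
+ * drops it with kbase_mem_phy_alloc_put(); when the last reference goes,
+ * the kref callback kbase_mem_kref_free() tears the allocation down
+ * according to its type (see mali_kbase_mem.c).
+ *
+ *   alloc = kbase_mem_phy_alloc_get(reg->gpu_alloc);  // take a reference
+ *   ...
+ *   kbase_mem_phy_alloc_put(alloc);                   // may free alloc
+ */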
+
+/**
+ * A GPU memory region, and attributes for CPU mappings.
+ */
+struct kbase_va_region {
+ struct rb_node rblink;
+ struct list_head link;
+
+ struct kbase_context *kctx; /* Backlink to base context */
+
+ u64 start_pfn; /* The PFN in GPU space */
+ size_t nr_pages;
+
+/* Free region */
+#define KBASE_REG_FREE (1ul << 0)
+/* CPU write access */
+#define KBASE_REG_CPU_WR (1ul << 1)
+/* GPU write access */
+#define KBASE_REG_GPU_WR (1ul << 2)
+/* No eXecute flag */
+#define KBASE_REG_GPU_NX (1ul << 3)
+/* Is CPU cached? */
+#define KBASE_REG_CPU_CACHED (1ul << 4)
+/* Is GPU cached? */
+#define KBASE_REG_GPU_CACHED (1ul << 5)
+
+#define KBASE_REG_GROWABLE (1ul << 6)
+/* Can grow on pf? */
+#define KBASE_REG_PF_GROW (1ul << 7)
+
+/* VA managed by us */
+#define KBASE_REG_CUSTOM_VA (1ul << 8)
+
+/* inner shareable coherency */
+#define KBASE_REG_SHARE_IN (1ul << 9)
+/* inner & outer shareable coherency */
+#define KBASE_REG_SHARE_BOTH (1ul << 10)
+
+/* Space for 4 different zones */
+#define KBASE_REG_ZONE_MASK (3ul << 11)
+#define KBASE_REG_ZONE(x) (((x) & 3) << 11)
+
+/* GPU read access */
+#define KBASE_REG_GPU_RD (1ul<<13)
+/* CPU read access */
+#define KBASE_REG_CPU_RD (1ul<<14)
+
+/* Aligned for GPU EX in SAME_VA */
+#define KBASE_REG_ALIGNED (1ul<<15)
+
+/* Index of chosen MEMATTR for this region (0..7) */
+#define KBASE_REG_MEMATTR_MASK (7ul << 16)
+#define KBASE_REG_MEMATTR_INDEX(x) (((x) & 7) << 16)
+#define KBASE_REG_MEMATTR_VALUE(x) (((x) & KBASE_REG_MEMATTR_MASK) >> 16)
+
+#define KBASE_REG_SECURE (1ul << 19)
+
+#define KBASE_REG_DONT_NEED (1ul << 20)
+
+#define KBASE_REG_ZONE_SAME_VA KBASE_REG_ZONE(0)
+
+/* only used with 32-bit clients */
+/*
+ * On a 32bit platform, custom VA should be wired from (4GB + shader region)
+ * to the VA limit of the GPU. Unfortunately, the Linux mmap() interface
+ * limits us to 2^32 pages (2^44 bytes, see mmap64 man page for reference).
+ * So we put the default limit to the maximum possible on Linux and shrink
+ * it down, if required by the GPU, during initialization.
+ */
+
+/*
+ * Dedicated 16MB region for shader code:
+ * VA range 0x101000000-0x102000000
+ */
+#define KBASE_REG_ZONE_EXEC KBASE_REG_ZONE(1)
+#define KBASE_REG_ZONE_EXEC_BASE (0x101000000ULL >> PAGE_SHIFT)
+#define KBASE_REG_ZONE_EXEC_SIZE ((16ULL * 1024 * 1024) >> PAGE_SHIFT)
+
+#define KBASE_REG_ZONE_CUSTOM_VA KBASE_REG_ZONE(2)
+#define KBASE_REG_ZONE_CUSTOM_VA_BASE (KBASE_REG_ZONE_EXEC_BASE + KBASE_REG_ZONE_EXEC_SIZE) /* Starting after KBASE_REG_ZONE_EXEC */
+#define KBASE_REG_ZONE_CUSTOM_VA_SIZE (((1ULL << 44) >> PAGE_SHIFT) - KBASE_REG_ZONE_CUSTOM_VA_BASE)
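+
+/*
+ * Worked example (assuming 4 KiB pages): with the values above, the EXEC
+ * zone covers GPU VA 0x101000000-0x102000000 and the CUSTOM_VA zone starts
+ * immediately after it at 0x102000000, extending up to the 2^44 byte mmap
+ * limit (shrunk at init time if the GPU's VA range is smaller).
+ */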
+/* end 32-bit clients only */
+
+ unsigned long flags;
+
+ size_t extent; /* nr of pages alloc'd on PF */
+
+ struct kbase_mem_phy_alloc *cpu_alloc; /* the one alloc object we mmap to the CPU when mapping this region */
+ struct kbase_mem_phy_alloc *gpu_alloc; /* the one alloc object we mmap to the GPU when mapping this region */
+
+ /* non-NULL if this memory object is a kds_resource */
+ struct kds_resource *kds_res;
+
+ /* List head used to store the region in the JIT allocation pool */
+ struct list_head jit_node;
+};
+
+/* Common functions */
+static inline phys_addr_t *kbase_get_cpu_phy_pages(struct kbase_va_region *reg)
+{
+ KBASE_DEBUG_ASSERT(reg);
+ KBASE_DEBUG_ASSERT(reg->cpu_alloc);
+ KBASE_DEBUG_ASSERT(reg->gpu_alloc);
+ KBASE_DEBUG_ASSERT(reg->cpu_alloc->nents == reg->gpu_alloc->nents);
+
+ return reg->cpu_alloc->pages;
+}
+
+static inline phys_addr_t *kbase_get_gpu_phy_pages(struct kbase_va_region *reg)
+{
+ KBASE_DEBUG_ASSERT(reg);
+ KBASE_DEBUG_ASSERT(reg->cpu_alloc);
+ KBASE_DEBUG_ASSERT(reg->gpu_alloc);
+ KBASE_DEBUG_ASSERT(reg->cpu_alloc->nents == reg->gpu_alloc->nents);
+
+ return reg->gpu_alloc->pages;
+}
+
+static inline size_t kbase_reg_current_backed_size(struct kbase_va_region *reg)
+{
+ KBASE_DEBUG_ASSERT(reg);
+ /* if no alloc object the backed size naturally is 0 */
+ if (!reg->cpu_alloc)
+ return 0;
+
+ KBASE_DEBUG_ASSERT(reg->cpu_alloc);
+ KBASE_DEBUG_ASSERT(reg->gpu_alloc);
+ KBASE_DEBUG_ASSERT(reg->cpu_alloc->nents == reg->gpu_alloc->nents);
+
+ return reg->cpu_alloc->nents;
+}
+
+#define KBASE_MEM_PHY_ALLOC_LARGE_THRESHOLD ((size_t)(4*1024)) /* size above which vmalloc is used over kmalloc */
+
+static inline struct kbase_mem_phy_alloc *kbase_alloc_create(size_t nr_pages, enum kbase_memory_type type)
+{
+ struct kbase_mem_phy_alloc *alloc;
+ size_t alloc_size = sizeof(*alloc) + sizeof(*alloc->pages) * nr_pages;
+ size_t per_page_size = sizeof(*alloc->pages);
+
+ /* Imported pages may have page private data already in use */
+ if (type == KBASE_MEM_TYPE_IMPORTED_USER_BUF) {
+ alloc_size += nr_pages *
+ sizeof(*alloc->imported.user_buf.dma_addrs);
+ per_page_size += sizeof(*alloc->imported.user_buf.dma_addrs);
+ }
+
+ /*
+ * Prevent nr_pages*per_page_size + sizeof(*alloc) from
+ * wrapping around.
+ */
+ if (nr_pages > ((((size_t) -1) - sizeof(*alloc))
+ / per_page_size))
+ return ERR_PTR(-ENOMEM);
+
+ /* Allocate based on the size to reduce internal fragmentation of vmem */
+ if (alloc_size > KBASE_MEM_PHY_ALLOC_LARGE_THRESHOLD)
+ alloc = vzalloc(alloc_size);
+ else
+ alloc = kzalloc(alloc_size, GFP_KERNEL);
+
+ if (!alloc)
+ return ERR_PTR(-ENOMEM);
+
+ /* Store allocation method */
+ if (alloc_size > KBASE_MEM_PHY_ALLOC_LARGE_THRESHOLD)
+ alloc->properties |= KBASE_MEM_PHY_ALLOC_LARGE;
+
+ kref_init(&alloc->kref);
+ atomic_set(&alloc->gpu_mappings, 0);
+ alloc->nents = 0;
+ alloc->pages = (void *)(alloc + 1);
+ INIT_LIST_HEAD(&alloc->mappings);
+ alloc->type = type;
+ INIT_LIST_HEAD(&alloc->zone_cache);
+
+ if (type == KBASE_MEM_TYPE_IMPORTED_USER_BUF)
+ alloc->imported.user_buf.dma_addrs =
+ (void *) (alloc->pages + nr_pages);
+
+ return alloc;
+}
+
+static inline int kbase_reg_prepare_native(struct kbase_va_region *reg,
+ struct kbase_context *kctx)
+{
+ KBASE_DEBUG_ASSERT(reg);
+ KBASE_DEBUG_ASSERT(!reg->cpu_alloc);
+ KBASE_DEBUG_ASSERT(!reg->gpu_alloc);
+ KBASE_DEBUG_ASSERT(reg->flags & KBASE_REG_FREE);
+
+ reg->cpu_alloc = kbase_alloc_create(reg->nr_pages,
+ KBASE_MEM_TYPE_NATIVE);
+ if (IS_ERR(reg->cpu_alloc))
+ return PTR_ERR(reg->cpu_alloc);
+ else if (!reg->cpu_alloc)
+ return -ENOMEM;
+ reg->cpu_alloc->imported.kctx = kctx;
+ INIT_LIST_HEAD(&reg->cpu_alloc->evict_node);
+ if (kbase_ctx_flag(kctx, KCTX_INFINITE_CACHE)
+ && (reg->flags & KBASE_REG_CPU_CACHED)) {
+ reg->gpu_alloc = kbase_alloc_create(reg->nr_pages,
+ KBASE_MEM_TYPE_NATIVE);
+ reg->gpu_alloc->imported.kctx = kctx;
+ INIT_LIST_HEAD(&reg->gpu_alloc->evict_node);
+ } else {
+ reg->gpu_alloc = kbase_mem_phy_alloc_get(reg->cpu_alloc);
+ }
+
+ INIT_LIST_HEAD(&reg->jit_node);
+ reg->flags &= ~KBASE_REG_FREE;
+ return 0;
+}
+
+static inline int kbase_atomic_add_pages(int num_pages, atomic_t *used_pages)
+{
+ int new_val = atomic_add_return(num_pages, used_pages);
+#if defined(CONFIG_MALI_GATOR_SUPPORT)
+ kbase_trace_mali_total_alloc_pages_change((long long int)new_val);
+#endif
+ return new_val;
+}
+
+static inline int kbase_atomic_sub_pages(int num_pages, atomic_t *used_pages)
+{
+ int new_val = atomic_sub_return(num_pages, used_pages);
+#if defined(CONFIG_MALI_GATOR_SUPPORT)
+ kbase_trace_mali_total_alloc_pages_change((long long int)new_val);
+#endif
+ return new_val;
+}
+
+/*
+ * Max size for kbdev memory pool (in pages)
+ */
+#define KBASE_MEM_POOL_MAX_SIZE_KBDEV (SZ_64M >> PAGE_SHIFT)
+
+/*
+ * Max size for kctx memory pool (in pages)
+ */
+#define KBASE_MEM_POOL_MAX_SIZE_KCTX (SZ_64M >> PAGE_SHIFT)
+
+/**
+ * kbase_mem_pool_init - Create a memory pool for a kbase device
+ * @pool: Memory pool to initialize
+ * @max_size: Maximum number of free pages the pool can hold
+ * @kbdev: Kbase device where memory is used
+ * @next_pool: Pointer to the next pool or NULL.
+ *
+ * Allocations from @pool are in whole pages. Each @pool has a free list where
+ * pages can be quickly allocated from. The free list is initially empty and
+ * filled whenever pages are freed back to the pool. The number of free pages
+ * in the pool will in general not exceed @max_size, but the pool may in
+ * certain corner cases grow above @max_size.
+ *
+ * If @next_pool is not NULL, we will allocate from @next_pool before going to
+ * the kernel allocator. Similarly pages can spill over to @next_pool when
+ * @pool is full. Pages are zeroed before they spill over to another pool, to
+ * prevent leaking information between applications.
+ *
+ * A shrinker is registered so that Linux mm can reclaim pages from the pool as
+ * needed.
+ *
+ * Return: 0 on success, negative -errno on error
+ */
+int kbase_mem_pool_init(struct kbase_mem_pool *pool,
+ size_t max_size,
+ struct kbase_device *kbdev,
+ struct kbase_mem_pool *next_pool);
+
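+/*
+ * Usage sketch only (hypothetical call site; the pool fields shown are
+ * assumptions, not guaranteed by this header): a per-context pool chained
+ * to a device-wide pool, so allocations fall back to the device pool before
+ * hitting the kernel allocator and frees can spill over into it.
+ *
+ *   err = kbase_mem_pool_init(&kctx->mem_pool, KBASE_MEM_POOL_MAX_SIZE_KCTX,
+ *                             kbdev, &kbdev->mem_pool);
+ *   if (err)
+ *           return err;
+ */
+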
+/**
+ * kbase_mem_pool_term - Destroy a memory pool
+ * @pool: Memory pool to destroy
+ *
+ * Pages in the pool will spill over to @next_pool (if available) or freed to
+ * the kernel.
+ */
+void kbase_mem_pool_term(struct kbase_mem_pool *pool);
+
+/**
+ * kbase_mem_pool_alloc - Allocate a page from memory pool
+ * @pool: Memory pool to allocate from
+ *
+ * Allocations from the pool are made as follows:
+ * 1. If there are free pages in the pool, allocate a page from @pool.
+ * 2. Otherwise, if @next_pool is not NULL and has free pages, allocate a page
+ * from @next_pool.
+ * 3. Return NULL if no memory in the pool
+ *
+ * Return: Pointer to allocated page, or NULL if allocation failed.
+ */
+struct page *kbase_mem_pool_alloc(struct kbase_mem_pool *pool);
+
+/**
+ * kbase_mem_pool_free - Free a page to memory pool
+ * @pool: Memory pool where page should be freed
+ * @page: Page to free to the pool
+ * @dirty: Whether some of the page may be dirty in the cache.
+ *
+ * Pages are freed to the pool as follows:
+ * 1. If @pool is not full, add @page to @pool.
+ * 2. Otherwise, if @next_pool is not NULL and not full, add @page to
+ * @next_pool.
+ * 3. Finally, free @page to the kernel.
+ */
+void kbase_mem_pool_free(struct kbase_mem_pool *pool, struct page *page,
+ bool dirty);
+
+/**
+ * kbase_mem_pool_alloc_pages - Allocate pages from memory pool
+ * @pool: Memory pool to allocate from
+ * @nr_pages: Number of pages to allocate
+ * @pages: Pointer to array where the physical address of the allocated
+ * pages will be stored.
+ *
+ * Like kbase_mem_pool_alloc() but optimized for allocating many pages.
+ *
+ * Return: 0 on success, negative -errno on error
+ */
+int kbase_mem_pool_alloc_pages(struct kbase_mem_pool *pool, size_t nr_pages,
+ phys_addr_t *pages);
+
+/**
+ * kbase_mem_pool_free_pages - Free pages to memory pool
+ * @pool: Memory pool where pages should be freed
+ * @nr_pages: Number of pages to free
+ * @pages: Pointer to array holding the physical addresses of the pages to
+ * free.
+ * @dirty: Whether any pages may be dirty in the cache.
+ * @reclaimed: Whether the pages were reclaimable and thus should bypass
+ * the pool and go straight to the kernel.
+ *
+ * Like kbase_mem_pool_free() but optimized for freeing many pages.
+ */
+void kbase_mem_pool_free_pages(struct kbase_mem_pool *pool, size_t nr_pages,
+ phys_addr_t *pages, bool dirty, bool reclaimed);
+
+/**
+ * kbase_mem_pool_size - Get number of free pages in memory pool
+ * @pool: Memory pool to inspect
+ *
+ * Note: the size of the pool may in certain corner cases exceed @max_size!
+ *
+ * Return: Number of free pages in the pool
+ */
+static inline size_t kbase_mem_pool_size(struct kbase_mem_pool *pool)
+{
+ return ACCESS_ONCE(pool->cur_size);
+}
+
+/**
+ * kbase_mem_pool_max_size - Get maximum number of free pages in memory pool
+ * @pool: Memory pool to inspect
+ *
+ * Return: Maximum number of free pages in the pool
+ */
+static inline size_t kbase_mem_pool_max_size(struct kbase_mem_pool *pool)
+{
+ return pool->max_size;
+}
+
+
+/**
+ * kbase_mem_pool_set_max_size - Set maximum number of free pages in memory pool
+ * @pool: Memory pool to modify
+ * @max_size: Maximum number of free pages the pool can hold
+ *
+ * If @max_size is reduced, the pool will be shrunk to adhere to the new limit.
+ * For details see kbase_mem_pool_shrink().
+ */
+void kbase_mem_pool_set_max_size(struct kbase_mem_pool *pool, size_t max_size);
+
+/**
+ * kbase_mem_pool_grow - Grow the pool
+ * @pool: Memory pool to grow
+ * @nr_to_grow: Number of pages to add to the pool
+ *
+ * Adds @nr_to_grow pages to the pool. Note that this may cause the pool to
+ * become larger than the maximum size specified.
+ *
+ * Return: 0 on success, -ENOMEM if unable to allocate sufficient pages
+ */
+int kbase_mem_pool_grow(struct kbase_mem_pool *pool, size_t nr_to_grow);
+
+/**
+ * kbase_mem_pool_trim - Grow or shrink the pool to a new size
+ * @pool: Memory pool to trim
+ * @new_size: New number of pages in the pool
+ *
+ * If @new_size > @cur_size, fill the pool with new pages from the kernel, but
+ * not above the max_size for the pool.
+ * If @new_size < @cur_size, shrink the pool by freeing pages to the kernel.
+ */
+void kbase_mem_pool_trim(struct kbase_mem_pool *pool, size_t new_size);
+
+/*
+ * kbase_mem_alloc_page - Allocate a new page for a device
+ * @kbdev: The kbase device
+ *
+ * Most uses should use kbase_mem_pool_alloc to allocate a page. However that
+ * function can fail in the event the pool is empty.
+ *
+ * Return: A new page or NULL if no memory
+ */
+struct page *kbase_mem_alloc_page(struct kbase_device *kbdev);
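+
+/*
+ * Usage sketch only (hypothetical caller; pool->kbdev is an assumption):
+ * allocate from the pool first and fall back to the kernel allocator when
+ * the pool (and its next_pool) are empty, then free back to the pool.
+ *
+ *   struct page *p = kbase_mem_pool_alloc(pool);
+ *
+ *   if (!p)
+ *           p = kbase_mem_alloc_page(pool->kbdev);
+ *   if (!p)
+ *           return -ENOMEM;
+ *   ...
+ *   kbase_mem_pool_free(pool, p, false);
+ */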
+
+int kbase_region_tracker_init(struct kbase_context *kctx);
+int kbase_region_tracker_init_jit(struct kbase_context *kctx, u64 jit_va_pages);
+void kbase_region_tracker_term(struct kbase_context *kctx);
+
+struct kbase_va_region *kbase_region_tracker_find_region_enclosing_address(struct kbase_context *kctx, u64 gpu_addr);
+
+/**
+ * @brief Check that a pointer is actually a valid region.
+ *
+ * Must be called with context lock held.
+ */
+struct kbase_va_region *kbase_region_tracker_find_region_base_address(struct kbase_context *kctx, u64 gpu_addr);
+
+struct kbase_va_region *kbase_alloc_free_region(struct kbase_context *kctx, u64 start_pfn, size_t nr_pages, int zone);
+void kbase_free_alloced_region(struct kbase_va_region *reg);
+int kbase_add_va_region(struct kbase_context *kctx, struct kbase_va_region *reg, u64 addr, size_t nr_pages, size_t align);
+
+bool kbase_check_alloc_flags(unsigned long flags);
+bool kbase_check_import_flags(unsigned long flags);
+void kbase_update_region_flags(struct kbase_context *kctx,
+ struct kbase_va_region *reg, unsigned long flags);
+
+void kbase_gpu_vm_lock(struct kbase_context *kctx);
+void kbase_gpu_vm_unlock(struct kbase_context *kctx);
+
+int kbase_alloc_phy_pages(struct kbase_va_region *reg, size_t vsize, size_t size);
+
+int kbase_mmu_init(struct kbase_context *kctx);
+void kbase_mmu_term(struct kbase_context *kctx);
+
+phys_addr_t kbase_mmu_alloc_pgd(struct kbase_context *kctx);
+void kbase_mmu_free_pgd(struct kbase_context *kctx);
+int kbase_mmu_insert_pages_no_flush(struct kbase_context *kctx, u64 vpfn,
+ phys_addr_t *phys, size_t nr,
+ unsigned long flags);
+int kbase_mmu_insert_pages(struct kbase_context *kctx, u64 vpfn,
+ phys_addr_t *phys, size_t nr,
+ unsigned long flags);
+int kbase_mmu_insert_single_page(struct kbase_context *kctx, u64 vpfn,
+ phys_addr_t phys, size_t nr,
+ unsigned long flags);
+
+int kbase_mmu_teardown_pages(struct kbase_context *kctx, u64 vpfn, size_t nr);
+int kbase_mmu_update_pages(struct kbase_context *kctx, u64 vpfn, phys_addr_t *phys, size_t nr, unsigned long flags);
+
+/**
+ * @brief Register region and map it on the GPU.
+ *
+ * Call kbase_add_va_region() and map the region on the GPU.
+ */
+int kbase_gpu_mmap(struct kbase_context *kctx, struct kbase_va_region *reg, u64 addr, size_t nr_pages, size_t align);
+
+/**
+ * @brief Remove the region from the GPU and unregister it.
+ *
+ * Must be called with context lock held.
+ */
+int kbase_gpu_munmap(struct kbase_context *kctx, struct kbase_va_region *reg);
+
+/**
+ * The caller has the following locking conditions:
+ * - It must hold kbase_device->mmu_hw_mutex
+ * - It must hold the hwaccess_lock
+ */
+void kbase_mmu_update(struct kbase_context *kctx);
+
+/**
+ * kbase_mmu_disable() - Disable the MMU for a previously active kbase context.
+ * @kctx: Kbase context
+ *
+ * Disable and perform the required cache maintenance to remove all
+ * data for the provided kbase context from the GPU caches.
+ *
+ * The caller has the following locking conditions:
+ * - It must hold kbase_device->mmu_hw_mutex
+ * - It must hold the hwaccess_lock
+ */
+void kbase_mmu_disable(struct kbase_context *kctx);
+
+/**
+ * kbase_mmu_disable_as() - Set the MMU to unmapped mode for the specified
+ * address space.
+ * @kbdev: Kbase device
+ * @as_nr: The address space number to set to unmapped.
+ *
+ * This function must only be called during reset/power-up, and it is used to
+ * ensure the registers are in a known state.
+ *
+ * The caller must hold kbdev->mmu_hw_mutex.
+ */
+void kbase_mmu_disable_as(struct kbase_device *kbdev, int as_nr);
+
+void kbase_mmu_interrupt(struct kbase_device *kbdev, u32 irq_stat);
+
+/** Dump the MMU tables to a buffer
+ *
+ * This function allocates a buffer (of @c nr_pages pages) to hold a dump of the MMU tables and fills it. If the
+ * buffer is too small then the return value will be NULL.
+ *
+ * The GPU vm lock must be held when calling this function.
+ *
+ * The buffer returned should be freed with @ref vfree when it is no longer required.
+ *
+ * @param[in] kctx The kbase context to dump
+ * @param[in] nr_pages The number of pages to allocate for the buffer.
+ *
+ * @return The address of the buffer containing the MMU dump or NULL on error (including if the @c nr_pages is too
+ * small)
+ */
+void *kbase_mmu_dump(struct kbase_context *kctx, int nr_pages);
+
+int kbase_sync_now(struct kbase_context *kctx, struct base_syncset *syncset);
+void kbase_sync_single(struct kbase_context *kctx, phys_addr_t cpu_pa,
+ phys_addr_t gpu_pa, off_t offset, size_t size,
+ enum kbase_sync_type sync_fn);
+void kbase_pre_job_sync(struct kbase_context *kctx, struct base_syncset *syncsets, size_t nr);
+void kbase_post_job_sync(struct kbase_context *kctx, struct base_syncset *syncsets, size_t nr);
+
+/* OS specific functions */
+int kbase_mem_free(struct kbase_context *kctx, u64 gpu_addr);
+int kbase_mem_free_region(struct kbase_context *kctx, struct kbase_va_region *reg);
+void kbase_os_mem_map_lock(struct kbase_context *kctx);
+void kbase_os_mem_map_unlock(struct kbase_context *kctx);
+
+/**
+ * @brief Update the memory allocation counters for the current process
+ *
+ * OS specific call to update the current memory allocation counters for the current process with
+ * the supplied delta.
+ *
+ * @param[in] kctx The kbase context
+ * @param[in] pages The desired delta to apply to the memory usage counters.
+ */
+
+void kbasep_os_process_page_usage_update(struct kbase_context *kctx, int pages);
+
+/**
+ * @brief Add to the memory allocation counters for the current process
+ *
+ * OS specific call to add to the current memory allocation counters for the current process by
+ * the supplied amount.
+ *
+ * @param[in] kctx The kernel base context used for the allocation.
+ * @param[in] pages The desired delta to apply to the memory usage counters.
+ */
+
+static inline void kbase_process_page_usage_inc(struct kbase_context *kctx, int pages)
+{
+ kbasep_os_process_page_usage_update(kctx, pages);
+}
+
+/**
+ * @brief Subtract from the memory allocation counters for the current process
+ *
+ * OS specific call to subtract from the current memory allocation counters for the current process by
+ * the supplied amount.
+ *
+ * @param[in] kctx The kernel base context used for the allocation.
+ * @param[in] pages The desired delta to apply to the memory usage counters.
+ */
+
+static inline void kbase_process_page_usage_dec(struct kbase_context *kctx, int pages)
+{
+ kbasep_os_process_page_usage_update(kctx, 0 - pages);
+}
+
+/**
+ * @brief Find the offset of the CPU mapping of a memory allocation containing
+ * a given address range
+ *
+ * Searches for a CPU mapping of any part of the region starting at @p gpu_addr
+ * that fully encloses the CPU virtual address range specified by @p uaddr and
+ * @p size. Returns a failure indication if only part of the address range lies
+ * within a CPU mapping, or the address range lies within a CPU mapping of a
+ * different region.
+ *
+ * @param[in,out] kctx The kernel base context used for the allocation.
+ * @param[in] gpu_addr GPU address of the start of the allocated region
+ * within which to search.
+ * @param[in] uaddr Start of the CPU virtual address range.
+ * @param[in] size Size of the CPU virtual address range (in bytes).
+ * @param[out] offset The offset from the start of the allocation to the
+ * specified CPU virtual address.
+ *
+ * @return 0 if offset was obtained successfully. Error code
+ * otherwise.
+ */
+int kbasep_find_enclosing_cpu_mapping_offset(struct kbase_context *kctx,
+ u64 gpu_addr,
+ unsigned long uaddr,
+ size_t size,
+ u64 *offset);
+
+enum hrtimer_restart kbasep_as_poke_timer_callback(struct hrtimer *timer);
+void kbase_as_poking_timer_retain_atom(struct kbase_device *kbdev, struct kbase_context *kctx, struct kbase_jd_atom *katom);
+void kbase_as_poking_timer_release_atom(struct kbase_device *kbdev, struct kbase_context *kctx, struct kbase_jd_atom *katom);
+
+/**
+* @brief Allocates physical pages.
+*
+* Allocates \a nr_pages_requested and updates the alloc object.
+*
+* @param[in] alloc allocation object to add pages to
+* @param[in] nr_pages_requested number of physical pages to allocate
+*
+* @return 0 if all pages have been successfully allocated. Error code otherwise
+*/
+int kbase_alloc_phy_pages_helper(struct kbase_mem_phy_alloc *alloc, size_t nr_pages_requested);
+
+/**
+* @brief Free physical pages.
+*
+* Frees \a nr_pages and updates the alloc object.
+*
+* @param[in] alloc allocation object to free pages from
+* @param[in] nr_pages_to_free number of physical pages to free
+*/
+int kbase_free_phy_pages_helper(struct kbase_mem_phy_alloc *alloc, size_t nr_pages_to_free);
+
+static inline void kbase_set_dma_addr(struct page *p, dma_addr_t dma_addr)
+{
+ SetPagePrivate(p);
+ if (sizeof(dma_addr_t) > sizeof(p->private)) {
+ /* on 32-bit ARM with LPAE dma_addr_t becomes larger, but the
+ * private field stays the same. So we have to be clever and
+ * use the fact that we only store DMA addresses of whole pages,
+ * so the low bits should be zero */
+ KBASE_DEBUG_ASSERT(!(dma_addr & (PAGE_SIZE - 1)));
+ set_page_private(p, dma_addr >> PAGE_SHIFT);
+ } else {
+ set_page_private(p, dma_addr);
+ }
+}
+
+static inline dma_addr_t kbase_dma_addr(struct page *p)
+{
+ if (sizeof(dma_addr_t) > sizeof(p->private))
+ return ((dma_addr_t)page_private(p)) << PAGE_SHIFT;
+
+ return (dma_addr_t)page_private(p);
+}
+
+static inline void kbase_clear_dma_addr(struct page *p)
+{
+ ClearPagePrivate(p);
+}
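+
+/*
+ * Worked example (illustrative only, assuming 4 KiB pages): on 32-bit ARM
+ * with LPAE, dma_addr_t is 64-bit while page->private is 32-bit. Storing a
+ * page-aligned DMA address such as 0x123456000 therefore keeps only
+ * 0x123456 (the address shifted right by PAGE_SHIFT); kbase_dma_addr()
+ * shifts it back, which is lossless because the low PAGE_SHIFT bits of a
+ * page-aligned address are always zero.
+ */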
+
+/**
+* @brief Process a bus or page fault.
+*
+* This function will process a fault on a specific address space
+*
+* @param[in] kbdev The @ref kbase_device the fault happened on
+* @param[in] kctx The @ref kbase_context for the faulting address space if
+* one was found.
+* @param[in] as The address space that has the fault
+*/
+void kbase_mmu_interrupt_process(struct kbase_device *kbdev,
+ struct kbase_context *kctx, struct kbase_as *as);
+
+/**
+ * @brief Process a page fault.
+ *
+ * @param[in] data work_struct passed by queue_work()
+ */
+void page_fault_worker(struct work_struct *data);
+
+/**
+ * @brief Process a bus fault.
+ *
+ * @param[in] data work_struct passed by queue_work()
+ */
+void bus_fault_worker(struct work_struct *data);
+
+/**
+ * @brief Flush MMU workqueues.
+ *
+ * This function will cause any outstanding page or bus faults to be processed.
+ * It should be called prior to powering off the GPU.
+ *
+ * @param[in] kbdev Device pointer
+ */
+void kbase_flush_mmu_wqs(struct kbase_device *kbdev);
+
+/**
+ * kbase_sync_single_for_device - update physical memory and give GPU ownership
+ * @kbdev: Device pointer
+ * @handle: DMA address of region
+ * @size: Size of region to sync
+ * @dir: DMA data direction
+ */
+
+void kbase_sync_single_for_device(struct kbase_device *kbdev, dma_addr_t handle,
+ size_t size, enum dma_data_direction dir);
+
+/**
+ * kbase_sync_single_for_cpu - update physical memory and give CPU ownership
+ * @kbdev: Device pointer
+ * @handle: DMA address of region
+ * @size: Size of region to sync
+ * @dir: DMA data direction
+ */
+
+void kbase_sync_single_for_cpu(struct kbase_device *kbdev, dma_addr_t handle,
+ size_t size, enum dma_data_direction dir);
+
+#ifdef CONFIG_DEBUG_FS
+/**
+ * kbase_jit_debugfs_init - Add per context debugfs entry for JIT.
+ * @kctx: kbase context
+ */
+void kbase_jit_debugfs_init(struct kbase_context *kctx);
+#endif /* CONFIG_DEBUG_FS */
+
+/**
+ * kbase_jit_init - Initialize the JIT memory pool management
+ * @kctx: kbase context
+ *
+ * Returns zero on success or negative error number on failure.
+ */
+int kbase_jit_init(struct kbase_context *kctx);
+
+/**
+ * kbase_jit_allocate - Allocate JIT memory
+ * @kctx: kbase context
+ * @info: JIT allocation information
+ *
+ * Return: JIT allocation on success or NULL on failure.
+ */
+struct kbase_va_region *kbase_jit_allocate(struct kbase_context *kctx,
+ struct base_jit_alloc_info *info);
+
+/**
+ * kbase_jit_free - Free a JIT allocation
+ * @kctx: kbase context
+ * @reg: JIT allocation
+ *
+ * Frees a JIT allocation and places it into the free pool for later reuse.
+ */
+void kbase_jit_free(struct kbase_context *kctx, struct kbase_va_region *reg);
+
+/**
+ * kbase_jit_backing_lost - Inform JIT that an allocation has lost backing
+ * @reg: JIT allocation
+ */
+void kbase_jit_backing_lost(struct kbase_va_region *reg);
+
+/**
+ * kbase_jit_evict - Evict a JIT allocation from the pool
+ * @kctx: kbase context
+ *
+ * Evict the least recently used JIT allocation from the pool. This can be
+ * required if normal VA allocations are failing due to VA exhaustion.
+ *
+ * Return: True if a JIT allocation was freed, false otherwise.
+ */
+bool kbase_jit_evict(struct kbase_context *kctx);
+
+/**
+ * kbase_jit_term - Terminate the JIT memory pool management
+ * @kctx: kbase context
+ */
+void kbase_jit_term(struct kbase_context *kctx);
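+
+/*
+ * Illustrative usage sketch of the JIT helpers above (not part of the driver;
+ * the contents of base_jit_alloc_info are elided as its fields are defined
+ * elsewhere):
+ *
+ *   struct base_jit_alloc_info info = { ... };
+ *   struct kbase_va_region *reg;
+ *   int err;
+ *
+ *   err = kbase_jit_init(kctx);
+ *   if (err)
+ *           return err;
+ *   reg = kbase_jit_allocate(kctx, &info);
+ *   if (reg)
+ *           kbase_jit_free(kctx, reg);   // returned to the pool for reuse
+ *   kbase_jit_term(kctx);
+ */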
+
+/**
+ * kbase_map_external_resource - Map an external resource to the GPU.
+ * @kctx: kbase context.
+ * @reg: The region to map.
+ * @locked_mm: The mm_struct which has been locked for this operation.
+ * @kds_res_count: The number of KDS resources.
+ * @kds_resources: Array of KDS resources.
+ * @kds_access_bitmap: Access bitmap for KDS.
+ * @exclusive: If the KDS resource requires exclusive access.
+ *
+ * Return: The physical allocation which backs the region on success or NULL
+ * on failure.
+ */
+struct kbase_mem_phy_alloc *kbase_map_external_resource(
+ struct kbase_context *kctx, struct kbase_va_region *reg,
+ struct mm_struct *locked_mm
+#ifdef CONFIG_KDS
+ , u32 *kds_res_count, struct kds_resource **kds_resources,
+ unsigned long *kds_access_bitmap, bool exclusive
+#endif
+ );
+
+/**
+ * kbase_unmap_external_resource - Unmap an external resource from the GPU.
+ * @kctx: kbase context.
+ * @reg: The region to unmap or NULL if it has already been released.
+ * @alloc: The physical allocation being unmapped.
+ */
+void kbase_unmap_external_resource(struct kbase_context *kctx,
+ struct kbase_va_region *reg, struct kbase_mem_phy_alloc *alloc);
+
+/**
+ * kbase_sticky_resource_init - Initialize sticky resource management.
+ * @kctx: kbase context
+ *
+ * Returns zero on success or negative error number on failure.
+ */
+int kbase_sticky_resource_init(struct kbase_context *kctx);
+
+/**
+ * kbase_sticky_resource_acquire - Acquire a reference on a sticky resource.
+ * @kctx: kbase context.
+ * @gpu_addr: The GPU address of the external resource.
+ *
+ * Return: The metadata object which represents the binding between the
+ * external resource and the kbase context on success or NULL on failure.
+ */
+struct kbase_ctx_ext_res_meta *kbase_sticky_resource_acquire(
+ struct kbase_context *kctx, u64 gpu_addr);
+
+/**
+ * kbase_sticky_resource_release - Release a reference on a sticky resource.
+ * @kctx: kbase context.
+ * @meta: Binding metadata.
+ * @gpu_addr: GPU address of the external resource.
+ *
+ * If meta is NULL then gpu_addr will be used to scan the metadata list and
+ * find the matching metadata (if any), otherwise the provided meta will be
+ * used and gpu_addr will be ignored.
+ *
+ * Return: True if the release found the metadata and the reference was dropped.
+ */
+bool kbase_sticky_resource_release(struct kbase_context *kctx,
+ struct kbase_ctx_ext_res_meta *meta, u64 gpu_addr);
+
+/**
+ * kbase_sticky_resource_term - Terminate sticky resource management.
+ * @kctx: kbase context
+ */
+void kbase_sticky_resource_term(struct kbase_context *kctx);
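+
+/*
+ * Illustrative usage sketch of the sticky resource helpers above (not part of
+ * the driver): acquire a binding for an external resource by GPU address and
+ * release it later, either through the returned metadata or by address alone.
+ *
+ *   struct kbase_ctx_ext_res_meta *meta;
+ *
+ *   if (kbase_sticky_resource_init(kctx))
+ *           return -ENOMEM;
+ *   meta = kbase_sticky_resource_acquire(kctx, gpu_addr);
+ *   if (meta)
+ *           kbase_sticky_resource_release(kctx, meta, 0);
+ *   // or: kbase_sticky_resource_release(kctx, NULL, gpu_addr);
+ *   kbase_sticky_resource_term(kctx);
+ */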
+
+/**
+ * kbase_zone_cache_update - Update the memory zone cache after new pages have
+ * been added.
+ * @alloc: The physical memory allocation to build the cache for.
+ * @start_offset: Offset to where the new pages start.
+ *
+ * Updates an existing memory zone cache, updating the counters for the
+ * various zones.
+ * If the memory allocation doesn't already have a zone cache, assume one
+ * was never created and do nothing.
+ *
+ * Return: Zero if the cache was updated, negative error code on error.
+ */
+int kbase_zone_cache_update(struct kbase_mem_phy_alloc *alloc,
+ size_t start_offset);
+
+/**
+ * kbase_zone_cache_build - Build the memory zone cache.
+ * @alloc: The physical memory allocation to build the cache for.
+ *
+ * Create a new zone cache for the provided physical memory allocation if
+ * one doesn't already exist, if one does exist then just return.
+ *
+ * Return: Zero if the zone cache was created, negative error code on error.
+ */
+int kbase_zone_cache_build(struct kbase_mem_phy_alloc *alloc);
+
+/**
+ * kbase_zone_cache_clear - Clear the memory zone cache.
+ * @alloc: The physical memory allocation to clear the cache on.
+ */
+void kbase_zone_cache_clear(struct kbase_mem_phy_alloc *alloc);
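+
+/*
+ * Illustrative lifecycle sketch of the zone cache helpers above (not part of
+ * the driver): build the cache once, update it whenever pages are appended to
+ * the allocation, and clear it when the allocation is torn down.
+ *
+ *   if (kbase_zone_cache_build(alloc) != 0)
+ *           return;    // callers fall back to per-page zone updates on failure
+ *   ...
+ *   kbase_zone_cache_update(alloc, old_nents);   // old_nents = page count before the append
+ *   ...
+ *   kbase_zone_cache_clear(alloc);
+ */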
+
+#endif /* _KBASE_MEM_H_ */
diff --git a/midgard-0.r2p0/mali_kbase_mem_linux.c b/midgard-0.r2p0/mali_kbase_mem_linux.c
new file mode 100755
index 0000000..b6dac55
--- /dev/null
+++ b/midgard-0.r2p0/mali_kbase_mem_linux.c
@@ -0,0 +1,2819 @@
+/*
+ *
+ * (C) COPYRIGHT 2010-2016 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained
+ * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
+ * Boston, MA 02110-1301, USA.
+ *
+ */
+
+
+
+
+
+/**
+ * @file mali_kbase_mem_linux.c
+ * Base kernel memory APIs, Linux implementation.
+ */
+
+#include <linux/compat.h>
+#include <linux/kernel.h>
+#include <linux/bug.h>
+#include <linux/mm.h>
+#include <linux/mman.h>
+#include <linux/fs.h>
+#include <linux/version.h>
+#include <linux/dma-mapping.h>
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 5, 0)) && \
+ (LINUX_VERSION_CODE < KERNEL_VERSION(4, 8, 0))
+#include <linux/dma-attrs.h>
+#endif /* LINUX_VERSION_CODE >= 3.5.0 && < 4.8.0 */
+#ifdef CONFIG_DMA_SHARED_BUFFER
+#include <linux/dma-buf.h>
+#endif /* defined(CONFIG_DMA_SHARED_BUFFER) */
+#include <linux/shrinker.h>
+#include <linux/cache.h>
+
+#include <mali_kbase.h>
+#include <mali_kbase_mem_linux.h>
+#include <mali_kbase_config_defaults.h>
+#include <mali_kbase_hwaccess_time.h>
+#include <mali_kbase_tlstream.h>
+
+static int kbase_tracking_page_setup(struct kbase_context *kctx, struct vm_area_struct *vma);
+static const struct vm_operations_struct kbase_vm_ops;
+
+/**
+ * kbase_mem_shrink_cpu_mapping - Shrink the CPU mapping(s) of an allocation
+ * @kctx: Context the region belongs to
+ * @reg: The GPU region
+ * @new_pages: The number of pages after the shrink
+ * @old_pages: The number of pages before the shrink
+ *
+ * Return: 0 on success, -errno on error.
+ *
+ * Shrink (or completely remove) all CPU mappings which reference the shrunk
+ * part of the allocation.
+ *
+ * Note: Caller must be holding the process's mmap_sem lock.
+ */
+static int kbase_mem_shrink_cpu_mapping(struct kbase_context *kctx,
+ struct kbase_va_region *reg,
+ u64 new_pages, u64 old_pages);
+
+/**
+ * kbase_mem_shrink_gpu_mapping - Shrink the GPU mapping of an allocation
+ * @kctx: Context the region belongs to
+ * @reg: The GPU region or NULL if there isn't one
+ * @new_pages: The number of pages after the shrink
+ * @old_pages: The number of pages before the shrink
+ *
+ * Return: 0 on success, -errno on error
+ *
+ * Unmap the shrunk pages from the GPU mapping. Note that the size of the
+ * region itself is left unmodified, as the VA still needs to be reserved;
+ * only the page tables are modified by this function.
+ */
+static int kbase_mem_shrink_gpu_mapping(struct kbase_context *kctx,
+ struct kbase_va_region *reg,
+ u64 new_pages, u64 old_pages);
+
+struct kbase_va_region *kbase_mem_alloc(struct kbase_context *kctx, u64 va_pages, u64 commit_pages, u64 extent, u64 *flags, u64 *gpu_va, u16 *va_alignment)
+{
+ int zone;
+ int gpu_pc_bits;
+ int cpu_va_bits;
+ struct kbase_va_region *reg;
+ struct device *dev;
+
+ KBASE_DEBUG_ASSERT(kctx);
+ KBASE_DEBUG_ASSERT(flags);
+ KBASE_DEBUG_ASSERT(gpu_va);
+ KBASE_DEBUG_ASSERT(va_alignment);
+
+ dev = kctx->kbdev->dev;
+ *va_alignment = 0; /* no alignment by default */
+ *gpu_va = 0; /* return 0 on failure */
+
+ gpu_pc_bits = kctx->kbdev->gpu_props.props.core_props.log2_program_counter_size;
+ cpu_va_bits = BITS_PER_LONG;
+
+ if (0 == va_pages) {
+ dev_warn(dev, "kbase_mem_alloc called with 0 va_pages!");
+ goto bad_size;
+ }
+
+ if (va_pages > (U64_MAX / PAGE_SIZE))
+ /* 64-bit address range is the max */
+ goto bad_size;
+
+#if defined(CONFIG_64BIT)
+ if (kbase_ctx_flag(kctx, KCTX_COMPAT))
+ cpu_va_bits = 32;
+#endif
+
+ if (!kbase_check_alloc_flags(*flags)) {
+ dev_warn(dev,
+ "kbase_mem_alloc called with bad flags (%llx)",
+ (unsigned long long)*flags);
+ goto bad_flags;
+ }
+
+ if ((*flags & BASE_MEM_COHERENT_SYSTEM_REQUIRED) != 0 &&
+ !kbase_device_is_cpu_coherent(kctx->kbdev)) {
+ dev_warn(dev, "kbase_mem_alloc call required coherent mem when unavailable");
+ goto bad_flags;
+ }
+ if ((*flags & BASE_MEM_COHERENT_SYSTEM) != 0 &&
+ !kbase_device_is_cpu_coherent(kctx->kbdev)) {
+ /* Remove COHERENT_SYSTEM flag if coherent mem is unavailable */
+ *flags &= ~BASE_MEM_COHERENT_SYSTEM;
+ }
+
+ /* Limit GPU executable allocs to GPU PC size */
+ if ((*flags & BASE_MEM_PROT_GPU_EX) &&
+ (va_pages > (1ULL << gpu_pc_bits >> PAGE_SHIFT)))
+ goto bad_ex_size;
+
+ /* find out which VA zone to use */
+ if (*flags & BASE_MEM_SAME_VA)
+ zone = KBASE_REG_ZONE_SAME_VA;
+ else if (*flags & BASE_MEM_PROT_GPU_EX)
+ zone = KBASE_REG_ZONE_EXEC;
+ else
+ zone = KBASE_REG_ZONE_CUSTOM_VA;
+
+ reg = kbase_alloc_free_region(kctx, 0, va_pages, zone);
+ if (!reg) {
+ dev_err(dev, "Failed to allocate free region");
+ goto no_region;
+ }
+
+ kbase_update_region_flags(kctx, reg, *flags);
+
+ if (kbase_reg_prepare_native(reg, kctx) != 0) {
+ dev_err(dev, "Failed to prepare region");
+ goto prepare_failed;
+ }
+
+ if (*flags & BASE_MEM_GROW_ON_GPF)
+ reg->extent = extent;
+ else
+ reg->extent = 0;
+
+ if (kbase_alloc_phy_pages(reg, va_pages, commit_pages) != 0) {
+ dev_warn(dev, "Failed to allocate %lld pages (va_pages=%lld)",
+ (unsigned long long)commit_pages,
+ (unsigned long long)va_pages);
+ goto no_mem;
+ }
+
+ kbase_gpu_vm_lock(kctx);
+
+ /* mmap needed to setup VA? */
+ if (*flags & BASE_MEM_SAME_VA) {
+ unsigned long prot = PROT_NONE;
+ unsigned long va_size = va_pages << PAGE_SHIFT;
+ unsigned long va_map = va_size;
+ unsigned long cookie, cookie_nr;
+ unsigned long cpu_addr;
+
+ /* Bind to a cookie */
+ if (!kctx->cookies) {
+ dev_err(dev, "No cookies available for allocation!");
+ kbase_gpu_vm_unlock(kctx);
+ goto no_cookie;
+ }
+ /* return a cookie */
+ cookie_nr = __ffs(kctx->cookies);
+ kctx->cookies &= ~(1UL << cookie_nr);
+ BUG_ON(kctx->pending_regions[cookie_nr]);
+ kctx->pending_regions[cookie_nr] = reg;
+
+ kbase_gpu_vm_unlock(kctx);
+
+ /* relocate to correct base */
+ cookie = cookie_nr + PFN_DOWN(BASE_MEM_COOKIE_BASE);
+ cookie <<= PAGE_SHIFT;
+
+ /* See if we must align memory due to GPU PC bits vs CPU VA */
+ if ((*flags & BASE_MEM_PROT_GPU_EX) &&
+ (cpu_va_bits > gpu_pc_bits)) {
+ *va_alignment = gpu_pc_bits;
+ reg->flags |= KBASE_REG_ALIGNED;
+ }
+
+ /*
+ * 10.1-10.4 UKU userland relies on the kernel to call mmap.
+ * For all other versions we can just return the cookie
+ */
+ if (kctx->api_version < KBASE_API_VERSION(10, 1) ||
+ kctx->api_version > KBASE_API_VERSION(10, 4)) {
+ *gpu_va = (u64) cookie;
+ return reg;
+ }
+
+ /*
+ * To achieve alignment and avoid allocating on large alignment
+ * (to work around a GPU hardware issue) we must allocate 3
+ * times the required size.
+ */
+ if (*va_alignment)
+ va_map += 3 * (1UL << *va_alignment);
+
+ if (*flags & BASE_MEM_PROT_CPU_RD)
+ prot |= PROT_READ;
+ if (*flags & BASE_MEM_PROT_CPU_WR)
+ prot |= PROT_WRITE;
+
+ cpu_addr = vm_mmap(kctx->filp, 0, va_map, prot,
+ MAP_SHARED, cookie);
+
+ if (IS_ERR_VALUE(cpu_addr)) {
+ kbase_gpu_vm_lock(kctx);
+ kctx->pending_regions[cookie_nr] = NULL;
+ kctx->cookies |= (1UL << cookie_nr);
+ kbase_gpu_vm_unlock(kctx);
+ goto no_mmap;
+ }
+
+ /*
+ * If we had to allocate extra VA space to force the
+ * alignment, release it.
+ */
+ if (*va_alignment) {
+ unsigned long alignment = 1UL << *va_alignment;
+ unsigned long align_mask = alignment - 1;
+ unsigned long addr;
+ unsigned long addr_end;
+ unsigned long aligned_addr;
+ unsigned long aligned_addr_end;
+
+ addr = cpu_addr;
+ addr_end = addr + va_map;
+
+ aligned_addr = (addr + align_mask) &
+ ~((u64) align_mask);
+ aligned_addr_end = aligned_addr + va_size;
+
+ if ((aligned_addr_end & BASE_MEM_MASK_4GB) == 0) {
+ /*
+ * Can't end at 4GB boundary on some GPUs as
+ * it will halt the shader.
+ */
+ aligned_addr += 2 * alignment;
+ aligned_addr_end += 2 * alignment;
+ } else if ((aligned_addr & BASE_MEM_MASK_4GB) == 0) {
+ /*
+ * Can't start at 4GB boundary on some GPUs as
+ * it will halt the shader.
+ */
+ aligned_addr += alignment;
+ aligned_addr_end += alignment;
+ }
+
+ /* anything to chop off at the start? */
+ if (addr != aligned_addr)
+ vm_munmap(addr, aligned_addr - addr);
+
+ /* anything at the end? */
+ if (addr_end != aligned_addr_end)
+ vm_munmap(aligned_addr_end,
+ addr_end - aligned_addr_end);
+
+ *gpu_va = (u64) aligned_addr;
+ } else
+ *gpu_va = (u64) cpu_addr;
+ } else /* we control the VA */ {
+ if (kbase_gpu_mmap(kctx, reg, 0, va_pages, 1) != 0) {
+ dev_warn(dev, "Failed to map memory on GPU");
+ kbase_gpu_vm_unlock(kctx);
+ goto no_mmap;
+ }
+ /* return real GPU VA */
+ *gpu_va = reg->start_pfn << PAGE_SHIFT;
+
+ kbase_gpu_vm_unlock(kctx);
+ }
+
+ return reg;
+
+no_mmap:
+no_cookie:
+no_mem:
+ kbase_mem_phy_alloc_put(reg->cpu_alloc);
+ kbase_mem_phy_alloc_put(reg->gpu_alloc);
+prepare_failed:
+ kfree(reg);
+no_region:
+bad_ex_size:
+bad_flags:
+bad_size:
+ return NULL;
+}
+KBASE_EXPORT_TEST_API(kbase_mem_alloc);
+
+int kbase_mem_query(struct kbase_context *kctx, u64 gpu_addr, int query, u64 * const out)
+{
+ struct kbase_va_region *reg;
+ int ret = -EINVAL;
+
+ KBASE_DEBUG_ASSERT(kctx);
+ KBASE_DEBUG_ASSERT(out);
+
+ kbase_gpu_vm_lock(kctx);
+
+ /* Validate the region */
+ reg = kbase_region_tracker_find_region_base_address(kctx, gpu_addr);
+ if (!reg || (reg->flags & KBASE_REG_FREE))
+ goto out_unlock;
+
+ switch (query) {
+ case KBASE_MEM_QUERY_COMMIT_SIZE:
+ if (reg->cpu_alloc->type != KBASE_MEM_TYPE_ALIAS) {
+ *out = kbase_reg_current_backed_size(reg);
+ } else {
+ size_t i;
+ struct kbase_aliased *aliased;
+ *out = 0;
+ aliased = reg->cpu_alloc->imported.alias.aliased;
+ for (i = 0; i < reg->cpu_alloc->imported.alias.nents; i++)
+ *out += aliased[i].length;
+ }
+ break;
+ case KBASE_MEM_QUERY_VA_SIZE:
+ *out = reg->nr_pages;
+ break;
+ case KBASE_MEM_QUERY_FLAGS:
+ {
+ *out = 0;
+ if (KBASE_REG_CPU_WR & reg->flags)
+ *out |= BASE_MEM_PROT_CPU_WR;
+ if (KBASE_REG_CPU_RD & reg->flags)
+ *out |= BASE_MEM_PROT_CPU_RD;
+ if (KBASE_REG_CPU_CACHED & reg->flags)
+ *out |= BASE_MEM_CACHED_CPU;
+ if (KBASE_REG_GPU_WR & reg->flags)
+ *out |= BASE_MEM_PROT_GPU_WR;
+ if (KBASE_REG_GPU_RD & reg->flags)
+ *out |= BASE_MEM_PROT_GPU_RD;
+ if (!(KBASE_REG_GPU_NX & reg->flags))
+ *out |= BASE_MEM_PROT_GPU_EX;
+ if (KBASE_REG_SHARE_BOTH & reg->flags)
+ *out |= BASE_MEM_COHERENT_SYSTEM;
+ if (KBASE_REG_SHARE_IN & reg->flags)
+ *out |= BASE_MEM_COHERENT_LOCAL;
+ break;
+ }
+ default:
+ *out = 0;
+ goto out_unlock;
+ }
+
+ ret = 0;
+
+out_unlock:
+ kbase_gpu_vm_unlock(kctx);
+ return ret;
+}
+
+/**
+ * kbase_mem_evictable_reclaim_count_objects - Count number of pages in the
+ * Ephemeral memory eviction list.
+ * @s: Shrinker
+ * @sc: Shrinker control
+ *
+ * Return: Number of pages which can be freed.
+ */
+static
+unsigned long kbase_mem_evictable_reclaim_count_objects(struct shrinker *s,
+ struct shrink_control *sc)
+{
+ struct kbase_context *kctx;
+ struct kbase_mem_phy_alloc *alloc;
+ unsigned long pages = 0;
+
+ kctx = container_of(s, struct kbase_context, reclaim);
+
+ mutex_lock(&kctx->evict_lock);
+
+ list_for_each_entry(alloc, &kctx->evict_list, evict_node)
+ pages += alloc->nents;
+
+ mutex_unlock(&kctx->evict_lock);
+ return pages;
+}
+
+/**
+ * kbase_mem_evictable_reclaim_scan_objects - Scan the Ephemeral memory eviction
+ * list for pages and try to reclaim them.
+ * @s: Shrinker
+ * @sc: Shrinker control
+ *
+ * Return: Number of pages freed (can be less than requested) or -1 if the
+ * shrinker failed to free pages in its pool.
+ *
+ * Note:
+ * This function accesses region structures without taking the region lock;
+ * this is required because the OOM killer can call the shrinker while the
+ * region lock is already held.
+ * This is safe as we can guarantee that a region on the eviction list will
+ * not be freed (kbase_mem_free_region removes the allocation from the list
+ * before destroying it), or modified by other parts of the driver.
+ * The eviction list itself is guarded by the eviction lock and the MMU updates
+ * are protected by their own lock.
+ */
+static
+unsigned long kbase_mem_evictable_reclaim_scan_objects(struct shrinker *s,
+ struct shrink_control *sc)
+{
+ struct kbase_context *kctx;
+ struct kbase_mem_phy_alloc *alloc;
+ struct kbase_mem_phy_alloc *tmp;
+ unsigned long freed = 0;
+
+ kctx = container_of(s, struct kbase_context, reclaim);
+ mutex_lock(&kctx->evict_lock);
+
+ list_for_each_entry_safe(alloc, tmp, &kctx->evict_list, evict_node) {
+ int err;
+
+ err = kbase_mem_shrink_gpu_mapping(kctx, alloc->reg,
+ 0, alloc->nents);
+ if (err != 0) {
+ /*
+ * Failed to remove the GPU mapping; tell the shrinker
+ * to stop trying to shrink our slab even though we
+ * have pages in it.
+ */
+ freed = -1;
+ goto out_unlock;
+ }
+
+ /*
+ * Update alloc->evicted before freeing the backing so the
+ * helper can determine that it needs to bypass the accounting
+ * and memory pool.
+ */
+ alloc->evicted = alloc->nents;
+
+ kbase_free_phy_pages_helper(alloc, alloc->evicted);
+ freed += alloc->evicted;
+ list_del_init(&alloc->evict_node);
+
+ /*
+ * Inform the JIT allocator this region has lost backing
+ * as it might need to free the allocation.
+ */
+ kbase_jit_backing_lost(alloc->reg);
+
+ /* Enough pages have been freed so stop now */
+ if (freed > sc->nr_to_scan)
+ break;
+ }
+out_unlock:
+ mutex_unlock(&kctx->evict_lock);
+
+ return freed;
+}
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 12, 0)
+static int kbase_mem_evictable_reclaim_shrink(struct shrinker *s,
+ struct shrink_control *sc)
+{
+ if (sc->nr_to_scan == 0)
+ return kbase_mem_evictable_reclaim_count_objects(s, sc);
+
+ return kbase_mem_evictable_reclaim_scan_objects(s, sc);
+}
+#endif
+
+int kbase_mem_evictable_init(struct kbase_context *kctx)
+{
+ INIT_LIST_HEAD(&kctx->evict_list);
+ mutex_init(&kctx->evict_lock);
+
+ /* Register shrinker */
+#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 12, 0)
+ kctx->reclaim.shrink = kbase_mem_evictable_reclaim_shrink;
+#else
+ kctx->reclaim.count_objects = kbase_mem_evictable_reclaim_count_objects;
+ kctx->reclaim.scan_objects = kbase_mem_evictable_reclaim_scan_objects;
+#endif
+ kctx->reclaim.seeks = DEFAULT_SEEKS;
+ /* Kernel versions prior to 3.1 :
+ * struct shrinker does not define batch */
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 1, 0)
+ kctx->reclaim.batch = 0;
+#endif
+ register_shrinker(&kctx->reclaim);
+ return 0;
+}
+
+void kbase_mem_evictable_deinit(struct kbase_context *kctx)
+{
+ unregister_shrinker(&kctx->reclaim);
+}
+
+struct kbase_mem_zone_cache_entry {
+ /* List head used to link the cache entry to the memory allocation. */
+ struct list_head zone_node;
+ /* The zone this cache entry is for. */
+ struct zone *zone;
+ /* The number of pages in the allocation which belong to this zone. */
+ u64 count;
+};
+
+static int kbase_zone_cache_builder(struct kbase_mem_phy_alloc *alloc,
+ size_t start_offset)
+{
+ struct kbase_mem_zone_cache_entry *cache = NULL;
+ size_t i;
+ int ret = 0;
+
+ for (i = start_offset; i < alloc->nents; i++) {
+ struct page *p = phys_to_page(alloc->pages[i]);
+ struct zone *zone = page_zone(p);
+ bool create = true;
+
+ if (cache && (cache->zone == zone)) {
+ /*
+ * Fast path check as most of the time adjacent
+ * pages come from the same zone.
+ */
+ create = false;
+ } else {
+ /*
+ * Slow path check, walk all the cache entries to see
+ * if we already know about this zone.
+ */
+ list_for_each_entry(cache, &alloc->zone_cache, zone_node) {
+ if (cache->zone == zone) {
+ create = false;
+ break;
+ }
+ }
+ }
+
+ /* This zone wasn't found in the cache, create an entry for it */
+ if (create) {
+ cache = kmalloc(sizeof(*cache), GFP_KERNEL);
+ if (!cache) {
+ ret = -ENOMEM;
+ goto bail;
+ }
+ cache->zone = zone;
+ cache->count = 0;
+ list_add(&cache->zone_node, &alloc->zone_cache);
+ }
+
+ cache->count++;
+ }
+ return 0;
+
+bail:
+ return ret;
+}
+
+int kbase_zone_cache_update(struct kbase_mem_phy_alloc *alloc,
+ size_t start_offset)
+{
+ /*
+ * Bail if the zone cache is empty; only update the cache if it
+ * existed in the first place.
+ */
+ if (list_empty(&alloc->zone_cache))
+ return 0;
+
+ return kbase_zone_cache_builder(alloc, start_offset);
+}
+
+int kbase_zone_cache_build(struct kbase_mem_phy_alloc *alloc)
+{
+ /* Bail if the zone cache already exists */
+ if (!list_empty(&alloc->zone_cache))
+ return 0;
+
+ return kbase_zone_cache_builder(alloc, 0);
+}
+
+void kbase_zone_cache_clear(struct kbase_mem_phy_alloc *alloc)
+{
+ struct kbase_mem_zone_cache_entry *walker;
+
+ while (!list_empty(&alloc->zone_cache)) {
+ walker = list_first_entry(&alloc->zone_cache,
+ struct kbase_mem_zone_cache_entry, zone_node);
+ list_del(&walker->zone_node);
+ kfree(walker);
+ }
+}
+
+/**
+ * kbase_mem_evictable_mark_reclaim - Mark the pages as reclaimable.
+ * @alloc: The physical allocation
+ */
+static void kbase_mem_evictable_mark_reclaim(struct kbase_mem_phy_alloc *alloc)
+{
+ struct kbase_context *kctx = alloc->imported.kctx;
+ struct kbase_mem_zone_cache_entry *zone_cache;
+ int __maybe_unused new_page_count;
+ int err;
+
+ /* Attempt to build a zone cache for bulk tracking updates */
+ err = kbase_zone_cache_build(alloc);
+ if (err == 0) {
+ /* Bulk update all the zones */
+ list_for_each_entry(zone_cache, &alloc->zone_cache, zone_node) {
+ zone_page_state_add(zone_cache->count,
+ zone_cache->zone, NR_SLAB_RECLAIMABLE);
+ }
+ } else {
+ /* Fall-back to page by page updates */
+ int i;
+
+ for (i = 0; i < alloc->nents; i++) {
+ struct page *p = phys_to_page(alloc->pages[i]);
+ struct zone *zone = page_zone(p);
+
+ zone_page_state_add(1, zone, NR_SLAB_RECLAIMABLE);
+ }
+ }
+
+ kbase_process_page_usage_dec(kctx, alloc->nents);
+ new_page_count = kbase_atomic_sub_pages(alloc->nents,
+ &kctx->used_pages);
+ kbase_atomic_sub_pages(alloc->nents, &kctx->kbdev->memdev.used_pages);
+
+ kbase_tlstream_aux_pagesalloc(
+ (u32)kctx->id,
+ (u64)new_page_count);
+}
+
+/**
+ * kbase_mem_evictable_unmark_reclaim - Mark the pages as no longer reclaimable.
+ * @alloc: The physical allocation
+ */
+static
+void kbase_mem_evictable_unmark_reclaim(struct kbase_mem_phy_alloc *alloc)
+{
+ struct kbase_context *kctx = alloc->imported.kctx;
+ struct kbase_mem_zone_cache_entry *zone_cache;
+ int __maybe_unused new_page_count;
+ int err;
+
+ new_page_count = kbase_atomic_add_pages(alloc->nents,
+ &kctx->used_pages);
+ kbase_atomic_add_pages(alloc->nents, &kctx->kbdev->memdev.used_pages);
+
+ /* Increase mm counters so that the allocation is accounted for
+ * against the process and thus is visible to the OOM killer,
+ * then remove it from the reclaimable accounting. */
+ kbase_process_page_usage_inc(kctx, alloc->nents);
+
+ /* Attempt to build a zone cache for bulk tracking updates */
+ err = kbase_zone_cache_build(alloc);
+ if (err == 0) {
+ /* Bulk update all the zones */
+ list_for_each_entry(zone_cache, &alloc->zone_cache, zone_node) {
+ zone_page_state_add(-zone_cache->count,
+ zone_cache->zone, NR_SLAB_RECLAIMABLE);
+ }
+ } else {
+ /* Fall-back to page by page updates */
+ int i;
+
+ for (i = 0; i < alloc->nents; i++) {
+ struct page *p = phys_to_page(alloc->pages[i]);
+ struct zone *zone = page_zone(p);
+
+ zone_page_state_add(-1, zone, NR_SLAB_RECLAIMABLE);
+ }
+ }
+
+ kbase_tlstream_aux_pagesalloc(
+ (u32)kctx->id,
+ (u64)new_page_count);
+}
+
+int kbase_mem_evictable_make(struct kbase_mem_phy_alloc *gpu_alloc)
+{
+ struct kbase_context *kctx = gpu_alloc->imported.kctx;
+ int err;
+
+ lockdep_assert_held(&kctx->reg_lock);
+
+ /* This allocation can't already be on a list. */
+ WARN_ON(!list_empty(&gpu_alloc->evict_node));
+
+ /*
+ * Try to shrink the CPU mappings as required; if we fail then
+ * fail the process of making this allocation evictable.
+ */
+ err = kbase_mem_shrink_cpu_mapping(kctx, gpu_alloc->reg,
+ 0, gpu_alloc->nents);
+ if (err)
+ return -EINVAL;
+
+ /*
+ * Add the allocation to the eviction list; after this point the
+ * shrinker can reclaim it.
+ */
+ mutex_lock(&kctx->evict_lock);
+ list_add(&gpu_alloc->evict_node, &kctx->evict_list);
+ mutex_unlock(&kctx->evict_lock);
+ kbase_mem_evictable_mark_reclaim(gpu_alloc);
+
+ gpu_alloc->reg->flags |= KBASE_REG_DONT_NEED;
+ return 0;
+}
+
+bool kbase_mem_evictable_unmake(struct kbase_mem_phy_alloc *gpu_alloc)
+{
+ struct kbase_context *kctx = gpu_alloc->imported.kctx;
+ int err = 0;
+
+ lockdep_assert_held(&kctx->reg_lock);
+
+ /*
+ * First remove the allocation from the eviction list as it's no
+ * longer eligible for eviction.
+ */
+ mutex_lock(&kctx->evict_lock);
+ list_del_init(&gpu_alloc->evict_node);
+ mutex_unlock(&kctx->evict_lock);
+
+ if (gpu_alloc->evicted == 0) {
+ /*
+ * The backing is still present; update the VM stats as it's
+ * in use again.
+ */
+ kbase_mem_evictable_unmark_reclaim(gpu_alloc);
+ } else {
+ /* If the region is still alive ... */
+ if (gpu_alloc->reg) {
+ /* ... allocate replacement backing ... */
+ err = kbase_alloc_phy_pages_helper(gpu_alloc,
+ gpu_alloc->evicted);
+
+ /*
+ * ... and grow the mapping back to its
+ * pre-eviction size.
+ */
+ if (!err)
+ err = kbase_mem_grow_gpu_mapping(kctx,
+ gpu_alloc->reg,
+ gpu_alloc->evicted, 0);
+
+ gpu_alloc->evicted = 0;
+ }
+ }
+
+ /* If the region is still alive remove the DONT_NEED attribute. */
+ if (gpu_alloc->reg)
+ gpu_alloc->reg->flags &= ~KBASE_REG_DONT_NEED;
+
+ return (err == 0);
+}
+
+int kbase_mem_flags_change(struct kbase_context *kctx, u64 gpu_addr, unsigned int flags, unsigned int mask)
+{
+ struct kbase_va_region *reg;
+ int ret = -EINVAL;
+ unsigned int real_flags = 0;
+ unsigned int prev_flags = 0;
+ bool prev_needed, new_needed;
+
+ KBASE_DEBUG_ASSERT(kctx);
+
+ if (!gpu_addr)
+ return -EINVAL;
+
+ /* nuke other bits */
+ flags &= mask;
+
+ /* check for only supported flags */
+ if (flags & ~(BASE_MEM_FLAGS_MODIFIABLE))
+ goto out;
+
+ /* mask covers bits we don't support? */
+ if (mask & ~(BASE_MEM_FLAGS_MODIFIABLE))
+ goto out;
+
+ /* convert flags */
+ if (BASE_MEM_COHERENT_SYSTEM & flags)
+ real_flags |= KBASE_REG_SHARE_BOTH;
+ else if (BASE_MEM_COHERENT_LOCAL & flags)
+ real_flags |= KBASE_REG_SHARE_IN;
+
+ /* now we can lock down the context, and find the region */
+ down_write(&current->mm->mmap_sem);
+ kbase_gpu_vm_lock(kctx);
+
+ /* Validate the region */
+ reg = kbase_region_tracker_find_region_base_address(kctx, gpu_addr);
+ if (!reg || (reg->flags & KBASE_REG_FREE))
+ goto out_unlock;
+
+ /* Is the region transitioning between not needed and needed? */
+ prev_needed = (KBASE_REG_DONT_NEED & reg->flags) == KBASE_REG_DONT_NEED;
+ new_needed = (BASE_MEM_DONT_NEED & flags) == BASE_MEM_DONT_NEED;
+ if (prev_needed != new_needed) {
+ /* Aliased allocations can't be made ephemeral */
+ if (atomic_read(&reg->cpu_alloc->gpu_mappings) > 1)
+ goto out_unlock;
+
+ if (new_needed) {
+ /* Only native allocations can be marked not needed */
+ if (reg->cpu_alloc->type != KBASE_MEM_TYPE_NATIVE) {
+ ret = -EINVAL;
+ goto out_unlock;
+ }
+ ret = kbase_mem_evictable_make(reg->gpu_alloc);
+ if (ret)
+ goto out_unlock;
+ } else {
+ kbase_mem_evictable_unmake(reg->gpu_alloc);
+ }
+ }
+
+ /* limit to imported memory */
+ if ((reg->gpu_alloc->type != KBASE_MEM_TYPE_IMPORTED_UMP) &&
+ (reg->gpu_alloc->type != KBASE_MEM_TYPE_IMPORTED_UMM))
+ goto out_unlock;
+
+ /* no change? */
+ if (real_flags == (reg->flags & (KBASE_REG_SHARE_IN | KBASE_REG_SHARE_BOTH))) {
+ ret = 0;
+ goto out_unlock;
+ }
+
+ /* save for roll back */
+ prev_flags = reg->flags;
+ reg->flags &= ~(KBASE_REG_SHARE_IN | KBASE_REG_SHARE_BOTH);
+ reg->flags |= real_flags;
+
+ /* Currently supporting only imported memory */
+ switch (reg->gpu_alloc->type) {
+#ifdef CONFIG_UMP
+ case KBASE_MEM_TYPE_IMPORTED_UMP:
+ ret = kbase_mmu_update_pages(kctx, reg->start_pfn, kbase_get_cpu_phy_pages(reg), reg->gpu_alloc->nents, reg->flags);
+ break;
+#endif
+#ifdef CONFIG_DMA_SHARED_BUFFER
+ case KBASE_MEM_TYPE_IMPORTED_UMM:
+ /* Future use will use the new flags; the existing mapping will NOT be
+ * updated, as memory should not be in use by the GPU when updating
+ * the flags.
+ */
+ ret = 0;
+ WARN_ON(reg->gpu_alloc->imported.umm.current_mapping_usage_count);
+ break;
+#endif
+ default:
+ break;
+ }
+
+ /* roll back on error, i.e. not UMP */
+ if (ret)
+ reg->flags = prev_flags;
+
+out_unlock:
+ kbase_gpu_vm_unlock(kctx);
+ up_write(&current->mm->mmap_sem);
+out:
+ return ret;
+}
+
+#define KBASE_MEM_IMPORT_HAVE_PAGES (1UL << BASE_MEM_FLAGS_NR_BITS)
+
+#ifdef CONFIG_UMP
+static struct kbase_va_region *kbase_mem_from_ump(struct kbase_context *kctx, ump_secure_id id, u64 *va_pages, u64 *flags)
+{
+ struct kbase_va_region *reg;
+ ump_dd_handle umph;
+ u64 block_count;
+ const ump_dd_physical_block_64 *block_array;
+ u64 i, j;
+ int page = 0;
+ ump_alloc_flags ump_flags;
+ ump_alloc_flags cpu_flags;
+ ump_alloc_flags gpu_flags;
+
+ if (*flags & BASE_MEM_SECURE)
+ goto bad_flags;
+
+ umph = ump_dd_from_secure_id(id);
+ if (UMP_DD_INVALID_MEMORY_HANDLE == umph)
+ goto bad_id;
+
+ ump_flags = ump_dd_allocation_flags_get(umph);
+ cpu_flags = (ump_flags >> UMP_DEVICE_CPU_SHIFT) & UMP_DEVICE_MASK;
+ gpu_flags = (ump_flags >> DEFAULT_UMP_GPU_DEVICE_SHIFT) &
+ UMP_DEVICE_MASK;
+
+ *va_pages = ump_dd_size_get_64(umph);
+ *va_pages >>= PAGE_SHIFT;
+
+ if (!*va_pages)
+ goto bad_size;
+
+ if (*va_pages > (U64_MAX / PAGE_SIZE))
+ /* 64-bit address range is the max */
+ goto bad_size;
+
+ if (*flags & BASE_MEM_SAME_VA)
+ reg = kbase_alloc_free_region(kctx, 0, *va_pages, KBASE_REG_ZONE_SAME_VA);
+ else
+ reg = kbase_alloc_free_region(kctx, 0, *va_pages, KBASE_REG_ZONE_CUSTOM_VA);
+
+ if (!reg)
+ goto no_region;
+
+ /* we've got pages to map now, and support SAME_VA */
+ *flags |= KBASE_MEM_IMPORT_HAVE_PAGES;
+
+ reg->gpu_alloc = kbase_alloc_create(*va_pages, KBASE_MEM_TYPE_IMPORTED_UMP);
+ if (IS_ERR_OR_NULL(reg->gpu_alloc))
+ goto no_alloc_obj;
+
+ reg->cpu_alloc = kbase_mem_phy_alloc_get(reg->gpu_alloc);
+
+ reg->gpu_alloc->imported.ump_handle = umph;
+
+ reg->flags &= ~KBASE_REG_FREE;
+ reg->flags |= KBASE_REG_GPU_NX; /* UMP is always No eXecute */
+ reg->flags &= ~KBASE_REG_GROWABLE; /* UMP cannot be grown */
+
+ /* Override import flags based on UMP flags */
+ *flags &= ~(BASE_MEM_CACHED_CPU);
+ *flags &= ~(BASE_MEM_PROT_CPU_RD | BASE_MEM_PROT_CPU_WR);
+ *flags &= ~(BASE_MEM_PROT_GPU_RD | BASE_MEM_PROT_GPU_WR);
+
+ if ((cpu_flags & (UMP_HINT_DEVICE_RD | UMP_HINT_DEVICE_WR)) ==
+ (UMP_HINT_DEVICE_RD | UMP_HINT_DEVICE_WR)) {
+ reg->flags |= KBASE_REG_CPU_CACHED;
+ *flags |= BASE_MEM_CACHED_CPU;
+ }
+
+ if (cpu_flags & UMP_PROT_CPU_WR) {
+ reg->flags |= KBASE_REG_CPU_WR;
+ *flags |= BASE_MEM_PROT_CPU_WR;
+ }
+
+ if (cpu_flags & UMP_PROT_CPU_RD) {
+ reg->flags |= KBASE_REG_CPU_RD;
+ *flags |= BASE_MEM_PROT_CPU_RD;
+ }
+
+ if ((gpu_flags & (UMP_HINT_DEVICE_RD | UMP_HINT_DEVICE_WR)) ==
+ (UMP_HINT_DEVICE_RD | UMP_HINT_DEVICE_WR))
+ reg->flags |= KBASE_REG_GPU_CACHED;
+
+ if (gpu_flags & UMP_PROT_DEVICE_WR) {
+ reg->flags |= KBASE_REG_GPU_WR;
+ *flags |= BASE_MEM_PROT_GPU_WR;
+ }
+
+ if (gpu_flags & UMP_PROT_DEVICE_RD) {
+ reg->flags |= KBASE_REG_GPU_RD;
+ *flags |= BASE_MEM_PROT_GPU_RD;
+ }
+
+ /* ump phys block query */
+ ump_dd_phys_blocks_get_64(umph, &block_count, &block_array);
+
+ for (i = 0; i < block_count; i++) {
+ for (j = 0; j < (block_array[i].size >> PAGE_SHIFT); j++) {
+ reg->gpu_alloc->pages[page] = block_array[i].addr + (j << PAGE_SHIFT);
+ page++;
+ }
+ }
+ reg->gpu_alloc->nents = *va_pages;
+ reg->extent = 0;
+
+ return reg;
+
+no_alloc_obj:
+ kfree(reg);
+no_region:
+bad_size:
+ ump_dd_release(umph);
+bad_id:
+bad_flags:
+ return NULL;
+}
+#endif /* CONFIG_UMP */
+
+#ifdef CONFIG_DMA_SHARED_BUFFER
+static struct kbase_va_region *kbase_mem_from_umm(struct kbase_context *kctx, int fd, u64 *va_pages, u64 *flags)
+{
+ struct kbase_va_region *reg;
+ struct dma_buf *dma_buf;
+ struct dma_buf_attachment *dma_attachment;
+ bool shared_zone = false;
+
+ dma_buf = dma_buf_get(fd);
+ if (IS_ERR_OR_NULL(dma_buf))
+ goto no_buf;
+
+ dma_attachment = dma_buf_attach(dma_buf, kctx->kbdev->dev);
+ if (!dma_attachment)
+ goto no_attachment;
+
+ *va_pages = PAGE_ALIGN(dma_buf->size) >> PAGE_SHIFT;
+ if (!*va_pages)
+ goto bad_size;
+
+ if (*va_pages > (U64_MAX / PAGE_SIZE))
+ /* 64-bit address range is the max */
+ goto bad_size;
+
+ /* ignore SAME_VA */
+ *flags &= ~BASE_MEM_SAME_VA;
+
+ if (*flags & BASE_MEM_IMPORT_SHARED)
+ shared_zone = true;
+
+#ifdef CONFIG_64BIT
+ if (!kbase_ctx_flag(kctx, KCTX_COMPAT)) {
+ /*
+ * 64-bit tasks require us to reserve VA on the CPU that we use
+ * on the GPU.
+ */
+ shared_zone = true;
+ }
+#endif
+
+ if (shared_zone) {
+ *flags |= BASE_MEM_NEED_MMAP;
+ reg = kbase_alloc_free_region(kctx, 0, *va_pages, KBASE_REG_ZONE_SAME_VA);
+ } else {
+ reg = kbase_alloc_free_region(kctx, 0, *va_pages, KBASE_REG_ZONE_CUSTOM_VA);
+ }
+
+ if (!reg)
+ goto no_region;
+
+ reg->gpu_alloc = kbase_alloc_create(*va_pages, KBASE_MEM_TYPE_IMPORTED_UMM);
+ if (IS_ERR_OR_NULL(reg->gpu_alloc))
+ goto no_alloc_obj;
+
+ reg->cpu_alloc = kbase_mem_phy_alloc_get(reg->gpu_alloc);
+
+ /* No pages to map yet */
+ reg->gpu_alloc->nents = 0;
+
+ reg->flags &= ~KBASE_REG_FREE;
+ reg->flags |= KBASE_REG_GPU_NX; /* UMM is always No eXecute */
+ reg->flags &= ~KBASE_REG_GROWABLE; /* UMM cannot be grown */
+ reg->flags |= KBASE_REG_GPU_CACHED;
+
+ if (*flags & BASE_MEM_PROT_CPU_WR)
+ reg->flags |= KBASE_REG_CPU_WR;
+
+ if (*flags & BASE_MEM_PROT_CPU_RD)
+ reg->flags |= KBASE_REG_CPU_RD;
+
+ if (*flags & BASE_MEM_PROT_GPU_WR)
+ reg->flags |= KBASE_REG_GPU_WR;
+
+ if (*flags & BASE_MEM_PROT_GPU_RD)
+ reg->flags |= KBASE_REG_GPU_RD;
+
+ if (*flags & BASE_MEM_SECURE)
+ reg->flags |= KBASE_REG_SECURE;
+
+ /* no read or write permission given on import, only on run do we give the right permissions */
+
+ reg->gpu_alloc->type = KBASE_MEM_TYPE_IMPORTED_UMM;
+ reg->gpu_alloc->imported.umm.sgt = NULL;
+ reg->gpu_alloc->imported.umm.dma_buf = dma_buf;
+ reg->gpu_alloc->imported.umm.dma_attachment = dma_attachment;
+ reg->gpu_alloc->imported.umm.current_mapping_usage_count = 0;
+ reg->extent = 0;
+
+ return reg;
+
+no_alloc_obj:
+ kfree(reg);
+no_region:
+bad_size:
+ dma_buf_detach(dma_buf, dma_attachment);
+no_attachment:
+ dma_buf_put(dma_buf);
+no_buf:
+ return NULL;
+}
+#endif /* CONFIG_DMA_SHARED_BUFFER */
+
+
+static struct kbase_va_region *kbase_mem_from_user_buffer(
+ struct kbase_context *kctx, unsigned long address,
+ unsigned long size, u64 *va_pages, u64 *flags)
+{
+ struct kbase_va_region *reg;
+ long faulted_pages;
+ int zone = KBASE_REG_ZONE_CUSTOM_VA;
+ bool shared_zone = false;
+
+ *va_pages = (PAGE_ALIGN(address + size) >> PAGE_SHIFT) -
+ PFN_DOWN(address);
+ if (!*va_pages)
+ goto bad_size;
+
+ if (*va_pages > (UINT64_MAX / PAGE_SIZE))
+ /* 64-bit address range is the max */
+ goto bad_size;
+
+ /* SAME_VA generally not supported with imported memory (no known use cases) */
+ *flags &= ~BASE_MEM_SAME_VA;
+
+ if (*flags & BASE_MEM_IMPORT_SHARED)
+ shared_zone = true;
+
+#ifdef CONFIG_64BIT
+ if (!kbase_ctx_flag(kctx, KCTX_COMPAT)) {
+ /*
+ * 64-bit tasks require us to reserve VA on the CPU that we use
+ * on the GPU.
+ */
+ shared_zone = true;
+ }
+#endif
+
+ if (shared_zone) {
+ *flags |= BASE_MEM_NEED_MMAP;
+ zone = KBASE_REG_ZONE_SAME_VA;
+ }
+
+ reg = kbase_alloc_free_region(kctx, 0, *va_pages, zone);
+
+ if (!reg)
+ goto no_region;
+
+ reg->gpu_alloc = kbase_alloc_create(*va_pages,
+ KBASE_MEM_TYPE_IMPORTED_USER_BUF);
+ if (IS_ERR_OR_NULL(reg->gpu_alloc))
+ goto no_alloc_obj;
+
+ reg->cpu_alloc = kbase_mem_phy_alloc_get(reg->gpu_alloc);
+
+ reg->flags &= ~KBASE_REG_FREE;
+ reg->flags |= KBASE_REG_GPU_NX; /* User-buffers are always No eXecute */
+ reg->flags &= ~KBASE_REG_GROWABLE; /* Cannot be grown */
+
+ if (*flags & BASE_MEM_PROT_CPU_WR)
+ reg->flags |= KBASE_REG_CPU_WR;
+
+ if (*flags & BASE_MEM_PROT_CPU_RD)
+ reg->flags |= KBASE_REG_CPU_RD;
+
+ if (*flags & BASE_MEM_PROT_GPU_WR)
+ reg->flags |= KBASE_REG_GPU_WR;
+
+ if (*flags & BASE_MEM_PROT_GPU_RD)
+ reg->flags |= KBASE_REG_GPU_RD;
+
+ down_read(&current->mm->mmap_sem);
+
+ /* A sanity check that get_user_pages will work on the memory (so the
+ * initial import fails on weird memory regions rather than the job
+ * failing when we try to handle the external resources). It doesn't
+ * take a reference to the pages (because the page list is NULL). We
+ * can't really store the page list because that would involve keeping
+ * the pages pinned - instead we pin/unpin around the job (as part of
+ * the external resources handling code).
+ */
+#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 6, 0)
+ faulted_pages = get_user_pages(current, current->mm, address, *va_pages,
+ reg->flags & KBASE_REG_GPU_WR, 0, NULL, NULL);
+#else
+ faulted_pages = get_user_pages(address, *va_pages,
+ reg->flags & KBASE_REG_GPU_WR, 0, NULL, NULL);
+#endif
+ up_read(&current->mm->mmap_sem);
+
+ if (faulted_pages != *va_pages)
+ goto fault_mismatch;
+
+ reg->gpu_alloc->imported.user_buf.size = size;
+ reg->gpu_alloc->imported.user_buf.address = address;
+ reg->gpu_alloc->imported.user_buf.nr_pages = faulted_pages;
+ reg->gpu_alloc->imported.user_buf.pages = kmalloc_array(faulted_pages,
+ sizeof(struct page *), GFP_KERNEL);
+ reg->gpu_alloc->imported.user_buf.mm = current->mm;
+ atomic_inc(&current->mm->mm_count);
+
+ if (!reg->gpu_alloc->imported.user_buf.pages)
+ goto no_page_array;
+
+ reg->gpu_alloc->nents = 0;
+ reg->extent = 0;
+
+ return reg;
+
+no_page_array:
+fault_mismatch:
+ kbase_mem_phy_alloc_put(reg->gpu_alloc);
+no_alloc_obj:
+ kfree(reg);
+no_region:
+bad_size:
+ return NULL;
+
+}
+
+
+u64 kbase_mem_alias(struct kbase_context *kctx, u64 *flags, u64 stride,
+ u64 nents, struct base_mem_aliasing_info *ai,
+ u64 *num_pages)
+{
+ struct kbase_va_region *reg;
+ u64 gpu_va;
+ size_t i;
+ bool coherent;
+
+ KBASE_DEBUG_ASSERT(kctx);
+ KBASE_DEBUG_ASSERT(flags);
+ KBASE_DEBUG_ASSERT(ai);
+ KBASE_DEBUG_ASSERT(num_pages);
+
+ /* mask to only allowed flags */
+ *flags &= (BASE_MEM_PROT_GPU_RD | BASE_MEM_PROT_GPU_WR |
+ BASE_MEM_COHERENT_SYSTEM | BASE_MEM_COHERENT_LOCAL |
+ BASE_MEM_COHERENT_SYSTEM_REQUIRED);
+
+ if (!(*flags & (BASE_MEM_PROT_GPU_RD | BASE_MEM_PROT_GPU_WR))) {
+ dev_warn(kctx->kbdev->dev,
+ "kbase_mem_alias called with bad flags (%llx)",
+ (unsigned long long)*flags);
+ goto bad_flags;
+ }
+ coherent = (*flags & BASE_MEM_COHERENT_SYSTEM) != 0 ||
+ (*flags & BASE_MEM_COHERENT_SYSTEM_REQUIRED) != 0;
+
+ if (!stride)
+ goto bad_stride;
+
+ if (!nents)
+ goto bad_nents;
+
+ if ((nents * stride) > (U64_MAX / PAGE_SIZE))
+ /* 64-bit address range is the max */
+ goto bad_size;
+
+ /* calculate the number of pages this alias will cover */
+ *num_pages = nents * stride;
+
+#ifdef CONFIG_64BIT
+ if (!kbase_ctx_flag(kctx, KCTX_COMPAT)) {
+ /* 64-bit tasks must MMAP anyway, but not expose this address to
+ * clients */
+ *flags |= BASE_MEM_NEED_MMAP;
+ reg = kbase_alloc_free_region(kctx, 0, *num_pages,
+ KBASE_REG_ZONE_SAME_VA);
+ } else {
+#else
+ if (1) {
+#endif
+ reg = kbase_alloc_free_region(kctx, 0, *num_pages,
+ KBASE_REG_ZONE_CUSTOM_VA);
+ }
+
+ if (!reg)
+ goto no_reg;
+
+ /* zero-sized page array, as we don't need one/can support one */
+ reg->gpu_alloc = kbase_alloc_create(0, KBASE_MEM_TYPE_ALIAS);
+ if (IS_ERR_OR_NULL(reg->gpu_alloc))
+ goto no_alloc_obj;
+
+ reg->cpu_alloc = kbase_mem_phy_alloc_get(reg->gpu_alloc);
+
+ kbase_update_region_flags(kctx, reg, *flags);
+
+ reg->gpu_alloc->imported.alias.nents = nents;
+ reg->gpu_alloc->imported.alias.stride = stride;
+ reg->gpu_alloc->imported.alias.aliased = vzalloc(sizeof(*reg->gpu_alloc->imported.alias.aliased) * nents);
+ if (!reg->gpu_alloc->imported.alias.aliased)
+ goto no_aliased_array;
+
+ kbase_gpu_vm_lock(kctx);
+
+ /* validate and add src handles */
+ for (i = 0; i < nents; i++) {
+ if (ai[i].handle.basep.handle < BASE_MEM_FIRST_FREE_ADDRESS) {
+ if (ai[i].handle.basep.handle !=
+ BASEP_MEM_WRITE_ALLOC_PAGES_HANDLE)
+ goto bad_handle; /* unsupported magic handle */
+ if (!ai[i].length)
+ goto bad_handle; /* must be > 0 */
+ if (ai[i].length > stride)
+ goto bad_handle; /* can't be larger than the
+ stride */
+ reg->gpu_alloc->imported.alias.aliased[i].length = ai[i].length;
+ } else {
+ struct kbase_va_region *aliasing_reg;
+ struct kbase_mem_phy_alloc *alloc;
+
+ aliasing_reg = kbase_region_tracker_find_region_base_address(
+ kctx,
+ (ai[i].handle.basep.handle >> PAGE_SHIFT) << PAGE_SHIFT);
+
+ /* validate found region */
+ if (!aliasing_reg)
+ goto bad_handle; /* Not found */
+ if (aliasing_reg->flags & KBASE_REG_FREE)
+ goto bad_handle; /* Free region */
+ if (aliasing_reg->flags & KBASE_REG_DONT_NEED)
+ goto bad_handle; /* Ephemeral region */
+ if (!aliasing_reg->gpu_alloc)
+ goto bad_handle; /* No alloc */
+ if (aliasing_reg->gpu_alloc->type != KBASE_MEM_TYPE_NATIVE)
+ goto bad_handle; /* Not a native alloc */
+ if (coherent != ((aliasing_reg->flags & KBASE_REG_SHARE_BOTH) != 0))
+ goto bad_handle;
+ /* Non-coherent memory cannot alias
+ coherent memory, and vice versa.*/
+
+ /* check size against stride */
+ if (!ai[i].length)
+ goto bad_handle; /* must be > 0 */
+ if (ai[i].length > stride)
+ goto bad_handle; /* can't be larger than the
+ stride */
+
+ alloc = aliasing_reg->gpu_alloc;
+
+ /* check against the alloc's size */
+ if (ai[i].offset > alloc->nents)
+ goto bad_handle; /* beyond end */
+ if (ai[i].offset + ai[i].length > alloc->nents)
+ goto bad_handle; /* beyond end */
+
+ reg->gpu_alloc->imported.alias.aliased[i].alloc = kbase_mem_phy_alloc_get(alloc);
+ reg->gpu_alloc->imported.alias.aliased[i].length = ai[i].length;
+ reg->gpu_alloc->imported.alias.aliased[i].offset = ai[i].offset;
+ }
+ }
+
+#ifdef CONFIG_64BIT
+ if (!kbase_ctx_flag(kctx, KCTX_COMPAT)) {
+ /* Bind to a cookie */
+ if (!kctx->cookies) {
+ dev_err(kctx->kbdev->dev, "No cookies available for allocation!");
+ goto no_cookie;
+ }
+ /* return a cookie */
+ gpu_va = __ffs(kctx->cookies);
+ kctx->cookies &= ~(1UL << gpu_va);
+ BUG_ON(kctx->pending_regions[gpu_va]);
+ kctx->pending_regions[gpu_va] = reg;
+
+ /* relocate to correct base */
+ gpu_va += PFN_DOWN(BASE_MEM_COOKIE_BASE);
+ gpu_va <<= PAGE_SHIFT;
+ } else /* we control the VA */ {
+#else
+ if (1) {
+#endif
+ if (kbase_gpu_mmap(kctx, reg, 0, *num_pages, 1) != 0) {
+ dev_warn(kctx->kbdev->dev, "Failed to map memory on GPU");
+ goto no_mmap;
+ }
+ /* return real GPU VA */
+ gpu_va = reg->start_pfn << PAGE_SHIFT;
+ }
+
+ reg->flags &= ~KBASE_REG_FREE;
+ reg->flags &= ~KBASE_REG_GROWABLE;
+
+ kbase_gpu_vm_unlock(kctx);
+
+ return gpu_va;
+
+#ifdef CONFIG_64BIT
+no_cookie:
+#endif
+no_mmap:
+bad_handle:
+ kbase_gpu_vm_unlock(kctx);
+no_aliased_array:
+ kbase_mem_phy_alloc_put(reg->cpu_alloc);
+ kbase_mem_phy_alloc_put(reg->gpu_alloc);
+no_alloc_obj:
+ kfree(reg);
+no_reg:
+bad_size:
+bad_nents:
+bad_stride:
+bad_flags:
+ return 0;
+}
+
+static u32 kbase_get_cache_line_alignment(struct kbase_context *kctx)
+{
+ u32 cpu_cache_line_size = cache_line_size();
+ u32 gpu_cache_line_size =
+ (1UL << kctx->kbdev->gpu_props.props.l2_props.log2_line_size);
+
+ return ((cpu_cache_line_size > gpu_cache_line_size) ?
+ cpu_cache_line_size :
+ gpu_cache_line_size);
+}
+
+static int kbase_check_buffer_size(struct kbase_context *kctx, u64 size)
+{
+ u32 cache_line_align = kbase_get_cache_line_alignment(kctx);
+
+ return (size & (cache_line_align - 1)) == 0 ? 0 : -EINVAL;
+}
+
+static int kbase_check_buffer_cache_alignment(struct kbase_context *kctx,
+ void __user *ptr)
+{
+ u32 cache_line_align = kbase_get_cache_line_alignment(kctx);
+
+ return ((uintptr_t)ptr & (cache_line_align - 1)) == 0 ? 0 : -EINVAL;
+}
+
+int kbase_mem_import(struct kbase_context *kctx, enum base_mem_import_type type,
+ void __user *phandle, u64 *gpu_va, u64 *va_pages,
+ u64 *flags)
+{
+ struct kbase_va_region *reg;
+
+ KBASE_DEBUG_ASSERT(kctx);
+ KBASE_DEBUG_ASSERT(gpu_va);
+ KBASE_DEBUG_ASSERT(va_pages);
+ KBASE_DEBUG_ASSERT(flags);
+
+#ifdef CONFIG_64BIT
+ if (!kbase_ctx_flag(kctx, KCTX_COMPAT))
+ *flags |= BASE_MEM_SAME_VA;
+#endif
+
+ if (!kbase_check_import_flags(*flags)) {
+ dev_warn(kctx->kbdev->dev,
+ "kbase_mem_import called with bad flags (%llx)",
+ (unsigned long long)*flags);
+ goto bad_flags;
+ }
+
+ switch (type) {
+#ifdef CONFIG_UMP
+ case BASE_MEM_IMPORT_TYPE_UMP: {
+ ump_secure_id id;
+
+ if (get_user(id, (ump_secure_id __user *)phandle))
+ reg = NULL;
+ else
+ reg = kbase_mem_from_ump(kctx, id, va_pages, flags);
+ }
+ break;
+#endif /* CONFIG_UMP */
+#ifdef CONFIG_DMA_SHARED_BUFFER
+ case BASE_MEM_IMPORT_TYPE_UMM: {
+ int fd;
+
+ if (get_user(fd, (int __user *)phandle))
+ reg = NULL;
+ else
+ reg = kbase_mem_from_umm(kctx, fd, va_pages, flags);
+ }
+ break;
+#endif /* CONFIG_DMA_SHARED_BUFFER */
+ case BASE_MEM_IMPORT_TYPE_USER_BUFFER: {
+ struct base_mem_import_user_buffer user_buffer;
+ void __user *uptr;
+
+ if (copy_from_user(&user_buffer, phandle,
+ sizeof(user_buffer))) {
+ reg = NULL;
+ } else {
+#ifdef CONFIG_COMPAT
+ if (kbase_ctx_flag(kctx, KCTX_COMPAT))
+ uptr = compat_ptr(user_buffer.ptr.compat_value);
+ else
+#endif
+ uptr = user_buffer.ptr.value;
+
+ if (0 != kbase_check_buffer_cache_alignment(kctx,
+ uptr)) {
+ dev_warn(kctx->kbdev->dev,
+ "User buffer is not cache line aligned!\n");
+ goto no_reg;
+ }
+
+ if (0 != kbase_check_buffer_size(kctx,
+ user_buffer.length)) {
+ dev_warn(kctx->kbdev->dev,
+ "User buffer size is not multiple of cache line size!\n");
+ goto no_reg;
+ }
+
+ reg = kbase_mem_from_user_buffer(kctx,
+ (unsigned long)uptr, user_buffer.length,
+ va_pages, flags);
+ }
+ break;
+ }
+ default: {
+ reg = NULL;
+ break;
+ }
+ }
+
+ if (!reg)
+ goto no_reg;
+
+ kbase_gpu_vm_lock(kctx);
+
+ /* mmap needed to setup VA? */
+ if (*flags & (BASE_MEM_SAME_VA | BASE_MEM_NEED_MMAP)) {
+ /* Bind to a cookie */
+ if (!kctx->cookies)
+ goto no_cookie;
+ /* return a cookie */
+ *gpu_va = __ffs(kctx->cookies);
+ kctx->cookies &= ~(1UL << *gpu_va);
+ BUG_ON(kctx->pending_regions[*gpu_va]);
+ kctx->pending_regions[*gpu_va] = reg;
+
+ /* relocate to correct base */
+ *gpu_va += PFN_DOWN(BASE_MEM_COOKIE_BASE);
+ *gpu_va <<= PAGE_SHIFT;
+
+ } else if (*flags & KBASE_MEM_IMPORT_HAVE_PAGES) {
+ /* we control the VA, mmap now to the GPU */
+ if (kbase_gpu_mmap(kctx, reg, 0, *va_pages, 1) != 0)
+ goto no_gpu_va;
+ /* return real GPU VA */
+ *gpu_va = reg->start_pfn << PAGE_SHIFT;
+ } else {
+ /* we control the VA, but nothing to mmap yet */
+ if (kbase_add_va_region(kctx, reg, 0, *va_pages, 1) != 0)
+ goto no_gpu_va;
+ /* return real GPU VA */
+ *gpu_va = reg->start_pfn << PAGE_SHIFT;
+ }
+
+ /* clear out private flags */
+ *flags &= ((1UL << BASE_MEM_FLAGS_NR_BITS) - 1);
+
+ kbase_gpu_vm_unlock(kctx);
+
+ return 0;
+
+no_gpu_va:
+no_cookie:
+ kbase_gpu_vm_unlock(kctx);
+ kbase_mem_phy_alloc_put(reg->cpu_alloc);
+ kbase_mem_phy_alloc_put(reg->gpu_alloc);
+ kfree(reg);
+no_reg:
+bad_flags:
+ *gpu_va = 0;
+ *va_pages = 0;
+ *flags = 0;
+ return -ENOMEM;
+}
+
+
+static int zap_range_nolock(struct mm_struct *mm,
+ const struct vm_operations_struct *vm_ops,
+ unsigned long start, unsigned long end)
+{
+ struct vm_area_struct *vma;
+ int err = -EINVAL; /* in case end < start */
+
+ while (start < end) {
+ unsigned long local_start;
+ unsigned long local_end;
+
+ vma = find_vma_intersection(mm, start, end);
+ if (!vma)
+ break;
+
+ /* is it ours? */
+ if (vma->vm_ops != vm_ops)
+ goto try_next;
+
+ local_start = vma->vm_start;
+
+ if (start > local_start)
+ local_start = start;
+
+ local_end = vma->vm_end;
+
+ if (end < local_end)
+ local_end = end;
+
+ err = zap_vma_ptes(vma, local_start, local_end - local_start);
+ if (unlikely(err))
+ break;
+
+try_next:
+ /* go to next vma, if any */
+ start = vma->vm_end;
+ }
+
+ return err;
+}
+
+int kbase_mem_grow_gpu_mapping(struct kbase_context *kctx,
+ struct kbase_va_region *reg,
+ u64 new_pages, u64 old_pages)
+{
+ phys_addr_t *phy_pages;
+ u64 delta = new_pages - old_pages;
+ int ret = 0;
+
+ lockdep_assert_held(&kctx->reg_lock);
+
+ /* Map the new pages into the GPU */
+ phy_pages = kbase_get_gpu_phy_pages(reg);
+ ret = kbase_mmu_insert_pages(kctx, reg->start_pfn + old_pages,
+ phy_pages + old_pages, delta, reg->flags);
+
+ return ret;
+}
+
+static int kbase_mem_shrink_cpu_mapping(struct kbase_context *kctx,
+ struct kbase_va_region *reg,
+ u64 new_pages, u64 old_pages)
+{
+ struct kbase_mem_phy_alloc *cpu_alloc = reg->cpu_alloc;
+ struct kbase_cpu_mapping *mapping;
+ int err;
+
+ lockdep_assert_held(&kctx->process_mm->mmap_sem);
+
+ list_for_each_entry(mapping, &cpu_alloc->mappings, mappings_list) {
+ unsigned long mapping_size;
+
+ mapping_size = (mapping->vm_end - mapping->vm_start)
+ >> PAGE_SHIFT;
+
+ /* is this mapping affected? */
+ if ((mapping->page_off + mapping_size) > new_pages) {
+ unsigned long first_bad = 0;
+
+ if (new_pages > mapping->page_off)
+ first_bad = new_pages - mapping->page_off;
+
+ err = zap_range_nolock(current->mm,
+ &kbase_vm_ops,
+ mapping->vm_start +
+ (first_bad << PAGE_SHIFT),
+ mapping->vm_end);
+
+ WARN(err,
+ "Failed to zap VA range (0x%lx - 0x%lx);\n",
+ mapping->vm_start +
+ (first_bad << PAGE_SHIFT),
+ mapping->vm_end
+ );
+
+ /* The zap failed, give up and exit */
+ if (err)
+ goto failed;
+ }
+ }
+
+ return 0;
+
+failed:
+ return err;
+}
+
+static int kbase_mem_shrink_gpu_mapping(struct kbase_context *kctx,
+ struct kbase_va_region *reg,
+ u64 new_pages, u64 old_pages)
+{
+ u64 delta = old_pages - new_pages;
+ int ret = 0;
+
+ ret = kbase_mmu_teardown_pages(kctx,
+ reg->start_pfn + new_pages, delta);
+
+ return ret;
+}
+
+int kbase_mem_commit(struct kbase_context *kctx, u64 gpu_addr, u64 new_pages, enum base_backing_threshold_status *failure_reason)
+{
+ u64 old_pages;
+ u64 delta;
+ int res = -EINVAL;
+ struct kbase_va_region *reg;
+ bool read_locked = false;
+
+ KBASE_DEBUG_ASSERT(kctx);
+ KBASE_DEBUG_ASSERT(failure_reason);
+ KBASE_DEBUG_ASSERT(gpu_addr != 0);
+
+ down_write(&current->mm->mmap_sem);
+ kbase_gpu_vm_lock(kctx);
+
+ /* Validate the region */
+ reg = kbase_region_tracker_find_region_base_address(kctx, gpu_addr);
+ if (!reg || (reg->flags & KBASE_REG_FREE)) {
+ *failure_reason = BASE_BACKING_THRESHOLD_ERROR_INVALID_ARGUMENTS;
+ goto out_unlock;
+ }
+
+ KBASE_DEBUG_ASSERT(reg->cpu_alloc);
+ KBASE_DEBUG_ASSERT(reg->gpu_alloc);
+
+ if (reg->gpu_alloc->type != KBASE_MEM_TYPE_NATIVE) {
+ *failure_reason = BASE_BACKING_THRESHOLD_ERROR_NOT_GROWABLE;
+ goto out_unlock;
+ }
+
+ if (0 == (reg->flags & KBASE_REG_GROWABLE)) {
+ *failure_reason = BASE_BACKING_THRESHOLD_ERROR_NOT_GROWABLE;
+ goto out_unlock;
+ }
+
+ if (new_pages > reg->nr_pages) {
+ /* Would overflow the VA region */
+ *failure_reason = BASE_BACKING_THRESHOLD_ERROR_INVALID_ARGUMENTS;
+ goto out_unlock;
+ }
+
+ /* can't be mapped more than once on the GPU */
+ if (atomic_read(&reg->gpu_alloc->gpu_mappings) > 1) {
+ *failure_reason = BASE_BACKING_THRESHOLD_ERROR_NOT_GROWABLE;
+ goto out_unlock;
+ }
+ /* can't grow regions which are ephemeral */
+ if (reg->flags & KBASE_REG_DONT_NEED) {
+ *failure_reason = BASE_BACKING_THRESHOLD_ERROR_NOT_GROWABLE;
+ goto out_unlock;
+ }
+
+ if (new_pages == reg->gpu_alloc->nents) {
+ /* no change */
+ res = 0;
+ goto out_unlock;
+ }
+
+ old_pages = kbase_reg_current_backed_size(reg);
+ if (new_pages > old_pages) {
+ delta = new_pages - old_pages;
+
+ /*
+ * No update to the mm so downgrade the writer lock to a read
+ * lock so other readers aren't blocked after this point.
+ */
+ downgrade_write(&current->mm->mmap_sem);
+ read_locked = true;
+
+ /* Allocate some more pages */
+ if (kbase_alloc_phy_pages_helper(reg->cpu_alloc, delta) != 0) {
+ *failure_reason = BASE_BACKING_THRESHOLD_ERROR_OOM;
+ goto out_unlock;
+ }
+ if (reg->cpu_alloc != reg->gpu_alloc) {
+ if (kbase_alloc_phy_pages_helper(
+ reg->gpu_alloc, delta) != 0) {
+ *failure_reason = BASE_BACKING_THRESHOLD_ERROR_OOM;
+ kbase_free_phy_pages_helper(reg->cpu_alloc,
+ delta);
+ goto out_unlock;
+ }
+ }
+
+ /* No update required for CPU mappings, that's done on fault. */
+
+ /* Update GPU mapping. */
+ res = kbase_mem_grow_gpu_mapping(kctx, reg,
+ new_pages, old_pages);
+
+ /* On error free the new pages */
+ if (res) {
+ kbase_free_phy_pages_helper(reg->cpu_alloc, delta);
+ if (reg->cpu_alloc != reg->gpu_alloc)
+ kbase_free_phy_pages_helper(reg->gpu_alloc,
+ delta);
+ *failure_reason = BASE_BACKING_THRESHOLD_ERROR_OOM;
+ goto out_unlock;
+ }
+ } else {
+ delta = old_pages - new_pages;
+
+ /* Update all CPU mapping(s) */
+ res = kbase_mem_shrink_cpu_mapping(kctx, reg,
+ new_pages, old_pages);
+ if (res) {
+ *failure_reason = BASE_BACKING_THRESHOLD_ERROR_OOM;
+ goto out_unlock;
+ }
+
+ /* Update the GPU mapping */
+ res = kbase_mem_shrink_gpu_mapping(kctx, reg,
+ new_pages, old_pages);
+ if (res) {
+ *failure_reason = BASE_BACKING_THRESHOLD_ERROR_OOM;
+ goto out_unlock;
+ }
+
+ kbase_free_phy_pages_helper(reg->cpu_alloc, delta);
+ if (reg->cpu_alloc != reg->gpu_alloc)
+ kbase_free_phy_pages_helper(reg->gpu_alloc, delta);
+ }
+
+out_unlock:
+ kbase_gpu_vm_unlock(kctx);
+ if (read_locked)
+ up_read(&current->mm->mmap_sem);
+ else
+ up_write(&current->mm->mmap_sem);
+
+ return res;
+}
+
+static void kbase_cpu_vm_open(struct vm_area_struct *vma)
+{
+ struct kbase_cpu_mapping *map = vma->vm_private_data;
+
+ KBASE_DEBUG_ASSERT(map);
+ KBASE_DEBUG_ASSERT(map->count > 0);
+ /* non-atomic as we're under Linux' mm lock */
+ map->count++;
+}
+
+static void kbase_cpu_vm_close(struct vm_area_struct *vma)
+{
+ struct kbase_cpu_mapping *map = vma->vm_private_data;
+
+ KBASE_DEBUG_ASSERT(map);
+ KBASE_DEBUG_ASSERT(map->count > 0);
+
+ /* non-atomic as we're under Linux' mm lock */
+ if (--map->count)
+ return;
+
+ KBASE_DEBUG_ASSERT(map->kctx);
+ KBASE_DEBUG_ASSERT(map->alloc);
+
+ kbase_gpu_vm_lock(map->kctx);
+
+ if (map->region) {
+ KBASE_DEBUG_ASSERT((map->region->flags & KBASE_REG_ZONE_MASK) ==
+ KBASE_REG_ZONE_SAME_VA);
+ /* Avoid freeing memory on process death, which would result in a
+ * GPU page fault. Memory will be freed in kbase_destroy_context
+ * instead.
+ */
+ if (!(current->flags & PF_EXITING))
+ kbase_mem_free_region(map->kctx, map->region);
+ }
+
+ list_del(&map->mappings_list);
+
+ kbase_gpu_vm_unlock(map->kctx);
+
+ kbase_mem_phy_alloc_put(map->alloc);
+ kfree(map);
+}
+
+KBASE_EXPORT_TEST_API(kbase_cpu_vm_close);
+
+
+static int kbase_cpu_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
+{
+ struct kbase_cpu_mapping *map = vma->vm_private_data;
+ pgoff_t rel_pgoff;
+ size_t i;
+
+ KBASE_DEBUG_ASSERT(map);
+ KBASE_DEBUG_ASSERT(map->count > 0);
+ KBASE_DEBUG_ASSERT(map->kctx);
+ KBASE_DEBUG_ASSERT(map->alloc);
+
+ /* we don't use vmf->pgoff as it's affected by our mmap with
+ * offset being a GPU VA or a cookie */
+ rel_pgoff = ((unsigned long)vmf->virtual_address - map->vm_start)
+ >> PAGE_SHIFT;
+
+ kbase_gpu_vm_lock(map->kctx);
+ if (map->page_off + rel_pgoff >= map->alloc->nents)
+ goto locked_bad_fault;
+
+ /* Fault on access to DONT_NEED regions */
+ if (map->alloc->reg && (map->alloc->reg->flags & KBASE_REG_DONT_NEED))
+ goto locked_bad_fault;
+
+ /* insert all valid pages from the fault location */
+ for (i = rel_pgoff;
+ i < MIN((vma->vm_end - vma->vm_start) >> PAGE_SHIFT,
+ map->alloc->nents - map->page_off); i++) {
+ int ret = vm_insert_pfn(vma, map->vm_start + (i << PAGE_SHIFT),
+ PFN_DOWN(map->alloc->pages[map->page_off + i]));
+ if (ret < 0 && ret != -EBUSY)
+ goto locked_bad_fault;
+ }
+
+ kbase_gpu_vm_unlock(map->kctx);
+ /* we resolved it, nothing for VM to do */
+ return VM_FAULT_NOPAGE;
+
+locked_bad_fault:
+ kbase_gpu_vm_unlock(map->kctx);
+ return VM_FAULT_SIGBUS;
+}
+
+static const struct vm_operations_struct kbase_vm_ops = {
+ .open = kbase_cpu_vm_open,
+ .close = kbase_cpu_vm_close,
+ .fault = kbase_cpu_vm_fault
+};
+
+static int kbase_cpu_mmap(struct kbase_va_region *reg, struct vm_area_struct *vma, void *kaddr, size_t nr_pages, unsigned long aligned_offset, int free_on_close)
+{
+ struct kbase_cpu_mapping *map;
+ u64 start_off = vma->vm_pgoff - reg->start_pfn;
+ phys_addr_t *page_array;
+ int err = 0;
+ int i;
+
+ map = kzalloc(sizeof(*map), GFP_KERNEL);
+
+ if (!map) {
+ WARN_ON(1);
+ err = -ENOMEM;
+ goto out;
+ }
+
+ /*
+ * VM_DONTCOPY - don't make this mapping available in fork'ed processes
+ * VM_DONTEXPAND - disable mremap on this region
+ * VM_IO - disables paging
+ * VM_DONTDUMP - Don't include in core dumps (3.7 only)
+ * VM_MIXEDMAP - Support mixing struct page*s and raw pfns.
+ * This is needed to support using the dedicated and
+ * the OS based memory backends together.
+ */
+ /*
+ * This will need updating to propagate coherency flags
+ * See MIDBASE-1057
+ */
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 7, 0))
+ vma->vm_flags |= VM_DONTCOPY | VM_DONTDUMP | VM_DONTEXPAND | VM_IO;
+#else
+ vma->vm_flags |= VM_DONTCOPY | VM_DONTEXPAND | VM_RESERVED | VM_IO;
+#endif
+ vma->vm_ops = &kbase_vm_ops;
+ vma->vm_private_data = map;
+
+ page_array = kbase_get_cpu_phy_pages(reg);
+
+ if (!(reg->flags & KBASE_REG_CPU_CACHED) &&
+ (reg->flags & (KBASE_REG_CPU_WR|KBASE_REG_CPU_RD))) {
+ /* We can't map vmalloc'd memory uncached.
+ * Other memory will have been returned from
+ * kbase_mem_pool which would be
+ * suitable for mapping uncached.
+ */
+ BUG_ON(kaddr);
+ vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
+ }
+
+ if (!kaddr) {
+ unsigned long addr = vma->vm_start + aligned_offset;
+
+ vma->vm_flags |= VM_PFNMAP;
+ for (i = 0; i < nr_pages; i++) {
+ unsigned long pfn = PFN_DOWN(page_array[i + start_off]);
+
+ err = vm_insert_pfn(vma, addr, pfn);
+ if (WARN_ON(err))
+ break;
+
+ addr += PAGE_SIZE;
+ }
+ } else {
+ WARN_ON(aligned_offset);
+ /* MIXEDMAP so we can vfree the kaddr early and not track it after map time */
+ vma->vm_flags |= VM_MIXEDMAP;
+ /* vmalloc remapping is easy... */
+ err = remap_vmalloc_range(vma, kaddr, 0);
+ WARN_ON(err);
+ }
+
+ if (err) {
+ kfree(map);
+ goto out;
+ }
+
+ map->page_off = start_off;
+ map->region = free_on_close ? reg : NULL;
+ map->kctx = reg->kctx;
+ map->vm_start = vma->vm_start + aligned_offset;
+ if (aligned_offset) {
+ KBASE_DEBUG_ASSERT(!start_off);
+ map->vm_end = map->vm_start + (reg->nr_pages << PAGE_SHIFT);
+ } else {
+ map->vm_end = vma->vm_end;
+ }
+ map->alloc = kbase_mem_phy_alloc_get(reg->cpu_alloc);
+ map->count = 1; /* start with one ref */
+
+ if (reg->flags & KBASE_REG_CPU_CACHED)
+ map->alloc->properties |= KBASE_MEM_PHY_ALLOC_ACCESSED_CACHED;
+
+ list_add(&map->mappings_list, &map->alloc->mappings);
+
+ out:
+ return err;
+}
+
+static int kbase_trace_buffer_mmap(struct kbase_context *kctx, struct vm_area_struct *vma, struct kbase_va_region **const reg, void **const kaddr)
+{
+ struct kbase_va_region *new_reg;
+ u32 nr_pages;
+ size_t size;
+ int err = 0;
+ u32 *tb;
+ int owns_tb = 1;
+
+ dev_dbg(kctx->kbdev->dev, "in %s\n", __func__);
+ size = (vma->vm_end - vma->vm_start);
+ nr_pages = size >> PAGE_SHIFT;
+
+ if (!kctx->jctx.tb) {
+ KBASE_DEBUG_ASSERT(0 != size);
+ tb = vmalloc_user(size);
+
+ if (NULL == tb) {
+ err = -ENOMEM;
+ goto out;
+ }
+
+ err = kbase_device_trace_buffer_install(kctx, tb, size);
+ if (err) {
+ vfree(tb);
+ goto out;
+ }
+ } else {
+ err = -EINVAL;
+ goto out;
+ }
+
+ *kaddr = kctx->jctx.tb;
+
+ new_reg = kbase_alloc_free_region(kctx, 0, nr_pages, KBASE_REG_ZONE_SAME_VA);
+ if (!new_reg) {
+ err = -ENOMEM;
+ WARN_ON(1);
+ goto out_no_region;
+ }
+
+ new_reg->cpu_alloc = kbase_alloc_create(0, KBASE_MEM_TYPE_TB);
+ if (IS_ERR_OR_NULL(new_reg->cpu_alloc)) {
+ err = -ENOMEM;
+ new_reg->cpu_alloc = NULL;
+ WARN_ON(1);
+ goto out_no_alloc;
+ }
+
+ new_reg->gpu_alloc = kbase_mem_phy_alloc_get(new_reg->cpu_alloc);
+
+ new_reg->cpu_alloc->imported.kctx = kctx;
+ new_reg->flags &= ~KBASE_REG_FREE;
+ new_reg->flags |= KBASE_REG_CPU_CACHED;
+
+ /* alloc now owns the tb */
+ owns_tb = 0;
+
+ if (kbase_add_va_region(kctx, new_reg, vma->vm_start, nr_pages, 1) != 0) {
+ err = -ENOMEM;
+ WARN_ON(1);
+ goto out_no_va_region;
+ }
+
+ *reg = new_reg;
+
+ /* map read only, noexec */
+ vma->vm_flags &= ~(VM_WRITE | VM_MAYWRITE | VM_EXEC | VM_MAYEXEC);
+ /* the rest of the flags is added by the cpu_mmap handler */
+
+ dev_dbg(kctx->kbdev->dev, "%s done\n", __func__);
+ return 0;
+
+out_no_va_region:
+out_no_alloc:
+ kbase_free_alloced_region(new_reg);
+out_no_region:
+ if (owns_tb) {
+ kbase_device_trace_buffer_uninstall(kctx);
+ vfree(tb);
+ }
+out:
+ return err;
+}
+
+static int kbase_mmu_dump_mmap(struct kbase_context *kctx, struct vm_area_struct *vma, struct kbase_va_region **const reg, void **const kmap_addr)
+{
+ struct kbase_va_region *new_reg;
+ void *kaddr;
+ u32 nr_pages;
+ size_t size;
+ int err = 0;
+
+ dev_dbg(kctx->kbdev->dev, "in kbase_mmu_dump_mmap\n");
+ size = (vma->vm_end - vma->vm_start);
+ nr_pages = size >> PAGE_SHIFT;
+
+ kaddr = kbase_mmu_dump(kctx, nr_pages);
+
+ if (!kaddr) {
+ err = -ENOMEM;
+ goto out;
+ }
+
+ new_reg = kbase_alloc_free_region(kctx, 0, nr_pages, KBASE_REG_ZONE_SAME_VA);
+ if (!new_reg) {
+ err = -ENOMEM;
+ WARN_ON(1);
+ goto out;
+ }
+
+ new_reg->cpu_alloc = kbase_alloc_create(0, KBASE_MEM_TYPE_RAW);
+ if (IS_ERR_OR_NULL(new_reg->cpu_alloc)) {
+ err = -ENOMEM;
+ new_reg->cpu_alloc = NULL;
+ WARN_ON(1);
+ goto out_no_alloc;
+ }
+
+ new_reg->gpu_alloc = kbase_mem_phy_alloc_get(new_reg->cpu_alloc);
+
+ new_reg->flags &= ~KBASE_REG_FREE;
+ new_reg->flags |= KBASE_REG_CPU_CACHED;
+ if (kbase_add_va_region(kctx, new_reg, vma->vm_start, nr_pages, 1) != 0) {
+ err = -ENOMEM;
+ WARN_ON(1);
+ goto out_va_region;
+ }
+
+ *kmap_addr = kaddr;
+ *reg = new_reg;
+
+ dev_dbg(kctx->kbdev->dev, "kbase_mmu_dump_mmap done\n");
+ return 0;
+
+out_no_alloc:
+out_va_region:
+ kbase_free_alloced_region(new_reg);
+out:
+ return err;
+}
+
+
+void kbase_os_mem_map_lock(struct kbase_context *kctx)
+{
+ struct mm_struct *mm = current->mm;
+ (void)kctx;
+ down_read(&mm->mmap_sem);
+}
+
+void kbase_os_mem_map_unlock(struct kbase_context *kctx)
+{
+ struct mm_struct *mm = current->mm;
+ (void)kctx;
+ up_read(&mm->mmap_sem);
+}
+
+int kbase_mmap(struct file *file, struct vm_area_struct *vma)
+{
+ struct kbase_context *kctx = file->private_data;
+ struct kbase_va_region *reg;
+ void *kaddr = NULL;
+ size_t nr_pages;
+ int err = 0;
+ int free_on_close = 0;
+ struct device *dev = kctx->kbdev->dev;
+ size_t aligned_offset = 0;
+
+ dev_dbg(dev, "kbase_mmap\n");
+ nr_pages = (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;
+
+ /* strip away the VM_MAY* flags corresponding to the VM_* flags requested */
+ vma->vm_flags &= ~((vma->vm_flags & (VM_READ | VM_WRITE)) << 4);
+
+ if (0 == nr_pages) {
+ err = -EINVAL;
+ goto out;
+ }
+
+ if (!(vma->vm_flags & VM_SHARED)) {
+ err = -EINVAL;
+ goto out;
+ }
+
+ kbase_gpu_vm_lock(kctx);
+
+ if (vma->vm_pgoff == PFN_DOWN(BASE_MEM_MAP_TRACKING_HANDLE)) {
+ /* The non-mapped tracking helper page */
+ err = kbase_tracking_page_setup(kctx, vma);
+ goto out_unlock;
+ }
+
+ /* if not the MTP, verify that the MTP has been mapped */
+ rcu_read_lock();
+ /* catches both the case where the special page isn't present and
+ * the case where we've forked */
+ if (rcu_dereference(kctx->process_mm) != current->mm) {
+ err = -EINVAL;
+ rcu_read_unlock();
+ goto out_unlock;
+ }
+ rcu_read_unlock();
+
+ switch (vma->vm_pgoff) {
+ case PFN_DOWN(BASEP_MEM_INVALID_HANDLE):
+ case PFN_DOWN(BASEP_MEM_WRITE_ALLOC_PAGES_HANDLE):
+ /* Illegal handle for direct map */
+ err = -EINVAL;
+ goto out_unlock;
+ case PFN_DOWN(BASE_MEM_TRACE_BUFFER_HANDLE):
+ err = kbase_trace_buffer_mmap(kctx, vma, &reg, &kaddr);
+ if (0 != err)
+ goto out_unlock;
+ dev_dbg(dev, "kbase_trace_buffer_mmap ok\n");
+ /* free the region on munmap */
+ free_on_close = 1;
+ goto map;
+ case PFN_DOWN(BASE_MEM_MMU_DUMP_HANDLE):
+ /* MMU dump */
+ err = kbase_mmu_dump_mmap(kctx, vma, &reg, &kaddr);
+ if (0 != err)
+ goto out_unlock;
+ /* free the region on munmap */
+ free_on_close = 1;
+ goto map;
+ case PFN_DOWN(BASE_MEM_COOKIE_BASE) ...
+ PFN_DOWN(BASE_MEM_FIRST_FREE_ADDRESS) - 1: {
+ /* SAME_VA stuff, fetch the right region */
+ int gpu_pc_bits;
+ int cookie = vma->vm_pgoff - PFN_DOWN(BASE_MEM_COOKIE_BASE);
+
+ gpu_pc_bits = kctx->kbdev->gpu_props.props.core_props.log2_program_counter_size;
+ reg = kctx->pending_regions[cookie];
+ if (!reg) {
+ err = -ENOMEM;
+ goto out_unlock;
+ }
+
+ if (reg->flags & KBASE_REG_ALIGNED) {
+ /* nr_pages must be able to hold alignment pages
+ * plus actual pages */
+ unsigned long align = 1ULL << gpu_pc_bits;
+ unsigned long extra_pages = 3 * PFN_DOWN(align);
+ unsigned long aligned_addr;
+ unsigned long aligned_addr_end;
+ unsigned long nr_bytes = reg->nr_pages << PAGE_SHIFT;
+
+ if (kctx->api_version < KBASE_API_VERSION(8, 5))
+ /* Maintain compatibility with old userspace */
+ extra_pages = PFN_DOWN(align);
+
+ if (nr_pages != reg->nr_pages + extra_pages) {
+ /* incorrect mmap size */
+ /* leave the cookie for a potential
+ * later mapping, or to be reclaimed
+ * later when the context is freed */
+ err = -ENOMEM;
+ goto out_unlock;
+ }
+
+ aligned_addr = ALIGN(vma->vm_start, align);
+ aligned_addr_end = aligned_addr + nr_bytes;
+
+ if (kctx->api_version >= KBASE_API_VERSION(8, 5)) {
+ if ((aligned_addr_end & BASE_MEM_MASK_4GB) == 0) {
+ /* Can't end at 4GB boundary */
+ aligned_addr += 2 * align;
+ } else if ((aligned_addr & BASE_MEM_MASK_4GB) == 0) {
+ /* Can't start at 4GB boundary */
+ aligned_addr += align;
+ }
+ }
+
+ aligned_offset = aligned_addr - vma->vm_start;
+ } else if (reg->nr_pages != nr_pages) {
+ /* incorrect mmap size */
+ /* leave the cookie for a potential later
+ * mapping, or to be reclaimed later when the
+ * context is freed */
+ err = -ENOMEM;
+ goto out_unlock;
+ }
+
+ if ((vma->vm_flags & VM_READ &&
+ !(reg->flags & KBASE_REG_CPU_RD)) ||
+ (vma->vm_flags & VM_WRITE &&
+ !(reg->flags & KBASE_REG_CPU_WR))) {
+ /* VM flags inconsistent with region flags */
+ err = -EPERM;
+ dev_err(dev, "%s:%d inconsistent VM flags\n",
+ __FILE__, __LINE__);
+ goto out_unlock;
+ }
+
+ /* adjust down nr_pages to what we have physically */
+ nr_pages = kbase_reg_current_backed_size(reg);
+
+ if (kbase_gpu_mmap(kctx, reg,
+ vma->vm_start + aligned_offset,
+ reg->nr_pages, 1) != 0) {
+ dev_err(dev, "%s:%d\n", __FILE__, __LINE__);
+ /* Unable to map in GPU space. */
+ WARN_ON(1);
+ err = -ENOMEM;
+ goto out_unlock;
+ }
+
+ /* no need for the cookie anymore */
+ kctx->pending_regions[cookie] = NULL;
+ kctx->cookies |= (1UL << cookie);
+
+ /*
+ * Overwrite the offset with the
+ * region start_pfn, so we effectively
+ * map from offset 0 in the region.
+ */
+ vma->vm_pgoff = reg->start_pfn;
+
+ /* free the region on munmap */
+ free_on_close = 1;
+ goto map;
+ }
+ default: {
+ reg = kbase_region_tracker_find_region_enclosing_address(kctx, (u64)vma->vm_pgoff << PAGE_SHIFT);
+
+ if (reg && !(reg->flags & KBASE_REG_FREE)) {
+ /* will this mapping overflow the size of the region? */
+ if (nr_pages > (reg->nr_pages - (vma->vm_pgoff - reg->start_pfn)))
+ goto overflow;
+
+ if ((vma->vm_flags & VM_READ &&
+ !(reg->flags & KBASE_REG_CPU_RD)) ||
+ (vma->vm_flags & VM_WRITE &&
+ !(reg->flags & KBASE_REG_CPU_WR))) {
+ /* VM flags inconsistent with region flags */
+ err = -EPERM;
+ dev_err(dev, "%s:%d inconsistent VM flags\n",
+ __FILE__, __LINE__);
+ goto out_unlock;
+ }
+
+#ifdef CONFIG_DMA_SHARED_BUFFER
+ if (reg->cpu_alloc->type == KBASE_MEM_TYPE_IMPORTED_UMM)
+ goto dma_map;
+#endif /* CONFIG_DMA_SHARED_BUFFER */
+
+ /* limit what we map to the amount currently backed */
+ if (reg->cpu_alloc->nents < (vma->vm_pgoff - reg->start_pfn + nr_pages)) {
+ if ((vma->vm_pgoff - reg->start_pfn) >= reg->cpu_alloc->nents)
+ nr_pages = 0;
+ else
+ nr_pages = reg->cpu_alloc->nents - (vma->vm_pgoff - reg->start_pfn);
+ }
+
+ goto map;
+ }
+
+overflow:
+ err = -ENOMEM;
+ goto out_unlock;
+ } /* default */
+ } /* switch */
+map:
+ err = kbase_cpu_mmap(reg, vma, kaddr, nr_pages, aligned_offset, free_on_close);
+
+ if (vma->vm_pgoff == PFN_DOWN(BASE_MEM_MMU_DUMP_HANDLE)) {
+ /* MMU dump - userspace should now have a reference on
+ * the pages, so we can now free the kernel mapping */
+ vfree(kaddr);
+ }
+ goto out_unlock;
+
+#ifdef CONFIG_DMA_SHARED_BUFFER
+dma_map:
+ err = dma_buf_mmap(reg->cpu_alloc->imported.umm.dma_buf, vma, vma->vm_pgoff - reg->start_pfn);
+#endif /* CONFIG_DMA_SHARED_BUFFER */
+out_unlock:
+ kbase_gpu_vm_unlock(kctx);
+out:
+ if (err)
+ dev_err(dev, "mmap failed %d\n", err);
+
+ return err;
+}
+
+KBASE_EXPORT_TEST_API(kbase_mmap);
+
+void *kbase_vmap_prot(struct kbase_context *kctx, u64 gpu_addr, size_t size,
+ unsigned long prot_request, struct kbase_vmap_struct *map)
+{
+ struct kbase_va_region *reg;
+ unsigned long page_index;
+ unsigned int offset = gpu_addr & ~PAGE_MASK;
+ size_t page_count = PFN_UP(offset + size);
+ phys_addr_t *page_array;
+ struct page **pages;
+ void *cpu_addr = NULL;
+ pgprot_t prot;
+ size_t i;
+ bool sync_needed;
+
+ if (!size || !map)
+ return NULL;
+
+ /* check if page_count calculation will wrap */
+ if (size > ((size_t)-1 / PAGE_SIZE))
+ return NULL;
+
+ kbase_gpu_vm_lock(kctx);
+
+ reg = kbase_region_tracker_find_region_enclosing_address(kctx, gpu_addr);
+ if (!reg || (reg->flags & KBASE_REG_FREE))
+ goto out_unlock;
+
+ page_index = (gpu_addr >> PAGE_SHIFT) - reg->start_pfn;
+
+ /* check if page_index + page_count will wrap */
+ if (-1UL - page_count < page_index)
+ goto out_unlock;
+
+ if (page_index + page_count > kbase_reg_current_backed_size(reg))
+ goto out_unlock;
+
+ if (reg->flags & KBASE_REG_DONT_NEED)
+ goto out_unlock;
+
+ /* check that the access permissions can be satisfied.
+ * Intended only for checking KBASE_REG_{CPU,GPU}_{RD,WR} */
+ if ((reg->flags & prot_request) != prot_request)
+ goto out_unlock;
+
+ page_array = kbase_get_cpu_phy_pages(reg);
+ if (!page_array)
+ goto out_unlock;
+
+ pages = kmalloc_array(page_count, sizeof(struct page *), GFP_KERNEL);
+ if (!pages)
+ goto out_unlock;
+
+ for (i = 0; i < page_count; i++)
+ pages[i] = pfn_to_page(PFN_DOWN(page_array[page_index + i]));
+
+ prot = PAGE_KERNEL;
+ if (!(reg->flags & KBASE_REG_CPU_CACHED)) {
+ /* Map uncached */
+ prot = pgprot_writecombine(prot);
+ }
+ /* Note: enforcing a RO prot_request onto prot is not done, since:
+ * - CPU-arch-specific integration required
+ * - kbase_vmap() requires no access checks to be made/enforced */
+
+ cpu_addr = vmap(pages, page_count, VM_MAP, prot);
+
+ kfree(pages);
+
+ if (!cpu_addr)
+ goto out_unlock;
+
+ map->gpu_addr = gpu_addr;
+ map->cpu_alloc = kbase_mem_phy_alloc_get(reg->cpu_alloc);
+ map->cpu_pages = &kbase_get_cpu_phy_pages(reg)[page_index];
+ map->gpu_alloc = kbase_mem_phy_alloc_get(reg->gpu_alloc);
+ map->gpu_pages = &kbase_get_gpu_phy_pages(reg)[page_index];
+ map->addr = (void *)((uintptr_t)cpu_addr + offset);
+ map->size = size;
+ map->is_cached = (reg->flags & KBASE_REG_CPU_CACHED) != 0;
+ sync_needed = map->is_cached;
+
+#ifdef CONFIG_MALI_COH_KERN
+ /* kernel can use coherent memory if supported */
+ if (kctx->kbdev->system_coherency == COHERENCY_ACE)
+ sync_needed = false;
+#endif
+
+ if (sync_needed) {
+ /* Sync first page */
+ size_t sz = MIN(((size_t) PAGE_SIZE - offset), size);
+ phys_addr_t cpu_pa = map->cpu_pages[0];
+ phys_addr_t gpu_pa = map->gpu_pages[0];
+
+ kbase_sync_single(kctx, cpu_pa, gpu_pa, offset, sz,
+ KBASE_SYNC_TO_CPU);
+
+ /* Sync middle pages (if any) */
+ for (i = 1; page_count > 2 && i < page_count - 1; i++) {
+ cpu_pa = map->cpu_pages[i];
+ gpu_pa = map->gpu_pages[i];
+ kbase_sync_single(kctx, cpu_pa, gpu_pa, 0, PAGE_SIZE,
+ KBASE_SYNC_TO_CPU);
+ }
+
+ /* Sync last page (if any) */
+ if (page_count > 1) {
+ cpu_pa = map->cpu_pages[page_count - 1];
+ gpu_pa = map->gpu_pages[page_count - 1];
+ sz = ((offset + size - 1) & ~PAGE_MASK) + 1;
+ kbase_sync_single(kctx, cpu_pa, gpu_pa, 0, sz,
+ KBASE_SYNC_TO_CPU);
+ }
+ }
+ kbase_gpu_vm_unlock(kctx);
+
+ return map->addr;
+
+out_unlock:
+ kbase_gpu_vm_unlock(kctx);
+ return NULL;
+}
+
+void *kbase_vmap(struct kbase_context *kctx, u64 gpu_addr, size_t size,
+ struct kbase_vmap_struct *map)
+{
+ /* 0 is specified for prot_request to indicate no access checks should
+ * be made.
+ *
+ * As mentioned in kbase_vmap_prot() this means that a kernel-side
+ * CPU-RO mapping is not enforced to allow this to work */
+ return kbase_vmap_prot(kctx, gpu_addr, size, 0u, map);
+}
+KBASE_EXPORT_TEST_API(kbase_vmap);
+
+void kbase_vunmap(struct kbase_context *kctx, struct kbase_vmap_struct *map)
+{
+ void *addr = (void *)((uintptr_t)map->addr & PAGE_MASK);
+ bool sync_needed = map->is_cached;
+ vunmap(addr);
+#ifdef CONFIG_MALI_COH_KERN
+ /* kernel can use coherent memory if supported */
+ if (kctx->kbdev->system_coherency == COHERENCY_ACE)
+ sync_needed = false;
+#endif
+ if (sync_needed) {
+ off_t offset = (uintptr_t)map->addr & ~PAGE_MASK;
+ size_t size = map->size;
+ size_t page_count = PFN_UP(offset + size);
+ size_t i;
+
+ /* Sync first page */
+ size_t sz = MIN(((size_t) PAGE_SIZE - offset), size);
+ phys_addr_t cpu_pa = map->cpu_pages[0];
+ phys_addr_t gpu_pa = map->gpu_pages[0];
+
+ kbase_sync_single(kctx, cpu_pa, gpu_pa, offset, sz,
+ KBASE_SYNC_TO_DEVICE);
+
+ /* Sync middle pages (if any) */
+ for (i = 1; page_count > 2 && i < page_count - 1; i++) {
+ cpu_pa = map->cpu_pages[i];
+ gpu_pa = map->gpu_pages[i];
+ kbase_sync_single(kctx, cpu_pa, gpu_pa, 0, PAGE_SIZE,
+ KBASE_SYNC_TO_DEVICE);
+ }
+
+ /* Sync last page (if any) */
+ if (page_count > 1) {
+ cpu_pa = map->cpu_pages[page_count - 1];
+ gpu_pa = map->gpu_pages[page_count - 1];
+ sz = ((offset + size - 1) & ~PAGE_MASK) + 1;
+ kbase_sync_single(kctx, cpu_pa, gpu_pa, 0, sz,
+ KBASE_SYNC_TO_DEVICE);
+ }
+ }
+ map->gpu_addr = 0;
+ map->cpu_alloc = kbase_mem_phy_alloc_put(map->cpu_alloc);
+ map->gpu_alloc = kbase_mem_phy_alloc_put(map->gpu_alloc);
+ map->cpu_pages = NULL;
+ map->gpu_pages = NULL;
+ map->addr = NULL;
+ map->size = 0;
+ map->is_cached = false;
+}
+KBASE_EXPORT_TEST_API(kbase_vunmap);
+
+void kbasep_os_process_page_usage_update(struct kbase_context *kctx, int pages)
+{
+ struct mm_struct *mm;
+
+ rcu_read_lock();
+ mm = rcu_dereference(kctx->process_mm);
+ if (mm) {
+ atomic_add(pages, &kctx->nonmapped_pages);
+#ifdef SPLIT_RSS_COUNTING
+ add_mm_counter(mm, MM_FILEPAGES, pages);
+#else
+ spin_lock(&mm->page_table_lock);
+ add_mm_counter(mm, MM_FILEPAGES, pages);
+ spin_unlock(&mm->page_table_lock);
+#endif
+ }
+ rcu_read_unlock();
+}
+
+static void kbasep_os_process_page_usage_drain(struct kbase_context *kctx)
+{
+ int pages;
+ struct mm_struct *mm;
+
+ spin_lock(&kctx->mm_update_lock);
+ mm = rcu_dereference_protected(kctx->process_mm, lockdep_is_held(&kctx->mm_update_lock));
+ if (!mm) {
+ spin_unlock(&kctx->mm_update_lock);
+ return;
+ }
+
+ rcu_assign_pointer(kctx->process_mm, NULL);
+ spin_unlock(&kctx->mm_update_lock);
+ synchronize_rcu();
+
+ pages = atomic_xchg(&kctx->nonmapped_pages, 0);
+#ifdef SPLIT_RSS_COUNTING
+ add_mm_counter(mm, MM_FILEPAGES, -pages);
+#else
+ spin_lock(&mm->page_table_lock);
+ add_mm_counter(mm, MM_FILEPAGES, -pages);
+ spin_unlock(&mm->page_table_lock);
+#endif
+}
+
+static void kbase_special_vm_close(struct vm_area_struct *vma)
+{
+ struct kbase_context *kctx;
+
+ kctx = vma->vm_private_data;
+ kbasep_os_process_page_usage_drain(kctx);
+}
+
+static const struct vm_operations_struct kbase_vm_special_ops = {
+ .close = kbase_special_vm_close,
+};
+
+static int kbase_tracking_page_setup(struct kbase_context *kctx, struct vm_area_struct *vma)
+{
+ /* check that this is the only tracking page */
+ spin_lock(&kctx->mm_update_lock);
+ if (rcu_dereference_protected(kctx->process_mm, lockdep_is_held(&kctx->mm_update_lock))) {
+ spin_unlock(&kctx->mm_update_lock);
+ return -EFAULT;
+ }
+
+ rcu_assign_pointer(kctx->process_mm, current->mm);
+
+ spin_unlock(&kctx->mm_update_lock);
+
+ /* no real access */
+ vma->vm_flags &= ~(VM_READ | VM_MAYREAD | VM_WRITE | VM_MAYWRITE | VM_EXEC | VM_MAYEXEC);
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 7, 0))
+ vma->vm_flags |= VM_DONTCOPY | VM_DONTEXPAND | VM_DONTDUMP | VM_IO;
+#else
+ vma->vm_flags |= VM_DONTCOPY | VM_DONTEXPAND | VM_RESERVED | VM_IO;
+#endif
+ vma->vm_ops = &kbase_vm_special_ops;
+ vma->vm_private_data = kctx;
+
+ return 0;
+}
+void *kbase_va_alloc(struct kbase_context *kctx, u32 size, struct kbase_hwc_dma_mapping *handle)
+{
+ int i;
+ int res;
+ void *va;
+ dma_addr_t dma_pa;
+ struct kbase_va_region *reg;
+ phys_addr_t *page_array;
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 8, 0))
+ unsigned long attrs = DMA_ATTR_WRITE_COMBINE;
+#elif (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 5, 0))
+ DEFINE_DMA_ATTRS(attrs);
+#endif
+
+ u32 pages = ((size - 1) >> PAGE_SHIFT) + 1;
+ u32 flags = BASE_MEM_PROT_CPU_RD | BASE_MEM_PROT_CPU_WR |
+ BASE_MEM_PROT_GPU_RD | BASE_MEM_PROT_GPU_WR;
+
+ KBASE_DEBUG_ASSERT(kctx != NULL);
+ KBASE_DEBUG_ASSERT(0 != size);
+ KBASE_DEBUG_ASSERT(0 != pages);
+
+ if (size == 0)
+ goto err;
+
+ /* All the alloc calls return zeroed memory */
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 8, 0))
+ va = dma_alloc_attrs(kctx->kbdev->dev, size, &dma_pa, GFP_KERNEL,
+ attrs);
+#elif (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 5, 0))
+ dma_set_attr(DMA_ATTR_WRITE_COMBINE, &attrs);
+ va = dma_alloc_attrs(kctx->kbdev->dev, size, &dma_pa, GFP_KERNEL,
+ &attrs);
+#else
+ va = dma_alloc_writecombine(kctx->kbdev->dev, size, &dma_pa, GFP_KERNEL);
+#endif
+ if (!va)
+ goto err;
+
+ /* Store the state so we can free it later. */
+ handle->cpu_va = va;
+ handle->dma_pa = dma_pa;
+ handle->size = size;
+
+
+ reg = kbase_alloc_free_region(kctx, 0, pages, KBASE_REG_ZONE_SAME_VA);
+ if (!reg)
+ goto no_reg;
+
+ reg->flags &= ~KBASE_REG_FREE;
+ kbase_update_region_flags(kctx, reg, flags);
+
+ reg->cpu_alloc = kbase_alloc_create(pages, KBASE_MEM_TYPE_RAW);
+ if (IS_ERR_OR_NULL(reg->cpu_alloc))
+ goto no_alloc;
+
+ reg->gpu_alloc = kbase_mem_phy_alloc_get(reg->cpu_alloc);
+
+ page_array = kbase_get_cpu_phy_pages(reg);
+
+ for (i = 0; i < pages; i++)
+ page_array[i] = dma_pa + (i << PAGE_SHIFT);
+
+ reg->cpu_alloc->nents = pages;
+
+ kbase_gpu_vm_lock(kctx);
+ res = kbase_gpu_mmap(kctx, reg, (uintptr_t) va, pages, 1);
+ kbase_gpu_vm_unlock(kctx);
+ if (res)
+ goto no_mmap;
+
+ return va;
+
+no_mmap:
+ kbase_mem_phy_alloc_put(reg->cpu_alloc);
+ kbase_mem_phy_alloc_put(reg->gpu_alloc);
+no_alloc:
+ kfree(reg);
+no_reg:
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 8, 0))
+ dma_free_attrs(kctx->kbdev->dev, size, va, dma_pa, attrs);
+#elif (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 5, 0))
+ dma_free_attrs(kctx->kbdev->dev, size, va, dma_pa, &attrs);
+#else
+ dma_free_writecombine(kctx->kbdev->dev, size, va, dma_pa);
+#endif
+err:
+ return NULL;
+}
+KBASE_EXPORT_SYMBOL(kbase_va_alloc);
+
+void kbase_va_free(struct kbase_context *kctx, struct kbase_hwc_dma_mapping *handle)
+{
+ struct kbase_va_region *reg;
+ int err;
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 5, 0)) && \
+ (LINUX_VERSION_CODE < KERNEL_VERSION(4, 8, 0))
+ DEFINE_DMA_ATTRS(attrs);
+#endif
+
+ KBASE_DEBUG_ASSERT(kctx != NULL);
+ KBASE_DEBUG_ASSERT(handle->cpu_va != NULL);
+
+ kbase_gpu_vm_lock(kctx);
+ reg = kbase_region_tracker_find_region_base_address(kctx, (uintptr_t)handle->cpu_va);
+ KBASE_DEBUG_ASSERT(reg);
+ err = kbase_gpu_munmap(kctx, reg);
+ kbase_gpu_vm_unlock(kctx);
+ KBASE_DEBUG_ASSERT(!err);
+
+ kbase_mem_phy_alloc_put(reg->cpu_alloc);
+ kbase_mem_phy_alloc_put(reg->gpu_alloc);
+ kfree(reg);
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 8, 0))
+ dma_free_attrs(kctx->kbdev->dev, handle->size,
+ handle->cpu_va, handle->dma_pa, DMA_ATTR_WRITE_COMBINE);
+#elif (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 5, 0))
+ dma_set_attr(DMA_ATTR_WRITE_COMBINE, &attrs);
+ dma_free_attrs(kctx->kbdev->dev, handle->size,
+ handle->cpu_va, handle->dma_pa, &attrs);
+#else
+ dma_free_writecombine(kctx->kbdev->dev, handle->size,
+ handle->cpu_va, handle->dma_pa);
+#endif
+}
+KBASE_EXPORT_SYMBOL(kbase_va_free);
+
diff --git a/midgard-0.r2p0/mali_kbase_mem_linux.h b/midgard-0.r2p0/mali_kbase_mem_linux.h
new file mode 100755
index 0000000..6471747
--- /dev/null
+++ b/midgard-0.r2p0/mali_kbase_mem_linux.h
@@ -0,0 +1,216 @@
+/*
+ *
+ * (C) COPYRIGHT 2010, 2012-2016 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained
+ * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
+ * Boston, MA 02110-1301, USA.
+ *
+ */
+
+
+
+
+
+/**
+ * @file mali_kbase_mem_linux.h
+ * Base kernel memory APIs, Linux implementation.
+ */
+
+#ifndef _KBASE_MEM_LINUX_H_
+#define _KBASE_MEM_LINUX_H_
+
+/** A HWC dump mapping */
+struct kbase_hwc_dma_mapping {
+ void *cpu_va;
+ dma_addr_t dma_pa;
+ size_t size;
+};
+
+struct kbase_va_region *kbase_mem_alloc(struct kbase_context *kctx, u64 va_pages, u64 commit_pages, u64 extent, u64 *flags, u64 *gpu_va, u16 *va_alignment);
+int kbase_mem_query(struct kbase_context *kctx, u64 gpu_addr, int query, u64 *const pages);
+int kbase_mem_import(struct kbase_context *kctx, enum base_mem_import_type type,
+ void __user *phandle, u64 *gpu_va, u64 *va_pages,
+ u64 *flags);
+u64 kbase_mem_alias(struct kbase_context *kctx, u64 *flags, u64 stride, u64 nents, struct base_mem_aliasing_info *ai, u64 *num_pages);
+int kbase_mem_flags_change(struct kbase_context *kctx, u64 gpu_addr, unsigned int flags, unsigned int mask);
+int kbase_mem_commit(struct kbase_context *kctx, u64 gpu_addr, u64 new_pages, enum base_backing_threshold_status *failure_reason);
+int kbase_mmap(struct file *file, struct vm_area_struct *vma);
+
+/**
+ * kbase_mem_evictable_init - Initialize the Ephemeral memory eviction
+ * mechanism.
+ * @kctx: The kbase context to initialize.
+ *
+ * Return: Zero on success or -errno on failure.
+ */
+int kbase_mem_evictable_init(struct kbase_context *kctx);
+
+/**
+ * kbase_mem_evictable_deinit - De-initialize the Ephemeral memory eviction
+ * mechanism.
+ * @kctx: The kbase context to de-initialize.
+ */
+void kbase_mem_evictable_deinit(struct kbase_context *kctx);
+
+/**
+ * kbase_mem_grow_gpu_mapping - Grow the GPU mapping of an allocation
+ * @kctx: Context the region belongs to
+ * @reg: The GPU region
+ * @new_pages: The number of pages after the grow
+ * @old_pages: The number of pages before the grow
+ *
+ * Return: 0 on success, -errno on error.
+ *
+ * Expand the GPU mapping to encompass the new physical pages which have
+ * been added to the allocation.
+ *
+ * Note: Caller must be holding the region lock.
+ */
+int kbase_mem_grow_gpu_mapping(struct kbase_context *kctx,
+ struct kbase_va_region *reg,
+ u64 new_pages, u64 old_pages);
+
+/**
+ * kbase_mem_evictable_make - Make a physical allocation eligible for eviction
+ * @gpu_alloc: The physical allocation to make evictable
+ *
+ * Return: 0 on success, -errno on error.
+ *
+ * Take the provided region and make all the physical pages within it
+ * reclaimable by the kernel, updating the per-process VM stats as well.
+ * Remove any CPU mappings (these can't be removed in the shrinker callback
+ * since mmap_sem might already be taken) but leave the GPU mapping intact
+ * until the shrinker reclaims the allocation.
+ *
+ * Note: Must be called with the region lock of the containing context.
+ */
+int kbase_mem_evictable_make(struct kbase_mem_phy_alloc *gpu_alloc);
+
+/**
+ * kbase_mem_evictable_unmake - Remove a physical allocation's eligibility
+ * for eviction.
+ * @alloc: The physical allocation to remove eviction eligibility from.
+ *
+ * Return: True if the allocation had its backing restored, false otherwise.
+ *
+ * Make the physical pages in the region no longer reclaimable and update the
+ * per-process stats. If the shrinker has already evicted the memory,
+ * re-allocate it, provided the region is still alive.
+ *
+ * Note: Must be called with the region lock of the containing context.
+ */
+bool kbase_mem_evictable_unmake(struct kbase_mem_phy_alloc *alloc);
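+
+/*
+ * Illustrative usage sketch (not part of the driver): toggling eviction
+ * eligibility for a region's physical allocation. The region pointer 'reg'
+ * is assumed to come from the caller, and kbase_gpu_vm_lock()/unlock() is
+ * taken here as the "region lock" mentioned in the notes above.
+ *
+ *   kbase_gpu_vm_lock(kctx);
+ *   if (kbase_mem_evictable_make(reg->gpu_alloc))
+ *           dev_warn(kctx->kbdev->dev, "could not make region evictable\n");
+ *   ...
+ *   if (!kbase_mem_evictable_unmake(reg->gpu_alloc))
+ *           dev_warn(kctx->kbdev->dev, "backing was already reclaimed\n");
+ *   kbase_gpu_vm_unlock(kctx);
+ */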
+
+struct kbase_vmap_struct {
+ u64 gpu_addr;
+ struct kbase_mem_phy_alloc *cpu_alloc;
+ struct kbase_mem_phy_alloc *gpu_alloc;
+ phys_addr_t *cpu_pages;
+ phys_addr_t *gpu_pages;
+ void *addr;
+ size_t size;
+ bool is_cached;
+};
+
+
+/**
+ * kbase_vmap_prot - Map a GPU VA range into the kernel safely, only if the
+ * requested access permissions are supported
+ * @kctx: Context the VA range belongs to
+ * @gpu_addr: Start address of VA range
+ * @size: Size of VA range
+ * @prot_request: Flags indicating how the caller will then access the memory
+ * @map: Structure to be given to kbase_vunmap() on freeing
+ *
+ * Return: Kernel-accessible CPU pointer to the VA range, or NULL on error
+ *
+ * Map a GPU VA Range into the kernel. The VA range must be contained within a
+ * GPU memory region. Appropriate CPU cache-flushing operations are made as
+ * required, dependent on the CPU mapping for the memory region.
+ *
+ * This is safer than using kmap() on the pages directly,
+ * because the pages here are refcounted to prevent freeing (and hence reuse
+ * elsewhere in the system) until a kbase_vunmap() call.
+ *
+ * The flags in @prot_request should use KBASE_REG_{CPU,GPU}_{RD,WR}, to check
+ * whether the region should allow the intended access, and return an error if
+ * disallowed. This is essential for the security of imported memory,
+ * particularly a user buffer from SHM mapped into the process as RO. In that
+ * case, write access must be checked if the intention is for the kernel to
+ * write to the memory.
+ *
+ * The checks are also there to help catch access errors on memory where
+ * security is not a concern: imported memory that is always RW, and memory
+ * that was allocated and owned by the process attached to @kctx. In this case,
+ * it helps to identify memory that was mapped with the wrong access type.
+ *
+ * Note: KBASE_REG_GPU_{RD,WR} flags are currently supported for legacy cases
+ * where either the security of the memory is solely dependent on those flags,
+ * or where userspace code was expecting only the GPU to access the memory
+ * (e.g. HW workarounds).
+ *
+ */
+void *kbase_vmap_prot(struct kbase_context *kctx, u64 gpu_addr, size_t size,
+ unsigned long prot_request, struct kbase_vmap_struct *map);
+
+/**
+ * kbase_vmap - Map a GPU VA range into the kernel safely
+ * @kctx: Context the VA range belongs to
+ * @gpu_addr: Start address of VA range
+ * @size: Size of VA range
+ * @map: Structure to be given to kbase_vunmap() on freeing
+ *
+ * Return: Kernel-accessible CPU pointer to the VA range, or NULL on error
+ *
+ * Map a GPU VA Range into the kernel. The VA range must be contained within a
+ * GPU memory region. Appropriate CPU cache-flushing operations are made as
+ * required, dependent on the CPU mapping for the memory region.
+ *
+ * This is safer than using kmap() on the pages directly,
+ * because the pages here are refcounted to prevent freeing (and hence reuse
+ * elsewhere in the system) until a kbase_vunmap() call.
+ *
+ * kbase_vmap_prot() should be used in preference, since kbase_vmap() makes no
+ * checks to ensure the security of e.g. imported user bufs from RO SHM.
+ */
+void *kbase_vmap(struct kbase_context *kctx, u64 gpu_addr, size_t size,
+ struct kbase_vmap_struct *map);
+
+/**
+ * kbase_vunmap - Unmap a GPU VA range from the kernel
+ * @kctx: Context the VA range belongs to
+ * @map: Structure describing the mapping from the corresponding kbase_vmap()
+ * call
+ *
+ * Unmaps a GPU VA range from the kernel, given its @map structure obtained
+ * from kbase_vmap(). Appropriate CPU cache-flushing operations are made as
+ * required, dependent on the CPU mapping for the memory region.
+ *
+ * The reference taken on pages during kbase_vmap() is released.
+ */
+void kbase_vunmap(struct kbase_context *kctx, struct kbase_vmap_struct *map);
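+
+/*
+ * Illustrative usage sketch (not part of the driver): a kernel-side write
+ * into a GPU buffer via kbase_vmap_prot()/kbase_vunmap(). 'gpu_addr' and
+ * 'value' are assumed to be supplied by the caller; passing KBASE_REG_CPU_WR
+ * makes kbase_vmap_prot() refuse regions that are not CPU-writable.
+ *
+ *   struct kbase_vmap_struct map;
+ *   u32 *ptr;
+ *
+ *   ptr = kbase_vmap_prot(kctx, gpu_addr, sizeof(*ptr),
+ *                         KBASE_REG_CPU_WR, &map);
+ *   if (!ptr)
+ *           return -EINVAL;
+ *   *ptr = value;
+ *   kbase_vunmap(kctx, &map); /* syncs back to the GPU if the mapping is cached */
+ */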
+
+/** @brief Allocate memory from kernel space and map it onto the GPU
+ *
+ * @param kctx The context used for the allocation/mapping
+ * @param size The size of the allocation in bytes
+ * @param handle An opaque structure used to contain the state needed to free the memory
+ * @return the VA for kernel space and GPU MMU
+ */
+void *kbase_va_alloc(struct kbase_context *kctx, u32 size, struct kbase_hwc_dma_mapping *handle);
+
+/** @brief Free/unmap memory allocated by kbase_va_alloc
+ *
+ * @param kctx The context used for the allocation/mapping
+ * @param handle An opaque structure filled in by the kbase_va_alloc function.
+ */
+void kbase_va_free(struct kbase_context *kctx, struct kbase_hwc_dma_mapping *handle);
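+
+/*
+ * Illustrative usage sketch (not part of the driver): allocating a small
+ * buffer visible to both the CPU and the GPU, e.g. for a hardware counter
+ * dump. The 4096-byte size is an arbitrary example value.
+ *
+ *   struct kbase_hwc_dma_mapping handle;
+ *   void *buf;
+ *
+ *   buf = kbase_va_alloc(kctx, 4096, &handle);
+ *   if (!buf)
+ *           return -ENOMEM;
+ *   ... use buf on the CPU; the GPU MMU maps it at the same VA ...
+ *   kbase_va_free(kctx, &handle);
+ */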
+
+#endif /* _KBASE_MEM_LINUX_H_ */
diff --git a/midgard-0.r2p0/mali_kbase_mem_lowlevel.h b/midgard-0.r2p0/mali_kbase_mem_lowlevel.h
new file mode 100755
index 0000000..441180b
--- /dev/null
+++ b/midgard-0.r2p0/mali_kbase_mem_lowlevel.h
@@ -0,0 +1,45 @@
+/*
+ *
+ * (C) COPYRIGHT 2012-2014 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained
+ * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
+ * Boston, MA 02110-1301, USA.
+ *
+ */
+
+
+
+
+
+#ifndef _KBASE_MEM_LOWLEVEL_H
+#define _KBASE_MEM_LOWLEVEL_H
+
+#ifndef _KBASE_H_
+#error "Don't include this file directly, use mali_kbase.h instead"
+#endif
+
+#include <linux/dma-mapping.h>
+
+/**
+ * @brief Flags for kbase_phy_allocator_pages_alloc
+ */
+#define KBASE_PHY_PAGES_FLAG_DEFAULT (0) /** Default allocation flag */
+#define KBASE_PHY_PAGES_FLAG_CLEAR (1 << 0) /** Clear the pages after allocation */
+#define KBASE_PHY_PAGES_FLAG_POISON (1 << 1) /** Fill the memory with a poison value */
+
+#define KBASE_PHY_PAGES_SUPPORTED_FLAGS (KBASE_PHY_PAGES_FLAG_DEFAULT|KBASE_PHY_PAGES_FLAG_CLEAR|KBASE_PHY_PAGES_FLAG_POISON)
+
+#define KBASE_PHY_PAGES_POISON_VALUE 0xFD /** Value to fill the memory with when KBASE_PHY_PAGES_FLAG_POISON is set */
+
+enum kbase_sync_type {
+ KBASE_SYNC_TO_CPU,
+ KBASE_SYNC_TO_DEVICE
+};
+
+#endif /* _KBASE_MEM_LOWLEVEL_H */
diff --git a/midgard-0.r2p0/mali_kbase_mem_pool.c b/midgard-0.r2p0/mali_kbase_mem_pool.c
new file mode 100755
index 0000000..9a3f9b5
--- /dev/null
+++ b/midgard-0.r2p0/mali_kbase_mem_pool.c
@@ -0,0 +1,594 @@
+/*
+ *
+ * (C) COPYRIGHT 2015-2016 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained
+ * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
+ * Boston, MA 02110-1301, USA.
+ *
+ */
+
+
+
+#include <mali_kbase.h>
+#include <linux/mm.h>
+#include <linux/dma-mapping.h>
+#include <linux/highmem.h>
+#include <linux/spinlock.h>
+#include <linux/shrinker.h>
+#include <linux/atomic.h>
+#include <linux/version.h>
+
+/* This function is only provided for backwards compatibility with kernels
+ * which use the old carveout allocator.
+ *
+ * The forward declaration is to keep sparse happy.
+ */
+int __init kbase_carveout_mem_reserve(
+ phys_addr_t size);
+int __init kbase_carveout_mem_reserve(phys_addr_t size)
+{
+ return 0;
+}
+
+#define pool_dbg(pool, format, ...) \
+ dev_dbg(pool->kbdev->dev, "%s-pool [%zu/%zu]: " format, \
+ (pool->next_pool) ? "kctx" : "kbdev", \
+ kbase_mem_pool_size(pool), \
+ kbase_mem_pool_max_size(pool), \
+ ##__VA_ARGS__)
+
+#define NOT_DIRTY false
+#define NOT_RECLAIMED false
+
+static inline void kbase_mem_pool_lock(struct kbase_mem_pool *pool)
+{
+ spin_lock(&pool->pool_lock);
+}
+
+static inline void kbase_mem_pool_unlock(struct kbase_mem_pool *pool)
+{
+ spin_unlock(&pool->pool_lock);
+}
+
+static size_t kbase_mem_pool_capacity(struct kbase_mem_pool *pool)
+{
+ ssize_t max_size = kbase_mem_pool_max_size(pool);
+ ssize_t cur_size = kbase_mem_pool_size(pool);
+
+ return max(max_size - cur_size, (ssize_t)0);
+}
+
+static bool kbase_mem_pool_is_full(struct kbase_mem_pool *pool)
+{
+ return kbase_mem_pool_size(pool) >= kbase_mem_pool_max_size(pool);
+}
+
+static bool kbase_mem_pool_is_empty(struct kbase_mem_pool *pool)
+{
+ return kbase_mem_pool_size(pool) == 0;
+}
+
+static void kbase_mem_pool_add_locked(struct kbase_mem_pool *pool,
+ struct page *p)
+{
+ lockdep_assert_held(&pool->pool_lock);
+
+ list_add(&p->lru, &pool->page_list);
+ pool->cur_size++;
+
+ zone_page_state_add(1, page_zone(p), NR_SLAB_RECLAIMABLE);
+
+ pool_dbg(pool, "added page\n");
+}
+
+static void kbase_mem_pool_add(struct kbase_mem_pool *pool, struct page *p)
+{
+ kbase_mem_pool_lock(pool);
+ kbase_mem_pool_add_locked(pool, p);
+ kbase_mem_pool_unlock(pool);
+}
+
+static void kbase_mem_pool_add_list_locked(struct kbase_mem_pool *pool,
+ struct list_head *page_list, size_t nr_pages)
+{
+ struct page *p;
+
+ lockdep_assert_held(&pool->pool_lock);
+
+ list_for_each_entry(p, page_list, lru) {
+ zone_page_state_add(1, page_zone(p), NR_SLAB_RECLAIMABLE);
+ }
+
+ list_splice(page_list, &pool->page_list);
+ pool->cur_size += nr_pages;
+
+ pool_dbg(pool, "added %zu pages\n", nr_pages);
+}
+
+static void kbase_mem_pool_add_list(struct kbase_mem_pool *pool,
+ struct list_head *page_list, size_t nr_pages)
+{
+ kbase_mem_pool_lock(pool);
+ kbase_mem_pool_add_list_locked(pool, page_list, nr_pages);
+ kbase_mem_pool_unlock(pool);
+}
+
+static struct page *kbase_mem_pool_remove_locked(struct kbase_mem_pool *pool)
+{
+ struct page *p;
+
+ lockdep_assert_held(&pool->pool_lock);
+
+ if (kbase_mem_pool_is_empty(pool))
+ return NULL;
+
+ p = list_first_entry(&pool->page_list, struct page, lru);
+ list_del_init(&p->lru);
+ pool->cur_size--;
+
+ zone_page_state_add(-1, page_zone(p), NR_SLAB_RECLAIMABLE);
+
+ pool_dbg(pool, "removed page\n");
+
+ return p;
+}
+
+static struct page *kbase_mem_pool_remove(struct kbase_mem_pool *pool)
+{
+ struct page *p;
+
+ kbase_mem_pool_lock(pool);
+ p = kbase_mem_pool_remove_locked(pool);
+ kbase_mem_pool_unlock(pool);
+
+ return p;
+}
+
+static void kbase_mem_pool_sync_page(struct kbase_mem_pool *pool,
+ struct page *p)
+{
+ struct device *dev = pool->kbdev->dev;
+
+ dma_sync_single_for_device(dev, kbase_dma_addr(p),
+ PAGE_SIZE, DMA_BIDIRECTIONAL);
+}
+
+static void kbase_mem_pool_zero_page(struct kbase_mem_pool *pool,
+ struct page *p)
+{
+ clear_highpage(p);
+ kbase_mem_pool_sync_page(pool, p);
+}
+
+static void kbase_mem_pool_spill(struct kbase_mem_pool *next_pool,
+ struct page *p)
+{
+ /* Zero page before spilling */
+ kbase_mem_pool_zero_page(next_pool, p);
+
+ kbase_mem_pool_add(next_pool, p);
+}
+
+struct page *kbase_mem_alloc_page(struct kbase_device *kbdev)
+{
+ struct page *p;
+ gfp_t gfp;
+ struct device *dev = kbdev->dev;
+ dma_addr_t dma_addr;
+
+#if defined(CONFIG_ARM) && !defined(CONFIG_HAVE_DMA_ATTRS) && \
+ LINUX_VERSION_CODE < KERNEL_VERSION(3, 5, 0)
+ /* DMA cache sync fails for HIGHMEM before 3.5 on ARM */
+ gfp = GFP_USER | __GFP_ZERO;
+#else
+ gfp = GFP_HIGHUSER | __GFP_ZERO;
+#endif
+
+ if (current->flags & PF_KTHREAD) {
+ /* Don't trigger OOM killer from kernel threads, e.g. when
+ * growing memory on GPU page fault */
+ gfp |= __GFP_NORETRY;
+ }
+
+ p = alloc_page(gfp);
+ if (!p)
+ return NULL;
+
+ dma_addr = dma_map_page(dev, p, 0, PAGE_SIZE, DMA_BIDIRECTIONAL);
+ if (dma_mapping_error(dev, dma_addr)) {
+ __free_page(p);
+ return NULL;
+ }
+
+ WARN_ON(dma_addr != page_to_phys(p));
+
+ kbase_set_dma_addr(p, dma_addr);
+
+ return p;
+}
+
+static void kbase_mem_pool_free_page(struct kbase_mem_pool *pool,
+ struct page *p)
+{
+ struct device *dev = pool->kbdev->dev;
+ dma_addr_t dma_addr = kbase_dma_addr(p);
+
+ dma_unmap_page(dev, dma_addr, PAGE_SIZE, DMA_BIDIRECTIONAL);
+ kbase_clear_dma_addr(p);
+ __free_page(p);
+
+ pool_dbg(pool, "freed page to kernel\n");
+}
+
+static size_t kbase_mem_pool_shrink_locked(struct kbase_mem_pool *pool,
+ size_t nr_to_shrink)
+{
+ struct page *p;
+ size_t i;
+
+ lockdep_assert_held(&pool->pool_lock);
+
+ for (i = 0; i < nr_to_shrink && !kbase_mem_pool_is_empty(pool); i++) {
+ p = kbase_mem_pool_remove_locked(pool);
+ kbase_mem_pool_free_page(pool, p);
+ }
+
+ return i;
+}
+
+static size_t kbase_mem_pool_shrink(struct kbase_mem_pool *pool,
+ size_t nr_to_shrink)
+{
+ size_t nr_freed;
+
+ kbase_mem_pool_lock(pool);
+ nr_freed = kbase_mem_pool_shrink_locked(pool, nr_to_shrink);
+ kbase_mem_pool_unlock(pool);
+
+ return nr_freed;
+}
+
+int kbase_mem_pool_grow(struct kbase_mem_pool *pool,
+ size_t nr_to_grow)
+{
+ struct page *p;
+ size_t i;
+
+ for (i = 0; i < nr_to_grow; i++) {
+ p = kbase_mem_alloc_page(pool->kbdev);
+ if (!p)
+ return -ENOMEM;
+ kbase_mem_pool_add(pool, p);
+ }
+
+ return 0;
+}
+
+void kbase_mem_pool_trim(struct kbase_mem_pool *pool, size_t new_size)
+{
+ size_t cur_size;
+
+ cur_size = kbase_mem_pool_size(pool);
+
+ if (new_size > pool->max_size)
+ new_size = pool->max_size;
+
+ if (new_size < cur_size)
+ kbase_mem_pool_shrink(pool, cur_size - new_size);
+ else if (new_size > cur_size)
+ kbase_mem_pool_grow(pool, new_size - cur_size);
+}
+
+void kbase_mem_pool_set_max_size(struct kbase_mem_pool *pool, size_t max_size)
+{
+ size_t cur_size;
+ size_t nr_to_shrink;
+
+ kbase_mem_pool_lock(pool);
+
+ pool->max_size = max_size;
+
+ cur_size = kbase_mem_pool_size(pool);
+ if (max_size < cur_size) {
+ nr_to_shrink = cur_size - max_size;
+ kbase_mem_pool_shrink_locked(pool, nr_to_shrink);
+ }
+
+ kbase_mem_pool_unlock(pool);
+}
+
+
+static unsigned long kbase_mem_pool_reclaim_count_objects(struct shrinker *s,
+ struct shrink_control *sc)
+{
+ struct kbase_mem_pool *pool;
+
+ pool = container_of(s, struct kbase_mem_pool, reclaim);
+ pool_dbg(pool, "reclaim count: %zu\n", kbase_mem_pool_size(pool));
+ return kbase_mem_pool_size(pool);
+}
+
+static unsigned long kbase_mem_pool_reclaim_scan_objects(struct shrinker *s,
+ struct shrink_control *sc)
+{
+ struct kbase_mem_pool *pool;
+ unsigned long freed;
+
+ pool = container_of(s, struct kbase_mem_pool, reclaim);
+
+ pool_dbg(pool, "reclaim scan %ld:\n", sc->nr_to_scan);
+
+ freed = kbase_mem_pool_shrink(pool, sc->nr_to_scan);
+
+ pool_dbg(pool, "reclaim freed %ld pages\n", freed);
+
+ return freed;
+}
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 12, 0)
+static int kbase_mem_pool_reclaim_shrink(struct shrinker *s,
+ struct shrink_control *sc)
+{
+ if (sc->nr_to_scan == 0)
+ return kbase_mem_pool_reclaim_count_objects(s, sc);
+
+ return kbase_mem_pool_reclaim_scan_objects(s, sc);
+}
+#endif
+
+int kbase_mem_pool_init(struct kbase_mem_pool *pool,
+ size_t max_size,
+ struct kbase_device *kbdev,
+ struct kbase_mem_pool *next_pool)
+{
+ pool->cur_size = 0;
+ pool->max_size = max_size;
+ pool->kbdev = kbdev;
+ pool->next_pool = next_pool;
+
+ spin_lock_init(&pool->pool_lock);
+ INIT_LIST_HEAD(&pool->page_list);
+
+ /* Register shrinker */
+#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 12, 0)
+ pool->reclaim.shrink = kbase_mem_pool_reclaim_shrink;
+#else
+ pool->reclaim.count_objects = kbase_mem_pool_reclaim_count_objects;
+ pool->reclaim.scan_objects = kbase_mem_pool_reclaim_scan_objects;
+#endif
+ pool->reclaim.seeks = DEFAULT_SEEKS;
+ /* Kernel versions prior to 3.1:
+ * struct shrinker does not define batch */
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 1, 0)
+ pool->reclaim.batch = 0;
+#endif
+ register_shrinker(&pool->reclaim);
+
+ pool_dbg(pool, "initialized\n");
+
+ return 0;
+}
+
+void kbase_mem_pool_term(struct kbase_mem_pool *pool)
+{
+ struct kbase_mem_pool *next_pool = pool->next_pool;
+ struct page *p;
+ size_t nr_to_spill = 0;
+ LIST_HEAD(spill_list);
+ int i;
+
+ pool_dbg(pool, "terminate()\n");
+
+ unregister_shrinker(&pool->reclaim);
+
+ kbase_mem_pool_lock(pool);
+ pool->max_size = 0;
+
+ if (next_pool && !kbase_mem_pool_is_full(next_pool)) {
+ /* Spill to next pool (may overspill) */
+ nr_to_spill = kbase_mem_pool_capacity(next_pool);
+ nr_to_spill = min(kbase_mem_pool_size(pool), nr_to_spill);
+
+ /* Zero pages first without holding the next_pool lock */
+ for (i = 0; i < nr_to_spill; i++) {
+ p = kbase_mem_pool_remove_locked(pool);
+ kbase_mem_pool_zero_page(pool, p);
+ list_add(&p->lru, &spill_list);
+ }
+ }
+
+ while (!kbase_mem_pool_is_empty(pool)) {
+ /* Free remaining pages to kernel */
+ p = kbase_mem_pool_remove_locked(pool);
+ kbase_mem_pool_free_page(pool, p);
+ }
+
+ kbase_mem_pool_unlock(pool);
+
+ if (next_pool && nr_to_spill) {
+ /* Add new page list to next_pool */
+ kbase_mem_pool_add_list(next_pool, &spill_list, nr_to_spill);
+
+ pool_dbg(pool, "terminate() spilled %zu pages\n", nr_to_spill);
+ }
+
+ pool_dbg(pool, "terminated\n");
+}
+
+struct page *kbase_mem_pool_alloc(struct kbase_mem_pool *pool)
+{
+ struct page *p;
+
+ do {
+ pool_dbg(pool, "alloc()\n");
+ p = kbase_mem_pool_remove(pool);
+
+ if (p)
+ return p;
+
+ pool = pool->next_pool;
+ } while (pool);
+
+ return NULL;
+}
+
+void kbase_mem_pool_free(struct kbase_mem_pool *pool, struct page *p,
+ bool dirty)
+{
+ struct kbase_mem_pool *next_pool = pool->next_pool;
+
+ pool_dbg(pool, "free()\n");
+
+ if (!kbase_mem_pool_is_full(pool)) {
+ /* Add to our own pool */
+ if (dirty)
+ kbase_mem_pool_sync_page(pool, p);
+
+ kbase_mem_pool_add(pool, p);
+ } else if (next_pool && !kbase_mem_pool_is_full(next_pool)) {
+ /* Spill to next pool */
+ kbase_mem_pool_spill(next_pool, p);
+ } else {
+ /* Free page */
+ kbase_mem_pool_free_page(pool, p);
+ }
+}
+
+int kbase_mem_pool_alloc_pages(struct kbase_mem_pool *pool, size_t nr_pages,
+ phys_addr_t *pages)
+{
+ struct page *p;
+ size_t nr_from_pool;
+ size_t i;
+ int err = -ENOMEM;
+
+ pool_dbg(pool, "alloc_pages(%zu):\n", nr_pages);
+
+ /* Get pages from this pool */
+ kbase_mem_pool_lock(pool);
+ nr_from_pool = min(nr_pages, kbase_mem_pool_size(pool));
+ for (i = 0; i < nr_from_pool; i++) {
+ p = kbase_mem_pool_remove_locked(pool);
+ pages[i] = page_to_phys(p);
+ }
+ kbase_mem_pool_unlock(pool);
+
+ if (i != nr_pages && pool->next_pool) {
+ /* Allocate via next pool */
+ err = kbase_mem_pool_alloc_pages(pool->next_pool,
+ nr_pages - i, pages + i);
+
+ if (err)
+ goto err_rollback;
+
+ i += nr_pages - i;
+ }
+
+ /* Get any remaining pages from kernel */
+ for (; i < nr_pages; i++) {
+ p = kbase_mem_alloc_page(pool->kbdev);
+ if (!p)
+ goto err_rollback;
+ pages[i] = page_to_phys(p);
+ }
+
+ pool_dbg(pool, "alloc_pages(%zu) done\n", nr_pages);
+
+ return 0;
+
+err_rollback:
+ kbase_mem_pool_free_pages(pool, i, pages, NOT_DIRTY, NOT_RECLAIMED);
+ return err;
+}
+
+static void kbase_mem_pool_add_array(struct kbase_mem_pool *pool,
+ size_t nr_pages, phys_addr_t *pages, bool zero, bool sync)
+{
+ struct page *p;
+ size_t nr_to_pool = 0;
+ LIST_HEAD(new_page_list);
+ size_t i;
+
+ if (!nr_pages)
+ return;
+
+ pool_dbg(pool, "add_array(%zu, zero=%d, sync=%d):\n",
+ nr_pages, zero, sync);
+
+ /* Zero/sync pages first without holding the pool lock */
+ for (i = 0; i < nr_pages; i++) {
+ if (unlikely(!pages[i]))
+ continue;
+
+ p = phys_to_page(pages[i]);
+
+ if (zero)
+ kbase_mem_pool_zero_page(pool, p);
+ else if (sync)
+ kbase_mem_pool_sync_page(pool, p);
+
+ list_add(&p->lru, &new_page_list);
+ nr_to_pool++;
+ pages[i] = 0;
+ }
+
+ /* Add new page list to pool */
+ kbase_mem_pool_add_list(pool, &new_page_list, nr_to_pool);
+
+ pool_dbg(pool, "add_array(%zu) added %zu pages\n",
+ nr_pages, nr_to_pool);
+}
+
+void kbase_mem_pool_free_pages(struct kbase_mem_pool *pool, size_t nr_pages,
+ phys_addr_t *pages, bool dirty, bool reclaimed)
+{
+ struct kbase_mem_pool *next_pool = pool->next_pool;
+ struct page *p;
+ size_t nr_to_pool;
+ LIST_HEAD(to_pool_list);
+ size_t i = 0;
+
+ pool_dbg(pool, "free_pages(%zu):\n", nr_pages);
+
+ if (!reclaimed) {
+ /* Add to this pool */
+ nr_to_pool = kbase_mem_pool_capacity(pool);
+ nr_to_pool = min(nr_pages, nr_to_pool);
+
+ kbase_mem_pool_add_array(pool, nr_to_pool, pages, false, dirty);
+
+ i += nr_to_pool;
+
+ if (i != nr_pages && next_pool) {
+ /* Spill to next pool (may overspill) */
+ nr_to_pool = kbase_mem_pool_capacity(next_pool);
+ nr_to_pool = min(nr_pages - i, nr_to_pool);
+
+ kbase_mem_pool_add_array(next_pool, nr_to_pool,
+ pages + i, true, dirty);
+ i += nr_to_pool;
+ }
+ }
+
+ /* Free any remaining pages to kernel */
+ for (; i < nr_pages; i++) {
+ if (unlikely(!pages[i]))
+ continue;
+
+ p = phys_to_page(pages[i]);
+ if (reclaimed)
+ zone_page_state_add(-1, page_zone(p),
+ NR_SLAB_RECLAIMABLE);
+
+ kbase_mem_pool_free_page(pool, p);
+ pages[i] = 0;
+ }
+
+ pool_dbg(pool, "free_pages(%zu) done\n", nr_pages);
+}
diff --git a/midgard-0.r2p0/mali_kbase_mem_pool_debugfs.c b/midgard-0.r2p0/mali_kbase_mem_pool_debugfs.c
new file mode 100755
index 0000000..585fba0
--- /dev/null
+++ b/midgard-0.r2p0/mali_kbase_mem_pool_debugfs.c
@@ -0,0 +1,81 @@
+/*
+ *
+ * (C) COPYRIGHT 2014-2015 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained
+ * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
+ * Boston, MA 02110-1301, USA.
+ *
+ */
+
+
+
+#include <linux/debugfs.h>
+#include <linux/seq_file.h>
+
+#include <mali_kbase_mem_pool_debugfs.h>
+
+#ifdef CONFIG_DEBUG_FS
+
+static int kbase_mem_pool_debugfs_size_get(void *data, u64 *val)
+{
+ struct kbase_mem_pool *pool = (struct kbase_mem_pool *)data;
+
+ *val = kbase_mem_pool_size(pool);
+
+ return 0;
+}
+
+static int kbase_mem_pool_debugfs_size_set(void *data, u64 val)
+{
+ struct kbase_mem_pool *pool = (struct kbase_mem_pool *)data;
+
+ kbase_mem_pool_trim(pool, val);
+
+ return 0;
+}
+
+DEFINE_SIMPLE_ATTRIBUTE(kbase_mem_pool_debugfs_size_fops,
+ kbase_mem_pool_debugfs_size_get,
+ kbase_mem_pool_debugfs_size_set,
+ "%llu\n");
+
+static int kbase_mem_pool_debugfs_max_size_get(void *data, u64 *val)
+{
+ struct kbase_mem_pool *pool = (struct kbase_mem_pool *)data;
+
+ *val = kbase_mem_pool_max_size(pool);
+
+ return 0;
+}
+
+static int kbase_mem_pool_debugfs_max_size_set(void *data, u64 val)
+{
+ struct kbase_mem_pool *pool = (struct kbase_mem_pool *)data;
+
+ kbase_mem_pool_set_max_size(pool, val);
+
+ return 0;
+}
+
+DEFINE_SIMPLE_ATTRIBUTE(kbase_mem_pool_debugfs_max_size_fops,
+ kbase_mem_pool_debugfs_max_size_get,
+ kbase_mem_pool_debugfs_max_size_set,
+ "%llu\n");
+
+void kbase_mem_pool_debugfs_init(struct dentry *parent,
+ struct kbase_mem_pool *pool)
+{
+ debugfs_create_file("mem_pool_size", S_IRUGO | S_IWUSR, parent,
+ pool, &kbase_mem_pool_debugfs_size_fops);
+
+ debugfs_create_file("mem_pool_max_size", S_IRUGO | S_IWUSR, parent,
+ pool, &kbase_mem_pool_debugfs_max_size_fops);
+}
+
+#endif /* CONFIG_DEBUG_FS */
diff --git a/midgard-0.r2p0/mali_kbase_mem_pool_debugfs.h b/midgard-0.r2p0/mali_kbase_mem_pool_debugfs.h
new file mode 100755
index 0000000..1442854
--- /dev/null
+++ b/midgard-0.r2p0/mali_kbase_mem_pool_debugfs.h
@@ -0,0 +1,36 @@
+/*
+ *
+ * (C) COPYRIGHT 2014-2015 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained
+ * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
+ * Boston, MA 02110-1301, USA.
+ *
+ */
+
+
+
+#ifndef _KBASE_MEM_POOL_DEBUGFS_H
+#define _KBASE_MEM_POOL_DEBUGFS_H
+
+#include <mali_kbase.h>
+
+/**
+ * kbase_mem_pool_debugfs_init - add debugfs knobs for @pool
+ * @parent: Parent debugfs dentry
+ * @pool: Memory pool to control
+ *
+ * Adds two debugfs files under @parent:
+ * - mem_pool_size: get/set the current size of @pool
+ * - mem_pool_max_size: get/set the max size of @pool
+ */
+void kbase_mem_pool_debugfs_init(struct dentry *parent,
+ struct kbase_mem_pool *pool);
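+
+/*
+ * Illustrative usage sketch (not part of the driver): wiring the knobs up
+ * for a context-level pool under the context's debugfs directory. The
+ * 'mem_pool' field name on the context is an assumption for the example.
+ *
+ *   kbase_mem_pool_debugfs_init(kctx->kctx_dentry, &kctx->mem_pool);
+ */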
+
+#endif /*_KBASE_MEM_POOL_DEBUGFS_H*/
+
diff --git a/midgard-0.r2p0/mali_kbase_mem_profile_debugfs.c b/midgard-0.r2p0/mali_kbase_mem_profile_debugfs.c
new file mode 100755
index 0000000..092da9a
--- /dev/null
+++ b/midgard-0.r2p0/mali_kbase_mem_profile_debugfs.c
@@ -0,0 +1,119 @@
+/*
+ *
+ * (C) COPYRIGHT 2012-2016 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained
+ * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
+ * Boston, MA 02110-1301, USA.
+ *
+ */
+
+
+
+#include <mali_kbase.h>
+
+#ifdef CONFIG_DEBUG_FS
+
+/** Show callback for the @c mem_profile debugfs file.
+ *
+ * This function is called to get the contents of the @c mem_profile debugfs
+ * file. This is a report of current memory usage and distribution in userspace.
+ *
+ * @param sfile The debugfs entry
+ * @param data Data associated with the entry
+ *
+ * @return 0 if it successfully prints data into the debugfs entry file, non-zero otherwise
+ */
+static int kbasep_mem_profile_seq_show(struct seq_file *sfile, void *data)
+{
+ struct kbase_context *kctx = sfile->private;
+
+ mutex_lock(&kctx->mem_profile_lock);
+
+ seq_write(sfile, kctx->mem_profile_data, kctx->mem_profile_size);
+
+ seq_putc(sfile, '\n');
+
+ mutex_unlock(&kctx->mem_profile_lock);
+
+ return 0;
+}
+
+/*
+ * File operations related to debugfs entry for mem_profile
+ */
+static int kbasep_mem_profile_debugfs_open(struct inode *in, struct file *file)
+{
+ return single_open(file, kbasep_mem_profile_seq_show, in->i_private);
+}
+
+static const struct file_operations kbasep_mem_profile_debugfs_fops = {
+ .open = kbasep_mem_profile_debugfs_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+int kbasep_mem_profile_debugfs_insert(struct kbase_context *kctx, char *data,
+ size_t size)
+{
+ int err = 0;
+
+ mutex_lock(&kctx->mem_profile_lock);
+
+ dev_dbg(kctx->kbdev->dev, "initialised: %d",
+ kbase_ctx_flag(kctx, KCTX_MEM_PROFILE_INITIALIZED));
+
+ if (!kbase_ctx_flag(kctx, KCTX_MEM_PROFILE_INITIALIZED)) {
+ if (!debugfs_create_file("mem_profile", S_IRUGO,
+ kctx->kctx_dentry, kctx,
+ &kbasep_mem_profile_debugfs_fops)) {
+ err = -EAGAIN;
+ } else {
+ kbase_ctx_flag_set(kctx,
+ KCTX_MEM_PROFILE_INITIALIZED);
+ }
+ }
+
+ if (kbase_ctx_flag(kctx, KCTX_MEM_PROFILE_INITIALIZED)) {
+ kfree(kctx->mem_profile_data);
+ kctx->mem_profile_data = data;
+ kctx->mem_profile_size = size;
+ }
+
+ dev_dbg(kctx->kbdev->dev, "returning: %d, initialised: %d",
+ err, kbase_ctx_flag(kctx, KCTX_MEM_PROFILE_INITIALIZED));
+
+ mutex_unlock(&kctx->mem_profile_lock);
+
+ return err;
+}
+
+void kbasep_mem_profile_debugfs_remove(struct kbase_context *kctx)
+{
+ mutex_lock(&kctx->mem_profile_lock);
+
+ dev_dbg(kctx->kbdev->dev, "initialised: %d",
+ kbase_ctx_flag(kctx, KCTX_MEM_PROFILE_INITIALIZED));
+
+ kfree(kctx->mem_profile_data);
+ kctx->mem_profile_data = NULL;
+ kctx->mem_profile_size = 0;
+
+ mutex_unlock(&kctx->mem_profile_lock);
+}
+
+#else /* CONFIG_DEBUG_FS */
+
+int kbasep_mem_profile_debugfs_insert(struct kbase_context *kctx, char *data,
+ size_t size)
+{
+ kfree(data);
+ return 0;
+}
+#endif /* CONFIG_DEBUG_FS */
diff --git a/midgard-0.r2p0/mali_kbase_mem_profile_debugfs.h b/midgard-0.r2p0/mali_kbase_mem_profile_debugfs.h
new file mode 100755
index 0000000..a1dc2e0
--- /dev/null
+++ b/midgard-0.r2p0/mali_kbase_mem_profile_debugfs.h
@@ -0,0 +1,59 @@
+/*
+ *
+ * (C) COPYRIGHT 2012-2016 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained
+ * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
+ * Boston, MA 02110-1301, USA.
+ *
+ */
+
+
+
+
+
+/**
+ * @file mali_kbase_mem_profile_debugfs.h
+ * Header file for mem profile entries in debugfs
+ *
+ */
+
+#ifndef _KBASE_MEM_PROFILE_DEBUGFS_H
+#define _KBASE_MEM_PROFILE_DEBUGFS_H
+
+#include <linux/debugfs.h>
+#include <linux/seq_file.h>
+
+/**
+ * @brief Remove entry from Mali memory profile debugfs
+ */
+void kbasep_mem_profile_debugfs_remove(struct kbase_context *kctx);
+
+/**
+ * @brief Insert @p data into the debugfs file so it can be read by userspace
+ *
+ * The function takes ownership of @p data and frees it later when new data
+ * is inserted.
+ *
+ * If the debugfs entry corresponding to the @p kctx doesn't exist,
+ * an attempt will be made to create it.
+ *
+ * @param kctx The context whose debugfs file @p data should be inserted into
+ * @param data A NULL-terminated string to be inserted into the debugfs file,
+ *             without the trailing newline character
+ * @param size The length of the @p data string
+ * @return 0 if @p data was inserted correctly,
+ *         -EAGAIN in case of error
+ * @post The @c KCTX_MEM_PROFILE_INITIALIZED context flag will be set
+ *       the first time this function succeeds.
+ */
+int kbasep_mem_profile_debugfs_insert(struct kbase_context *kctx, char *data,
+ size_t size);
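+
+/*
+ * Illustrative usage sketch (not part of the driver): a caller formats a
+ * report into a heap buffer and hands it over. Ownership of the buffer passes
+ * to the debugfs layer on success, so a caller would typically free it itself
+ * only if the insert fails. The 'report' source below is hypothetical.
+ *
+ *	char *buf = kmalloc(KBASE_MEM_PROFILE_MAX_BUF_SIZE, GFP_KERNEL);
+ *	size_t len;
+ *
+ *	if (!buf)
+ *		return -ENOMEM;
+ *	len = scnprintf(buf, KBASE_MEM_PROFILE_MAX_BUF_SIZE, "%s", report);
+ *	if (kbasep_mem_profile_debugfs_insert(kctx, buf, len))
+ *		kfree(buf);
+ */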
+
+#endif /*_KBASE_MEM_PROFILE_DEBUGFS_H*/
+
diff --git a/midgard-0.r2p0/mali_kbase_mem_profile_debugfs_buf_size.h b/midgard-0.r2p0/mali_kbase_mem_profile_debugfs_buf_size.h
new file mode 100755
index 0000000..82f0702
--- /dev/null
+++ b/midgard-0.r2p0/mali_kbase_mem_profile_debugfs_buf_size.h
@@ -0,0 +1,33 @@
+/*
+ *
+ * (C) COPYRIGHT 2014 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained
+ * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
+ * Boston, MA 02110-1301, USA.
+ *
+ */
+
+
+
+/**
+ * @file mali_kbase_mem_profile_debugfs_buf_size.h
+ * Header file defining the size of the buffer used to accumulate the histogram report text
+ */
+
+#ifndef _KBASE_MEM_PROFILE_DEBUGFS_BUF_SIZE_H_
+#define _KBASE_MEM_PROFILE_DEBUGFS_BUF_SIZE_H_
+
+/**
+ * The size of the buffer used to accumulate the histogram report text
+ * @see @ref CCTXP_HIST_BUF_SIZE_MAX_LENGTH_REPORT
+ */
+#define KBASE_MEM_PROFILE_MAX_BUF_SIZE ((size_t) (64 + ((80 + (56 * 64)) * 15) + 56))
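+
+/*
+ * For reference, the expression above evaluates to:
+ *   64 + ((80 + (56 * 64)) * 15) + 56
+ * = 64 + ((80 + 3584) * 15) + 56
+ * = 64 + (3664 * 15) + 56
+ * = 64 + 54960 + 56
+ * = 55080 bytes
+ */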
+
+#endif /*_KBASE_MEM_PROFILE_DEBUGFS_BUF_SIZE_H_*/
+
diff --git a/midgard-0.r2p0/mali_kbase_mmu.c b/midgard-0.r2p0/mali_kbase_mmu.c
new file mode 100755
index 0000000..da1689c
--- /dev/null
+++ b/midgard-0.r2p0/mali_kbase_mmu.c
@@ -0,0 +1,2059 @@
+/*
+ *
+ * (C) COPYRIGHT 2010-2016 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained
+ * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
+ * Boston, MA 02110-1301, USA.
+ *
+ */
+
+
+
+
+
+/**
+ * @file mali_kbase_mmu.c
+ * Base kernel MMU management.
+ */
+
+/* #define DEBUG 1 */
+#include <linux/kernel.h>
+#include <linux/dma-mapping.h>
+#include <mali_kbase.h>
+#include <mali_midg_regmap.h>
+#if defined(CONFIG_MALI_GATOR_SUPPORT)
+#include <mali_kbase_gator.h>
+#endif
+#include <mali_kbase_tlstream.h>
+#include <mali_kbase_instr_defs.h>
+#include <mali_kbase_debug.h>
+
+#define beenthere(kctx, f, a...) dev_dbg(kctx->kbdev->dev, "%s:" f, __func__, ##a)
+
+#include <mali_kbase_defs.h>
+#include <mali_kbase_hw.h>
+#include <mali_kbase_mmu_hw.h>
+#include <mali_kbase_hwaccess_jm.h>
+#include <mali_kbase_time.h>
+#include <mali_kbase_mem.h>
+
+#define KBASE_MMU_PAGE_ENTRIES 512
+
+/**
+ * kbase_mmu_flush_invalidate() - Flush and invalidate the GPU caches.
+ * @kctx: The KBase context.
+ * @vpfn: The virtual page frame number to start the flush on.
+ * @nr: The number of pages to flush.
+ * @sync: Set if the operation should be synchronous or not.
+ *
+ * Issue a cache flush + invalidate to the GPU caches and invalidate the TLBs.
+ *
+ * If sync is not set then transactions still in flight when the flush is issued
+ * may use the old page tables, and the data they write will not be written out
+ * to memory. This function returns after the flush has been issued but
+ * before all accesses which might affect the flushed region have completed.
+ *
+ * If sync is set then accesses in the flushed region will be drained
+ * before data is flushed and invalidated through L1, L2 and into memory,
+ * after which point this function will return.
+ */
+static void kbase_mmu_flush_invalidate(struct kbase_context *kctx,
+ u64 vpfn, size_t nr, bool sync);
+
+/**
+ * kbase_mmu_sync_pgd - sync page directory to memory
+ * @kbdev: Device pointer.
+ * @handle: Address of DMA region.
+ * @size: Size of the region to sync.
+ *
+ * This should be called after each page directory update.
+ */
+
+static void kbase_mmu_sync_pgd(struct kbase_device *kbdev,
+ dma_addr_t handle, size_t size)
+{
+	/* If the page table is not coherent then ensure the GPU can read
+	 * the pages from memory
+ */
+ if (kbdev->system_coherency != COHERENCY_ACE)
+ dma_sync_single_for_device(kbdev->dev, handle, size,
+ DMA_TO_DEVICE);
+}
+
+/*
+ * Definitions:
+ * - PGD: Page Directory.
+ * - PTE: Page Table Entry. A 64bit value pointing to the next
+ * level of translation
+ * - ATE: Address Translation Entry. A 64bit value pointing to
+ * a 4kB physical page.
+ */
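+
+/*
+ * Illustrative layout (assuming the 4-level, 4kB-page configuration used
+ * here): each page-table page holds KBASE_MMU_PAGE_ENTRIES = 512 = 2^9
+ * 64-bit entries, so each level consumes 9 bits of the virtual page frame
+ * number and the bottom 12 bits of the address are the page offset:
+ *
+ *   GPU VA: [ L0: 9 bits | L1: 9 bits | L2: 9 bits | L3: 9 bits | 12-bit offset ]
+ *
+ * which matches the "vpfn >>= (3 - level) * 9; vpfn &= 0x1FF" index
+ * extraction used by the walkers below.
+ */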
+
+static void kbase_mmu_report_fault_and_kill(struct kbase_context *kctx,
+ struct kbase_as *as, const char *reason_str);
+
+
+static size_t make_multiple(size_t minimum, size_t multiple)
+{
+ size_t remainder = minimum % multiple;
+
+ if (remainder == 0)
+ return minimum;
+
+ return minimum + multiple - remainder;
+}
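+
+/*
+ * For example: make_multiple(37, 64) == 64, make_multiple(64, 64) == 64 and
+ * make_multiple(65, 64) == 128, i.e. 'minimum' rounded up to the next
+ * multiple of 'multiple'.
+ */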
+
+void page_fault_worker(struct work_struct *data)
+{
+ u64 fault_pfn;
+ u32 fault_status;
+ size_t new_pages;
+ size_t fault_rel_pfn;
+ struct kbase_as *faulting_as;
+ int as_no;
+ struct kbase_context *kctx;
+ struct kbase_device *kbdev;
+ struct kbase_va_region *region;
+ int err;
+ bool grown = false;
+
+ faulting_as = container_of(data, struct kbase_as, work_pagefault);
+ fault_pfn = faulting_as->fault_addr >> PAGE_SHIFT;
+ as_no = faulting_as->number;
+
+ kbdev = container_of(faulting_as, struct kbase_device, as[as_no]);
+
+ /* Grab the context that was already refcounted in kbase_mmu_interrupt().
+ * Therefore, it cannot be scheduled out of this AS until we explicitly release it
+ */
+ kctx = kbasep_js_runpool_lookup_ctx_noretain(kbdev, as_no);
+ if (WARN_ON(!kctx)) {
+ atomic_dec(&kbdev->faults_pending);
+ return;
+ }
+
+ KBASE_DEBUG_ASSERT(kctx->kbdev == kbdev);
+
+ if (unlikely(faulting_as->protected_mode))
+ {
+ kbase_mmu_report_fault_and_kill(kctx, faulting_as,
+ "Protected mode fault");
+ kbase_mmu_hw_clear_fault(kbdev, faulting_as, kctx,
+ KBASE_MMU_FAULT_TYPE_PAGE);
+
+ goto fault_done;
+ }
+
+ fault_status = faulting_as->fault_status;
+ switch (fault_status & AS_FAULTSTATUS_EXCEPTION_CODE_MASK) {
+
+ case AS_FAULTSTATUS_EXCEPTION_CODE_TRANSLATION_FAULT:
+ /* need to check against the region to handle this one */
+ break;
+
+ case AS_FAULTSTATUS_EXCEPTION_CODE_PERMISSION_FAULT:
+ kbase_mmu_report_fault_and_kill(kctx, faulting_as,
+ "Permission failure");
+ goto fault_done;
+
+ case AS_FAULTSTATUS_EXCEPTION_CODE_TRANSTAB_BUS_FAULT:
+ kbase_mmu_report_fault_and_kill(kctx, faulting_as,
+ "Translation table bus fault");
+ goto fault_done;
+
+ case AS_FAULTSTATUS_EXCEPTION_CODE_ACCESS_FLAG:
+ /* nothing to do, but we don't expect this fault currently */
+ dev_warn(kbdev->dev, "Access flag unexpectedly set");
+ goto fault_done;
+
+#ifdef CONFIG_MALI_GPU_MMU_AARCH64
+ case AS_FAULTSTATUS_EXCEPTION_CODE_ADDRESS_SIZE_FAULT:
+
+ kbase_mmu_report_fault_and_kill(kctx, faulting_as,
+ "Address size fault");
+ goto fault_done;
+
+ case AS_FAULTSTATUS_EXCEPTION_CODE_MEMORY_ATTRIBUTES_FAULT:
+ kbase_mmu_report_fault_and_kill(kctx, faulting_as,
+ "Memory attributes fault");
+ goto fault_done;
+#endif /* CONFIG_MALI_GPU_MMU_AARCH64 */
+
+ default:
+ kbase_mmu_report_fault_and_kill(kctx, faulting_as,
+ "Unknown fault code");
+ goto fault_done;
+ }
+
+ /* so we have a translation fault, let's see if it is for growable
+ * memory */
+ kbase_gpu_vm_lock(kctx);
+
+ region = kbase_region_tracker_find_region_enclosing_address(kctx,
+ faulting_as->fault_addr);
+ if (!region || region->flags & KBASE_REG_FREE) {
+ kbase_gpu_vm_unlock(kctx);
+ kbase_mmu_report_fault_and_kill(kctx, faulting_as,
+ "Memory is not mapped on the GPU");
+ goto fault_done;
+ }
+
+ if ((region->flags & GROWABLE_FLAGS_REQUIRED)
+ != GROWABLE_FLAGS_REQUIRED) {
+ kbase_gpu_vm_unlock(kctx);
+ kbase_mmu_report_fault_and_kill(kctx, faulting_as,
+ "Memory is not growable");
+ goto fault_done;
+ }
+
+ if ((region->flags & KBASE_REG_DONT_NEED)) {
+ kbase_gpu_vm_unlock(kctx);
+ kbase_mmu_report_fault_and_kill(kctx, faulting_as,
+				"DONT_NEED memory cannot be grown");
+ goto fault_done;
+ }
+
+ /* find the size we need to grow it by */
+	/* we know the result fits in a size_t because kbase_region_tracker_find_region_enclosing_address
+	 * validated that the fault address is within a size_t of the start_pfn */
+ fault_rel_pfn = fault_pfn - region->start_pfn;
+
+ if (fault_rel_pfn < kbase_reg_current_backed_size(region)) {
+ dev_dbg(kbdev->dev, "Page fault @ 0x%llx in allocated region 0x%llx-0x%llx of growable TMEM: Ignoring",
+ faulting_as->fault_addr, region->start_pfn,
+ region->start_pfn +
+ kbase_reg_current_backed_size(region));
+
+ mutex_lock(&kbdev->mmu_hw_mutex);
+
+ kbase_mmu_hw_clear_fault(kbdev, faulting_as, kctx,
+ KBASE_MMU_FAULT_TYPE_PAGE);
+ /* [1] in case another page fault occurred while we were
+ * handling the (duplicate) page fault we need to ensure we
+		 * don't lose the other page fault as a result of us clearing
+ * the MMU IRQ. Therefore, after we clear the MMU IRQ we send
+ * an UNLOCK command that will retry any stalled memory
+ * transaction (which should cause the other page fault to be
+ * raised again).
+ */
+ kbase_mmu_hw_do_operation(kbdev, faulting_as, NULL, 0, 0,
+ AS_COMMAND_UNLOCK, 1);
+
+ mutex_unlock(&kbdev->mmu_hw_mutex);
+
+ kbase_mmu_hw_enable_fault(kbdev, faulting_as, kctx,
+ KBASE_MMU_FAULT_TYPE_PAGE);
+ kbase_gpu_vm_unlock(kctx);
+
+ goto fault_done;
+ }
+
+ new_pages = make_multiple(fault_rel_pfn -
+ kbase_reg_current_backed_size(region) + 1,
+ region->extent);
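+	/*
+	 * Worked example (illustrative numbers): if the region currently has
+	 * 64 pages backed, its extent is 64 and the fault hits relative PFN
+	 * 100, then new_pages = make_multiple(100 - 64 + 1, 64)
+	 *                     = make_multiple(37, 64) = 64,
+	 * i.e. the backing grows by one whole extent.
+	 */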
+
+ /* cap to max vsize */
+ if (new_pages + kbase_reg_current_backed_size(region) >
+ region->nr_pages)
+ new_pages = region->nr_pages -
+ kbase_reg_current_backed_size(region);
+
+ if (0 == new_pages) {
+ mutex_lock(&kbdev->mmu_hw_mutex);
+
+ /* Duplicate of a fault we've already handled, nothing to do */
+ kbase_mmu_hw_clear_fault(kbdev, faulting_as, kctx,
+ KBASE_MMU_FAULT_TYPE_PAGE);
+ /* See comment [1] about UNLOCK usage */
+ kbase_mmu_hw_do_operation(kbdev, faulting_as, NULL, 0, 0,
+ AS_COMMAND_UNLOCK, 1);
+
+ mutex_unlock(&kbdev->mmu_hw_mutex);
+
+ kbase_mmu_hw_enable_fault(kbdev, faulting_as, kctx,
+ KBASE_MMU_FAULT_TYPE_PAGE);
+ kbase_gpu_vm_unlock(kctx);
+ goto fault_done;
+ }
+
+ if (kbase_alloc_phy_pages_helper(region->gpu_alloc, new_pages) == 0) {
+ if (region->gpu_alloc != region->cpu_alloc) {
+ if (kbase_alloc_phy_pages_helper(
+ region->cpu_alloc, new_pages) == 0) {
+ grown = true;
+ } else {
+ kbase_free_phy_pages_helper(region->gpu_alloc,
+ new_pages);
+ }
+ } else {
+ grown = true;
+ }
+ }
+
+
+ if (grown) {
+ u64 pfn_offset;
+ u32 op;
+
+ /* alloc success */
+ KBASE_DEBUG_ASSERT(kbase_reg_current_backed_size(region) <= region->nr_pages);
+
+ /* set up the new pages */
+ pfn_offset = kbase_reg_current_backed_size(region) - new_pages;
+ /*
+ * Note:
+ * Issuing an MMU operation will unlock the MMU and cause the
+ * translation to be replayed. If the page insertion fails then
+		 * rather than trying to continue, the context should be killed,
+		 * so the no_flush version of insert_pages is used, which allows
+ * us to unlock the MMU as we see fit.
+ */
+ err = kbase_mmu_insert_pages_no_flush(kctx,
+ region->start_pfn + pfn_offset,
+ &kbase_get_gpu_phy_pages(region)[pfn_offset],
+ new_pages, region->flags);
+ if (err) {
+ kbase_free_phy_pages_helper(region->gpu_alloc, new_pages);
+ if (region->gpu_alloc != region->cpu_alloc)
+ kbase_free_phy_pages_helper(region->cpu_alloc,
+ new_pages);
+ kbase_gpu_vm_unlock(kctx);
+ /* The locked VA region will be unlocked and the cache invalidated in here */
+ kbase_mmu_report_fault_and_kill(kctx, faulting_as,
+ "Page table update failure");
+ goto fault_done;
+ }
+#if defined(CONFIG_MALI_GATOR_SUPPORT)
+ kbase_trace_mali_page_fault_insert_pages(as_no, new_pages);
+#endif
+ kbase_tlstream_aux_pagefault(kctx->id, (u64)new_pages);
+
+ /* AS transaction begin */
+ mutex_lock(&kbdev->mmu_hw_mutex);
+
+ /* flush L2 and unlock the VA (resumes the MMU) */
+ if (kbase_hw_has_issue(kbdev, BASE_HW_ISSUE_6367))
+ op = AS_COMMAND_FLUSH;
+ else
+ op = AS_COMMAND_FLUSH_PT;
+
+ /* clear MMU interrupt - this needs to be done after updating
+ * the page tables but before issuing a FLUSH command. The
+ * FLUSH cmd has a side effect that it restarts stalled memory
+ * transactions in other address spaces which may cause
+ * another fault to occur. If we didn't clear the interrupt at
+ * this stage a new IRQ might not be raised when the GPU finds
+ * a MMU IRQ is already pending.
+ */
+ kbase_mmu_hw_clear_fault(kbdev, faulting_as, kctx,
+ KBASE_MMU_FAULT_TYPE_PAGE);
+
+ kbase_mmu_hw_do_operation(kbdev, faulting_as, kctx,
+ faulting_as->fault_addr >> PAGE_SHIFT,
+ new_pages,
+ op, 1);
+
+ mutex_unlock(&kbdev->mmu_hw_mutex);
+ /* AS transaction end */
+
+ /* reenable this in the mask */
+ kbase_mmu_hw_enable_fault(kbdev, faulting_as, kctx,
+ KBASE_MMU_FAULT_TYPE_PAGE);
+ kbase_gpu_vm_unlock(kctx);
+ } else {
+ /* failed to extend, handle as a normal PF */
+ kbase_gpu_vm_unlock(kctx);
+ kbase_mmu_report_fault_and_kill(kctx, faulting_as,
+ "Page allocation failure");
+ }
+
+fault_done:
+ /*
+ * By this point, the fault was handled in some way,
+ * so release the ctx refcount
+ */
+ kbasep_js_runpool_release_ctx(kbdev, kctx);
+
+ atomic_dec(&kbdev->faults_pending);
+}
+
+phys_addr_t kbase_mmu_alloc_pgd(struct kbase_context *kctx)
+{
+ u64 *page;
+ int i;
+ struct page *p;
+ int new_page_count __maybe_unused;
+
+ KBASE_DEBUG_ASSERT(NULL != kctx);
+ new_page_count = kbase_atomic_add_pages(1, &kctx->used_pages);
+ kbase_atomic_add_pages(1, &kctx->kbdev->memdev.used_pages);
+
+ p = kbase_mem_pool_alloc(&kctx->mem_pool);
+ if (!p)
+ goto sub_pages;
+
+ kbase_tlstream_aux_pagesalloc(
+ (u32)kctx->id,
+ (u64)new_page_count);
+
+ page = kmap(p);
+ if (NULL == page)
+ goto alloc_free;
+
+ kbase_process_page_usage_inc(kctx, 1);
+
+ for (i = 0; i < KBASE_MMU_PAGE_ENTRIES; i++)
+ kctx->kbdev->mmu_mode->entry_invalidate(&page[i]);
+
+ kbase_mmu_sync_pgd(kctx->kbdev, kbase_dma_addr(p), PAGE_SIZE);
+
+ kunmap(p);
+ return page_to_phys(p);
+
+alloc_free:
+ kbase_mem_pool_free(&kctx->mem_pool, p, false);
+sub_pages:
+ kbase_atomic_sub_pages(1, &kctx->used_pages);
+ kbase_atomic_sub_pages(1, &kctx->kbdev->memdev.used_pages);
+
+ return 0;
+}
+
+KBASE_EXPORT_TEST_API(kbase_mmu_alloc_pgd);
+
+/* Given PGD PFN for level N, return PGD PFN for level N+1, allocating the
+ * new table from the pool if needed and possible
+ */
+static int mmu_get_next_pgd(struct kbase_context *kctx,
+ phys_addr_t *pgd, u64 vpfn, int level)
+{
+ u64 *page;
+ phys_addr_t target_pgd;
+ struct page *p;
+
+ KBASE_DEBUG_ASSERT(*pgd);
+ KBASE_DEBUG_ASSERT(NULL != kctx);
+
+ lockdep_assert_held(&kctx->mmu_lock);
+
+ /*
+ * Architecture spec defines level-0 as being the top-most.
+ * This is a bit unfortunate here, but we keep the same convention.
+ */
+ vpfn >>= (3 - level) * 9;
+ vpfn &= 0x1FF;
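+	/*
+	 * e.g. for level 1 this selects bits [26:18] of the original vpfn,
+	 * for level 2 bits [17:9], and for level 3 (the bottom level)
+	 * bits [8:0].
+	 */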
+
+ p = pfn_to_page(PFN_DOWN(*pgd));
+ page = kmap(p);
+ if (NULL == page) {
+ dev_warn(kctx->kbdev->dev, "mmu_get_next_pgd: kmap failure\n");
+ return -EINVAL;
+ }
+
+ target_pgd = kctx->kbdev->mmu_mode->pte_to_phy_addr(page[vpfn]);
+
+ if (!target_pgd) {
+ target_pgd = kbase_mmu_alloc_pgd(kctx);
+ if (!target_pgd) {
+ dev_dbg(kctx->kbdev->dev, "mmu_get_next_pgd: kbase_mmu_alloc_pgd failure\n");
+ kunmap(p);
+ return -ENOMEM;
+ }
+
+ kctx->kbdev->mmu_mode->entry_set_pte(&page[vpfn], target_pgd);
+
+ kbase_mmu_sync_pgd(kctx->kbdev, kbase_dma_addr(p), PAGE_SIZE);
+ /* Rely on the caller to update the address space flags. */
+ }
+
+ kunmap(p);
+ *pgd = target_pgd;
+
+ return 0;
+}
+
+static int mmu_get_bottom_pgd(struct kbase_context *kctx,
+ u64 vpfn, phys_addr_t *out_pgd)
+{
+ phys_addr_t pgd;
+ int l;
+
+ lockdep_assert_held(&kctx->mmu_lock);
+
+ pgd = kctx->pgd;
+ for (l = MIDGARD_MMU_TOPLEVEL; l < MIDGARD_MMU_BOTTOMLEVEL; l++) {
+ int err = mmu_get_next_pgd(kctx, &pgd, vpfn, l);
+ /* Handle failure condition */
+ if (err) {
+ dev_dbg(kctx->kbdev->dev, "mmu_get_bottom_pgd: mmu_get_next_pgd failure\n");
+ return err;
+ }
+ }
+
+ *out_pgd = pgd;
+
+ return 0;
+}
+
+static phys_addr_t mmu_insert_pages_recover_get_next_pgd(struct kbase_context *kctx, phys_addr_t pgd, u64 vpfn, int level)
+{
+ u64 *page;
+ phys_addr_t target_pgd;
+
+ KBASE_DEBUG_ASSERT(pgd);
+ KBASE_DEBUG_ASSERT(NULL != kctx);
+
+ lockdep_assert_held(&kctx->mmu_lock);
+
+ /*
+ * Architecture spec defines level-0 as being the top-most.
+ * This is a bit unfortunate here, but we keep the same convention.
+ */
+ vpfn >>= (3 - level) * 9;
+ vpfn &= 0x1FF;
+
+ page = kmap_atomic(pfn_to_page(PFN_DOWN(pgd)));
+ /* kmap_atomic should NEVER fail */
+ KBASE_DEBUG_ASSERT(NULL != page);
+
+ target_pgd = kctx->kbdev->mmu_mode->pte_to_phy_addr(page[vpfn]);
+ /* As we are recovering from what has already been set up, we should have a target_pgd */
+ KBASE_DEBUG_ASSERT(0 != target_pgd);
+ kunmap_atomic(page);
+ return target_pgd;
+}
+
+static phys_addr_t mmu_insert_pages_recover_get_bottom_pgd(struct kbase_context *kctx, u64 vpfn)
+{
+ phys_addr_t pgd;
+ int l;
+
+ lockdep_assert_held(&kctx->mmu_lock);
+
+ pgd = kctx->pgd;
+
+ for (l = MIDGARD_MMU_TOPLEVEL; l < MIDGARD_MMU_BOTTOMLEVEL; l++) {
+ pgd = mmu_insert_pages_recover_get_next_pgd(kctx, pgd, vpfn, l);
+ /* Should never fail */
+ KBASE_DEBUG_ASSERT(0 != pgd);
+ }
+
+ return pgd;
+}
+
+static void mmu_insert_pages_failure_recovery(struct kbase_context *kctx, u64 vpfn,
+ size_t nr)
+{
+ phys_addr_t pgd;
+ u64 *pgd_page;
+ struct kbase_mmu_mode const *mmu_mode;
+
+ KBASE_DEBUG_ASSERT(NULL != kctx);
+ KBASE_DEBUG_ASSERT(0 != vpfn);
+ /* 64-bit address range is the max */
+ KBASE_DEBUG_ASSERT(vpfn <= (U64_MAX / PAGE_SIZE));
+
+ lockdep_assert_held(&kctx->mmu_lock);
+
+ mmu_mode = kctx->kbdev->mmu_mode;
+
+ while (nr) {
+ unsigned int i;
+ unsigned int index = vpfn & 0x1FF;
+ unsigned int count = KBASE_MMU_PAGE_ENTRIES - index;
+ struct page *p;
+
+ if (count > nr)
+ count = nr;
+
+ pgd = mmu_insert_pages_recover_get_bottom_pgd(kctx, vpfn);
+ KBASE_DEBUG_ASSERT(0 != pgd);
+
+ p = pfn_to_page(PFN_DOWN(pgd));
+
+ pgd_page = kmap_atomic(p);
+ KBASE_DEBUG_ASSERT(NULL != pgd_page);
+
+ /* Invalidate the entries we added */
+ for (i = 0; i < count; i++)
+ mmu_mode->entry_invalidate(&pgd_page[index + i]);
+
+ vpfn += count;
+ nr -= count;
+
+ kbase_mmu_sync_pgd(kctx->kbdev, kbase_dma_addr(p), PAGE_SIZE);
+
+ kunmap_atomic(pgd_page);
+ }
+}
+
+/*
+ * Map the single page 'phys' 'nr' times, starting at GPU PFN 'vpfn'
+ */
+int kbase_mmu_insert_single_page(struct kbase_context *kctx, u64 vpfn,
+ phys_addr_t phys, size_t nr,
+ unsigned long flags)
+{
+ phys_addr_t pgd;
+ u64 *pgd_page;
+ /* In case the insert_single_page only partially completes we need to be
+ * able to recover */
+ bool recover_required = false;
+ u64 recover_vpfn = vpfn;
+ size_t recover_count = 0;
+ size_t remain = nr;
+ int err;
+
+ KBASE_DEBUG_ASSERT(NULL != kctx);
+ KBASE_DEBUG_ASSERT(0 != vpfn);
+ /* 64-bit address range is the max */
+ KBASE_DEBUG_ASSERT(vpfn <= (U64_MAX / PAGE_SIZE));
+
+ /* Early out if there is nothing to do */
+ if (nr == 0)
+ return 0;
+
+ mutex_lock(&kctx->mmu_lock);
+
+ while (remain) {
+ unsigned int i;
+ unsigned int index = vpfn & 0x1FF;
+ unsigned int count = KBASE_MMU_PAGE_ENTRIES - index;
+ struct page *p;
+
+ if (count > remain)
+ count = remain;
+
+ /*
+		 * Repeatedly calling mmu_get_bottom_pgd() is clearly
+ * suboptimal. We don't have to re-parse the whole tree
+ * each time (just cache the l0-l2 sequence).
+ * On the other hand, it's only a gain when we map more than
+ * 256 pages at once (on average). Do we really care?
+ */
+ do {
+ err = mmu_get_bottom_pgd(kctx, vpfn, &pgd);
+ if (err != -ENOMEM)
+ break;
+ /* Fill the memory pool with enough pages for
+ * the page walk to succeed
+ */
+ mutex_unlock(&kctx->mmu_lock);
+ err = kbase_mem_pool_grow(&kctx->mem_pool,
+ MIDGARD_MMU_BOTTOMLEVEL);
+ mutex_lock(&kctx->mmu_lock);
+ } while (!err);
+ if (err) {
+			dev_warn(kctx->kbdev->dev, "kbase_mmu_insert_single_page: mmu_get_bottom_pgd failure\n");
+ if (recover_required) {
+ /* Invalidate the pages we have partially
+ * completed */
+ mmu_insert_pages_failure_recovery(kctx,
+ recover_vpfn,
+ recover_count);
+ }
+ goto fail_unlock;
+ }
+
+ p = pfn_to_page(PFN_DOWN(pgd));
+ pgd_page = kmap(p);
+ if (!pgd_page) {
+			dev_warn(kctx->kbdev->dev, "kbase_mmu_insert_single_page: kmap failure\n");
+ if (recover_required) {
+ /* Invalidate the pages we have partially
+ * completed */
+ mmu_insert_pages_failure_recovery(kctx,
+ recover_vpfn,
+ recover_count);
+ }
+ err = -ENOMEM;
+ goto fail_unlock;
+ }
+
+ for (i = 0; i < count; i++) {
+ unsigned int ofs = index + i;
+
+ KBASE_DEBUG_ASSERT(0 == (pgd_page[ofs] & 1UL));
+ kctx->kbdev->mmu_mode->entry_set_ate(&pgd_page[ofs],
+ phys, flags);
+ }
+
+ vpfn += count;
+ remain -= count;
+
+ kbase_mmu_sync_pgd(kctx->kbdev,
+ kbase_dma_addr(p) + (index * sizeof(u64)),
+ count * sizeof(u64));
+
+ kunmap(p);
+ /* We have started modifying the page table.
+ * If further pages need inserting and fail we need to undo what
+ * has already taken place */
+ recover_required = true;
+ recover_count += count;
+ }
+ mutex_unlock(&kctx->mmu_lock);
+ kbase_mmu_flush_invalidate(kctx, vpfn, nr, false);
+ return 0;
+
+fail_unlock:
+ mutex_unlock(&kctx->mmu_lock);
+ kbase_mmu_flush_invalidate(kctx, vpfn, nr, false);
+ return err;
+}
+
+int kbase_mmu_insert_pages_no_flush(struct kbase_context *kctx, u64 vpfn,
+ phys_addr_t *phys, size_t nr,
+ unsigned long flags)
+{
+ phys_addr_t pgd;
+ u64 *pgd_page;
+ /* In case the insert_pages only partially completes we need to be able
+ * to recover */
+ bool recover_required = false;
+ u64 recover_vpfn = vpfn;
+ size_t recover_count = 0;
+ size_t remain = nr;
+ int err;
+
+ KBASE_DEBUG_ASSERT(NULL != kctx);
+ KBASE_DEBUG_ASSERT(0 != vpfn);
+ /* 64-bit address range is the max */
+ KBASE_DEBUG_ASSERT(vpfn <= (U64_MAX / PAGE_SIZE));
+
+ /* Early out if there is nothing to do */
+ if (nr == 0)
+ return 0;
+
+ mutex_lock(&kctx->mmu_lock);
+
+ while (remain) {
+ unsigned int i;
+ unsigned int index = vpfn & 0x1FF;
+ unsigned int count = KBASE_MMU_PAGE_ENTRIES - index;
+ struct page *p;
+
+ if (count > remain)
+ count = remain;
+
+ /*
+		 * Repeatedly calling mmu_get_bottom_pgd() is clearly
+ * suboptimal. We don't have to re-parse the whole tree
+ * each time (just cache the l0-l2 sequence).
+ * On the other hand, it's only a gain when we map more than
+ * 256 pages at once (on average). Do we really care?
+ */
+ do {
+ err = mmu_get_bottom_pgd(kctx, vpfn, &pgd);
+ if (err != -ENOMEM)
+ break;
+ /* Fill the memory pool with enough pages for
+ * the page walk to succeed
+ */
+ mutex_unlock(&kctx->mmu_lock);
+ err = kbase_mem_pool_grow(&kctx->mem_pool,
+ MIDGARD_MMU_BOTTOMLEVEL);
+ mutex_lock(&kctx->mmu_lock);
+ } while (!err);
+ if (err) {
+ dev_warn(kctx->kbdev->dev, "kbase_mmu_insert_pages: mmu_get_bottom_pgd failure\n");
+ if (recover_required) {
+ /* Invalidate the pages we have partially
+ * completed */
+ mmu_insert_pages_failure_recovery(kctx,
+ recover_vpfn,
+ recover_count);
+ }
+ goto fail_unlock;
+ }
+
+ p = pfn_to_page(PFN_DOWN(pgd));
+ pgd_page = kmap(p);
+ if (!pgd_page) {
+ dev_warn(kctx->kbdev->dev, "kbase_mmu_insert_pages: kmap failure\n");
+ if (recover_required) {
+ /* Invalidate the pages we have partially
+ * completed */
+ mmu_insert_pages_failure_recovery(kctx,
+ recover_vpfn,
+ recover_count);
+ }
+ err = -ENOMEM;
+ goto fail_unlock;
+ }
+
+ for (i = 0; i < count; i++) {
+ unsigned int ofs = index + i;
+
+ KBASE_DEBUG_ASSERT(0 == (pgd_page[ofs] & 1UL));
+ kctx->kbdev->mmu_mode->entry_set_ate(&pgd_page[ofs],
+ phys[i], flags);
+ }
+
+ phys += count;
+ vpfn += count;
+ remain -= count;
+
+ kbase_mmu_sync_pgd(kctx->kbdev,
+ kbase_dma_addr(p) + (index * sizeof(u64)),
+ count * sizeof(u64));
+
+ kunmap(p);
+ /* We have started modifying the page table. If further pages
+ * need inserting and fail we need to undo what has already
+ * taken place */
+ recover_required = true;
+ recover_count += count;
+ }
+
+ mutex_unlock(&kctx->mmu_lock);
+ return 0;
+
+fail_unlock:
+ mutex_unlock(&kctx->mmu_lock);
+ return err;
+}
+
+/*
+ * Map 'nr' pages pointed to by 'phys' at GPU PFN 'vpfn'
+ */
+int kbase_mmu_insert_pages(struct kbase_context *kctx, u64 vpfn,
+ phys_addr_t *phys, size_t nr,
+ unsigned long flags)
+{
+ int err;
+
+ err = kbase_mmu_insert_pages_no_flush(kctx, vpfn, phys, nr, flags);
+ kbase_mmu_flush_invalidate(kctx, vpfn, nr, false);
+ return err;
+}
+
+KBASE_EXPORT_TEST_API(kbase_mmu_insert_pages);
+
+/**
+ * kbase_mmu_flush_invalidate_noretain() - Flush and invalidate the GPU caches
+ * without retaining the kbase context.
+ * @kctx: The KBase context.
+ * @vpfn: The virtual page frame number to start the flush on.
+ * @nr: The number of pages to flush.
+ * @sync: Set if the operation should be synchronous or not.
+ *
+ * As per kbase_mmu_flush_invalidate but doesn't retain the kctx or do any
+ * other locking.
+ */
+static void kbase_mmu_flush_invalidate_noretain(struct kbase_context *kctx,
+ u64 vpfn, size_t nr, bool sync)
+{
+ struct kbase_device *kbdev = kctx->kbdev;
+ int err;
+ u32 op;
+
+ /* Early out if there is nothing to do */
+ if (nr == 0)
+ return;
+
+ if (sync)
+ op = AS_COMMAND_FLUSH_MEM;
+ else
+ op = AS_COMMAND_FLUSH_PT;
+
+ err = kbase_mmu_hw_do_operation(kbdev,
+ &kbdev->as[kctx->as_nr],
+ kctx, vpfn, nr, op, 0);
+#if KBASE_GPU_RESET_EN
+ if (err) {
+ /* Flush failed to complete, assume the
+ * GPU has hung and perform a reset to
+ * recover */
+ dev_err(kbdev->dev, "Flush for GPU page table update did not complete. Issuing GPU soft-reset to recover\n");
+
+ if (kbase_prepare_to_reset_gpu_locked(kbdev))
+ kbase_reset_gpu_locked(kbdev);
+ }
+#endif /* KBASE_GPU_RESET_EN */
+
+#ifndef CONFIG_MALI_NO_MALI
+ /*
+ * As this function could be called in interrupt context the sync
+ * request can't block. Instead log the request and the next flush
+ * request will pick it up.
+ */
+ if ((!err) && sync &&
+ kbase_hw_has_issue(kctx->kbdev, BASE_HW_ISSUE_6367))
+ atomic_set(&kctx->drain_pending, 1);
+#endif /* !CONFIG_MALI_NO_MALI */
+}
+
+static void kbase_mmu_flush_invalidate(struct kbase_context *kctx,
+ u64 vpfn, size_t nr, bool sync)
+{
+ struct kbase_device *kbdev;
+ bool ctx_is_in_runpool;
+#ifndef CONFIG_MALI_NO_MALI
+ bool drain_pending = false;
+
+ if (atomic_xchg(&kctx->drain_pending, 0))
+ drain_pending = true;
+#endif /* !CONFIG_MALI_NO_MALI */
+
+ /* Early out if there is nothing to do */
+ if (nr == 0)
+ return;
+
+ kbdev = kctx->kbdev;
+ ctx_is_in_runpool = kbasep_js_runpool_retain_ctx(kbdev, kctx);
+
+ if (ctx_is_in_runpool) {
+ KBASE_DEBUG_ASSERT(kctx->as_nr != KBASEP_AS_NR_INVALID);
+
+ if (!kbase_pm_context_active_handle_suspend(kbdev,
+ KBASE_PM_SUSPEND_HANDLER_DONT_REACTIVATE)) {
+ int err;
+ u32 op;
+
+ /* AS transaction begin */
+ mutex_lock(&kbdev->mmu_hw_mutex);
+
+ if (sync)
+ op = AS_COMMAND_FLUSH_MEM;
+ else
+ op = AS_COMMAND_FLUSH_PT;
+
+ err = kbase_mmu_hw_do_operation(kbdev,
+ &kbdev->as[kctx->as_nr],
+ kctx, vpfn, nr, op, 0);
+
+#if KBASE_GPU_RESET_EN
+ if (err) {
+ /* Flush failed to complete, assume the
+ * GPU has hung and perform a reset to
+ * recover */
+				dev_err(kbdev->dev, "Flush for GPU page table update did not complete. Issuing GPU soft-reset to recover\n");
+
+ if (kbase_prepare_to_reset_gpu(kbdev))
+ kbase_reset_gpu(kbdev);
+ }
+#endif /* KBASE_GPU_RESET_EN */
+
+ mutex_unlock(&kbdev->mmu_hw_mutex);
+ /* AS transaction end */
+
+#ifndef CONFIG_MALI_NO_MALI
+ /*
+ * The transaction lock must be dropped before here
+ * as kbase_wait_write_flush could take it if
+ * the GPU was powered down (static analysis doesn't
+ * know this can't happen).
+ */
+ drain_pending |= (!err) && sync &&
+ kbase_hw_has_issue(kctx->kbdev,
+ BASE_HW_ISSUE_6367);
+ if (drain_pending) {
+ /* Wait for GPU to flush write buffer */
+ kbase_wait_write_flush(kctx);
+ }
+#endif /* !CONFIG_MALI_NO_MALI */
+
+ kbase_pm_context_idle(kbdev);
+ }
+ kbasep_js_runpool_release_ctx(kbdev, kctx);
+ }
+}
+
+void kbase_mmu_update(struct kbase_context *kctx)
+{
+ lockdep_assert_held(&kctx->kbdev->hwaccess_lock);
+ lockdep_assert_held(&kctx->kbdev->mmu_hw_mutex);
+ /* ASSERT that the context has a valid as_nr, which is only the case
+ * when it's scheduled in.
+ *
+ * as_nr won't change because the caller has the hwaccess_lock */
+ KBASE_DEBUG_ASSERT(kctx->as_nr != KBASEP_AS_NR_INVALID);
+
+ kctx->kbdev->mmu_mode->update(kctx);
+}
+KBASE_EXPORT_TEST_API(kbase_mmu_update);
+
+void kbase_mmu_disable_as(struct kbase_device *kbdev, int as_nr)
+{
+ lockdep_assert_held(&kbdev->hwaccess_lock);
+ lockdep_assert_held(&kbdev->mmu_hw_mutex);
+
+ kbdev->mmu_mode->disable_as(kbdev, as_nr);
+}
+
+void kbase_mmu_disable(struct kbase_context *kctx)
+{
+ /* ASSERT that the context has a valid as_nr, which is only the case
+ * when it's scheduled in.
+ *
+ * as_nr won't change because the caller has the hwaccess_lock */
+ KBASE_DEBUG_ASSERT(kctx->as_nr != KBASEP_AS_NR_INVALID);
+
+ lockdep_assert_held(&kctx->kbdev->hwaccess_lock);
+
+ /*
+ * The address space is being disabled, drain all knowledge of it out
+ * from the caches as pages and page tables might be freed after this.
+ *
+ * The job scheduler code will already be holding the locks and context
+ * so just do the flush.
+ */
+ kbase_mmu_flush_invalidate_noretain(kctx, 0, ~0, true);
+
+ kctx->kbdev->mmu_mode->disable_as(kctx->kbdev, kctx->as_nr);
+}
+KBASE_EXPORT_TEST_API(kbase_mmu_disable);
+
+/*
+ * We actually only discard the ATE, and not the page table
+ * pages. There is a potential DoS here, as we'll leak memory by
+ * having PTEs that are potentially unused. Fixing this will require physical
+ * page accounting, so that MMU pages are part of the process allocation.
+ *
+ * IMPORTANT: This uses kbasep_js_runpool_release_ctx() when the context is
+ * currently scheduled into the runpool, and so potentially uses a lot of locks.
+ * These locks must be taken in the correct order with respect to others
+ * already held by the caller. Refer to kbasep_js_runpool_release_ctx() for more
+ * information.
+ */
+int kbase_mmu_teardown_pages(struct kbase_context *kctx, u64 vpfn, size_t nr)
+{
+ phys_addr_t pgd;
+ u64 *pgd_page;
+ struct kbase_device *kbdev;
+ size_t requested_nr = nr;
+ struct kbase_mmu_mode const *mmu_mode;
+ int err;
+
+ KBASE_DEBUG_ASSERT(NULL != kctx);
+ beenthere(kctx, "kctx %p vpfn %lx nr %zd", (void *)kctx, (unsigned long)vpfn, nr);
+
+ if (0 == nr) {
+ /* early out if nothing to do */
+ return 0;
+ }
+
+ mutex_lock(&kctx->mmu_lock);
+
+ kbdev = kctx->kbdev;
+ mmu_mode = kbdev->mmu_mode;
+
+ while (nr) {
+ unsigned int i;
+ unsigned int index = vpfn & 0x1FF;
+ unsigned int count = KBASE_MMU_PAGE_ENTRIES - index;
+ struct page *p;
+
+ if (count > nr)
+ count = nr;
+
+ err = mmu_get_bottom_pgd(kctx, vpfn, &pgd);
+ if (err) {
+ dev_warn(kbdev->dev, "kbase_mmu_teardown_pages: mmu_get_bottom_pgd failure\n");
+ err = -EINVAL;
+ goto fail_unlock;
+ }
+
+ p = pfn_to_page(PFN_DOWN(pgd));
+ pgd_page = kmap(p);
+ if (!pgd_page) {
+ dev_warn(kbdev->dev, "kbase_mmu_teardown_pages: kmap failure\n");
+ err = -ENOMEM;
+ goto fail_unlock;
+ }
+
+ for (i = 0; i < count; i++)
+ mmu_mode->entry_invalidate(&pgd_page[index + i]);
+
+ vpfn += count;
+ nr -= count;
+
+ kbase_mmu_sync_pgd(kctx->kbdev,
+ kbase_dma_addr(p) + (index * sizeof(u64)),
+ count * sizeof(u64));
+
+ kunmap(p);
+ }
+
+ mutex_unlock(&kctx->mmu_lock);
+ kbase_mmu_flush_invalidate(kctx, vpfn, requested_nr, true);
+ return 0;
+
+fail_unlock:
+ mutex_unlock(&kctx->mmu_lock);
+ kbase_mmu_flush_invalidate(kctx, vpfn, requested_nr, true);
+ return err;
+}
+
+KBASE_EXPORT_TEST_API(kbase_mmu_teardown_pages);
+
+/**
+ * Update the entries for the specified number of pages pointed to by 'phys' at GPU PFN 'vpfn'.
+ * This call is triggered in response to changes of the memory attributes.
+ *
+ * @pre : The caller is responsible for validating the memory attributes
+ *
+ * IMPORTANT: This uses kbasep_js_runpool_release_ctx() when the context is
+ * currently scheduled into the runpool, and so potentially uses a lot of locks.
+ * These locks must be taken in the correct order with respect to others
+ * already held by the caller. Refer to kbasep_js_runpool_release_ctx() for more
+ * information.
+ */
+int kbase_mmu_update_pages(struct kbase_context *kctx, u64 vpfn, phys_addr_t *phys, size_t nr, unsigned long flags)
+{
+ phys_addr_t pgd;
+ u64 *pgd_page;
+ size_t requested_nr = nr;
+ struct kbase_mmu_mode const *mmu_mode;
+ int err;
+
+ KBASE_DEBUG_ASSERT(NULL != kctx);
+ KBASE_DEBUG_ASSERT(0 != vpfn);
+ KBASE_DEBUG_ASSERT(vpfn <= (U64_MAX / PAGE_SIZE));
+
+ /* Early out if there is nothing to do */
+ if (nr == 0)
+ return 0;
+
+ mutex_lock(&kctx->mmu_lock);
+
+ mmu_mode = kctx->kbdev->mmu_mode;
+
+ dev_warn(kctx->kbdev->dev, "kbase_mmu_update_pages(): updating page share flags on GPU PFN 0x%llx from phys %p, %zu pages",
+ vpfn, phys, nr);
+
+ while (nr) {
+ unsigned int i;
+ unsigned int index = vpfn & 0x1FF;
+ size_t count = KBASE_MMU_PAGE_ENTRIES - index;
+ struct page *p;
+
+ if (count > nr)
+ count = nr;
+
+ do {
+ err = mmu_get_bottom_pgd(kctx, vpfn, &pgd);
+ if (err != -ENOMEM)
+ break;
+ /* Fill the memory pool with enough pages for
+ * the page walk to succeed
+ */
+ mutex_unlock(&kctx->mmu_lock);
+ err = kbase_mem_pool_grow(&kctx->mem_pool,
+ MIDGARD_MMU_BOTTOMLEVEL);
+ mutex_lock(&kctx->mmu_lock);
+ } while (!err);
+ if (err) {
+ dev_warn(kctx->kbdev->dev, "mmu_get_bottom_pgd failure\n");
+ goto fail_unlock;
+ }
+
+ p = pfn_to_page(PFN_DOWN(pgd));
+ pgd_page = kmap(p);
+ if (!pgd_page) {
+ dev_warn(kctx->kbdev->dev, "kmap failure\n");
+ err = -ENOMEM;
+ goto fail_unlock;
+ }
+
+ for (i = 0; i < count; i++)
+ mmu_mode->entry_set_ate(&pgd_page[index + i], phys[i],
+ flags);
+
+ phys += count;
+ vpfn += count;
+ nr -= count;
+
+ kbase_mmu_sync_pgd(kctx->kbdev,
+ kbase_dma_addr(p) + (index * sizeof(u64)),
+ count * sizeof(u64));
+
+ kunmap(pfn_to_page(PFN_DOWN(pgd)));
+ }
+
+ mutex_unlock(&kctx->mmu_lock);
+ kbase_mmu_flush_invalidate(kctx, vpfn, requested_nr, true);
+ return 0;
+
+fail_unlock:
+ mutex_unlock(&kctx->mmu_lock);
+ kbase_mmu_flush_invalidate(kctx, vpfn, requested_nr, true);
+ return err;
+}
+
+/* This is a debug feature only */
+static void mmu_check_unused(struct kbase_context *kctx, phys_addr_t pgd)
+{
+ u64 *page;
+ int i;
+
+ page = kmap_atomic(pfn_to_page(PFN_DOWN(pgd)));
+ /* kmap_atomic should NEVER fail. */
+ KBASE_DEBUG_ASSERT(NULL != page);
+
+ for (i = 0; i < KBASE_MMU_PAGE_ENTRIES; i++) {
+ if (kctx->kbdev->mmu_mode->ate_is_valid(page[i]))
+ beenthere(kctx, "live pte %016lx", (unsigned long)page[i]);
+ }
+ kunmap_atomic(page);
+}
+
+static void mmu_teardown_level(struct kbase_context *kctx, phys_addr_t pgd, int level, int zap, u64 *pgd_page_buffer)
+{
+ phys_addr_t target_pgd;
+ u64 *pgd_page;
+ int i;
+ struct kbase_mmu_mode const *mmu_mode;
+
+ KBASE_DEBUG_ASSERT(NULL != kctx);
+ lockdep_assert_held(&kctx->mmu_lock);
+
+ pgd_page = kmap_atomic(pfn_to_page(PFN_DOWN(pgd)));
+ /* kmap_atomic should NEVER fail. */
+ KBASE_DEBUG_ASSERT(NULL != pgd_page);
+ /* Copy the page to our preallocated buffer so that we can minimize kmap_atomic usage */
+ memcpy(pgd_page_buffer, pgd_page, PAGE_SIZE);
+ kunmap_atomic(pgd_page);
+ pgd_page = pgd_page_buffer;
+
+ mmu_mode = kctx->kbdev->mmu_mode;
+
+ for (i = 0; i < KBASE_MMU_PAGE_ENTRIES; i++) {
+ target_pgd = mmu_mode->pte_to_phy_addr(pgd_page[i]);
+
+ if (target_pgd) {
+ if (level < (MIDGARD_MMU_BOTTOMLEVEL - 1)) {
+ mmu_teardown_level(kctx, target_pgd, level + 1, zap, pgd_page_buffer + (PAGE_SIZE / sizeof(u64)));
+ } else {
+ /*
+				 * So target_pgd is a level-3 page.
+ * As a leaf, it is safe to free it.
+ * Unless we have live pages attached to it!
+ */
+ mmu_check_unused(kctx, target_pgd);
+ }
+
+ beenthere(kctx, "pte %lx level %d", (unsigned long)target_pgd, level + 1);
+ if (zap) {
+ struct page *p = phys_to_page(target_pgd);
+
+ kbase_mem_pool_free(&kctx->mem_pool, p, true);
+ kbase_process_page_usage_dec(kctx, 1);
+ kbase_atomic_sub_pages(1, &kctx->used_pages);
+ kbase_atomic_sub_pages(1, &kctx->kbdev->memdev.used_pages);
+ }
+ }
+ }
+}
+
+int kbase_mmu_init(struct kbase_context *kctx)
+{
+ KBASE_DEBUG_ASSERT(NULL != kctx);
+ KBASE_DEBUG_ASSERT(NULL == kctx->mmu_teardown_pages);
+
+ mutex_init(&kctx->mmu_lock);
+
+	/* Preallocate four pages (one per MMU level) for mmu_teardown_level to use */
+ kctx->mmu_teardown_pages = kmalloc(PAGE_SIZE * 4, GFP_KERNEL);
+
+ if (NULL == kctx->mmu_teardown_pages)
+ return -ENOMEM;
+
+ return 0;
+}
+
+void kbase_mmu_term(struct kbase_context *kctx)
+{
+ KBASE_DEBUG_ASSERT(NULL != kctx);
+ KBASE_DEBUG_ASSERT(NULL != kctx->mmu_teardown_pages);
+
+ kfree(kctx->mmu_teardown_pages);
+ kctx->mmu_teardown_pages = NULL;
+}
+
+void kbase_mmu_free_pgd(struct kbase_context *kctx)
+{
+ int new_page_count __maybe_unused;
+
+ KBASE_DEBUG_ASSERT(NULL != kctx);
+ KBASE_DEBUG_ASSERT(NULL != kctx->mmu_teardown_pages);
+
+ mutex_lock(&kctx->mmu_lock);
+ mmu_teardown_level(kctx, kctx->pgd, MIDGARD_MMU_TOPLEVEL, 1, kctx->mmu_teardown_pages);
+ mutex_unlock(&kctx->mmu_lock);
+
+ beenthere(kctx, "pgd %lx", (unsigned long)kctx->pgd);
+ kbase_mem_pool_free(&kctx->mem_pool, phys_to_page(kctx->pgd), true);
+ kbase_process_page_usage_dec(kctx, 1);
+ new_page_count = kbase_atomic_sub_pages(1, &kctx->used_pages);
+ kbase_atomic_sub_pages(1, &kctx->kbdev->memdev.used_pages);
+
+ kbase_tlstream_aux_pagesalloc(
+ (u32)kctx->id,
+ (u64)new_page_count);
+}
+
+KBASE_EXPORT_TEST_API(kbase_mmu_free_pgd);
+
+static size_t kbasep_mmu_dump_level(struct kbase_context *kctx, phys_addr_t pgd, int level, char ** const buffer, size_t *size_left)
+{
+ phys_addr_t target_pgd;
+ u64 *pgd_page;
+ int i;
+ size_t size = KBASE_MMU_PAGE_ENTRIES * sizeof(u64) + sizeof(u64);
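+	/*
+	 * Each level dumped contributes an 8-byte header (the PGD physical
+	 * address with the level number OR-ed into its low bits) followed by
+	 * the 512 8-byte entries of that page-table page, i.e.
+	 * 8 + 512 * 8 = 4104 bytes per level visited.
+	 */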
+ size_t dump_size;
+ struct kbase_mmu_mode const *mmu_mode;
+
+ KBASE_DEBUG_ASSERT(NULL != kctx);
+ lockdep_assert_held(&kctx->mmu_lock);
+
+ mmu_mode = kctx->kbdev->mmu_mode;
+
+ pgd_page = kmap(pfn_to_page(PFN_DOWN(pgd)));
+ if (!pgd_page) {
+ dev_warn(kctx->kbdev->dev, "kbasep_mmu_dump_level: kmap failure\n");
+ return 0;
+ }
+
+ if (*size_left >= size) {
+ /* A modified physical address that contains the page table level */
+ u64 m_pgd = pgd | level;
+
+ /* Put the modified physical address in the output buffer */
+ memcpy(*buffer, &m_pgd, sizeof(m_pgd));
+ *buffer += sizeof(m_pgd);
+
+ /* Followed by the page table itself */
+ memcpy(*buffer, pgd_page, sizeof(u64) * KBASE_MMU_PAGE_ENTRIES);
+ *buffer += sizeof(u64) * KBASE_MMU_PAGE_ENTRIES;
+
+ *size_left -= size;
+ }
+
+ if (level < MIDGARD_MMU_BOTTOMLEVEL) {
+ for (i = 0; i < KBASE_MMU_PAGE_ENTRIES; i++) {
+ if (mmu_mode->pte_is_valid(pgd_page[i])) {
+ target_pgd = mmu_mode->pte_to_phy_addr(
+ pgd_page[i]);
+
+ dump_size = kbasep_mmu_dump_level(kctx,
+ target_pgd, level + 1,
+ buffer, size_left);
+ if (!dump_size) {
+ kunmap(pfn_to_page(PFN_DOWN(pgd)));
+ return 0;
+ }
+ size += dump_size;
+ }
+ }
+ }
+
+ kunmap(pfn_to_page(PFN_DOWN(pgd)));
+
+ return size;
+}
+
+void *kbase_mmu_dump(struct kbase_context *kctx, int nr_pages)
+{
+ void *kaddr;
+ size_t size_left;
+
+ KBASE_DEBUG_ASSERT(kctx);
+
+ if (0 == nr_pages) {
+ /* can't dump in a 0 sized buffer, early out */
+ return NULL;
+ }
+
+ size_left = nr_pages * PAGE_SIZE;
+
+ KBASE_DEBUG_ASSERT(0 != size_left);
+ kaddr = vmalloc_user(size_left);
+
+ mutex_lock(&kctx->mmu_lock);
+
+ if (kaddr) {
+ u64 end_marker = 0xFFULL;
+ char *buffer;
+ char *mmu_dump_buffer;
+ u64 config[3];
+ size_t size;
+
+ buffer = (char *)kaddr;
+ mmu_dump_buffer = buffer;
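+		/*
+		 * Resulting dump layout (as assembled below): an optional
+		 * 24-byte AS config block (transtab, memattr, transcfg) for
+		 * API >= 8.4, followed by one record per page-table page
+		 * visited, terminated by an 8-byte 0xFF end marker.
+		 */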
+
+ if (kctx->api_version >= KBASE_API_VERSION(8, 4)) {
+ struct kbase_mmu_setup as_setup;
+
+ kctx->kbdev->mmu_mode->get_as_setup(kctx, &as_setup);
+ config[0] = as_setup.transtab;
+ config[1] = as_setup.memattr;
+ config[2] = as_setup.transcfg;
+ memcpy(buffer, &config, sizeof(config));
+ mmu_dump_buffer += sizeof(config);
+ size_left -= sizeof(config);
+ }
+
+
+
+ size = kbasep_mmu_dump_level(kctx,
+ kctx->pgd,
+ MIDGARD_MMU_TOPLEVEL,
+ &mmu_dump_buffer,
+ &size_left);
+
+ if (!size)
+ goto fail_free;
+
+ /* Add on the size for the end marker */
+ size += sizeof(u64);
+ /* Add on the size for the config */
+ if (kctx->api_version >= KBASE_API_VERSION(8, 4))
+ size += sizeof(config);
+
+
+ if (size > nr_pages * PAGE_SIZE || size_left < sizeof(u64)) {
+ /* The buffer isn't big enough - free the memory and return failure */
+ goto fail_free;
+ }
+
+ /* Add the end marker */
+ memcpy(mmu_dump_buffer, &end_marker, sizeof(u64));
+ }
+
+ mutex_unlock(&kctx->mmu_lock);
+ return kaddr;
+
+fail_free:
+ vfree(kaddr);
+ mutex_unlock(&kctx->mmu_lock);
+ return NULL;
+}
+KBASE_EXPORT_TEST_API(kbase_mmu_dump);
+
+void bus_fault_worker(struct work_struct *data)
+{
+ struct kbase_as *faulting_as;
+ int as_no;
+ struct kbase_context *kctx;
+ struct kbase_device *kbdev;
+#if KBASE_GPU_RESET_EN
+ bool reset_status = false;
+#endif /* KBASE_GPU_RESET_EN */
+
+ faulting_as = container_of(data, struct kbase_as, work_busfault);
+
+ as_no = faulting_as->number;
+
+ kbdev = container_of(faulting_as, struct kbase_device, as[as_no]);
+
+ /* Grab the context that was already refcounted in kbase_mmu_interrupt().
+ * Therefore, it cannot be scheduled out of this AS until we explicitly release it
+ */
+ kctx = kbasep_js_runpool_lookup_ctx_noretain(kbdev, as_no);
+ if (WARN_ON(!kctx)) {
+ atomic_dec(&kbdev->faults_pending);
+ return;
+ }
+
+ if (unlikely(faulting_as->protected_mode))
+ {
+ kbase_mmu_report_fault_and_kill(kctx, faulting_as,
+ "Permission failure");
+ kbase_mmu_hw_clear_fault(kbdev, faulting_as, kctx,
+ KBASE_MMU_FAULT_TYPE_BUS_UNEXPECTED);
+ kbasep_js_runpool_release_ctx(kbdev, kctx);
+ atomic_dec(&kbdev->faults_pending);
+ return;
+
+ }
+
+#if KBASE_GPU_RESET_EN
+ if (kbase_hw_has_issue(kbdev, BASE_HW_ISSUE_8245)) {
+ /* Due to H/W issue 8245 we need to reset the GPU after using UNMAPPED mode.
+ * We start the reset before switching to UNMAPPED to ensure that unrelated jobs
+ * are evicted from the GPU before the switch.
+ */
+ dev_err(kbdev->dev, "GPU bus error occurred. For this GPU version we now soft-reset as part of bus error recovery\n");
+ reset_status = kbase_prepare_to_reset_gpu(kbdev);
+ }
+#endif /* KBASE_GPU_RESET_EN */
+ /* NOTE: If GPU already powered off for suspend, we don't need to switch to unmapped */
+ if (!kbase_pm_context_active_handle_suspend(kbdev, KBASE_PM_SUSPEND_HANDLER_DONT_REACTIVATE)) {
+ unsigned long flags;
+
+ /* switch to UNMAPPED mode, will abort all jobs and stop any hw counter dumping */
+ /* AS transaction begin */
+ mutex_lock(&kbdev->mmu_hw_mutex);
+
+ /* Set the MMU into unmapped mode */
+ spin_lock_irqsave(&kbdev->hwaccess_lock, flags);
+ kbase_mmu_disable(kctx);
+ spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);
+
+ mutex_unlock(&kbdev->mmu_hw_mutex);
+ /* AS transaction end */
+
+ kbase_mmu_hw_clear_fault(kbdev, faulting_as, kctx,
+ KBASE_MMU_FAULT_TYPE_BUS_UNEXPECTED);
+ kbase_mmu_hw_enable_fault(kbdev, faulting_as, kctx,
+ KBASE_MMU_FAULT_TYPE_BUS_UNEXPECTED);
+
+ kbase_pm_context_idle(kbdev);
+ }
+
+#if KBASE_GPU_RESET_EN
+ if (kbase_hw_has_issue(kbdev, BASE_HW_ISSUE_8245) && reset_status)
+ kbase_reset_gpu(kbdev);
+#endif /* KBASE_GPU_RESET_EN */
+
+ kbasep_js_runpool_release_ctx(kbdev, kctx);
+
+ atomic_dec(&kbdev->faults_pending);
+}
+
+const char *kbase_exception_name(struct kbase_device *kbdev, u32 exception_code)
+{
+ const char *e;
+
+ switch (exception_code) {
+ /* Non-Fault Status code */
+ case 0x00:
+ e = "NOT_STARTED/IDLE/OK";
+ break;
+ case 0x01:
+ e = "DONE";
+ break;
+ case 0x02:
+ e = "INTERRUPTED";
+ break;
+ case 0x03:
+ e = "STOPPED";
+ break;
+ case 0x04:
+ e = "TERMINATED";
+ break;
+ case 0x08:
+ e = "ACTIVE";
+ break;
+ /* Job exceptions */
+ case 0x40:
+ e = "JOB_CONFIG_FAULT";
+ break;
+ case 0x41:
+ e = "JOB_POWER_FAULT";
+ break;
+ case 0x42:
+ e = "JOB_READ_FAULT";
+ break;
+ case 0x43:
+ e = "JOB_WRITE_FAULT";
+ break;
+ case 0x44:
+ e = "JOB_AFFINITY_FAULT";
+ break;
+ case 0x48:
+ e = "JOB_BUS_FAULT";
+ break;
+ case 0x50:
+ e = "INSTR_INVALID_PC";
+ break;
+ case 0x51:
+ e = "INSTR_INVALID_ENC";
+ break;
+ case 0x52:
+ e = "INSTR_TYPE_MISMATCH";
+ break;
+ case 0x53:
+ e = "INSTR_OPERAND_FAULT";
+ break;
+ case 0x54:
+ e = "INSTR_TLS_FAULT";
+ break;
+ case 0x55:
+ e = "INSTR_BARRIER_FAULT";
+ break;
+ case 0x56:
+ e = "INSTR_ALIGN_FAULT";
+ break;
+ case 0x58:
+ e = "DATA_INVALID_FAULT";
+ break;
+ case 0x59:
+ e = "TILE_RANGE_FAULT";
+ break;
+ case 0x5A:
+ e = "ADDR_RANGE_FAULT";
+ break;
+ case 0x60:
+ e = "OUT_OF_MEMORY";
+ break;
+ /* GPU exceptions */
+ case 0x80:
+ e = "DELAYED_BUS_FAULT";
+ break;
+ case 0x88:
+ e = "SHAREABILITY_FAULT";
+ break;
+ /* MMU exceptions */
+ case 0xC0:
+ case 0xC1:
+ case 0xC2:
+ case 0xC3:
+ case 0xC4:
+ case 0xC5:
+ case 0xC6:
+ case 0xC7:
+ e = "TRANSLATION_FAULT";
+ break;
+ case 0xC8:
+#ifdef CONFIG_MALI_GPU_MMU_AARCH64
+ case 0xC9:
+ case 0xCA:
+ case 0xCB:
+ case 0xCC:
+ case 0xCD:
+ case 0xCE:
+ case 0xCF:
+#endif /* CONFIG_MALI_GPU_MMU_AARCH64 */
+ e = "PERMISSION_FAULT";
+ break;
+ case 0xD0:
+ case 0xD1:
+ case 0xD2:
+ case 0xD3:
+ case 0xD4:
+ case 0xD5:
+ case 0xD6:
+ case 0xD7:
+ e = "TRANSTAB_BUS_FAULT";
+ break;
+ case 0xD8:
+#ifdef CONFIG_MALI_GPU_MMU_AARCH64
+ case 0xD9:
+ case 0xDA:
+ case 0xDB:
+ case 0xDC:
+ case 0xDD:
+ case 0xDE:
+ case 0xDF:
+#endif /* CONFIG_MALI_GPU_MMU_AARCH64 */
+ e = "ACCESS_FLAG";
+ break;
+#ifdef CONFIG_MALI_GPU_MMU_AARCH64
+ case 0xE0:
+ case 0xE1:
+ case 0xE2:
+ case 0xE3:
+ case 0xE4:
+ case 0xE5:
+ case 0xE6:
+ case 0xE7:
+ e = "ADDRESS_SIZE_FAULT";
+ break;
+ case 0xE8:
+ case 0xE9:
+ case 0xEA:
+ case 0xEB:
+ case 0xEC:
+ case 0xED:
+ case 0xEE:
+ case 0xEF:
+ e = "MEMORY_ATTRIBUTES_FAULT";
+#endif /* CONFIG_MALI_GPU_MMU_AARCH64 */
+ break;
+ default:
+ e = "UNKNOWN";
+ break;
+	}
+
+ return e;
+}
+
+static const char *access_type_name(struct kbase_device *kbdev,
+ u32 fault_status)
+{
+ switch (fault_status & AS_FAULTSTATUS_ACCESS_TYPE_MASK) {
+ case AS_FAULTSTATUS_ACCESS_TYPE_ATOMIC:
+#ifdef CONFIG_MALI_GPU_MMU_AARCH64
+ return "ATOMIC";
+#else
+ return "UNKNOWN";
+#endif /* CONFIG_MALI_GPU_MMU_AARCH64 */
+ case AS_FAULTSTATUS_ACCESS_TYPE_READ:
+ return "READ";
+ case AS_FAULTSTATUS_ACCESS_TYPE_WRITE:
+ return "WRITE";
+ case AS_FAULTSTATUS_ACCESS_TYPE_EX:
+ return "EXECUTE";
+ default:
+ WARN_ON(1);
+ return NULL;
+ }
+}
+
+/**
+ * The caller must ensure it has retained the ctx to prevent it from being scheduled out whilst it's being worked on.
+ */
+static void kbase_mmu_report_fault_and_kill(struct kbase_context *kctx,
+ struct kbase_as *as, const char *reason_str)
+{
+ unsigned long flags;
+ int exception_type;
+ int access_type;
+ int source_id;
+ int as_no;
+ struct kbase_device *kbdev;
+ struct kbasep_js_device_data *js_devdata;
+
+#if KBASE_GPU_RESET_EN
+ bool reset_status = false;
+#endif
+
+ as_no = as->number;
+ kbdev = kctx->kbdev;
+ js_devdata = &kbdev->js_data;
+
+ /* ASSERT that the context won't leave the runpool */
+ KBASE_DEBUG_ASSERT(kbasep_js_debug_check_ctx_refcount(kbdev, kctx) > 0);
+
+ /* decode the fault status */
+ exception_type = as->fault_status & 0xFF;
+ access_type = (as->fault_status >> 8) & 0x3;
+ source_id = (as->fault_status >> 16);
+
+ /* terminal fault, print info about the fault */
+ dev_err(kbdev->dev,
+ "Unhandled Page fault in AS%d at VA 0x%016llX\n"
+ "Reason: %s\n"
+ "raw fault status: 0x%X\n"
+ "decoded fault status: %s\n"
+ "exception type 0x%X: %s\n"
+ "access type 0x%X: %s\n"
+ "source id 0x%X\n"
+ "pid: %d\n",
+ as_no, as->fault_addr,
+ reason_str,
+ as->fault_status,
+ (as->fault_status & (1 << 10) ? "DECODER FAULT" : "SLAVE FAULT"),
+ exception_type, kbase_exception_name(kbdev, exception_type),
+ access_type, access_type_name(kbdev, as->fault_status),
+ source_id,
+ kctx->pid);
+
+ /* hardware counters dump fault handling */
+ if ((kbdev->hwcnt.kctx) && (kbdev->hwcnt.kctx->as_nr == as_no) &&
+ (kbdev->hwcnt.backend.state ==
+ KBASE_INSTR_STATE_DUMPING)) {
+ unsigned int num_core_groups = kbdev->gpu_props.num_core_groups;
+
+ if ((as->fault_addr >= kbdev->hwcnt.addr) &&
+ (as->fault_addr < (kbdev->hwcnt.addr +
+ (num_core_groups * 2048))))
+ kbdev->hwcnt.backend.state = KBASE_INSTR_STATE_FAULT;
+ }
+
+ /* Stop the kctx from submitting more jobs and cause it to be scheduled
+ * out/rescheduled - this will occur on releasing the context's refcount */
+ spin_lock_irqsave(&kbdev->hwaccess_lock, flags);
+ kbasep_js_clear_submit_allowed(js_devdata, kctx);
+ spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);
+
+ /* Kill any running jobs from the context. Submit is disallowed, so no more jobs from this
+ * context can appear in the job slots from this point on */
+ kbase_backend_jm_kill_jobs_from_kctx(kctx);
+ /* AS transaction begin */
+ mutex_lock(&kbdev->mmu_hw_mutex);
+#if KBASE_GPU_RESET_EN
+ if (kbase_hw_has_issue(kbdev, BASE_HW_ISSUE_8245)) {
+ /* Due to H/W issue 8245 we need to reset the GPU after using UNMAPPED mode.
+ * We start the reset before switching to UNMAPPED to ensure that unrelated jobs
+ * are evicted from the GPU before the switch.
+ */
+ dev_err(kbdev->dev, "Unhandled page fault. For this GPU version we now soft-reset the GPU as part of page fault recovery.");
+ reset_status = kbase_prepare_to_reset_gpu(kbdev);
+ }
+#endif /* KBASE_GPU_RESET_EN */
+ /* switch to UNMAPPED mode, will abort all jobs and stop any hw counter dumping */
+ spin_lock_irqsave(&kbdev->hwaccess_lock, flags);
+ kbase_mmu_disable(kctx);
+ spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);
+
+ mutex_unlock(&kbdev->mmu_hw_mutex);
+ /* AS transaction end */
+ /* Clear down the fault */
+ kbase_mmu_hw_clear_fault(kbdev, as, kctx,
+ KBASE_MMU_FAULT_TYPE_PAGE_UNEXPECTED);
+ kbase_mmu_hw_enable_fault(kbdev, as, kctx,
+ KBASE_MMU_FAULT_TYPE_PAGE_UNEXPECTED);
+
+#if KBASE_GPU_RESET_EN
+ if (kbase_hw_has_issue(kbdev, BASE_HW_ISSUE_8245) && reset_status)
+ kbase_reset_gpu(kbdev);
+#endif /* KBASE_GPU_RESET_EN */
+}
+
+void kbasep_as_do_poke(struct work_struct *work)
+{
+ struct kbase_as *as;
+ struct kbase_device *kbdev;
+ struct kbase_context *kctx;
+ unsigned long flags;
+
+ KBASE_DEBUG_ASSERT(work);
+ as = container_of(work, struct kbase_as, poke_work);
+ kbdev = container_of(as, struct kbase_device, as[as->number]);
+ KBASE_DEBUG_ASSERT(as->poke_state & KBASE_AS_POKE_STATE_IN_FLIGHT);
+
+ /* GPU power will already be active by virtue of the caller holding a JS
+ * reference on the address space, and will not release it until this worker
+ * has finished */
+
+ /* Further to the comment above, we know that while this function is running
+ * the AS will not be released as before the atom is released this workqueue
+ * is flushed (in kbase_as_poking_timer_release_atom)
+ */
+ kctx = kbasep_js_runpool_lookup_ctx_noretain(kbdev, as->number);
+
+ /* AS transaction begin */
+ mutex_lock(&kbdev->mmu_hw_mutex);
+ /* Force a uTLB invalidate */
+ kbase_mmu_hw_do_operation(kbdev, as, kctx, 0, 0,
+ AS_COMMAND_UNLOCK, 0);
+ mutex_unlock(&kbdev->mmu_hw_mutex);
+ /* AS transaction end */
+
+ spin_lock_irqsave(&kbdev->hwaccess_lock, flags);
+ if (as->poke_refcount &&
+ !(as->poke_state & KBASE_AS_POKE_STATE_KILLING_POKE)) {
+ /* Only queue up the timer if we need it, and we're not trying to kill it */
+ hrtimer_start(&as->poke_timer, HR_TIMER_DELAY_MSEC(5), HRTIMER_MODE_REL);
+ }
+ spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);
+}
+
+enum hrtimer_restart kbasep_as_poke_timer_callback(struct hrtimer *timer)
+{
+ struct kbase_as *as;
+ int queue_work_ret;
+
+ KBASE_DEBUG_ASSERT(NULL != timer);
+ as = container_of(timer, struct kbase_as, poke_timer);
+ KBASE_DEBUG_ASSERT(as->poke_state & KBASE_AS_POKE_STATE_IN_FLIGHT);
+
+ queue_work_ret = queue_work(as->poke_wq, &as->poke_work);
+ KBASE_DEBUG_ASSERT(queue_work_ret);
+ return HRTIMER_NORESTART;
+}
+
+/**
+ * Retain the poking timer on an atom's context (if the atom hasn't already
+ * done so), and start the timer (if it's not already started).
+ *
+ * This must only be called on a context that's scheduled in, and an atom
+ * that's running on the GPU.
+ *
+ * The caller must hold hwaccess_lock
+ *
+ * This can be called safely from atomic context
+ */
+void kbase_as_poking_timer_retain_atom(struct kbase_device *kbdev, struct kbase_context *kctx, struct kbase_jd_atom *katom)
+{
+ struct kbase_as *as;
+
+ KBASE_DEBUG_ASSERT(kbdev);
+ KBASE_DEBUG_ASSERT(kctx);
+ KBASE_DEBUG_ASSERT(katom);
+ KBASE_DEBUG_ASSERT(kctx->as_nr != KBASEP_AS_NR_INVALID);
+ lockdep_assert_held(&kbdev->hwaccess_lock);
+
+ if (katom->poking)
+ return;
+
+ katom->poking = 1;
+
+ /* It's safe to work on the as/as_nr without an explicit reference,
+ * because the caller holds the hwaccess_lock, and the atom itself
+ * was also running and had already taken a reference */
+ as = &kbdev->as[kctx->as_nr];
+
+ if (++(as->poke_refcount) == 1) {
+ /* First refcount for poke needed: check if not already in flight */
+ if (!as->poke_state) {
+ /* need to start poking */
+ as->poke_state |= KBASE_AS_POKE_STATE_IN_FLIGHT;
+ queue_work(as->poke_wq, &as->poke_work);
+ }
+ }
+}
+
+/**
+ * If an atom holds a poking timer, release it and wait for it to finish
+ *
+ * This must only be called on a context that's scheduled in, and an atom
+ * that still has a JS reference on the context
+ *
+ * This must \b not be called from atomic context, since it can sleep.
+ */
+void kbase_as_poking_timer_release_atom(struct kbase_device *kbdev, struct kbase_context *kctx, struct kbase_jd_atom *katom)
+{
+ struct kbase_as *as;
+ unsigned long flags;
+
+ KBASE_DEBUG_ASSERT(kbdev);
+ KBASE_DEBUG_ASSERT(kctx);
+ KBASE_DEBUG_ASSERT(katom);
+ KBASE_DEBUG_ASSERT(kctx->as_nr != KBASEP_AS_NR_INVALID);
+
+ if (!katom->poking)
+ return;
+
+ as = &kbdev->as[kctx->as_nr];
+
+ spin_lock_irqsave(&kbdev->hwaccess_lock, flags);
+ KBASE_DEBUG_ASSERT(as->poke_refcount > 0);
+ KBASE_DEBUG_ASSERT(as->poke_state & KBASE_AS_POKE_STATE_IN_FLIGHT);
+
+ if (--(as->poke_refcount) == 0) {
+ as->poke_state |= KBASE_AS_POKE_STATE_KILLING_POKE;
+ spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);
+
+ hrtimer_cancel(&as->poke_timer);
+ flush_workqueue(as->poke_wq);
+
+ spin_lock_irqsave(&kbdev->hwaccess_lock, flags);
+
+ /* Re-check whether it's still needed */
+ if (as->poke_refcount) {
+ int queue_work_ret;
+ /* Poking still needed:
+ * - Another retain will not be starting the timer or queueing work,
+ * because it's still marked as in-flight
+ * - The hrtimer has finished, and has not started a new timer or
+ * queued work because it's been marked as killing
+ *
+ * So whatever happens now, just queue the work again */
+ as->poke_state &= ~((kbase_as_poke_state)KBASE_AS_POKE_STATE_KILLING_POKE);
+ queue_work_ret = queue_work(as->poke_wq, &as->poke_work);
+ KBASE_DEBUG_ASSERT(queue_work_ret);
+ } else {
+ /* It isn't - so mark it as not in flight, and not killing */
+ as->poke_state = 0u;
+
+ /* The poke associated with the atom has now finished. If this is
+			 * also the last atom on the context, then we can guarantee no more
+ * pokes (and thus no more poking register accesses) will occur on
+ * the context until new atoms are run */
+ }
+ }
+ spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);
+
+ katom->poking = 0;
+}
+
+void kbase_mmu_interrupt_process(struct kbase_device *kbdev, struct kbase_context *kctx, struct kbase_as *as)
+{
+ struct kbasep_js_device_data *js_devdata = &kbdev->js_data;
+
+ lockdep_assert_held(&kbdev->hwaccess_lock);
+
+ if (!kctx) {
+		dev_warn(kbdev->dev, "%s in AS%d at 0x%016llx with no context present! Spurious IRQ or SW Design Error?\n",
+ kbase_as_has_bus_fault(as) ? "Bus error" : "Page fault",
+ as->number, as->fault_addr);
+
+ /* Since no ctx was found, the MMU must be disabled. */
+ WARN_ON(as->current_setup.transtab);
+
+ if (kbase_as_has_bus_fault(as)) {
+ kbase_mmu_hw_clear_fault(kbdev, as, kctx,
+ KBASE_MMU_FAULT_TYPE_BUS_UNEXPECTED);
+ kbase_mmu_hw_enable_fault(kbdev, as, kctx,
+ KBASE_MMU_FAULT_TYPE_BUS_UNEXPECTED);
+ } else if (kbase_as_has_page_fault(as)) {
+ kbase_mmu_hw_clear_fault(kbdev, as, kctx,
+ KBASE_MMU_FAULT_TYPE_PAGE_UNEXPECTED);
+ kbase_mmu_hw_enable_fault(kbdev, as, kctx,
+ KBASE_MMU_FAULT_TYPE_PAGE_UNEXPECTED);
+ }
+
+#if KBASE_GPU_RESET_EN
+ if (kbase_as_has_bus_fault(as) &&
+ kbase_hw_has_issue(kbdev, BASE_HW_ISSUE_8245)) {
+ bool reset_status;
+ /*
+ * Reset the GPU, like in bus_fault_worker, in case an
+ * earlier error hasn't been properly cleared by this
+ * point.
+ */
+ dev_err(kbdev->dev, "GPU bus error occurred. For this GPU version we now soft-reset as part of bus error recovery\n");
+ reset_status = kbase_prepare_to_reset_gpu_locked(kbdev);
+ if (reset_status)
+ kbase_reset_gpu_locked(kbdev);
+ }
+#endif /* KBASE_GPU_RESET_EN */
+
+ return;
+ }
+
+ if (kbase_as_has_bus_fault(as)) {
+ /*
+ * hw counters dumping in progress, signal the
+ * other thread that it failed
+ */
+ if ((kbdev->hwcnt.kctx == kctx) &&
+ (kbdev->hwcnt.backend.state ==
+ KBASE_INSTR_STATE_DUMPING))
+ kbdev->hwcnt.backend.state =
+ KBASE_INSTR_STATE_FAULT;
+
+ /*
+ * Stop the kctx from submitting more jobs and cause it
+ * to be scheduled out/rescheduled when all references
+ * to it are released
+ */
+ kbasep_js_clear_submit_allowed(js_devdata, kctx);
+
+#ifdef CONFIG_MALI_GPU_MMU_AARCH64
+ dev_warn(kbdev->dev,
+ "Bus error in AS%d at VA=0x%016llx, IPA=0x%016llx\n",
+ as->number, as->fault_addr,
+ as->fault_extra_addr);
+#else
+ dev_warn(kbdev->dev, "Bus error in AS%d at 0x%016llx\n",
+ as->number, as->fault_addr);
+#endif /* CONFIG_MALI_GPU_MMU_AARCH64 */
+
+ /*
+ * We need to switch to UNMAPPED mode - but we do this in a
+ * worker so that we can sleep
+ */
+ KBASE_DEBUG_ASSERT(0 == object_is_on_stack(&as->work_busfault));
+ WARN_ON(work_pending(&as->work_busfault));
+ queue_work(as->pf_wq, &as->work_busfault);
+ atomic_inc(&kbdev->faults_pending);
+ } else {
+ KBASE_DEBUG_ASSERT(0 == object_is_on_stack(&as->work_pagefault));
+ WARN_ON(work_pending(&as->work_pagefault));
+ queue_work(as->pf_wq, &as->work_pagefault);
+ atomic_inc(&kbdev->faults_pending);
+ }
+}
+
+void kbase_flush_mmu_wqs(struct kbase_device *kbdev)
+{
+ int i;
+
+ for (i = 0; i < kbdev->nr_hw_address_spaces; i++) {
+ struct kbase_as *as = &kbdev->as[i];
+
+ flush_workqueue(as->pf_wq);
+ }
+}
diff --git a/midgard-0.r2p0/mali_kbase_mmu_hw.h b/midgard-0.r2p0/mali_kbase_mmu_hw.h
new file mode 100755
index 0000000..986e959
--- /dev/null
+++ b/midgard-0.r2p0/mali_kbase_mmu_hw.h
@@ -0,0 +1,123 @@
+/*
+ *
+ * (C) COPYRIGHT 2014-2015 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained
+ * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
+ * Boston, MA 02110-1301, USA.
+ *
+ */
+
+
+
+/**
+ * @file
+ * Interface file for accessing MMU hardware functionality
+ */
+
+/**
+ * @page mali_kbase_mmu_hw_page MMU hardware interface
+ *
+ * @section mali_kbase_mmu_hw_intro_sec Introduction
+ * This module provides an abstraction for accessing the functionality provided
+ * by the midgard MMU, so that all MMU HW access is contained within one
+ * common place and different backends (implementations) can be provided.
+ */
+
+#ifndef _MALI_KBASE_MMU_HW_H_
+#define _MALI_KBASE_MMU_HW_H_
+
+/* Forward declarations */
+struct kbase_device;
+struct kbase_as;
+struct kbase_context;
+
+/**
+ * @addtogroup base_kbase_api
+ * @{
+ */
+
+/**
+ * @addtogroup mali_kbase_mmu_hw MMU access APIs
+ * @{
+ */
+
+/** @brief MMU fault type descriptor.
+ */
+enum kbase_mmu_fault_type {
+ KBASE_MMU_FAULT_TYPE_UNKNOWN = 0,
+ KBASE_MMU_FAULT_TYPE_PAGE,
+ KBASE_MMU_FAULT_TYPE_BUS,
+ KBASE_MMU_FAULT_TYPE_PAGE_UNEXPECTED,
+ KBASE_MMU_FAULT_TYPE_BUS_UNEXPECTED
+};
+
+/** @brief Configure an address space for use.
+ *
+ * Configure the MMU using the address space details setup in the
+ * @ref kbase_context structure.
+ *
+ * @param[in] kbdev kbase device to configure.
+ * @param[in] as address space to configure.
+ * @param[in] kctx kbase context to configure.
+ */
+void kbase_mmu_hw_configure(struct kbase_device *kbdev,
+ struct kbase_as *as, struct kbase_context *kctx);
+
+/** @brief Issue an operation to the MMU.
+ *
+ * Issue an operation (MMU invalidate, MMU flush, etc) on the address space that
+ * is associated with the provided @ref kbase_context over the specified range
+ *
+ * @param[in] kbdev kbase device to issue the MMU operation on.
+ * @param[in] as address space to issue the MMU operation on.
+ * @param[in] kctx kbase context to issue the MMU operation on.
+ * @param[in] vpfn MMU Virtual Page Frame Number to start the
+ * operation on.
+ * @param[in] nr Number of pages to work on.
+ * @param[in] type Operation type (written to ASn_COMMAND).
+ * @param[in] handling_irq Is this operation being called during the handling
+ * of an interrupt?
+ *
+ * @return Zero if the operation was successful, non-zero otherwise.
+ */
+int kbase_mmu_hw_do_operation(struct kbase_device *kbdev, struct kbase_as *as,
+ struct kbase_context *kctx, u64 vpfn, u32 nr, u32 type,
+ unsigned int handling_irq);
+
+/** @brief Clear a fault that has been previously reported by the MMU.
+ *
+ * Clear a bus error or page fault that has been reported by the MMU.
+ *
+ * @param[in] kbdev kbase device to clear the fault from.
+ * @param[in] as address space to clear the fault from.
+ * @param[in] kctx kbase context to clear the fault from or NULL.
+ * @param[in] type The type of fault that needs to be cleared.
+ */
+void kbase_mmu_hw_clear_fault(struct kbase_device *kbdev, struct kbase_as *as,
+ struct kbase_context *kctx, enum kbase_mmu_fault_type type);
+
+/** @brief Re-enable a fault type that has previously been reported by the MMU.
+ *
+ * After a page fault or bus error has been reported by the MMU, further
+ * reporting of that fault type is disabled. Once the fault has been handled,
+ * this function must be called to re-enable page fault or bus error reporting.
+ *
+ * @param[in] kbdev kbase device to again enable the fault from.
+ * @param[in] as address space to again enable the fault from.
+ * @param[in] kctx kbase context to again enable the fault from.
+ * @param[in] type The type of fault that needs to be enabled again.
+ */
+void kbase_mmu_hw_enable_fault(struct kbase_device *kbdev, struct kbase_as *as,
+ struct kbase_context *kctx, enum kbase_mmu_fault_type type);
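+
+/* A minimal, hypothetical sketch of the expected call order when acting on a
+ * reported fault: clear the recorded fault first, then re-enable reporting of
+ * that fault type. The helper name and its use are illustrative only; the
+ * real handling lives in the MMU backend workers.
+ */
+static inline void example_ack_page_fault(struct kbase_device *kbdev,
+ struct kbase_as *as, struct kbase_context *kctx)
+{
+ kbase_mmu_hw_clear_fault(kbdev, as, kctx, KBASE_MMU_FAULT_TYPE_PAGE);
+ kbase_mmu_hw_enable_fault(kbdev, as, kctx, KBASE_MMU_FAULT_TYPE_PAGE);
+}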
+
+/** @} *//* end group mali_kbase_mmu_hw */
+/** @} *//* end group base_kbase_api */
+
+#endif /* _MALI_KBASE_MMU_HW_H_ */
diff --git a/midgard-0.r2p0/mali_kbase_mmu_mode.h b/midgard-0.r2p0/mali_kbase_mmu_mode.h
new file mode 100755
index 0000000..2449c60
--- /dev/null
+++ b/midgard-0.r2p0/mali_kbase_mmu_mode.h
@@ -0,0 +1,47 @@
+/*
+ *
+ * (C) COPYRIGHT 2014-2015 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained
+ * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
+ * Boston, MA 02110-1301, USA.
+ *
+ */
+
+
+
+
+
+#ifndef _MALI_KBASE_MMU_MODE_
+#define _MALI_KBASE_MMU_MODE_
+
+#include <linux/types.h>
+
+/* Forward declarations */
+struct kbase_context;
+struct kbase_device;
+struct kbase_as;
+struct kbase_mmu_setup;
+
+struct kbase_mmu_mode {
+ void (*update)(struct kbase_context *kctx);
+ void (*get_as_setup)(struct kbase_context *kctx,
+ struct kbase_mmu_setup * const setup);
+ void (*disable_as)(struct kbase_device *kbdev, int as_nr);
+ phys_addr_t (*pte_to_phy_addr)(u64 entry);
+ int (*ate_is_valid)(u64 ate);
+ int (*pte_is_valid)(u64 pte);
+ void (*entry_set_ate)(u64 *entry, phys_addr_t phy, unsigned long flags);
+ void (*entry_set_pte)(u64 *entry, phys_addr_t phy);
+ void (*entry_invalidate)(u64 *entry);
+};
+
+struct kbase_mmu_mode const *kbase_mmu_mode_get_lpae(void);
+struct kbase_mmu_mode const *kbase_mmu_mode_get_aarch64(void);
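+
+/* Hypothetical usage sketch: a caller obtains one of the mode tables above and
+ * drives the MMU through its function pointers. The helper below is
+ * illustrative only and not part of the driver interface.
+ */
+static inline void example_program_ate(struct kbase_context *kctx,
+ u64 *entry, phys_addr_t phy, unsigned long flags)
+{
+ struct kbase_mmu_mode const *mode = kbase_mmu_mode_get_lpae();
+
+ mode->entry_set_ate(entry, phy, flags); /* write a valid ATE */
+ mode->update(kctx); /* push the AS setup to the hardware */
+}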
+
+#endif /* _MALI_KBASE_MMU_MODE_ */
diff --git a/midgard-0.r2p0/mali_kbase_mmu_mode_aarch64.c b/midgard-0.r2p0/mali_kbase_mmu_mode_aarch64.c
new file mode 100755
index 0000000..791f3ed
--- /dev/null
+++ b/midgard-0.r2p0/mali_kbase_mmu_mode_aarch64.c
@@ -0,0 +1,200 @@
+/*
+ *
+ * (C) COPYRIGHT 2010-2014, 2016 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained
+ * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
+ * Boston, MA 02110-1301, USA.
+ *
+ */
+
+
+
+
+
+#include "mali_kbase_mmu_mode.h"
+
+#include "mali_kbase.h"
+#include "mali_midg_regmap.h"
+
+#define ENTRY_TYPE_MASK 3ULL
+/* For valid ATEs bit 1 = (level == 3) ? 1 : 0.
+ * The MMU is only ever configured by the driver so that ATEs
+ * are at level 3, so bit 1 should always be set
+ */
+#define ENTRY_IS_ATE 3ULL
+#define ENTRY_IS_INVAL 2ULL
+#define ENTRY_IS_PTE 3ULL
+
+#define ENTRY_ATTR_BITS (7ULL << 2) /* bits 4:2 */
+#define ENTRY_ACCESS_RW (1ULL << 6) /* bits 7:6 */
+#define ENTRY_ACCESS_RO (3ULL << 6)
+#define ENTRY_SHARE_BITS (3ULL << 8) /* bits 9:8 */
+#define ENTRY_ACCESS_BIT (1ULL << 10)
+#define ENTRY_NX_BIT (1ULL << 54)
+
+/* Helper Function to perform assignment of page table entries, to
+ * ensure the use of strd, which is required on LPAE systems.
+ */
+static inline void page_table_entry_set(u64 *pte, u64 phy)
+{
+#ifdef CONFIG_64BIT
+ *pte = phy;
+#elif defined(CONFIG_ARM)
+ /*
+ * In order to prevent the compiler keeping cached copies of
+ * memory, we have to explicitly say that we have updated memory.
+ *
+ * Note: We could manually move the data ourselves into R0 and
+ * R1 by specifying register variables that are explicitly
+ * given registers assignments, the down side of this is that
+ * we have to assume cpu endianness. To avoid this we can use
+ * the ldrd to read the data from memory into R0 and R1 which
+ * will respect the cpu endianness, we then use strd to make
+ * the 64 bit assignment to the page table entry.
+ */
+ asm volatile("ldrd r0, r1, [%[ptemp]]\n\t"
+ "strd r0, r1, [%[pte]]\n\t"
+ : "=m" (*pte)
+ : [ptemp] "r" (&phy), [pte] "r" (pte), "m" (phy)
+ : "r0", "r1");
+#else
+#error "64-bit atomic write must be implemented for your architecture"
+#endif
+}
+
+static void mmu_get_as_setup(struct kbase_context *kctx,
+ struct kbase_mmu_setup * const setup)
+{
+ /* Set up the required caching policies at the correct indices
+ * in the memattr register.
+ */
+ setup->memattr =
+ (AS_MEMATTR_IMPL_DEF_CACHE_POLICY <<
+ (AS_MEMATTR_INDEX_IMPL_DEF_CACHE_POLICY * 8)) |
+ (AS_MEMATTR_FORCE_TO_CACHE_ALL <<
+ (AS_MEMATTR_INDEX_FORCE_TO_CACHE_ALL * 8)) |
+ (AS_MEMATTR_WRITE_ALLOC <<
+ (AS_MEMATTR_INDEX_WRITE_ALLOC * 8)) |
+ (AS_MEMATTR_AARCH64_OUTER_IMPL_DEF <<
+ (AS_MEMATTR_INDEX_OUTER_IMPL_DEF * 8)) |
+ (AS_MEMATTR_AARCH64_OUTER_WA <<
+ (AS_MEMATTR_INDEX_OUTER_WA * 8));
+
+ setup->transtab = (u64)kctx->pgd & AS_TRANSTAB_BASE_MASK;
+ setup->transcfg = AS_TRANSCFG_ADRMODE_AARCH64_4K;
+}
+
+static void mmu_update(struct kbase_context *kctx)
+{
+ struct kbase_device * const kbdev = kctx->kbdev;
+ struct kbase_as * const as = &kbdev->as[kctx->as_nr];
+ struct kbase_mmu_setup * const current_setup = &as->current_setup;
+
+ mmu_get_as_setup(kctx, current_setup);
+
+ /* Apply the address space setting */
+ kbase_mmu_hw_configure(kbdev, as, kctx);
+}
+
+static void mmu_disable_as(struct kbase_device *kbdev, int as_nr)
+{
+ struct kbase_as * const as = &kbdev->as[as_nr];
+ struct kbase_mmu_setup * const current_setup = &as->current_setup;
+
+ current_setup->transtab = 0ULL;
+ current_setup->transcfg = AS_TRANSCFG_ADRMODE_UNMAPPED;
+
+ /* Apply the address space setting */
+ kbase_mmu_hw_configure(kbdev, as, NULL);
+}
+
+static phys_addr_t pte_to_phy_addr(u64 entry)
+{
+ if (!(entry & 1))
+ return 0;
+
+ return entry & ~0xFFF;
+}
+
+static int ate_is_valid(u64 ate)
+{
+ return ((ate & ENTRY_TYPE_MASK) == ENTRY_IS_ATE);
+}
+
+static int pte_is_valid(u64 pte)
+{
+ return ((pte & ENTRY_TYPE_MASK) == ENTRY_IS_PTE);
+}
+
+/*
+ * Map KBASE_REG flags to MMU flags
+ */
+static u64 get_mmu_flags(unsigned long flags)
+{
+ u64 mmu_flags;
+
+ /* store mem_attr index in bits 4:2 (the macro ensures only 3 bits are used) */
+ mmu_flags = KBASE_REG_MEMATTR_VALUE(flags) << 2;
+
+ /* Set access flags - note that AArch64 stage 1 does not support
+ * write-only access, so we use read/write instead
+ */
+ if (flags & KBASE_REG_GPU_WR)
+ mmu_flags |= ENTRY_ACCESS_RW;
+ else if (flags & KBASE_REG_GPU_RD)
+ mmu_flags |= ENTRY_ACCESS_RO;
+
+ /* nx if requested */
+ mmu_flags |= (flags & KBASE_REG_GPU_NX) ? ENTRY_NX_BIT : 0;
+
+ if (flags & KBASE_REG_SHARE_BOTH) {
+ /* inner and outer shareable */
+ mmu_flags |= SHARE_BOTH_BITS;
+ } else if (flags & KBASE_REG_SHARE_IN) {
+ /* inner shareable coherency */
+ mmu_flags |= SHARE_INNER_BITS;
+ }
+
+ return mmu_flags;
+}
+
+static void entry_set_ate(u64 *entry, phys_addr_t phy, unsigned long flags)
+{
+ page_table_entry_set(entry, (phy & ~0xFFF) |
+ get_mmu_flags(flags) |
+ ENTRY_ACCESS_BIT | ENTRY_IS_ATE);
+}
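+
+/* Worked example with hypothetical values: for a readable/writable,
+ * executable, inner-shareable page at physical address 0x80001000 with
+ * MEMATTR index 0, entry_set_ate() above writes
+ * (0x80001000 & ~0xFFF) | ENTRY_ACCESS_RW | SHARE_INNER_BITS |
+ * ENTRY_ACCESS_BIT | ENTRY_IS_ATE
+ * into the page table entry.
+ */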
+
+static void entry_set_pte(u64 *entry, phys_addr_t phy)
+{
+ page_table_entry_set(entry, (phy & ~0xFFF) |
+ ENTRY_ACCESS_BIT | ENTRY_IS_PTE);
+}
+
+static void entry_invalidate(u64 *entry)
+{
+ page_table_entry_set(entry, ENTRY_IS_INVAL);
+}
+
+static struct kbase_mmu_mode const aarch64_mode = {
+ .update = mmu_update,
+ .get_as_setup = mmu_get_as_setup,
+ .disable_as = mmu_disable_as,
+ .pte_to_phy_addr = pte_to_phy_addr,
+ .ate_is_valid = ate_is_valid,
+ .pte_is_valid = pte_is_valid,
+ .entry_set_ate = entry_set_ate,
+ .entry_set_pte = entry_set_pte,
+ .entry_invalidate = entry_invalidate
+};
+
+struct kbase_mmu_mode const *kbase_mmu_mode_get_aarch64(void)
+{
+ return &aarch64_mode;
+}
diff --git a/midgard-0.r2p0/mali_kbase_mmu_mode_lpae.c b/midgard-0.r2p0/mali_kbase_mmu_mode_lpae.c
new file mode 100755
index 0000000..683cabb
--- /dev/null
+++ b/midgard-0.r2p0/mali_kbase_mmu_mode_lpae.c
@@ -0,0 +1,206 @@
+/*
+ *
+ * (C) COPYRIGHT 2010-2016 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained
+ * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
+ * Boston, MA 02110-1301, USA.
+ *
+ */
+
+
+
+
+
+#include "mali_kbase_mmu_mode.h"
+
+#include "mali_kbase.h"
+#include "mali_midg_regmap.h"
+
+#define ENTRY_TYPE_MASK 3ULL
+#define ENTRY_IS_ATE 1ULL
+#define ENTRY_IS_INVAL 2ULL
+#define ENTRY_IS_PTE 3ULL
+
+#define ENTRY_ATTR_BITS (7ULL << 2) /* bits 4:2 */
+#define ENTRY_RD_BIT (1ULL << 6)
+#define ENTRY_WR_BIT (1ULL << 7)
+#define ENTRY_SHARE_BITS (3ULL << 8) /* bits 9:8 */
+#define ENTRY_ACCESS_BIT (1ULL << 10)
+#define ENTRY_NX_BIT (1ULL << 54)
+
+#define ENTRY_FLAGS_MASK (ENTRY_ATTR_BITS | ENTRY_RD_BIT | ENTRY_WR_BIT | \
+ ENTRY_SHARE_BITS | ENTRY_ACCESS_BIT | ENTRY_NX_BIT)
+
+/* Helper Function to perform assignment of page table entries, to
+ * ensure the use of strd, which is required on LPAE systems.
+ */
+static inline void page_table_entry_set(u64 *pte, u64 phy)
+{
+#ifdef CONFIG_64BIT
+ *pte = phy;
+#elif defined(CONFIG_ARM)
+ /*
+ * In order to prevent the compiler keeping cached copies of
+ * memory, we have to explicitly say that we have updated
+ * memory.
+ *
+ * Note: We could manually move the data ourselves into R0 and
+ * R1 by specifying register variables that are explicitly
+ * given registers assignments, the down side of this is that
+ * we have to assume cpu endianness. To avoid this we can use
+ * the ldrd to read the data from memory into R0 and R1 which
+ * will respect the cpu endianness, we then use strd to make
+ * the 64 bit assignment to the page table entry.
+ */
+ asm volatile("ldrd r0, r1, [%[ptemp]]\n\t"
+ "strd r0, r1, [%[pte]]\n\t"
+ : "=m" (*pte)
+ : [ptemp] "r" (&phy), [pte] "r" (pte), "m" (phy)
+ : "r0", "r1");
+#else
+#error "64-bit atomic write must be implemented for your architecture"
+#endif
+}
+
+static void mmu_get_as_setup(struct kbase_context *kctx,
+ struct kbase_mmu_setup * const setup)
+{
+ /* Set up the required caching policies at the correct indices
+ * in the memattr register. */
+ setup->memattr =
+ (AS_MEMATTR_LPAE_IMPL_DEF_CACHE_POLICY <<
+ (AS_MEMATTR_INDEX_IMPL_DEF_CACHE_POLICY * 8)) |
+ (AS_MEMATTR_LPAE_FORCE_TO_CACHE_ALL <<
+ (AS_MEMATTR_INDEX_FORCE_TO_CACHE_ALL * 8)) |
+ (AS_MEMATTR_LPAE_WRITE_ALLOC <<
+ (AS_MEMATTR_INDEX_WRITE_ALLOC * 8)) |
+ (AS_MEMATTR_LPAE_OUTER_IMPL_DEF <<
+ (AS_MEMATTR_INDEX_OUTER_IMPL_DEF * 8)) |
+ (AS_MEMATTR_LPAE_OUTER_WA <<
+ (AS_MEMATTR_INDEX_OUTER_WA * 8)) |
+ 0; /* The other indices are unused for now */
+
+ setup->transtab = ((u64)kctx->pgd &
+ ((0xFFFFFFFFULL << 32) | AS_TRANSTAB_LPAE_ADDR_SPACE_MASK)) |
+ AS_TRANSTAB_LPAE_ADRMODE_TABLE |
+ AS_TRANSTAB_LPAE_READ_INNER;
+
+#ifdef CONFIG_MALI_GPU_MMU_AARCH64
+ setup->transcfg = AS_TRANSCFG_ADRMODE_LEGACY;
+#else
+ setup->transcfg = 0;
+#endif
+}
+
+static void mmu_update(struct kbase_context *kctx)
+{
+ struct kbase_device * const kbdev = kctx->kbdev;
+ struct kbase_as * const as = &kbdev->as[kctx->as_nr];
+ struct kbase_mmu_setup * const current_setup = &as->current_setup;
+
+ mmu_get_as_setup(kctx, current_setup);
+
+ /* Apply the address space setting */
+ kbase_mmu_hw_configure(kbdev, as, kctx);
+}
+
+static void mmu_disable_as(struct kbase_device *kbdev, int as_nr)
+{
+ struct kbase_as * const as = &kbdev->as[as_nr];
+ struct kbase_mmu_setup * const current_setup = &as->current_setup;
+
+ current_setup->transtab = AS_TRANSTAB_LPAE_ADRMODE_UNMAPPED;
+
+#ifdef CONFIG_MALI_GPU_MMU_AARCH64
+ current_setup->transcfg = AS_TRANSCFG_ADRMODE_LEGACY;
+#endif
+
+ /* Apply the address space setting */
+ kbase_mmu_hw_configure(kbdev, as, NULL);
+}
+
+static phys_addr_t pte_to_phy_addr(u64 entry)
+{
+ if (!(entry & 1))
+ return 0;
+
+ return entry & ~0xFFF;
+}
+
+static int ate_is_valid(u64 ate)
+{
+ return ((ate & ENTRY_TYPE_MASK) == ENTRY_IS_ATE);
+}
+
+static int pte_is_valid(u64 pte)
+{
+ return ((pte & ENTRY_TYPE_MASK) == ENTRY_IS_PTE);
+}
+
+/*
+ * Map KBASE_REG flags to MMU flags
+ */
+static u64 get_mmu_flags(unsigned long flags)
+{
+ u64 mmu_flags;
+
+ /* store mem_attr index in bits 4:2 (the macro ensures only 3 bits are used) */
+ mmu_flags = KBASE_REG_MEMATTR_VALUE(flags) << 2;
+
+ /* write perm if requested */
+ mmu_flags |= (flags & KBASE_REG_GPU_WR) ? ENTRY_WR_BIT : 0;
+ /* read perm if requested */
+ mmu_flags |= (flags & KBASE_REG_GPU_RD) ? ENTRY_RD_BIT : 0;
+ /* nx if requested */
+ mmu_flags |= (flags & KBASE_REG_GPU_NX) ? ENTRY_NX_BIT : 0;
+
+ if (flags & KBASE_REG_SHARE_BOTH) {
+ /* inner and outer shareable */
+ mmu_flags |= SHARE_BOTH_BITS;
+ } else if (flags & KBASE_REG_SHARE_IN) {
+ /* inner shareable coherency */
+ mmu_flags |= SHARE_INNER_BITS;
+ }
+
+ return mmu_flags;
+}
+
+static void entry_set_ate(u64 *entry, phys_addr_t phy, unsigned long flags)
+{
+ page_table_entry_set(entry, (phy & ~0xFFF) |
+ get_mmu_flags(flags) |
+ ENTRY_IS_ATE);
+}
+
+static void entry_set_pte(u64 *entry, phys_addr_t phy)
+{
+ page_table_entry_set(entry, (phy & ~0xFFF) | ENTRY_IS_PTE);
+}
+
+static void entry_invalidate(u64 *entry)
+{
+ page_table_entry_set(entry, ENTRY_IS_INVAL);
+}
+
+static struct kbase_mmu_mode const lpae_mode = {
+ .update = mmu_update,
+ .get_as_setup = mmu_get_as_setup,
+ .disable_as = mmu_disable_as,
+ .pte_to_phy_addr = pte_to_phy_addr,
+ .ate_is_valid = ate_is_valid,
+ .pte_is_valid = pte_is_valid,
+ .entry_set_ate = entry_set_ate,
+ .entry_set_pte = entry_set_pte,
+ .entry_invalidate = entry_invalidate
+};
+
+struct kbase_mmu_mode const *kbase_mmu_mode_get_lpae(void)
+{
+ return &lpae_mode;
+}
diff --git a/midgard-0.r2p0/mali_kbase_platform_fake.c b/midgard-0.r2p0/mali_kbase_platform_fake.c
new file mode 100755
index 0000000..1a44957
--- /dev/null
+++ b/midgard-0.r2p0/mali_kbase_platform_fake.c
@@ -0,0 +1,124 @@
+/*
+ *
+ * (C) COPYRIGHT 2011-2014, 2016 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained
+ * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
+ * Boston, MA 02110-1301, USA.
+ *
+ */
+
+
+
+#ifdef CONFIG_MALI_PLATFORM_FAKE
+
+#include <linux/errno.h>
+#include <linux/export.h>
+#include <linux/ioport.h>
+#include <linux/platform_device.h>
+#include <linux/string.h>
+
+
+/*
+ * This file is included only for type definitions and functions belonging to
+ * specific platform folders. Do not add dependencies with symbols that are
+ * defined somewhere else.
+ */
+#include <mali_kbase_config.h>
+
+#define PLATFORM_CONFIG_RESOURCE_COUNT 4
+#define PLATFORM_CONFIG_IRQ_RES_COUNT 3
+
+static struct platform_device *mali_device;
+
+#ifndef CONFIG_OF
+/**
+ * @brief Convert data in struct kbase_io_resources struct to Linux-specific resources
+ *
+ * Converts data from a struct kbase_io_resources into an array of Linux resource structures. Note that the function
+ * assumes the linux_resources array holds at least PLATFORM_CONFIG_RESOURCE_COUNT entries.
+ * Resources are put in fixed order: I/O memory region, job IRQ, MMU IRQ, GPU IRQ.
+ *
+ * @param[in] io_resource Input IO resource data
+ * @param[out] linux_resources Pointer to output array of Linux resource structures
+ */
+static void kbasep_config_parse_io_resources(const struct kbase_io_resources *io_resources, struct resource *const linux_resources)
+{
+ if (!io_resources || !linux_resources) {
+ pr_err("%s: couldn't find proper resources\n", __func__);
+ return;
+ }
+
+ memset(linux_resources, 0, PLATFORM_CONFIG_RESOURCE_COUNT * sizeof(struct resource));
+
+ linux_resources[0].start = io_resources->io_memory_region.start;
+ linux_resources[0].end = io_resources->io_memory_region.end;
+ linux_resources[0].flags = IORESOURCE_MEM;
+
+ linux_resources[1].start = io_resources->job_irq_number;
+ linux_resources[1].end = io_resources->job_irq_number;
+ linux_resources[1].flags = IORESOURCE_IRQ | IORESOURCE_IRQ_HIGHLEVEL;
+
+ linux_resources[2].start = io_resources->mmu_irq_number;
+ linux_resources[2].end = io_resources->mmu_irq_number;
+ linux_resources[2].flags = IORESOURCE_IRQ | IORESOURCE_IRQ_HIGHLEVEL;
+
+ linux_resources[3].start = io_resources->gpu_irq_number;
+ linux_resources[3].end = io_resources->gpu_irq_number;
+ linux_resources[3].flags = IORESOURCE_IRQ | IORESOURCE_IRQ_HIGHLEVEL;
+}
+#endif /* CONFIG_OF */
+
+int kbase_platform_fake_register(void)
+{
+ struct kbase_platform_config *config;
+#ifndef CONFIG_OF
+ struct resource resources[PLATFORM_CONFIG_RESOURCE_COUNT];
+#endif
+ int err;
+
+ config = kbase_get_platform_config(); /* declared in midgard/mali_kbase_config.h but defined in platform folder */
+ if (config == NULL) {
+ pr_err("%s: couldn't get platform config\n", __func__);
+ return -ENODEV;
+ }
+
+ mali_device = platform_device_alloc("mali", 0);
+ if (mali_device == NULL)
+ return -ENOMEM;
+
+#ifndef CONFIG_OF
+ kbasep_config_parse_io_resources(config->io_resources, resources);
+ err = platform_device_add_resources(mali_device, resources, PLATFORM_CONFIG_RESOURCE_COUNT);
+ if (err) {
+ platform_device_put(mali_device);
+ mali_device = NULL;
+ return err;
+ }
+#endif /* CONFIG_OF */
+
+ err = platform_device_add(mali_device);
+ if (err) {
+ platform_device_unregister(mali_device);
+ mali_device = NULL;
+ return err;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL(kbase_platform_fake_register);
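+
+/* Hypothetical sketch of the configuration a platform folder might supply for
+ * kbase_get_platform_config() to return. The addresses and IRQ numbers are
+ * placeholders, not real board values, and these symbols are illustrative
+ * only.
+ */
+static struct kbase_io_resources example_io_resources = {
+ .job_irq_number = 68,
+ .mmu_irq_number = 69,
+ .gpu_irq_number = 70,
+ .io_memory_region = {
+ .start = 0xFC010000,
+ .end = 0xFC010000 + (4096 * 4) - 1,
+ },
+};
+
+static struct kbase_platform_config example_platform_config = {
+ .io_resources = &example_io_resources,
+};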
+
+void kbase_platform_fake_unregister(void)
+{
+ if (mali_device)
+ platform_device_unregister(mali_device);
+}
+EXPORT_SYMBOL(kbase_platform_fake_unregister);
+
+#endif /* CONFIG_MALI_PLATFORM_FAKE */
+
diff --git a/midgard-0.r2p0/mali_kbase_pm.c b/midgard-0.r2p0/mali_kbase_pm.c
new file mode 100755
index 0000000..97d5434
--- /dev/null
+++ b/midgard-0.r2p0/mali_kbase_pm.c
@@ -0,0 +1,205 @@
+/*
+ *
+ * (C) COPYRIGHT 2010-2016 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained
+ * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
+ * Boston, MA 02110-1301, USA.
+ *
+ */
+
+
+
+
+
+/**
+ * @file mali_kbase_pm.c
+ * Base kernel power management APIs
+ */
+
+#include <mali_kbase.h>
+#include <mali_midg_regmap.h>
+#include <mali_kbase_vinstr.h>
+
+#include <mali_kbase_pm.h>
+
+int kbase_pm_powerup(struct kbase_device *kbdev, unsigned int flags)
+{
+ return kbase_hwaccess_pm_powerup(kbdev, flags);
+}
+
+void kbase_pm_halt(struct kbase_device *kbdev)
+{
+ kbase_hwaccess_pm_halt(kbdev);
+}
+
+void kbase_pm_context_active(struct kbase_device *kbdev)
+{
+ (void)kbase_pm_context_active_handle_suspend(kbdev, KBASE_PM_SUSPEND_HANDLER_NOT_POSSIBLE);
+}
+
+int kbase_pm_context_active_handle_suspend(struct kbase_device *kbdev, enum kbase_pm_suspend_handler suspend_handler)
+{
+ struct kbasep_js_device_data *js_devdata = &kbdev->js_data;
+ int c;
+ int old_count;
+
+ KBASE_DEBUG_ASSERT(kbdev != NULL);
+
+ /* Trace timeline information about how long it took to handle the decision
+ * to power up. Sometimes the event might be missed because the count is read
+ * outside of the mutex, but this is necessary to get the trace timing
+ * correct. */
+ old_count = kbdev->pm.active_count;
+ if (old_count == 0)
+ kbase_timeline_pm_send_event(kbdev, KBASE_TIMELINE_PM_EVENT_GPU_ACTIVE);
+
+ mutex_lock(&js_devdata->runpool_mutex);
+ mutex_lock(&kbdev->pm.lock);
+ if (kbase_pm_is_suspending(kbdev)) {
+ switch (suspend_handler) {
+ case KBASE_PM_SUSPEND_HANDLER_DONT_REACTIVATE:
+ if (kbdev->pm.active_count != 0)
+ break;
+ /* FALLTHROUGH */
+ case KBASE_PM_SUSPEND_HANDLER_DONT_INCREASE:
+ mutex_unlock(&kbdev->pm.lock);
+ mutex_unlock(&js_devdata->runpool_mutex);
+ if (old_count == 0)
+ kbase_timeline_pm_handle_event(kbdev, KBASE_TIMELINE_PM_EVENT_GPU_ACTIVE);
+ return 1;
+
+ case KBASE_PM_SUSPEND_HANDLER_NOT_POSSIBLE:
+ /* FALLTHROUGH */
+ default:
+ KBASE_DEBUG_ASSERT_MSG(false, "unreachable");
+ break;
+ }
+ }
+ c = ++kbdev->pm.active_count;
+ KBASE_TIMELINE_CONTEXT_ACTIVE(kbdev, c);
+ KBASE_TRACE_ADD_REFCOUNT(kbdev, PM_CONTEXT_ACTIVE, NULL, NULL, 0u, c);
+
+ /* Trace the event being handled */
+ if (old_count == 0)
+ kbase_timeline_pm_handle_event(kbdev, KBASE_TIMELINE_PM_EVENT_GPU_ACTIVE);
+
+ if (c == 1)
+ /* First context active: Power on the GPU and any cores requested by
+ * the policy */
+ kbase_hwaccess_pm_gpu_active(kbdev);
+
+ mutex_unlock(&kbdev->pm.lock);
+ mutex_unlock(&js_devdata->runpool_mutex);
+
+ return 0;
+}
+
+KBASE_EXPORT_TEST_API(kbase_pm_context_active);
+
+void kbase_pm_context_idle(struct kbase_device *kbdev)
+{
+ struct kbasep_js_device_data *js_devdata = &kbdev->js_data;
+ int c;
+ int old_count;
+
+ KBASE_DEBUG_ASSERT(kbdev != NULL);
+
+ /* Trace timeline information about how long it took to handle the decision
+ * to power down. Sometimes the event might be missed because the count is
+ * read outside of the mutex, but this is necessary to get the trace timing
+ * correct. */
+ old_count = kbdev->pm.active_count;
+ if (old_count == 0)
+ kbase_timeline_pm_send_event(kbdev, KBASE_TIMELINE_PM_EVENT_GPU_IDLE);
+
+ mutex_lock(&js_devdata->runpool_mutex);
+ mutex_lock(&kbdev->pm.lock);
+
+ c = --kbdev->pm.active_count;
+ KBASE_TIMELINE_CONTEXT_ACTIVE(kbdev, c);
+ KBASE_TRACE_ADD_REFCOUNT(kbdev, PM_CONTEXT_IDLE, NULL, NULL, 0u, c);
+
+ KBASE_DEBUG_ASSERT(c >= 0);
+
+ /* Trace the event being handled */
+ if (old_count == 0)
+ kbase_timeline_pm_handle_event(kbdev, KBASE_TIMELINE_PM_EVENT_GPU_IDLE);
+
+ if (c == 0) {
+ /* Last context has gone idle */
+ kbase_hwaccess_pm_gpu_idle(kbdev);
+
+ /* Wake up anyone waiting for this to become 0 (e.g. suspend). The
+ * waiters must synchronize with us by locking the pm.lock after
+ * waiting */
+ wake_up(&kbdev->pm.zero_active_count_wait);
+ }
+
+ mutex_unlock(&kbdev->pm.lock);
+ mutex_unlock(&js_devdata->runpool_mutex);
+}
+
+KBASE_EXPORT_TEST_API(kbase_pm_context_idle);
+
+void kbase_pm_suspend(struct kbase_device *kbdev)
+{
+ KBASE_DEBUG_ASSERT(kbdev);
+
+ /* Suspend vinstr.
+ * This call will block until vinstr is suspended. */
+ kbase_vinstr_suspend(kbdev->vinstr_ctx);
+
+ mutex_lock(&kbdev->pm.lock);
+ KBASE_DEBUG_ASSERT(!kbase_pm_is_suspending(kbdev));
+ kbdev->pm.suspending = true;
+ mutex_unlock(&kbdev->pm.lock);
+
+ /* From now on, the active count will drop towards zero. Sometimes, it'll
+ * go up briefly before going down again. However, once it reaches zero it
+ * will stay there - guaranteeing that we've idled all pm references */
+
+ /* Suspend job scheduler and associated components, so that it releases all
+ * the PM active count references */
+ kbasep_js_suspend(kbdev);
+
+ /* Wait for the active count to reach zero. This is not the same as
+ * waiting for a power down, since not all policies power down when this
+ * reaches zero. */
+ wait_event(kbdev->pm.zero_active_count_wait, kbdev->pm.active_count == 0);
+
+ /* NOTE: We synchronize with anything that was just finishing a
+ * kbase_pm_context_idle() call by locking the pm.lock below */
+
+ kbase_hwaccess_pm_suspend(kbdev);
+}
+
+void kbase_pm_resume(struct kbase_device *kbdev)
+{
+ /* MUST happen before any pm_context_active calls occur */
+ kbase_hwaccess_pm_resume(kbdev);
+
+ /* Initial active call, to power on the GPU/cores if needed */
+ kbase_pm_context_active(kbdev);
+
+ /* Resume any blocked atoms (which may cause contexts to be scheduled in
+ * and dependent atoms to run) */
+ kbase_resume_suspended_soft_jobs(kbdev);
+
+ /* Resume the Job Scheduler and associated components, and start running
+ * atoms */
+ kbasep_js_resume(kbdev);
+
+ /* Matching idle call, to power off the GPU/cores if we didn't actually
+ * need it and the policy doesn't want it on */
+ kbase_pm_context_idle(kbdev);
+
+ /* Resume vinstr operation */
+ kbase_vinstr_resume(kbdev->vinstr_ctx);
+}
+
diff --git a/midgard-0.r2p0/mali_kbase_pm.h b/midgard-0.r2p0/mali_kbase_pm.h
new file mode 100755
index 0000000..37fa247
--- /dev/null
+++ b/midgard-0.r2p0/mali_kbase_pm.h
@@ -0,0 +1,171 @@
+/*
+ *
+ * (C) COPYRIGHT 2010-2015 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained
+ * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
+ * Boston, MA 02110-1301, USA.
+ *
+ */
+
+
+
+
+
+/**
+ * @file mali_kbase_pm.h
+ * Power management API definitions
+ */
+
+#ifndef _KBASE_PM_H_
+#define _KBASE_PM_H_
+
+#include "mali_kbase_hwaccess_pm.h"
+
+#define PM_ENABLE_IRQS 0x01
+#define PM_HW_ISSUES_DETECT 0x02
+
+
+/** Initialize the power management framework.
+ *
+ * Must be called before any other power management function
+ *
+ * @param kbdev The kbase device structure for the device (must be a valid pointer)
+ *
+ * @return 0 if the power management framework was successfully initialized.
+ */
+int kbase_pm_init(struct kbase_device *kbdev);
+
+/** Power up GPU after all modules have been initialized and interrupt handlers installed.
+ *
+ * @param kbdev The kbase device structure for the device (must be a valid pointer)
+ *
+ * @param flags Flags to pass on to kbase_pm_init_hw
+ *
+ * @return 0 if powerup was successful.
+ */
+int kbase_pm_powerup(struct kbase_device *kbdev, unsigned int flags);
+
+/**
+ * Halt the power management framework.
+ * Should ensure that no new interrupts are generated,
+ * but allow any currently running interrupt handlers to complete successfully.
+ * The GPU is forced off by the time this function returns, regardless of
+ * whether or not the active power policy asks for the GPU to be powered off.
+ *
+ * @param kbdev The kbase device structure for the device (must be a valid pointer)
+ */
+void kbase_pm_halt(struct kbase_device *kbdev);
+
+/** Terminate the power management framework.
+ *
+ * No power management functions may be called after this
+ * (except @ref kbase_pm_init)
+ *
+ * @param kbdev The kbase device structure for the device (must be a valid pointer)
+ */
+void kbase_pm_term(struct kbase_device *kbdev);
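+
+/* A minimal, hypothetical bring-up sketch using the declarations above
+ * (error handling trimmed; the flag combination is illustrative).
+ */
+static inline int example_pm_bringup(struct kbase_device *kbdev)
+{
+ int err = kbase_pm_init(kbdev);
+
+ if (err)
+ return err;
+
+ err = kbase_pm_powerup(kbdev, PM_ENABLE_IRQS | PM_HW_ISSUES_DETECT);
+ if (err)
+ kbase_pm_term(kbdev);
+
+ return err;
+}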
+
+/** Increment the count of active contexts.
+ *
+ * This function should be called when a context is about to submit a job. It informs the active power policy that the
+ * GPU is going to be in use shortly and the policy is expected to start turning on the GPU.
+ *
+ * This function will block until the GPU is available.
+ *
+ * This function ASSERTS if a suspend is occurring/has occurred whilst this is
+ * in use. Use kbase_pm_context_active_unless_suspending() instead.
+ *
+ * @note a Suspend is only visible to Kernel threads; user-space threads in a
+ * syscall cannot witness a suspend, because they are frozen before the suspend
+ * begins.
+ *
+ * @param kbdev The kbase device structure for the device (must be a valid pointer)
+ */
+void kbase_pm_context_active(struct kbase_device *kbdev);
+
+
+/** Handler codes for doing kbase_pm_context_active_handle_suspend() */
+enum kbase_pm_suspend_handler {
+ /** A suspend is not expected/not possible - this is the same as
+ * kbase_pm_context_active() */
+ KBASE_PM_SUSPEND_HANDLER_NOT_POSSIBLE,
+ /** If we're suspending, fail and don't increase the active count */
+ KBASE_PM_SUSPEND_HANDLER_DONT_INCREASE,
+ /** If we're suspending, succeed and allow the active count to increase iff
+ * it didn't go from 0->1 (i.e., we didn't re-activate the GPU).
+ *
+ * This should only be used when there is a bounded time on the activation
+ * (e.g. guarantee it's going to be idled very soon after) */
+ KBASE_PM_SUSPEND_HANDLER_DONT_REACTIVATE
+};
+
+/** Suspend 'safe' variant of kbase_pm_context_active()
+ *
+ * If a suspend is in progress, this allows for various ways of
+ * handling the suspend. Refer to @ref enum kbase_pm_suspend_handler for details.
+ *
+ * This function returns a status code indicating whether we're allowed to keep the GPU
+ * active during the suspend, depending on the handler code. If the status code
+ * indicates a failure, the caller must abort whatever operation it was
+ * attempting, and potentially queue it up for after the OS has resumed.
+ *
+ * @param kbdev The kbase device structure for the device (must be a valid pointer)
+ * @param suspend_handler The handler code for how to handle a suspend that might occur
+ * @return zero Indicates success
+ * @return non-zero Indicates failure because the system is suspending or suspended.
+ */
+int kbase_pm_context_active_handle_suspend(struct kbase_device *kbdev, enum kbase_pm_suspend_handler suspend_handler);
+
+/** Decrement the reference count of active contexts.
+ *
+ * This function should be called when a context becomes idle. After this call the GPU may be turned off by the power
+ * policy so the calling code should ensure that it does not access the GPU's registers.
+ *
+ * @param kbdev The kbase device structure for the device (must be a valid pointer)
+ */
+void kbase_pm_context_idle(struct kbase_device *kbdev);
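+
+/* Hypothetical caller sketch: take a PM active reference unless the system is
+ * suspending, do some GPU work, then drop the reference. The helper name and
+ * the error code returned on refusal are illustrative only.
+ */
+static inline int example_do_gpu_work(struct kbase_device *kbdev)
+{
+ if (kbase_pm_context_active_handle_suspend(kbdev,
+ KBASE_PM_SUSPEND_HANDLER_DONT_INCREASE))
+ return -EAGAIN; /* suspending: retry after resume */
+
+ /* ... access GPU registers / submit work here ... */
+
+ kbase_pm_context_idle(kbdev);
+ return 0;
+}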
+
+/**
+ * Suspend the GPU and prevent any further register accesses to it from Kernel
+ * threads.
+ *
+ * This is called in response to an OS suspend event, and calls into the various
+ * kbase components to complete the suspend.
+ *
+ * @note the mechanisms used here rely on all user-space threads being frozen
+ * by the OS before we suspend. Otherwise, an IOCTL could occur that powers up
+ * the GPU e.g. via atom submission.
+ *
+ * @param kbdev The kbase device structure for the device (must be a valid pointer)
+ */
+void kbase_pm_suspend(struct kbase_device *kbdev);
+
+/**
+ * Resume the GPU, allow register accesses to it, and resume running atoms on
+ * the GPU.
+ *
+ * This is called in response to an OS resume event, and calls into the various
+ * kbase components to complete the resume.
+ *
+ * @param kbdev The kbase device structure for the device (must be a valid pointer)
+ */
+void kbase_pm_resume(struct kbase_device *kbdev);
+
+/**
+ * kbase_pm_vsync_callback - vsync callback
+ *
+ * @buffer_updated: 1 if a new frame was displayed, 0 otherwise
+ * @data: Pointer to the kbase device as returned by kbase_find_device()
+ *
+ * Callback function used to notify the power management code that a vsync has
+ * occurred on the display.
+ */
+void kbase_pm_vsync_callback(int buffer_updated, void *data);
+
+#endif /* _KBASE_PM_H_ */
diff --git a/midgard-0.r2p0/mali_kbase_profiling_gator_api.h b/midgard-0.r2p0/mali_kbase_profiling_gator_api.h
new file mode 100755
index 0000000..7fb674e
--- /dev/null
+++ b/midgard-0.r2p0/mali_kbase_profiling_gator_api.h
@@ -0,0 +1,40 @@
+/*
+ *
+ * (C) COPYRIGHT 2010, 2013 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained
+ * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
+ * Boston, MA 02110-1301, USA.
+ *
+ */
+
+
+
+/**
+ * @file mali_kbase_profiling_gator_api.h
+ * Model interface
+ */
+
+#ifndef _KBASE_PROFILING_GATOR_API_H_
+#define _KBASE_PROFILING_GATOR_API_H_
+
+/*
+ * List of possible actions to be controlled by Streamline.
+ * The following numbers are used by gator to control
+ * the frame buffer dumping and s/w counter reporting.
+ */
+#define FBDUMP_CONTROL_ENABLE (1)
+#define FBDUMP_CONTROL_RATE (2)
+#define SW_COUNTER_ENABLE (3)
+#define FBDUMP_CONTROL_RESIZE_FACTOR (4)
+#define FBDUMP_CONTROL_MAX (5)
+#define FBDUMP_CONTROL_MIN FBDUMP_CONTROL_ENABLE
+
+void _mali_profiling_control(u32 action, u32 value);
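+
+/* Hypothetical caller sketch (gator side): enable s/w counter reporting and
+ * set a frame buffer dump rate control. The values are illustrative only.
+ */
+static inline void example_profiling_setup(void)
+{
+ _mali_profiling_control(SW_COUNTER_ENABLE, 1);
+ _mali_profiling_control(FBDUMP_CONTROL_RATE, 4);
+}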
+
+#endif /* _KBASE_PROFILING_GATOR_API_H_ */
diff --git a/midgard-0.r2p0/mali_kbase_regs_history_debugfs.c b/midgard-0.r2p0/mali_kbase_regs_history_debugfs.c
new file mode 100755
index 0000000..c970650
--- /dev/null
+++ b/midgard-0.r2p0/mali_kbase_regs_history_debugfs.c
@@ -0,0 +1,130 @@
+/*
+ *
+ * (C) COPYRIGHT 2016 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained
+ * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
+ * Boston, MA 02110-1301, USA.
+ *
+ */
+
+
+
+#include "mali_kbase.h"
+
+#include "mali_kbase_regs_history_debugfs.h"
+
+#if defined(CONFIG_DEBUG_FS) && !defined(CONFIG_MALI_NO_MALI)
+
+#include <linux/debugfs.h>
+
+
+static int regs_history_size_get(void *data, u64 *val)
+{
+ struct kbase_io_history *const h = data;
+
+ *val = h->size;
+
+ return 0;
+}
+
+static int regs_history_size_set(void *data, u64 val)
+{
+ struct kbase_io_history *const h = data;
+
+ return kbase_io_history_resize(h, (u16)val);
+}
+
+
+DEFINE_SIMPLE_ATTRIBUTE(regs_history_size_fops,
+ regs_history_size_get,
+ regs_history_size_set,
+ "%llu\n");
+
+
+/**
+ * regs_history_show - show callback for the register access history file.
+ *
+ * @sfile: The debugfs entry
+ * @data: Data associated with the entry
+ *
+ * This function is called to dump all recent accesses to the GPU registers.
+ *
+ * @return 0 if the data was successfully printed to the debugfs entry file,
+ * an error code otherwise
+ */
+static int regs_history_show(struct seq_file *sfile, void *data)
+{
+ struct kbase_io_history *const h = sfile->private;
+ u16 i;
+ size_t iters;
+ unsigned long flags;
+
+ if (!h->enabled) {
+ seq_puts(sfile, "The register access history is disabled\n");
+ goto out;
+ }
+
+ spin_lock_irqsave(&h->lock, flags);
+
+ iters = (h->size > h->count) ? h->count : h->size;
+ seq_printf(sfile, "Last %zu register accesses of %zu total:\n", iters,
+ h->count);
+ for (i = 0; i < iters; ++i) {
+ struct kbase_io_access *io =
+ &h->buf[(h->count - iters + i) % h->size];
+ char const access = (io->addr & 1) ? 'w' : 'r';
+
+ seq_printf(sfile, "%6i: %c: reg 0x%p val %08x\n", i, access,
+ (void *)(io->addr & ~0x1), io->value);
+ }
+
+ spin_unlock_irqrestore(&h->lock, flags);
+
+out:
+ return 0;
+}
+
+
+/**
+ * regs_history_open - open operation for regs_history debugfs file
+ *
+ * @in: &struct inode pointer
+ * @file: &struct file pointer
+ *
+ * @return 0 on success, error code otherwise
+ */
+static int regs_history_open(struct inode *in, struct file *file)
+{
+ return single_open(file, &regs_history_show, in->i_private);
+}
+
+
+static const struct file_operations regs_history_fops = {
+ .open = &regs_history_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+
+void kbasep_regs_history_debugfs_init(struct kbase_device *kbdev)
+{
+ debugfs_create_bool("regs_history_enabled", S_IRUGO | S_IWUSR,
+ kbdev->mali_debugfs_directory,
+ &kbdev->io_history.enabled);
+ debugfs_create_file("regs_history_size", S_IRUGO | S_IWUSR,
+ kbdev->mali_debugfs_directory,
+ &kbdev->io_history, &regs_history_size_fops);
+ debugfs_create_file("regs_history", S_IRUGO,
+ kbdev->mali_debugfs_directory, &kbdev->io_history,
+ &regs_history_fops);
+}
+
+
+#endif /* CONFIG_DEBUG_FS */
diff --git a/midgard-0.r2p0/mali_kbase_regs_history_debugfs.h b/midgard-0.r2p0/mali_kbase_regs_history_debugfs.h
new file mode 100755
index 0000000..f108370
--- /dev/null
+++ b/midgard-0.r2p0/mali_kbase_regs_history_debugfs.h
@@ -0,0 +1,50 @@
+/*
+ *
+ * (C) COPYRIGHT 2016 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained
+ * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
+ * Boston, MA 02110-1301, USA.
+ *
+ */
+
+
+
+/**
+ * Header file for register access history support via debugfs
+ *
+ * This interface is made available via /sys/kernel/debug/mali#/regs_history*.
+ *
+ * Usage:
+ * - regs_history_enabled: whether recording of register accesses is enabled.
+ * Write 'y' to enable, 'n' to disable.
+ * - regs_history_size: size of the register history buffer, must be > 0
+ * - regs_history: return the information about last accesses to the registers.
+ */
+
+#ifndef _KBASE_REGS_HISTORY_DEBUGFS_H
+#define _KBASE_REGS_HISTORY_DEBUGFS_H
+
+struct kbase_device;
+
+#if defined(CONFIG_DEBUG_FS) && !defined(CONFIG_MALI_NO_MALI)
+
+/**
+ * kbasep_regs_history_debugfs_init - add debugfs entries for register history
+ *
+ * @kbdev: Pointer to kbase_device containing the register history
+ */
+void kbasep_regs_history_debugfs_init(struct kbase_device *kbdev);
+
+#else /* CONFIG_DEBUG_FS */
+
+#define kbasep_regs_history_debugfs_init CSTD_NOP
+
+#endif /* CONFIG_DEBUG_FS */
+
+#endif /*_KBASE_REGS_HISTORY_DEBUGFS_H*/
diff --git a/midgard-0.r2p0/mali_kbase_replay.c b/midgard-0.r2p0/mali_kbase_replay.c
new file mode 100755
index 0000000..84aa331
--- /dev/null
+++ b/midgard-0.r2p0/mali_kbase_replay.c
@@ -0,0 +1,1166 @@
+/*
+ *
+ * (C) COPYRIGHT 2014-2016 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained
+ * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
+ * Boston, MA 02110-1301, USA.
+ *
+ */
+
+
+
+/**
+ * @file mali_kbase_replay.c
+ * Replay soft job handlers
+ */
+
+#include <linux/dma-mapping.h>
+#include <mali_kbase_config.h>
+#include <mali_kbase.h>
+#include <mali_kbase_mem.h>
+#include <mali_kbase_mem_linux.h>
+
+#define JOB_NOT_STARTED 0
+#define JOB_TYPE_NULL (1)
+#define JOB_TYPE_VERTEX (5)
+#define JOB_TYPE_TILER (7)
+#define JOB_TYPE_FUSED (8)
+#define JOB_TYPE_FRAGMENT (9)
+
+#define JOB_HEADER_32_FBD_OFFSET (31*4)
+#define JOB_HEADER_64_FBD_OFFSET (44*4)
+
+#define FBD_POINTER_MASK (~0x3f)
+
+#define SFBD_TILER_OFFSET (48*4)
+
+#define MFBD_TILER_OFFSET (14*4)
+
+#define FBD_HIERARCHY_WEIGHTS 8
+#define FBD_HIERARCHY_MASK_MASK 0x1fff
+
+#define FBD_TYPE 1
+
+#define HIERARCHY_WEIGHTS 13
+
+#define JOB_HEADER_ID_MAX 0xffff
+
+#define JOB_SOURCE_ID(status) (((status) >> 16) & 0xFFFF)
+#define JOB_POLYGON_LIST (0x03)
+
+struct fragment_job {
+ struct job_descriptor_header header;
+
+ u32 x[2];
+ union {
+ u64 _64;
+ u32 _32;
+ } fragment_fbd;
+};
+
+static void dump_job_head(struct kbase_context *kctx, char *head_str,
+ struct job_descriptor_header *job)
+{
+#ifdef CONFIG_MALI_DEBUG
+ dev_dbg(kctx->kbdev->dev, "%s\n", head_str);
+ dev_dbg(kctx->kbdev->dev,
+ "addr = %p\n"
+ "exception_status = %x (Source ID: 0x%x Access: 0x%x Exception: 0x%x)\n"
+ "first_incomplete_task = %x\n"
+ "fault_pointer = %llx\n"
+ "job_descriptor_size = %x\n"
+ "job_type = %x\n"
+ "job_barrier = %x\n"
+ "_reserved_01 = %x\n"
+ "_reserved_02 = %x\n"
+ "_reserved_03 = %x\n"
+ "_reserved_04/05 = %x,%x\n"
+ "job_index = %x\n"
+ "dependencies = %x,%x\n",
+ job, job->exception_status,
+ JOB_SOURCE_ID(job->exception_status),
+ (job->exception_status >> 8) & 0x3,
+ job->exception_status & 0xFF,
+ job->first_incomplete_task,
+ job->fault_pointer, job->job_descriptor_size,
+ job->job_type, job->job_barrier, job->_reserved_01,
+ job->_reserved_02, job->_reserved_03,
+ job->_reserved_04, job->_reserved_05,
+ job->job_index,
+ job->job_dependency_index_1,
+ job->job_dependency_index_2);
+
+ if (job->job_descriptor_size)
+ dev_dbg(kctx->kbdev->dev, "next = %llx\n",
+ job->next_job._64);
+ else
+ dev_dbg(kctx->kbdev->dev, "next = %x\n",
+ job->next_job._32);
+#endif
+}
+
+static int kbasep_replay_reset_sfbd(struct kbase_context *kctx,
+ u64 fbd_address, u64 tiler_heap_free,
+ u16 hierarchy_mask, u32 default_weight)
+{
+ struct {
+ u32 padding_1[1];
+ u32 flags;
+ u64 padding_2[2];
+ u64 heap_free_address;
+ u32 padding[8];
+ u32 weights[FBD_HIERARCHY_WEIGHTS];
+ } *fbd_tiler;
+ struct kbase_vmap_struct map;
+
+ dev_dbg(kctx->kbdev->dev, "fbd_address: %llx\n", fbd_address);
+
+ fbd_tiler = kbase_vmap(kctx, fbd_address + SFBD_TILER_OFFSET,
+ sizeof(*fbd_tiler), &map);
+ if (!fbd_tiler) {
+ dev_err(kctx->kbdev->dev, "kbasep_replay_reset_fbd: failed to map fbd\n");
+ return -EINVAL;
+ }
+
+#ifdef CONFIG_MALI_DEBUG
+ dev_dbg(kctx->kbdev->dev,
+ "FBD tiler:\n"
+ "flags = %x\n"
+ "heap_free_address = %llx\n",
+ fbd_tiler->flags, fbd_tiler->heap_free_address);
+#endif
+ if (hierarchy_mask) {
+ u32 weights[HIERARCHY_WEIGHTS];
+ u16 old_hierarchy_mask = fbd_tiler->flags &
+ FBD_HIERARCHY_MASK_MASK;
+ int i, j = 0;
+
+ for (i = 0; i < HIERARCHY_WEIGHTS; i++) {
+ if (old_hierarchy_mask & (1 << i)) {
+ KBASE_DEBUG_ASSERT(j < FBD_HIERARCHY_WEIGHTS);
+ weights[i] = fbd_tiler->weights[j++];
+ } else {
+ weights[i] = default_weight;
+ }
+ }
+
+
+ dev_dbg(kctx->kbdev->dev, "Old hierarchy mask=%x New hierarchy mask=%x\n",
+ old_hierarchy_mask, hierarchy_mask);
+
+ for (i = 0; i < HIERARCHY_WEIGHTS; i++)
+ dev_dbg(kctx->kbdev->dev, " Hierarchy weight %02d: %08x\n",
+ i, weights[i]);
+
+ j = 0;
+
+ for (i = 0; i < HIERARCHY_WEIGHTS; i++) {
+ if (hierarchy_mask & (1 << i)) {
+ KBASE_DEBUG_ASSERT(j < FBD_HIERARCHY_WEIGHTS);
+
+ dev_dbg(kctx->kbdev->dev, " Writing hierarchy level %02d (%08x) to %d\n",
+ i, weights[i], j);
+
+ fbd_tiler->weights[j++] = weights[i];
+ }
+ }
+
+ for (; j < FBD_HIERARCHY_WEIGHTS; j++)
+ fbd_tiler->weights[j] = 0;
+
+ fbd_tiler->flags = hierarchy_mask | (1 << 16);
+ }
+
+ fbd_tiler->heap_free_address = tiler_heap_free;
+
+ dev_dbg(kctx->kbdev->dev, "heap_free_address=%llx flags=%x\n",
+ fbd_tiler->heap_free_address, fbd_tiler->flags);
+
+ kbase_vunmap(kctx, &map);
+
+ return 0;
+}
+
+static int kbasep_replay_reset_mfbd(struct kbase_context *kctx,
+ u64 fbd_address, u64 tiler_heap_free,
+ u16 hierarchy_mask, u32 default_weight)
+{
+ struct kbase_vmap_struct map;
+ struct {
+ u32 padding_0;
+ u32 flags;
+ u64 padding_1[2];
+ u64 heap_free_address;
+ u64 padding_2;
+ u32 weights[FBD_HIERARCHY_WEIGHTS];
+ } *fbd_tiler;
+
+ dev_dbg(kctx->kbdev->dev, "fbd_address: %llx\n", fbd_address);
+
+ fbd_tiler = kbase_vmap(kctx, fbd_address + MFBD_TILER_OFFSET,
+ sizeof(*fbd_tiler), &map);
+ if (!fbd_tiler) {
+ dev_err(kctx->kbdev->dev,
+ "kbasep_replay_reset_fbd: failed to map fbd\n");
+ return -EINVAL;
+ }
+
+#ifdef CONFIG_MALI_DEBUG
+ dev_dbg(kctx->kbdev->dev, "FBD tiler:\n"
+ "flags = %x\n"
+ "heap_free_address = %llx\n",
+ fbd_tiler->flags,
+ fbd_tiler->heap_free_address);
+#endif
+ if (hierarchy_mask) {
+ u32 weights[HIERARCHY_WEIGHTS];
+ u16 old_hierarchy_mask = (fbd_tiler->flags) &
+ FBD_HIERARCHY_MASK_MASK;
+ int i, j = 0;
+
+ for (i = 0; i < HIERARCHY_WEIGHTS; i++) {
+ if (old_hierarchy_mask & (1 << i)) {
+ KBASE_DEBUG_ASSERT(j < FBD_HIERARCHY_WEIGHTS);
+ weights[i] = fbd_tiler->weights[j++];
+ } else {
+ weights[i] = default_weight;
+ }
+ }
+
+
+ dev_dbg(kctx->kbdev->dev, "Old hierarchy mask=%x New hierarchy mask=%x\n",
+ old_hierarchy_mask, hierarchy_mask);
+
+ for (i = 0; i < HIERARCHY_WEIGHTS; i++)
+ dev_dbg(kctx->kbdev->dev, " Hierarchy weight %02d: %08x\n",
+ i, weights[i]);
+
+ j = 0;
+
+ for (i = 0; i < HIERARCHY_WEIGHTS; i++) {
+ if (hierarchy_mask & (1 << i)) {
+ KBASE_DEBUG_ASSERT(j < FBD_HIERARCHY_WEIGHTS);
+
+ dev_dbg(kctx->kbdev->dev,
+ " Writing hierarchy level %02d (%08x) to %d\n",
+ i, weights[i], j);
+
+ fbd_tiler->weights[j++] = weights[i];
+ }
+ }
+
+ for (; j < FBD_HIERARCHY_WEIGHTS; j++)
+ fbd_tiler->weights[j] = 0;
+
+ fbd_tiler->flags = hierarchy_mask | (1 << 16);
+ }
+
+ fbd_tiler->heap_free_address = tiler_heap_free;
+
+ kbase_vunmap(kctx, &map);
+
+ return 0;
+}
+
+/**
+ * @brief Reset the status of an FBD pointed to by a tiler job
+ *
+ * This performs two functions:
+ * - Set the hierarchy mask
+ * - Reset the tiler free heap address
+ *
+ * @param[in] kctx Context pointer
+ * @param[in] job_header Address of job header to reset.
+ * @param[in] tiler_heap_free The value to reset Tiler Heap Free to
+ * @param[in] hierarchy_mask The hierarchy mask to use
+ * @param[in] default_weight Default hierarchy weight to write when no other
+ * weight is given in the FBD
+ * @param[in] job_64 true if this job is using 64-bit
+ * descriptors
+ *
+ * @return 0 on success, error code on failure
+ */
+static int kbasep_replay_reset_tiler_job(struct kbase_context *kctx,
+ u64 job_header, u64 tiler_heap_free,
+ u16 hierarchy_mask, u32 default_weight, bool job_64)
+{
+ struct kbase_vmap_struct map;
+ u64 fbd_address;
+
+ if (job_64) {
+ u64 *job_ext;
+
+ job_ext = kbase_vmap(kctx,
+ job_header + JOB_HEADER_64_FBD_OFFSET,
+ sizeof(*job_ext), &map);
+
+ if (!job_ext) {
+ dev_err(kctx->kbdev->dev, "kbasep_replay_reset_tiler_job: failed to map jc\n");
+ return -EINVAL;
+ }
+
+ fbd_address = *job_ext;
+
+ kbase_vunmap(kctx, &map);
+ } else {
+ u32 *job_ext;
+
+ job_ext = kbase_vmap(kctx,
+ job_header + JOB_HEADER_32_FBD_OFFSET,
+ sizeof(*job_ext), &map);
+
+ if (!job_ext) {
+ dev_err(kctx->kbdev->dev, "kbasep_replay_reset_tiler_job: failed to map jc\n");
+ return -EINVAL;
+ }
+
+ fbd_address = *job_ext;
+
+ kbase_vunmap(kctx, &map);
+ }
+
+ if (fbd_address & FBD_TYPE) {
+ return kbasep_replay_reset_mfbd(kctx,
+ fbd_address & FBD_POINTER_MASK,
+ tiler_heap_free,
+ hierarchy_mask,
+ default_weight);
+ } else {
+ return kbasep_replay_reset_sfbd(kctx,
+ fbd_address & FBD_POINTER_MASK,
+ tiler_heap_free,
+ hierarchy_mask,
+ default_weight);
+ }
+}
+
+/**
+ * @brief Reset the status of a job
+ *
+ * This performs the following functions:
+ *
+ * - Reset the Job Status field of each job to NOT_STARTED.
+ * - Set the Job Type field of any Vertex Jobs to Null Job.
+ * - For any jobs using an FBD, set the Tiler Heap Free field to the value of
+ * the tiler_heap_free parameter, and set the hierarchy level mask to the
+ * hier_mask parameter.
+ * - Offset HW dependencies by the hw_job_id_offset parameter
+ * - Set the Perform Job Barrier flag if this job is the first in the chain
+ * - Read the address of the next job header
+ *
+ * @param[in] kctx Context pointer
+ * @param[in,out] job_header Address of job header to reset. Set to address
+ * of next job header on exit.
+ * @param[in] prev_jc Previous job chain to link to, if this job is
+ * the last in the chain.
+ * @param[in] hw_job_id_offset Offset for HW job IDs
+ * @param[in] tiler_heap_free The value to reset Tiler Heap Free to
+ * @param[in] hierarchy_mask The hierarchy mask to use
+ * @param[in] default_weight Default hierarchy weight to write when no other
+ * weight is given in the FBD
+ * @param[in] first_in_chain true if this job is the first in the chain
+ * @param[in] fragment_chain true if this job is in the fragment chain
+ *
+ * @return 0 on success, error code on failure
+ */
+static int kbasep_replay_reset_job(struct kbase_context *kctx,
+ u64 *job_header, u64 prev_jc,
+ u64 tiler_heap_free, u16 hierarchy_mask,
+ u32 default_weight, u16 hw_job_id_offset,
+ bool first_in_chain, bool fragment_chain)
+{
+ struct fragment_job *frag_job;
+ struct job_descriptor_header *job;
+ u64 new_job_header;
+ struct kbase_vmap_struct map;
+
+ frag_job = kbase_vmap(kctx, *job_header, sizeof(*frag_job), &map);
+ if (!frag_job) {
+ dev_err(kctx->kbdev->dev,
+ "kbasep_replay_parse_jc: failed to map jc\n");
+ return -EINVAL;
+ }
+ job = &frag_job->header;
+
+ dump_job_head(kctx, "Job header:", job);
+
+ if (job->exception_status == JOB_NOT_STARTED && !fragment_chain) {
+ dev_err(kctx->kbdev->dev, "Job already not started\n");
+ goto out_unmap;
+ }
+ job->exception_status = JOB_NOT_STARTED;
+
+ if (job->job_type == JOB_TYPE_VERTEX)
+ job->job_type = JOB_TYPE_NULL;
+
+ if (job->job_type == JOB_TYPE_FUSED) {
+ dev_err(kctx->kbdev->dev, "Fused jobs can not be replayed\n");
+ goto out_unmap;
+ }
+
+ if (first_in_chain)
+ job->job_barrier = 1;
+
+ if ((job->job_dependency_index_1 + hw_job_id_offset) >
+ JOB_HEADER_ID_MAX ||
+ (job->job_dependency_index_2 + hw_job_id_offset) >
+ JOB_HEADER_ID_MAX ||
+ (job->job_index + hw_job_id_offset) > JOB_HEADER_ID_MAX) {
+ dev_err(kctx->kbdev->dev,
+ "Job indicies/dependencies out of valid range\n");
+ goto out_unmap;
+ }
+
+ if (job->job_dependency_index_1)
+ job->job_dependency_index_1 += hw_job_id_offset;
+ if (job->job_dependency_index_2)
+ job->job_dependency_index_2 += hw_job_id_offset;
+
+ job->job_index += hw_job_id_offset;
+
+ if (job->job_descriptor_size) {
+ new_job_header = job->next_job._64;
+ if (!job->next_job._64)
+ job->next_job._64 = prev_jc;
+ } else {
+ new_job_header = job->next_job._32;
+ if (!job->next_job._32)
+ job->next_job._32 = prev_jc;
+ }
+ dump_job_head(kctx, "Updated to:", job);
+
+ if (job->job_type == JOB_TYPE_TILER) {
+ bool job_64 = job->job_descriptor_size != 0;
+
+ if (kbasep_replay_reset_tiler_job(kctx, *job_header,
+ tiler_heap_free, hierarchy_mask,
+ default_weight, job_64) != 0)
+ goto out_unmap;
+
+ } else if (job->job_type == JOB_TYPE_FRAGMENT) {
+ u64 fbd_address;
+
+ if (job->job_descriptor_size)
+ fbd_address = frag_job->fragment_fbd._64;
+ else
+ fbd_address = (u64)frag_job->fragment_fbd._32;
+
+ if (fbd_address & FBD_TYPE) {
+ if (kbasep_replay_reset_mfbd(kctx,
+ fbd_address & FBD_POINTER_MASK,
+ tiler_heap_free,
+ hierarchy_mask,
+ default_weight) != 0)
+ goto out_unmap;
+ } else {
+ if (kbasep_replay_reset_sfbd(kctx,
+ fbd_address & FBD_POINTER_MASK,
+ tiler_heap_free,
+ hierarchy_mask,
+ default_weight) != 0)
+ goto out_unmap;
+ }
+ }
+
+ kbase_vunmap(kctx, &map);
+
+ *job_header = new_job_header;
+
+ return 0;
+
+out_unmap:
+ kbase_vunmap(kctx, &map);
+ return -EINVAL;
+}
+
+/**
+ * @brief Find the highest job ID in a job chain
+ *
+ * @param[in] kctx Context pointer
+ * @param[in] jc Job chain start address
+ * @param[out] hw_job_id Highest job ID in chain
+ *
+ * @return 0 on success, error code on failure
+ */
+static int kbasep_replay_find_hw_job_id(struct kbase_context *kctx,
+ u64 jc, u16 *hw_job_id)
+{
+ while (jc) {
+ struct job_descriptor_header *job;
+ struct kbase_vmap_struct map;
+
+ dev_dbg(kctx->kbdev->dev,
+ "kbasep_replay_find_hw_job_id: parsing jc=%llx\n", jc);
+
+ job = kbase_vmap(kctx, jc, sizeof(*job), &map);
+ if (!job) {
+ dev_err(kctx->kbdev->dev, "failed to map jc\n");
+
+ return -EINVAL;
+ }
+
+ if (job->job_index > *hw_job_id)
+ *hw_job_id = job->job_index;
+
+ if (job->job_descriptor_size)
+ jc = job->next_job._64;
+ else
+ jc = job->next_job._32;
+
+ kbase_vunmap(kctx, &map);
+ }
+
+ return 0;
+}
+
+/**
+ * @brief Reset the status of a number of jobs
+ *
+ * This function walks the provided job chain, and calls
+ * kbasep_replay_reset_job for each job. It also links the job chain to the
+ * provided previous job chain.
+ *
+ * The function will fail if any of the jobs passed in already have a status
+ * of NOT_STARTED.
+ *
+ * @param[in] kctx Context pointer
+ * @param[in] jc Job chain to be processed
+ * @param[in] prev_jc Job chain to be added to. May be NULL
+ * @param[in] tiler_heap_free The value to reset Tiler Heap Free to
+ * @param[in] hierarchy_mask The hierarchy mask to use
+ * @param[in] default_weight Default hierarchy weight to write when no other
+ * weight is given in the FBD
+ * @param[in] hw_job_id_offset Offset for HW job IDs
+ * @param[in] fragment_chain true if this chain is the fragment chain
+ *
+ * @return 0 on success, error code otherwise
+ */
+static int kbasep_replay_parse_jc(struct kbase_context *kctx,
+ u64 jc, u64 prev_jc,
+ u64 tiler_heap_free, u16 hierarchy_mask,
+ u32 default_weight, u16 hw_job_id_offset,
+ bool fragment_chain)
+{
+ bool first_in_chain = true;
+ int nr_jobs = 0;
+
+ dev_dbg(kctx->kbdev->dev, "kbasep_replay_parse_jc: jc=%llx hw_job_id=%x\n",
+ jc, hw_job_id_offset);
+
+ while (jc) {
+ dev_dbg(kctx->kbdev->dev, "kbasep_replay_parse_jc: parsing jc=%llx\n", jc);
+
+ if (kbasep_replay_reset_job(kctx, &jc, prev_jc,
+ tiler_heap_free, hierarchy_mask,
+ default_weight, hw_job_id_offset,
+ first_in_chain, fragment_chain) != 0)
+ return -EINVAL;
+
+ first_in_chain = false;
+
+ nr_jobs++;
+ if (fragment_chain &&
+ nr_jobs >= BASE_JD_REPLAY_F_CHAIN_JOB_LIMIT) {
+ dev_err(kctx->kbdev->dev,
+ "Exceeded maximum number of jobs in fragment chain\n");
+ return -EINVAL;
+ }
+ }
+
+ return 0;
+}
+
+/**
+ * @brief Reset the status of a replay job, and set up dependencies
+ *
+ * This performs the actions to allow the replay job to be re-run following
+ * completion of the passed dependency.
+ *
+ * @param[in] katom The atom to be reset
+ * @param[in] dep_atom The dependency to be attached to the atom
+ */
+static void kbasep_replay_reset_softjob(struct kbase_jd_atom *katom,
+ struct kbase_jd_atom *dep_atom)
+{
+ katom->status = KBASE_JD_ATOM_STATE_QUEUED;
+ kbase_jd_katom_dep_set(&katom->dep[0], dep_atom, BASE_JD_DEP_TYPE_DATA);
+ list_add_tail(&katom->dep_item[0], &dep_atom->dep_head[0]);
+}
+
+/**
+ * @brief Allocate an unused katom
+ *
+ * This will search the provided context for an unused katom, and will mark it
+ * as KBASE_JD_ATOM_STATE_QUEUED.
+ *
+ * If no atoms are available then the function will fail.
+ *
+ * @param[in] kctx Context pointer
+ * @return An atom ID, or -1 on failure
+ */
+static int kbasep_allocate_katom(struct kbase_context *kctx)
+{
+ struct kbase_jd_context *jctx = &kctx->jctx;
+ int i;
+
+ for (i = BASE_JD_ATOM_COUNT-1; i > 0; i--) {
+ if (jctx->atoms[i].status == KBASE_JD_ATOM_STATE_UNUSED) {
+ jctx->atoms[i].status = KBASE_JD_ATOM_STATE_QUEUED;
+ dev_dbg(kctx->kbdev->dev,
+ "kbasep_allocate_katom: Allocated atom %d\n",
+ i);
+ return i;
+ }
+ }
+
+ return -1;
+}
+
+/**
+ * @brief Release a katom
+ *
+ * This will mark the provided atom as available, and remove any dependencies.
+ *
+ * For use on error path.
+ *
+ * @param[in] kctx Context pointer
+ * @param[in] atom_id ID of atom to release
+ */
+static void kbasep_release_katom(struct kbase_context *kctx, int atom_id)
+{
+ struct kbase_jd_context *jctx = &kctx->jctx;
+
+ dev_dbg(kctx->kbdev->dev, "kbasep_release_katom: Released atom %d\n",
+ atom_id);
+
+ while (!list_empty(&jctx->atoms[atom_id].dep_head[0]))
+ list_del(jctx->atoms[atom_id].dep_head[0].next);
+
+ while (!list_empty(&jctx->atoms[atom_id].dep_head[1]))
+ list_del(jctx->atoms[atom_id].dep_head[1].next);
+
+ jctx->atoms[atom_id].status = KBASE_JD_ATOM_STATE_UNUSED;
+}
+
+static void kbasep_replay_create_atom(struct kbase_context *kctx,
+ struct base_jd_atom_v2 *atom,
+ int atom_nr,
+ base_jd_prio prio)
+{
+ atom->nr_extres = 0;
+ atom->extres_list.value = NULL;
+ atom->device_nr = 0;
+ atom->prio = prio;
+ atom->atom_number = atom_nr;
+
+ base_jd_atom_dep_set(&atom->pre_dep[0], 0 , BASE_JD_DEP_TYPE_INVALID);
+ base_jd_atom_dep_set(&atom->pre_dep[1], 0 , BASE_JD_DEP_TYPE_INVALID);
+
+ atom->udata.blob[0] = 0;
+ atom->udata.blob[1] = 0;
+}
+
+/**
+ * @brief Create two atoms for the purpose of replaying jobs
+ *
+ * Two atoms are allocated and created. The jc pointer is not set at this
+ * stage. The second atom has a dependency on the first. The remaining fields
+ * are set up as follows:
+ *
+ * - No external resources. Any required external resources will be held by the
+ * replay atom.
+ * - device_nr is set to 0. This is not relevant as
+ * BASE_JD_REQ_SPECIFIC_COHERENT_GROUP should not be set.
+ * - Priority is inherited from the replay job.
+ *
+ * @param[in] kctx Context pointer
+ * @param[out] t_atom Atom to use for tiler jobs
+ * @param[out] f_atom Atom to use for fragment jobs
+ * @param[in] prio Priority of new atom (inherited from replay soft
+ * job)
+ * @return 0 on success, error code on failure
+ */
+static int kbasep_replay_create_atoms(struct kbase_context *kctx,
+ struct base_jd_atom_v2 *t_atom,
+ struct base_jd_atom_v2 *f_atom,
+ base_jd_prio prio)
+{
+ int t_atom_nr, f_atom_nr;
+
+ t_atom_nr = kbasep_allocate_katom(kctx);
+ if (t_atom_nr < 0) {
+ dev_err(kctx->kbdev->dev, "Failed to allocate katom\n");
+ return -EINVAL;
+ }
+
+ f_atom_nr = kbasep_allocate_katom(kctx);
+ if (f_atom_nr < 0) {
+ dev_err(kctx->kbdev->dev, "Failed to allocate katom\n");
+ kbasep_release_katom(kctx, t_atom_nr);
+ return -EINVAL;
+ }
+
+ kbasep_replay_create_atom(kctx, t_atom, t_atom_nr, prio);
+ kbasep_replay_create_atom(kctx, f_atom, f_atom_nr, prio);
+
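+ /* The fragment atom must not start until the tiler atom has completed */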
+ base_jd_atom_dep_set(&f_atom->pre_dep[0], t_atom_nr , BASE_JD_DEP_TYPE_DATA);
+
+ return 0;
+}
+
+#ifdef CONFIG_MALI_DEBUG
+static void payload_dump(struct kbase_context *kctx, base_jd_replay_payload *payload)
+{
+ u64 next;
+
+ dev_dbg(kctx->kbdev->dev, "Tiler jc list :\n");
+ next = payload->tiler_jc_list;
+
+ while (next) {
+ struct kbase_vmap_struct map;
+ base_jd_replay_jc *jc_struct;
+
+ jc_struct = kbase_vmap(kctx, next, sizeof(*jc_struct), &map);
+
+ if (!jc_struct)
+ return;
+
+ dev_dbg(kctx->kbdev->dev, "* jc_struct=%p jc=%llx next=%llx\n",
+ jc_struct, jc_struct->jc, jc_struct->next);
+
+ next = jc_struct->next;
+
+ kbase_vunmap(kctx, &map);
+ }
+}
+#endif
+
+/**
+ * @brief Parse a base_jd_replay_payload provided by userspace
+ *
+ * This will read the payload from userspace, and parse the job chains.
+ *
+ * @param[in] kctx Context pointer
+ * @param[in] replay_atom Replay soft job atom
+ * @param[in] t_atom Atom to use for tiler jobs
+ * @param[in] f_atom Atom to use for fragment jobs
+ * @return 0 on success, error code on failure
+ */
+static int kbasep_replay_parse_payload(struct kbase_context *kctx,
+ struct kbase_jd_atom *replay_atom,
+ struct base_jd_atom_v2 *t_atom,
+ struct base_jd_atom_v2 *f_atom)
+{
+ base_jd_replay_payload *payload = NULL;
+ u64 next;
+ u64 prev_jc = 0;
+ u16 hw_job_id_offset = 0;
+ int ret = -EINVAL;
+ struct kbase_vmap_struct map;
+
+ dev_dbg(kctx->kbdev->dev, "kbasep_replay_parse_payload: replay_atom->jc = %llx sizeof(*payload) = %zu\n",
+ replay_atom->jc, sizeof(*payload));
+
+ payload = kbase_vmap(kctx, replay_atom->jc, sizeof(*payload), &map);
+ if (!payload) {
+ dev_err(kctx->kbdev->dev, "kbasep_replay_parse_payload: failed to map payload into kernel space\n");
+ return -EINVAL;
+ }
+
+#ifdef BASE_LEGACY_UK10_2_SUPPORT
+ if (KBASE_API_VERSION(10, 3) > replay_atom->kctx->api_version) {
+ base_jd_replay_payload_uk10_2 *payload_uk10_2;
+ u16 tiler_core_req;
+ u16 fragment_core_req;
+
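+ /* Interfaces older than 10.3 carried 16-bit core requirement fields;
+ * widen them into the current 32-bit payload fields in place.
+ */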
+ payload_uk10_2 = (base_jd_replay_payload_uk10_2 *) payload;
+ memcpy(&tiler_core_req, &payload_uk10_2->tiler_core_req,
+ sizeof(tiler_core_req));
+ memcpy(&fragment_core_req, &payload_uk10_2->fragment_core_req,
+ sizeof(fragment_core_req));
+ payload->tiler_core_req = (u32)(tiler_core_req & 0x7fff);
+ payload->fragment_core_req = (u32)(fragment_core_req & 0x7fff);
+ }
+#endif /* BASE_LEGACY_UK10_2_SUPPORT */
+
+#ifdef CONFIG_MALI_DEBUG
+ dev_dbg(kctx->kbdev->dev, "kbasep_replay_parse_payload: payload=%p\n", payload);
+ dev_dbg(kctx->kbdev->dev, "Payload structure:\n"
+ "tiler_jc_list = %llx\n"
+ "fragment_jc = %llx\n"
+ "tiler_heap_free = %llx\n"
+ "fragment_hierarchy_mask = %x\n"
+ "tiler_hierarchy_mask = %x\n"
+ "hierarchy_default_weight = %x\n"
+ "tiler_core_req = %x\n"
+ "fragment_core_req = %x\n",
+ payload->tiler_jc_list,
+ payload->fragment_jc,
+ payload->tiler_heap_free,
+ payload->fragment_hierarchy_mask,
+ payload->tiler_hierarchy_mask,
+ payload->hierarchy_default_weight,
+ payload->tiler_core_req,
+ payload->fragment_core_req);
+ payload_dump(kctx, payload);
+#endif
+ t_atom->core_req = payload->tiler_core_req | BASEP_JD_REQ_EVENT_NEVER;
+ f_atom->core_req = payload->fragment_core_req | BASEP_JD_REQ_EVENT_NEVER;
+
+ /* Sanity check core requirements */
+ if ((t_atom->core_req & BASE_JD_REQ_ATOM_TYPE) != BASE_JD_REQ_T ||
+ (f_atom->core_req & BASE_JD_REQ_ATOM_TYPE) != BASE_JD_REQ_FS ||
+ t_atom->core_req & BASE_JD_REQ_EXTERNAL_RESOURCES ||
+ f_atom->core_req & BASE_JD_REQ_EXTERNAL_RESOURCES) {
+
+ int t_atom_type = t_atom->core_req & BASE_JD_REQ_ATOM_TYPE & ~BASE_JD_REQ_COHERENT_GROUP;
+ int f_atom_type = f_atom->core_req & BASE_JD_REQ_ATOM_TYPE & ~BASE_JD_REQ_COHERENT_GROUP & ~BASE_JD_REQ_FS_AFBC;
+ int t_has_ex_res = t_atom->core_req & BASE_JD_REQ_EXTERNAL_RESOURCES;
+ int f_has_ex_res = f_atom->core_req & BASE_JD_REQ_EXTERNAL_RESOURCES;
+
+ if (t_atom_type != BASE_JD_REQ_T) {
+ dev_err(kctx->kbdev->dev, "Invalid core requirement: Tiler atom not a tiler job. Was: 0x%x\n Expected: 0x%x",
+ t_atom_type, BASE_JD_REQ_T);
+ }
+ if (f_atom_type != BASE_JD_REQ_FS) {
+ dev_err(kctx->kbdev->dev, "Invalid core requirement: Fragment shader atom not a fragment shader. Was 0x%x Expected: 0x%x\n",
+ f_atom_type, BASE_JD_REQ_FS);
+ }
+ if (t_has_ex_res) {
+ dev_err(kctx->kbdev->dev, "Invalid core requirement: Tiler atom has external resources.\n");
+ }
+ if (f_has_ex_res) {
+ dev_err(kctx->kbdev->dev, "Invalid core requirement: Fragment shader atom has external resources.\n");
+ }
+
+ goto out;
+ }
+
+ /* Process tiler job chains */
+ next = payload->tiler_jc_list;
+ if (!next) {
+ dev_err(kctx->kbdev->dev, "Invalid tiler JC list\n");
+ goto out;
+ }
+
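+ /* tiler_jc_list is a singly linked list of base_jd_replay_jc nodes, each
+ * describing one tiler job chain. The chains are stitched together by
+ * linking each chain to the previously processed one, with
+ * hw_job_id_offset advanced so job IDs stay unique across the result.
+ */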
+ while (next) {
+ base_jd_replay_jc *jc_struct;
+ struct kbase_vmap_struct jc_map;
+ u64 jc;
+
+ jc_struct = kbase_vmap(kctx, next, sizeof(*jc_struct), &jc_map);
+
+ if (!jc_struct) {
+ dev_err(kctx->kbdev->dev, "Failed to map jc struct\n");
+ goto out;
+ }
+
+ jc = jc_struct->jc;
+ next = jc_struct->next;
+ if (next)
+ jc_struct->jc = 0;
+
+ kbase_vunmap(kctx, &jc_map);
+
+ if (jc) {
+ u16 max_hw_job_id = 0;
+
+ if (kbasep_replay_find_hw_job_id(kctx, jc,
+ &max_hw_job_id) != 0)
+ goto out;
+
+ if (kbasep_replay_parse_jc(kctx, jc, prev_jc,
+ payload->tiler_heap_free,
+ payload->tiler_hierarchy_mask,
+ payload->hierarchy_default_weight,
+ hw_job_id_offset, false) != 0) {
+ goto out;
+ }
+
+ hw_job_id_offset += max_hw_job_id;
+
+ prev_jc = jc;
+ }
+ }
+ t_atom->jc = prev_jc;
+
+ /* Process fragment job chain */
+ f_atom->jc = payload->fragment_jc;
+ if (kbasep_replay_parse_jc(kctx, payload->fragment_jc, 0,
+ payload->tiler_heap_free,
+ payload->fragment_hierarchy_mask,
+ payload->hierarchy_default_weight, 0,
+ true) != 0) {
+ goto out;
+ }
+
+ if (!t_atom->jc || !f_atom->jc) {
+ dev_err(kctx->kbdev->dev, "Invalid payload\n");
+ goto out;
+ }
+
+ dev_dbg(kctx->kbdev->dev, "t_atom->jc=%llx f_atom->jc=%llx\n",
+ t_atom->jc, f_atom->jc);
+ ret = 0;
+
+out:
+ kbase_vunmap(kctx, &map);
+
+ return ret;
+}
+
+static void kbase_replay_process_worker(struct work_struct *data)
+{
+ struct kbase_jd_atom *katom;
+ struct kbase_context *kctx;
+ struct kbase_jd_context *jctx;
+ bool need_to_try_schedule_context = false;
+
+ struct base_jd_atom_v2 t_atom, f_atom;
+ struct kbase_jd_atom *t_katom, *f_katom;
+ base_jd_prio atom_prio;
+
+ katom = container_of(data, struct kbase_jd_atom, work);
+ kctx = katom->kctx;
+ jctx = &kctx->jctx;
+
+ mutex_lock(&jctx->lock);
+
+ atom_prio = kbasep_js_sched_prio_to_atom_prio(katom->sched_priority);
+
+ if (kbasep_replay_create_atoms(
+ kctx, &t_atom, &f_atom, atom_prio) != 0) {
+ katom->event_code = BASE_JD_EVENT_JOB_CANCELLED;
+ goto out;
+ }
+
+ t_katom = &jctx->atoms[t_atom.atom_number];
+ f_katom = &jctx->atoms[f_atom.atom_number];
+
+ if (kbasep_replay_parse_payload(kctx, katom, &t_atom, &f_atom) != 0) {
+ kbasep_release_katom(kctx, t_atom.atom_number);
+ kbasep_release_katom(kctx, f_atom.atom_number);
+ katom->event_code = BASE_JD_EVENT_JOB_CANCELLED;
+ goto out;
+ }
+
+ kbasep_replay_reset_softjob(katom, f_katom);
+
+ need_to_try_schedule_context |= jd_submit_atom(kctx, &t_atom, t_katom);
+ if (t_katom->event_code == BASE_JD_EVENT_JOB_INVALID) {
+ dev_err(kctx->kbdev->dev, "Replay failed to submit atom\n");
+ kbasep_release_katom(kctx, f_atom.atom_number);
+ katom->event_code = BASE_JD_EVENT_JOB_CANCELLED;
+ goto out;
+ }
+ need_to_try_schedule_context |= jd_submit_atom(kctx, &f_atom, f_katom);
+ if (f_katom->event_code == BASE_JD_EVENT_JOB_INVALID) {
+ dev_err(kctx->kbdev->dev, "Replay failed to submit atom\n");
+ katom->event_code = BASE_JD_EVENT_JOB_CANCELLED;
+ goto out;
+ }
+
+ katom->event_code = BASE_JD_EVENT_DONE;
+
+out:
+ if (katom->event_code != BASE_JD_EVENT_DONE) {
+ kbase_disjoint_state_down(kctx->kbdev);
+
+ need_to_try_schedule_context |= jd_done_nolock(katom, NULL);
+ }
+
+ if (need_to_try_schedule_context)
+ kbase_js_sched_all(kctx->kbdev);
+
+ mutex_unlock(&jctx->lock);
+}
+
+/**
+ * @brief Check job replay fault
+ *
+ * This reads the job payload, checks the fault type and source, and then
+ * decides whether a replay is required.
+ *
+ * @param[in] katom The atom to be processed
+ * @return true if a replay is required, false otherwise.
+ */
+static bool kbase_replay_fault_check(struct kbase_jd_atom *katom)
+{
+ struct kbase_context *kctx = katom->kctx;
+ struct device *dev = kctx->kbdev->dev;
+ base_jd_replay_payload *payload;
+ u64 job_header;
+ u64 job_loop_detect;
+ struct job_descriptor_header *job;
+ struct kbase_vmap_struct job_map;
+ struct kbase_vmap_struct map;
+ bool err = false;
+
+ /* Replay job if fault is of type BASE_JD_EVENT_JOB_WRITE_FAULT or
+ * if force_replay is enabled.
+ */
+ if (BASE_JD_EVENT_TERMINATED == katom->event_code) {
+ return false;
+ } else if (BASE_JD_EVENT_JOB_WRITE_FAULT == katom->event_code) {
+ return true;
+ } else if (BASE_JD_EVENT_FORCE_REPLAY == katom->event_code) {
+ katom->event_code = BASE_JD_EVENT_DATA_INVALID_FAULT;
+ return true;
+ } else if (BASE_JD_EVENT_DATA_INVALID_FAULT != katom->event_code) {
+ /* No replay for faults of type other than
+ * BASE_JD_EVENT_DATA_INVALID_FAULT.
+ */
+ return false;
+ }
+
+ /* Job fault is BASE_JD_EVENT_DATA_INVALID_FAULT, now scan fragment jc
+ * to find out whether the source of exception is POLYGON_LIST. Replay
+ * is required if the source of fault is POLYGON_LIST.
+ */
+ payload = kbase_vmap(kctx, katom->jc, sizeof(*payload), &map);
+ if (!payload) {
+ dev_err(dev, "kbase_replay_fault_check: failed to map payload.\n");
+ return false;
+ }
+
+#ifdef CONFIG_MALI_DEBUG
+ dev_dbg(dev, "kbase_replay_fault_check: payload=%p\n", payload);
+ dev_dbg(dev, "\nPayload structure:\n"
+ "fragment_jc = 0x%llx\n"
+ "fragment_hierarchy_mask = 0x%x\n"
+ "fragment_core_req = 0x%x\n",
+ payload->fragment_jc,
+ payload->fragment_hierarchy_mask,
+ payload->fragment_core_req);
+#endif
+ /* Process fragment job chain */
+ job_header = (u64) payload->fragment_jc;
+ job_loop_detect = job_header;
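+ /* Remember the first job header so that a cycle in the job chain
+ * terminates the walk instead of looping forever.
+ */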
+ while (job_header) {
+ job = kbase_vmap(kctx, job_header, sizeof(*job), &job_map);
+ if (!job) {
+ dev_err(dev, "failed to map jc\n");
+ /* unmap payload */
+ kbase_vunmap(kctx, &map);
+ return false;
+ }
+
+
+ dump_job_head(kctx, "\njob_head structure:\n", job);
+
+ /* Replay only when the polygon list reader caused the
+ * DATA_INVALID_FAULT */
+ if ((BASE_JD_EVENT_DATA_INVALID_FAULT == katom->event_code) &&
+ (JOB_POLYGON_LIST == JOB_SOURCE_ID(job->exception_status))) {
+ err = true;
+ kbase_vunmap(kctx, &job_map);
+ break;
+ }
+
+ /* Move on to next fragment job in the list */
+ if (job->job_descriptor_size)
+ job_header = job->next_job._64;
+ else
+ job_header = job->next_job._32;
+
+ kbase_vunmap(kctx, &job_map);
+
+ /* Job chain loop detected */
+ if (job_header == job_loop_detect)
+ break;
+ }
+
+ /* unmap payload */
+ kbase_vunmap(kctx, &map);
+
+ return err;
+}
+
+
+/**
+ * @brief Process a replay job
+ *
+ * Called from kbase_process_soft_job.
+ *
+ * On exit, if the job has completed, katom->event_code will have been updated.
+ * If the job has not completed, and is replaying jobs, then the atom status
+ * will have been reset to KBASE_JD_ATOM_STATE_QUEUED.
+ *
+ * @param[in] katom The atom to be processed
+ * @return false if the atom has completed
+ * true if the atom is replaying jobs
+ */
+bool kbase_replay_process(struct kbase_jd_atom *katom)
+{
+ struct kbase_context *kctx = katom->kctx;
+ struct kbase_device *kbdev = kctx->kbdev;
+
+ /* Don't replay this atom if these issues are not present in the
+ * hardware */
+ if (!kbase_hw_has_issue(kbdev, BASE_HW_ISSUE_11020) &&
+ !kbase_hw_has_issue(kbdev, BASE_HW_ISSUE_11024)) {
+ dev_dbg(kbdev->dev, "Hardware does not need replay workaround");
+
+ /* Signal failure to userspace */
+ katom->event_code = BASE_JD_EVENT_JOB_INVALID;
+
+ return false;
+ }
+
+ if (katom->event_code == BASE_JD_EVENT_DONE) {
+ dev_dbg(kbdev->dev, "Previous job succeeded - not replaying\n");
+
+ if (katom->retry_count)
+ kbase_disjoint_state_down(kbdev);
+
+ return false;
+ }
+
+ if (kbase_ctx_flag(kctx, KCTX_DYING)) {
+ dev_dbg(kbdev->dev, "Not replaying; context is dying\n");
+
+ if (katom->retry_count)
+ kbase_disjoint_state_down(kbdev);
+
+ return false;
+ }
+
+ /* Check job exception type and source before replaying. */
+ if (!kbase_replay_fault_check(katom)) {
+ dev_dbg(kbdev->dev,
+ "Replay cancelled on event %x\n", katom->event_code);
+ /* katom->event_code is already set to the failure code of the
+ * previous job.
+ */
+ return false;
+ }
+
+ dev_warn(kbdev->dev, "Replaying jobs retry=%d\n",
+ katom->retry_count);
+
+ katom->retry_count++;
+
+ if (katom->retry_count > BASEP_JD_REPLAY_LIMIT) {
+ dev_err(kbdev->dev, "Replay exceeded limit - failing jobs\n");
+
+ kbase_disjoint_state_down(kbdev);
+
+ /* katom->event_code is already set to the failure code of the
+ previous job */
+ return false;
+ }
+
+ /* Enter the disjoint state only once for the whole duration of the replay */
+ if (katom->retry_count == 1)
+ kbase_disjoint_state_up(kbdev);
+
+ INIT_WORK(&katom->work, kbase_replay_process_worker);
+ queue_work(kctx->event_workq, &katom->work);
+
+ return true;
+}
diff --git a/midgard-0.r2p0/mali_kbase_smc.c b/midgard-0.r2p0/mali_kbase_smc.c
new file mode 100755
index 0000000..43175c8
--- /dev/null
+++ b/midgard-0.r2p0/mali_kbase_smc.c
@@ -0,0 +1,74 @@
+/*
+ *
+ * (C) COPYRIGHT 2015 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained
+ * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
+ * Boston, MA 02110-1301, USA.
+ *
+ */
+
+
+
+#ifdef CONFIG_ARM64
+
+#include <mali_kbase.h>
+#include <mali_kbase_smc.h>
+
+#include <linux/compiler.h>
+
+static noinline u64 invoke_smc_fid(u64 function_id,
+ u64 arg0, u64 arg1, u64 arg2)
+{
+ register u64 x0 asm("x0") = function_id;
+ register u64 x1 asm("x1") = arg0;
+ register u64 x2 asm("x2") = arg1;
+ register u64 x3 asm("x3") = arg2;
+
+ asm volatile(
+ __asmeq("%0", "x0")
+ __asmeq("%1", "x1")
+ __asmeq("%2", "x2")
+ __asmeq("%3", "x3")
+ "smc #0\n"
+ : "+r" (x0)
+ : "r" (x1), "r" (x2), "r" (x3));
+
+ return x0;
+}
+
+u64 kbase_invoke_smc_fid(u32 fid, u64 arg0, u64 arg1, u64 arg2)
+{
+ /* Is fast call (bit 31 set) */
+ KBASE_DEBUG_ASSERT(fid & SMC_FAST_CALL);
+ /* bits 16-23 must be zero for fast calls */
+ KBASE_DEBUG_ASSERT((fid & (0xFF << 16)) == 0);
+
+ return invoke_smc_fid(fid, arg0, arg1, arg2);
+}
+
+u64 kbase_invoke_smc(u32 oen, u16 function_number, bool smc64,
+ u64 arg0, u64 arg1, u64 arg2)
+{
+ u32 fid = 0;
+
+ /* Only the six bits allowed should be used. */
+ KBASE_DEBUG_ASSERT((oen & ~SMC_OEN_MASK) == 0);
+
+ fid |= SMC_FAST_CALL; /* Bit 31: Fast call */
+ if (smc64)
+ fid |= SMC_64; /* Bit 30: 1=SMC64, 0=SMC32 */
+ fid |= oen; /* Bit 29:24: OEN */
+ /* Bit 23:16: Must be zero for fast calls */
+ fid |= (function_number); /* Bit 15:0: function number */
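+ /* Illustrative example: an SMC64 fast call to the SiP service
+ * (oen == SMC_OEN_SIP) with function_number == 1 gives
+ * fid = SMC_FAST_CALL | SMC_64 | SMC_OEN_SIP | 1 = 0xC2000001.
+ */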
+
+ return kbase_invoke_smc_fid(fid, arg0, arg1, arg2);
+}
+
+#endif /* CONFIG_ARM64 */
+
diff --git a/midgard-0.r2p0/mali_kbase_smc.h b/midgard-0.r2p0/mali_kbase_smc.h
new file mode 100755
index 0000000..9bff3d2
--- /dev/null
+++ b/midgard-0.r2p0/mali_kbase_smc.h
@@ -0,0 +1,67 @@
+/*
+ *
+ * (C) COPYRIGHT 2015 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained
+ * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
+ * Boston, MA 02110-1301, USA.
+ *
+ */
+
+
+
+
+
+#ifndef _KBASE_SMC_H_
+#define _KBASE_SMC_H_
+
+#ifdef CONFIG_ARM64
+
+#include <mali_kbase.h>
+
+#define SMC_FAST_CALL (1 << 31)
+#define SMC_64 (1 << 30)
+
+#define SMC_OEN_OFFSET 24
+#define SMC_OEN_MASK (0x3F << SMC_OEN_OFFSET) /* 6 bits */
+#define SMC_OEN_SIP (2 << SMC_OEN_OFFSET)
+#define SMC_OEN_STD (4 << SMC_OEN_OFFSET)
+
+
+/**
+ * kbase_invoke_smc_fid - Perform a secure monitor call
+ * @fid: The SMC function to call, see SMC Calling convention.
+ * @arg0: First argument to the SMC.
+ * @arg1: Second argument to the SMC.
+ * @arg2: Third argument to the SMC.
+ *
+ * See SMC Calling Convention for details.
+ *
+ * Return: the return value from the SMC.
+ */
+u64 kbase_invoke_smc_fid(u32 fid, u64 arg0, u64 arg1, u64 arg2);
+
+/**
+ * kbase_invoke_smc - Perform a secure monitor call
+ * @oen: Owning Entity number (SIP, STD etc).
+ * @function_number: The function number within the OEN.
+ * @smc64: use SMC64 calling convention instead of SMC32.
+ * @arg0: First argument to the SMC.
+ * @arg1: Second argument to the SMC.
+ * @arg2: Third argument to the SMC.
+ *
+ * See SMC Calling Convention for details.
+ *
+ * Return: the return value from the SMC call.
+ */
+u64 kbase_invoke_smc(u32 oen, u16 function_number, bool smc64,
+ u64 arg0, u64 arg1, u64 arg2);
+
+#endif /* CONFIG_ARM64 */
+
+#endif /* _KBASE_SMC_H_ */
diff --git a/midgard-0.r2p0/mali_kbase_softjobs.c b/midgard-0.r2p0/mali_kbase_softjobs.c
new file mode 100755
index 0000000..4f05bc0
--- /dev/null
+++ b/midgard-0.r2p0/mali_kbase_softjobs.c
@@ -0,0 +1,1504 @@
+/*
+ *
+ * (C) COPYRIGHT 2011-2016 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained
+ * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
+ * Boston, MA 02110-1301, USA.
+ *
+ */
+
+
+
+
+
+#include <mali_kbase.h>
+
+#if defined(CONFIG_DMA_SHARED_BUFFER)
+#include <linux/dma-buf.h>
+#include <asm/cacheflush.h>
+#endif /* defined(CONFIG_DMA_SHARED_BUFFER) */
+#include <linux/dma-mapping.h>
+#ifdef CONFIG_SYNC
+#include "sync.h"
+#include <linux/syscalls.h>
+#include "mali_kbase_sync.h"
+#endif
+#include <mali_base_kernel.h>
+#include <mali_kbase_hwaccess_time.h>
+#include <mali_kbase_mem_linux.h>
+#include <linux/version.h>
+#include <linux/ktime.h>
+#include <linux/pfn.h>
+#include <linux/sched.h>
+
+/* Mask to check cache alignment of data structures */
+#define KBASE_CACHE_ALIGNMENT_MASK ((1<<L1_CACHE_SHIFT)-1)
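+/* For example, with L1_CACHE_SHIFT == 6 (64-byte cache lines) the mask is
+ * 0x3f; kbase_prepare_soft_job() uses it to reject CPU/GPU time dump
+ * addresses that are not cache-line aligned.
+ */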
+
+/**
+ * @file mali_kbase_softjobs.c
+ *
+ * This file implements the logic behind software only jobs that are
+ * executed within the driver rather than being handed over to the GPU.
+ */
+
+void kbasep_add_waiting_soft_job(struct kbase_jd_atom *katom)
+{
+ struct kbase_context *kctx = katom->kctx;
+ unsigned long lflags;
+
+ spin_lock_irqsave(&kctx->waiting_soft_jobs_lock, lflags);
+ list_add_tail(&katom->queue, &kctx->waiting_soft_jobs);
+ spin_unlock_irqrestore(&kctx->waiting_soft_jobs_lock, lflags);
+}
+
+void kbasep_remove_waiting_soft_job(struct kbase_jd_atom *katom)
+{
+ struct kbase_context *kctx = katom->kctx;
+ unsigned long lflags;
+
+ spin_lock_irqsave(&kctx->waiting_soft_jobs_lock, lflags);
+ list_del(&katom->queue);
+ spin_unlock_irqrestore(&kctx->waiting_soft_jobs_lock, lflags);
+}
+
+static void kbasep_add_waiting_with_timeout(struct kbase_jd_atom *katom)
+{
+ struct kbase_context *kctx = katom->kctx;
+
+ /* Record the start time of this atom so we could cancel it at
+ * the right time.
+ */
+ katom->start_timestamp = ktime_get();
+
+ /* Add the atom to the waiting list before the timer is
+ * (re)started to make sure that it gets processed.
+ */
+ kbasep_add_waiting_soft_job(katom);
+
+ /* Schedule timeout of this atom after a period if it is not active */
+ if (!timer_pending(&kctx->soft_job_timeout)) {
+ int timeout_ms = atomic_read(
+ &kctx->kbdev->js_data.soft_job_timeout_ms);
+ mod_timer(&kctx->soft_job_timeout,
+ jiffies + msecs_to_jiffies(timeout_ms));
+ }
+}
+
+static int kbasep_read_soft_event_status(
+ struct kbase_context *kctx, u64 evt, unsigned char *status)
+{
+ unsigned char *mapped_evt;
+ struct kbase_vmap_struct map;
+
+ mapped_evt = kbase_vmap(kctx, evt, sizeof(*mapped_evt), &map);
+ if (!mapped_evt)
+ return -EFAULT;
+
+ *status = *mapped_evt;
+
+ kbase_vunmap(kctx, &map);
+
+ return 0;
+}
+
+static int kbasep_write_soft_event_status(
+ struct kbase_context *kctx, u64 evt, unsigned char new_status)
+{
+ unsigned char *mapped_evt;
+ struct kbase_vmap_struct map;
+
+ if ((new_status != BASE_JD_SOFT_EVENT_SET) &&
+ (new_status != BASE_JD_SOFT_EVENT_RESET))
+ return -EINVAL;
+
+ mapped_evt = kbase_vmap(kctx, evt, sizeof(*mapped_evt), &map);
+ if (!mapped_evt)
+ return -EFAULT;
+
+ *mapped_evt = new_status;
+
+ kbase_vunmap(kctx, &map);
+
+ return 0;
+}
+
+static int kbase_dump_cpu_gpu_time(struct kbase_jd_atom *katom)
+{
+ struct kbase_vmap_struct map;
+ void *user_result;
+ struct timespec ts;
+ struct base_dump_cpu_gpu_counters data;
+ u64 system_time;
+ u64 cycle_counter;
+ u64 jc = katom->jc;
+ struct kbase_context *kctx = katom->kctx;
+ int pm_active_err;
+
+ memset(&data, 0, sizeof(data));
+
+ /* Take the PM active reference as late as possible - otherwise, it could
+ * delay suspend until we process the atom (which may be at the end of a
+ * long chain of dependencies) */
+ pm_active_err = kbase_pm_context_active_handle_suspend(kctx->kbdev, KBASE_PM_SUSPEND_HANDLER_DONT_REACTIVATE);
+ if (pm_active_err) {
+ struct kbasep_js_device_data *js_devdata = &kctx->kbdev->js_data;
+
+ /* We're suspended - queue this on the list of suspended jobs
+ * Use dep_item[1], because dep_item[0] was previously in use
+ * for 'waiting_soft_jobs'.
+ */
+ mutex_lock(&js_devdata->runpool_mutex);
+ list_add_tail(&katom->dep_item[1], &js_devdata->suspended_soft_jobs_list);
+ mutex_unlock(&js_devdata->runpool_mutex);
+
+ /* Also adding this to the list of waiting soft job */
+ kbasep_add_waiting_soft_job(katom);
+
+ return pm_active_err;
+ }
+
+ kbase_backend_get_gpu_time(kctx->kbdev, &cycle_counter, &system_time,
+ &ts);
+
+ kbase_pm_context_idle(kctx->kbdev);
+
+ data.sec = ts.tv_sec;
+ data.usec = ts.tv_nsec / 1000;
+ data.system_time = system_time;
+ data.cycle_counter = cycle_counter;
+
+ /* Assume this atom will be cancelled until we know otherwise */
+ katom->event_code = BASE_JD_EVENT_JOB_CANCELLED;
+
+ /* GPU_WR access is checked on the range for returning the result to
+ * userspace for the following reasons:
+ * - security, this is currently how imported user bufs are checked.
+ * - the userspace DDK is guaranteed to assume the region was mapped as GPU_WR */
+ user_result = kbase_vmap_prot(kctx, jc, sizeof(data), KBASE_REG_GPU_WR, &map);
+ if (!user_result)
+ return 0;
+
+ memcpy(user_result, &data, sizeof(data));
+
+ kbase_vunmap(kctx, &map);
+
+ /* Atom was fine - mark it as done */
+ katom->event_code = BASE_JD_EVENT_DONE;
+
+ return 0;
+}
+
+#ifdef CONFIG_SYNC
+
+static enum base_jd_event_code kbase_fence_trigger(struct kbase_jd_atom *katom, int result)
+{
+ struct sync_pt *pt;
+ struct sync_timeline *timeline;
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 17, 0)
+ if (!list_is_singular(&katom->fence->pt_list_head)) {
+#else
+ if (katom->fence->num_fences != 1) {
+#endif
+ /* Not exactly one item in the list - so it didn't (directly) come from us */
+ return BASE_JD_EVENT_JOB_CANCELLED;
+ }
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 17, 0)
+ pt = list_first_entry(&katom->fence->pt_list_head, struct sync_pt, pt_list);
+#else
+ pt = container_of(katom->fence->cbs[0].sync_pt, struct sync_pt, base);
+#endif
+ timeline = sync_pt_parent(pt);
+
+ if (!kbase_sync_timeline_is_ours(timeline)) {
+ /* Fence has a sync_pt which isn't ours! */
+ return BASE_JD_EVENT_JOB_CANCELLED;
+ }
+
+ kbase_sync_signal_pt(pt, result);
+
+ sync_timeline_signal(timeline);
+
+ return (result < 0) ? BASE_JD_EVENT_JOB_CANCELLED : BASE_JD_EVENT_DONE;
+}
+
+static void kbase_fence_wait_worker(struct work_struct *data)
+{
+ struct kbase_jd_atom *katom;
+ struct kbase_context *kctx;
+
+ katom = container_of(data, struct kbase_jd_atom, work);
+ kctx = katom->kctx;
+
+ mutex_lock(&kctx->jctx.lock);
+ kbasep_remove_waiting_soft_job(katom);
+ kbase_finish_soft_job(katom);
+ if (jd_done_nolock(katom, NULL))
+ kbase_js_sched_all(kctx->kbdev);
+ mutex_unlock(&kctx->jctx.lock);
+}
+
+static void kbase_fence_wait_callback(struct sync_fence *fence, struct sync_fence_waiter *waiter)
+{
+ struct kbase_jd_atom *katom = container_of(waiter, struct kbase_jd_atom, sync_waiter);
+ struct kbase_context *kctx;
+
+ KBASE_DEBUG_ASSERT(NULL != katom);
+
+ kctx = katom->kctx;
+
+ KBASE_DEBUG_ASSERT(NULL != kctx);
+
+ /* Propagate the fence status to the atom.
+ * If negative then cancel this atom and its dependencies.
+ */
+ if (kbase_fence_get_status(fence) < 0)
+ katom->event_code = BASE_JD_EVENT_JOB_CANCELLED;
+
+ /* To prevent a potential deadlock we schedule the work onto the job_done_wq workqueue
+ *
+ * The issue is that we may signal the timeline while holding kctx->jctx.lock and
+ * the callbacks are run synchronously from sync_timeline_signal. So we simply defer the work.
+ */
+
+ KBASE_DEBUG_ASSERT(0 == object_is_on_stack(&katom->work));
+ INIT_WORK(&katom->work, kbase_fence_wait_worker);
+ queue_work(kctx->jctx.job_done_wq, &katom->work);
+}
+
+static int kbase_fence_wait(struct kbase_jd_atom *katom)
+{
+ int ret;
+
+ KBASE_DEBUG_ASSERT(NULL != katom);
+ KBASE_DEBUG_ASSERT(NULL != katom->kctx);
+
+ sync_fence_waiter_init(&katom->sync_waiter, kbase_fence_wait_callback);
+
+ ret = sync_fence_wait_async(katom->fence, &katom->sync_waiter);
+
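+ /* sync_fence_wait_async() returns 1 if the fence was already signalled,
+ * 0 if the waiter was queued, or a negative error code on failure.
+ */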
+ if (ret == 1) {
+ /* Already signalled */
+ return 0;
+ }
+
+ if (ret < 0) {
+ katom->event_code = BASE_JD_EVENT_JOB_CANCELLED;
+ /* We should cause the dependent jobs in the bag to be failed,
+ * to do this we schedule the work queue to complete this job */
+ KBASE_DEBUG_ASSERT(0 == object_is_on_stack(&katom->work));
+ INIT_WORK(&katom->work, kbase_fence_wait_worker);
+ queue_work(katom->kctx->jctx.job_done_wq, &katom->work);
+ }
+
+#ifdef CONFIG_MALI_FENCE_DEBUG
+ /* The timeout code will add this job to the list of waiting soft jobs.
+ */
+ kbasep_add_waiting_with_timeout(katom);
+#else
+ kbasep_add_waiting_soft_job(katom);
+#endif
+
+ return 1;
+}
+
+static void kbase_fence_cancel_wait(struct kbase_jd_atom *katom)
+{
+ if (sync_fence_cancel_async(katom->fence, &katom->sync_waiter) != 0) {
+ /* The wait wasn't cancelled - leave the cleanup for kbase_fence_wait_callback */
+ return;
+ }
+
+ /* Wait was cancelled - zap the atoms */
+ katom->event_code = BASE_JD_EVENT_JOB_CANCELLED;
+
+ kbasep_remove_waiting_soft_job(katom);
+ kbase_finish_soft_job(katom);
+
+ if (jd_done_nolock(katom, NULL))
+ kbase_js_sched_all(katom->kctx->kbdev);
+}
+#endif /* CONFIG_SYNC */
+
+static void kbasep_soft_event_complete_job(struct work_struct *work)
+{
+ struct kbase_jd_atom *katom = container_of(work, struct kbase_jd_atom,
+ work);
+ struct kbase_context *kctx = katom->kctx;
+ int resched;
+
+ mutex_lock(&kctx->jctx.lock);
+ resched = jd_done_nolock(katom, NULL);
+ mutex_unlock(&kctx->jctx.lock);
+
+ if (resched)
+ kbase_js_sched_all(kctx->kbdev);
+}
+
+void kbasep_complete_triggered_soft_events(struct kbase_context *kctx, u64 evt)
+{
+ int cancel_timer = 1;
+ struct list_head *entry, *tmp;
+ unsigned long lflags;
+
+ spin_lock_irqsave(&kctx->waiting_soft_jobs_lock, lflags);
+ list_for_each_safe(entry, tmp, &kctx->waiting_soft_jobs) {
+ struct kbase_jd_atom *katom = list_entry(
+ entry, struct kbase_jd_atom, queue);
+
+ switch (katom->core_req & BASE_JD_REQ_SOFT_JOB_TYPE) {
+ case BASE_JD_REQ_SOFT_EVENT_WAIT:
+ if (katom->jc == evt) {
+ list_del(&katom->queue);
+
+ katom->event_code = BASE_JD_EVENT_DONE;
+ INIT_WORK(&katom->work,
+ kbasep_soft_event_complete_job);
+ queue_work(kctx->jctx.job_done_wq,
+ &katom->work);
+ } else {
+ /* There are still other waiting jobs, we cannot
+ * cancel the timer yet.
+ */
+ cancel_timer = 0;
+ }
+ break;
+#ifdef CONFIG_MALI_FENCE_DEBUG
+ case BASE_JD_REQ_SOFT_FENCE_WAIT:
+ /* Keep the timer running if fence debug is enabled and
+ * there are waiting fence jobs.
+ */
+ cancel_timer = 0;
+ break;
+#endif
+ }
+ }
+
+ if (cancel_timer)
+ del_timer(&kctx->soft_job_timeout);
+ spin_unlock_irqrestore(&kctx->waiting_soft_jobs_lock, lflags);
+}
+
+#ifdef CONFIG_MALI_FENCE_DEBUG
+static char *kbase_fence_debug_status_string(int status)
+{
+ if (status == 0)
+ return "signaled";
+ else if (status > 0)
+ return "active";
+ else
+ return "error";
+}
+
+static void kbase_fence_debug_check_atom(struct kbase_jd_atom *katom)
+{
+ struct kbase_context *kctx = katom->kctx;
+ struct device *dev = kctx->kbdev->dev;
+ int i;
+
+ for (i = 0; i < 2; i++) {
+ struct kbase_jd_atom *dep;
+
+ list_for_each_entry(dep, &katom->dep_head[i], dep_item[i]) {
+ if (dep->status == KBASE_JD_ATOM_STATE_UNUSED ||
+ dep->status == KBASE_JD_ATOM_STATE_COMPLETED)
+ continue;
+
+ if ((dep->core_req & BASE_JD_REQ_SOFT_JOB_TYPE)
+ == BASE_JD_REQ_SOFT_FENCE_TRIGGER) {
+ struct sync_fence *fence = dep->fence;
+ int status = kbase_fence_get_status(fence);
+
+ /* Found blocked trigger fence. */
+ dev_warn(dev,
+ "\tVictim trigger atom %d fence [%p] %s: %s\n",
+ kbase_jd_atom_id(kctx, dep),
+ fence, fence->name,
+ kbase_fence_debug_status_string(status));
+ }
+
+ kbase_fence_debug_check_atom(dep);
+ }
+ }
+}
+
+static void kbase_fence_debug_wait_timeout(struct kbase_jd_atom *katom)
+{
+ struct kbase_context *kctx = katom->kctx;
+ struct device *dev = katom->kctx->kbdev->dev;
+ struct sync_fence *fence = katom->fence;
+ int timeout_ms = atomic_read(&kctx->kbdev->js_data.soft_job_timeout_ms);
+ int status = kbase_fence_get_status(fence);
+ unsigned long lflags;
+
+ spin_lock_irqsave(&kctx->waiting_soft_jobs_lock, lflags);
+
+ dev_warn(dev, "ctx %d_%d: Atom %d still waiting for fence [%p] after %dms\n",
+ kctx->tgid, kctx->id,
+ kbase_jd_atom_id(kctx, katom),
+ fence, timeout_ms);
+ dev_warn(dev, "\tGuilty fence [%p] %s: %s\n",
+ fence, fence->name,
+ kbase_fence_debug_status_string(status));
+
+ /* Search for blocked trigger atoms */
+ kbase_fence_debug_check_atom(katom);
+
+ spin_unlock_irqrestore(&kctx->waiting_soft_jobs_lock, lflags);
+
+ /* Dump out the full state of all the Android sync fences.
+ * The function sync_dump() isn't exported to modules, so force
+ * sync_fence_wait() to time out to trigger sync_dump().
+ */
+ sync_fence_wait(fence, 1);
+}
+
+struct kbase_fence_debug_work {
+ struct kbase_jd_atom *katom;
+ struct work_struct work;
+};
+
+static void kbase_fence_debug_wait_timeout_worker(struct work_struct *work)
+{
+ struct kbase_fence_debug_work *w = container_of(work,
+ struct kbase_fence_debug_work, work);
+ struct kbase_jd_atom *katom = w->katom;
+ struct kbase_context *kctx = katom->kctx;
+
+ mutex_lock(&kctx->jctx.lock);
+ kbase_fence_debug_wait_timeout(katom);
+ mutex_unlock(&kctx->jctx.lock);
+
+ kfree(w);
+}
+
+static void kbase_fence_debug_timeout(struct kbase_jd_atom *katom)
+{
+ struct kbase_fence_debug_work *work;
+ struct kbase_context *kctx = katom->kctx;
+
+ /* Enqueue fence debug worker. Use job_done_wq to get
+ * debug print ordered with job completion.
+ */
+ work = kzalloc(sizeof(struct kbase_fence_debug_work), GFP_ATOMIC);
+ /* Ignore allocation failure. */
+ if (work) {
+ work->katom = katom;
+ INIT_WORK(&work->work, kbase_fence_debug_wait_timeout_worker);
+ queue_work(kctx->jctx.job_done_wq, &work->work);
+ }
+}
+#endif /* CONFIG_MALI_FENCE_DEBUG */
+
+void kbasep_soft_job_timeout_worker(unsigned long data)
+{
+ struct kbase_context *kctx = (struct kbase_context *)data;
+ u32 timeout_ms = (u32)atomic_read(
+ &kctx->kbdev->js_data.soft_job_timeout_ms);
+ struct timer_list *timer = &kctx->soft_job_timeout;
+ ktime_t cur_time = ktime_get();
+ bool restarting = false;
+ unsigned long lflags;
+ struct list_head *entry, *tmp;
+
+ spin_lock_irqsave(&kctx->waiting_soft_jobs_lock, lflags);
+ list_for_each_safe(entry, tmp, &kctx->waiting_soft_jobs) {
+ struct kbase_jd_atom *katom = list_entry(entry,
+ struct kbase_jd_atom, queue);
+ s64 elapsed_time = ktime_to_ms(ktime_sub(cur_time,
+ katom->start_timestamp));
+
+ if (elapsed_time < (s64)timeout_ms) {
+ restarting = true;
+ continue;
+ }
+
+ switch (katom->core_req & BASE_JD_REQ_SOFT_JOB_TYPE) {
+ case BASE_JD_REQ_SOFT_EVENT_WAIT:
+ /* Take it out of the list to ensure that it
+ * will be cancelled in all cases
+ */
+ list_del(&katom->queue);
+
+ katom->event_code = BASE_JD_EVENT_JOB_CANCELLED;
+ INIT_WORK(&katom->work, kbasep_soft_event_complete_job);
+ queue_work(kctx->jctx.job_done_wq, &katom->work);
+ break;
+#ifdef CONFIG_MALI_FENCE_DEBUG
+ case BASE_JD_REQ_SOFT_FENCE_WAIT:
+ kbase_fence_debug_timeout(katom);
+ break;
+#endif
+ }
+ }
+
+ if (restarting)
+ mod_timer(timer, jiffies + msecs_to_jiffies(timeout_ms));
+ spin_unlock_irqrestore(&kctx->waiting_soft_jobs_lock, lflags);
+}
+
+static int kbasep_soft_event_wait(struct kbase_jd_atom *katom)
+{
+ struct kbase_context *kctx = katom->kctx;
+ unsigned char status;
+
+ /* The status of this soft-job is stored in jc */
+ if (kbasep_read_soft_event_status(kctx, katom->jc, &status)) {
+ katom->event_code = BASE_JD_EVENT_JOB_CANCELLED;
+ return 0;
+ }
+
+ if (status == BASE_JD_SOFT_EVENT_SET)
+ return 0; /* Event already set, nothing to do */
+
+ kbasep_add_waiting_with_timeout(katom);
+
+ return 1;
+}
+
+static void kbasep_soft_event_update_locked(struct kbase_jd_atom *katom,
+ unsigned char new_status)
+{
+ /* Complete jobs waiting on the same event */
+ struct kbase_context *kctx = katom->kctx;
+
+ if (kbasep_write_soft_event_status(kctx, katom->jc, new_status) != 0) {
+ katom->event_code = BASE_JD_EVENT_JOB_CANCELLED;
+ return;
+ }
+
+ if (new_status == BASE_JD_SOFT_EVENT_SET)
+ kbasep_complete_triggered_soft_events(kctx, katom->jc);
+}
+
+/**
+ * kbase_soft_event_update() - Update soft event state
+ * @kctx: Pointer to context
+ * @event: Event to update
+ * @new_status: New status value of event
+ *
+ * Update the event, and wake up any atoms waiting for the event.
+ *
+ * Return: 0 on success, a negative error code on failure.
+ */
+int kbase_soft_event_update(struct kbase_context *kctx,
+ u64 event,
+ unsigned char new_status)
+{
+ int err = 0;
+
+ mutex_lock(&kctx->jctx.lock);
+
+ if (kbasep_write_soft_event_status(kctx, event, new_status)) {
+ err = -ENOENT;
+ goto out;
+ }
+
+ if (new_status == BASE_JD_SOFT_EVENT_SET)
+ kbasep_complete_triggered_soft_events(kctx, event);
+
+out:
+ mutex_unlock(&kctx->jctx.lock);
+
+ return err;
+}
+
+static void kbasep_soft_event_cancel_job(struct kbase_jd_atom *katom)
+{
+ katom->event_code = BASE_JD_EVENT_JOB_CANCELLED;
+ if (jd_done_nolock(katom, NULL))
+ kbase_js_sched_all(katom->kctx->kbdev);
+}
+
+struct kbase_debug_copy_buffer {
+ size_t size;
+ struct page **pages;
+ int nr_pages;
+ size_t offset;
+ struct kbase_mem_phy_alloc *gpu_alloc;
+
+ struct page **extres_pages;
+ int nr_extres_pages;
+};
+
+static inline void free_user_buffer(struct kbase_debug_copy_buffer *buffer)
+{
+ struct page **pages = buffer->extres_pages;
+ int nr_pages = buffer->nr_extres_pages;
+
+ if (pages) {
+ int i;
+
+ for (i = 0; i < nr_pages; i++) {
+ struct page *pg = pages[i];
+
+ if (pg)
+ put_page(pg);
+ }
+ kfree(pages);
+ }
+}
+
+static void kbase_debug_copy_finish(struct kbase_jd_atom *katom)
+{
+ struct kbase_debug_copy_buffer *buffers =
+ (struct kbase_debug_copy_buffer *)(uintptr_t)katom->jc;
+ unsigned int i;
+ unsigned int nr = katom->nr_extres;
+
+ if (!buffers)
+ return;
+
+ kbase_gpu_vm_lock(katom->kctx);
+ for (i = 0; i < nr; i++) {
+ int p;
+ struct kbase_mem_phy_alloc *gpu_alloc = buffers[i].gpu_alloc;
+
+ if (!buffers[i].pages)
+ break;
+ for (p = 0; p < buffers[i].nr_pages; p++) {
+ struct page *pg = buffers[i].pages[p];
+
+ if (pg)
+ put_page(pg);
+ }
+ kfree(buffers[i].pages);
+ if (gpu_alloc) {
+ switch (gpu_alloc->type) {
+ case KBASE_MEM_TYPE_IMPORTED_USER_BUF:
+ {
+ free_user_buffer(&buffers[i]);
+ break;
+ }
+ default:
+ /* Nothing to be done. */
+ break;
+ }
+ kbase_mem_phy_alloc_put(gpu_alloc);
+ }
+ }
+ kbase_gpu_vm_unlock(katom->kctx);
+ kfree(buffers);
+
+ katom->jc = 0;
+}
+
+static int kbase_debug_copy_prepare(struct kbase_jd_atom *katom)
+{
+ struct kbase_debug_copy_buffer *buffers;
+ struct base_jd_debug_copy_buffer *user_buffers = NULL;
+ unsigned int i;
+ unsigned int nr = katom->nr_extres;
+ int ret = 0;
+ void __user *user_structs = (void __user *)(uintptr_t)katom->jc;
+
+ if (!user_structs)
+ return -EINVAL;
+
+ buffers = kcalloc(nr, sizeof(*buffers), GFP_KERNEL);
+ if (!buffers) {
+ ret = -ENOMEM;
+ katom->jc = 0;
+ goto out_cleanup;
+ }
+ katom->jc = (u64)(uintptr_t)buffers;
+
+ user_buffers = kmalloc_array(nr, sizeof(*user_buffers), GFP_KERNEL);
+
+ if (!user_buffers) {
+ ret = -ENOMEM;
+ goto out_cleanup;
+ }
+
+ ret = copy_from_user(user_buffers, user_structs,
+ sizeof(*user_buffers)*nr);
+ if (ret)
+ goto out_cleanup;
+
+ for (i = 0; i < nr; i++) {
+ u64 addr = user_buffers[i].address;
+ u64 page_addr = addr & PAGE_MASK;
+ u64 end_page_addr = addr + user_buffers[i].size - 1;
+ u64 last_page_addr = end_page_addr & PAGE_MASK;
+ int nr_pages = (last_page_addr-page_addr)/PAGE_SIZE+1;
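+ /* Example: with 4 KiB pages, addr == 0x1ff0 and size == 0x20 gives
+ * page_addr == 0x1000, last_page_addr == 0x2000 and nr_pages == 2,
+ * because the buffer straddles a page boundary.
+ */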
+ int pinned_pages;
+ struct kbase_va_region *reg;
+ struct base_external_resource user_extres;
+
+ if (!addr)
+ continue;
+
+ buffers[i].nr_pages = nr_pages;
+ buffers[i].offset = addr & ~PAGE_MASK;
+ if (buffers[i].offset >= PAGE_SIZE) {
+ ret = -EINVAL;
+ goto out_cleanup;
+ }
+ buffers[i].size = user_buffers[i].size;
+
+ buffers[i].pages = kcalloc(nr_pages, sizeof(struct page *),
+ GFP_KERNEL);
+ if (!buffers[i].pages) {
+ ret = -ENOMEM;
+ goto out_cleanup;
+ }
+
+ pinned_pages = get_user_pages_fast(page_addr,
+ nr_pages,
+ 1, /* Write */
+ buffers[i].pages);
+ if (pinned_pages < 0) {
+ ret = pinned_pages;
+ goto out_cleanup;
+ }
+ if (pinned_pages != nr_pages) {
+ ret = -EINVAL;
+ goto out_cleanup;
+ }
+
+ user_extres = user_buffers[i].extres;
+ if (user_extres.ext_resource == 0ULL) {
+ ret = -EINVAL;
+ goto out_cleanup;
+ }
+
+ kbase_gpu_vm_lock(katom->kctx);
+ reg = kbase_region_tracker_find_region_enclosing_address(
+ katom->kctx, user_extres.ext_resource &
+ ~BASE_EXT_RES_ACCESS_EXCLUSIVE);
+
+ if (NULL == reg || NULL == reg->gpu_alloc ||
+ (reg->flags & KBASE_REG_FREE)) {
+ ret = -EINVAL;
+ goto out_unlock;
+ }
+
+ buffers[i].gpu_alloc = kbase_mem_phy_alloc_get(reg->gpu_alloc);
+ buffers[i].nr_extres_pages = reg->nr_pages;
+
+ if (reg->nr_pages*PAGE_SIZE != buffers[i].size)
+ dev_warn(katom->kctx->kbdev->dev, "Copy buffer is not the same size as the external resource to copy.\n");
+
+ switch (reg->gpu_alloc->type) {
+ case KBASE_MEM_TYPE_IMPORTED_USER_BUF:
+ {
+ struct kbase_mem_phy_alloc *alloc = reg->gpu_alloc;
+ unsigned long nr_pages =
+ alloc->imported.user_buf.nr_pages;
+
+ if (alloc->imported.user_buf.mm != current->mm) {
+ ret = -EINVAL;
+ goto out_unlock;
+ }
+ buffers[i].extres_pages = kcalloc(nr_pages,
+ sizeof(struct page *), GFP_KERNEL);
+ if (!buffers[i].extres_pages) {
+ ret = -ENOMEM;
+ goto out_unlock;
+ }
+
+ ret = get_user_pages_fast(
+ alloc->imported.user_buf.address,
+ nr_pages, 0,
+ buffers[i].extres_pages);
+ if (ret != nr_pages)
+ goto out_unlock;
+ ret = 0;
+ break;
+ }
+ case KBASE_MEM_TYPE_IMPORTED_UMP:
+ {
+ dev_warn(katom->kctx->kbdev->dev,
+ "UMP is not supported for debug_copy jobs\n");
+ ret = -EINVAL;
+ goto out_unlock;
+ }
+ default:
+ /* Nothing to be done. */
+ break;
+ }
+ kbase_gpu_vm_unlock(katom->kctx);
+ }
+ kfree(user_buffers);
+
+ return ret;
+
+out_unlock:
+ kbase_gpu_vm_unlock(katom->kctx);
+
+out_cleanup:
+ kfree(buffers);
+ kfree(user_buffers);
+
+ /* Frees allocated memory for kbase_debug_copy_job struct, including
+ * members, and sets jc to 0 */
+ kbase_debug_copy_finish(katom);
+ return ret;
+}
+
+static void kbase_mem_copy_from_extres_page(struct kbase_context *kctx,
+ void *extres_page, struct page **pages, unsigned int nr_pages,
+ unsigned int *target_page_nr, size_t offset, size_t *to_copy)
+{
+ void *target_page = kmap(pages[*target_page_nr]);
+ size_t chunk = PAGE_SIZE-offset;
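+ /* A source page may straddle two destination pages when offset != 0:
+ * copy the first (PAGE_SIZE - offset) bytes here, then the remaining
+ * bytes into the start of the next destination page below.
+ */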
+
+ if (!target_page) {
+ *target_page_nr += 1;
+ dev_warn(kctx->kbdev->dev, "kmap failed in debug_copy job.");
+ return;
+ }
+
+ chunk = min(chunk, *to_copy);
+
+ memcpy(target_page + offset, extres_page, chunk);
+ *to_copy -= chunk;
+
+ kunmap(pages[*target_page_nr]);
+
+ *target_page_nr += 1;
+ if (*target_page_nr >= nr_pages)
+ return;
+
+ target_page = kmap(pages[*target_page_nr]);
+ if (!target_page) {
+ *target_page_nr += 1;
+ dev_warn(kctx->kbdev->dev, "kmap failed in debug_copy job.");
+ return;
+ }
+
+ KBASE_DEBUG_ASSERT(target_page);
+
+ chunk = min(offset, *to_copy);
+ memcpy(target_page, extres_page + PAGE_SIZE-offset, chunk);
+ *to_copy -= chunk;
+
+ kunmap(pages[*target_page_nr]);
+}
+
+static int kbase_mem_copy_from_extres(struct kbase_context *kctx,
+ struct kbase_debug_copy_buffer *buf_data)
+{
+ unsigned int i;
+ unsigned int target_page_nr = 0;
+ struct page **pages = buf_data->pages;
+ u64 offset = buf_data->offset;
+ size_t extres_size = buf_data->nr_extres_pages*PAGE_SIZE;
+ size_t to_copy = min(extres_size, buf_data->size);
+ struct kbase_mem_phy_alloc *gpu_alloc = buf_data->gpu_alloc;
+ int ret = 0;
+
+ KBASE_DEBUG_ASSERT(pages != NULL);
+
+ kbase_gpu_vm_lock(kctx);
+ if (!gpu_alloc) {
+ ret = -EINVAL;
+ goto out_unlock;
+ }
+
+ switch (gpu_alloc->type) {
+ case KBASE_MEM_TYPE_IMPORTED_USER_BUF:
+ {
+ for (i = 0; i < buf_data->nr_extres_pages; i++) {
+ struct page *pg = buf_data->extres_pages[i];
+ void *extres_page = kmap(pg);
+
+ if (extres_page)
+ kbase_mem_copy_from_extres_page(kctx,
+ extres_page, pages,
+ buf_data->nr_pages,
+ &target_page_nr,
+ offset, &to_copy);
+
+ kunmap(pg);
+ if (target_page_nr >= buf_data->nr_pages)
+ break;
+ }
+ break;
+ }
+#ifdef CONFIG_DMA_SHARED_BUFFER
+ case KBASE_MEM_TYPE_IMPORTED_UMM: {
+ struct dma_buf *dma_buf = gpu_alloc->imported.umm.dma_buf;
+
+ KBASE_DEBUG_ASSERT(dma_buf != NULL);
+ KBASE_DEBUG_ASSERT(dma_buf->size ==
+ buf_data->nr_extres_pages * PAGE_SIZE);
+
+ ret = dma_buf_begin_cpu_access(dma_buf,
+#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 6, 0) && !defined(CONFIG_CHROMEOS)
+ 0, buf_data->nr_extres_pages*PAGE_SIZE,
+#endif
+ DMA_FROM_DEVICE);
+ if (ret)
+ goto out_unlock;
+
+ for (i = 0; i < buf_data->nr_extres_pages; i++) {
+
+ void *extres_page = dma_buf_kmap(dma_buf, i);
+
+ if (extres_page)
+ kbase_mem_copy_from_extres_page(kctx,
+ extres_page, pages,
+ buf_data->nr_pages,
+ &target_page_nr,
+ offset, &to_copy);
+
+ dma_buf_kunmap(dma_buf, i, extres_page);
+ if (target_page_nr >= buf_data->nr_pages)
+ break;
+ }
+ dma_buf_end_cpu_access(dma_buf,
+#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 6, 0) && !defined(CONFIG_CHROMEOS)
+ 0, buf_data->nr_extres_pages*PAGE_SIZE,
+#endif
+ DMA_FROM_DEVICE);
+ break;
+ }
+#endif
+ default:
+ ret = -EINVAL;
+ }
+out_unlock:
+ kbase_gpu_vm_unlock(kctx);
+ return ret;
+
+}
+
+static int kbase_debug_copy(struct kbase_jd_atom *katom)
+{
+ struct kbase_debug_copy_buffer *buffers =
+ (struct kbase_debug_copy_buffer *)(uintptr_t)katom->jc;
+ unsigned int i;
+
+ for (i = 0; i < katom->nr_extres; i++) {
+ int res = kbase_mem_copy_from_extres(katom->kctx, &buffers[i]);
+
+ if (res)
+ return res;
+ }
+
+ return 0;
+}
+
+static int kbase_jit_allocate_prepare(struct kbase_jd_atom *katom)
+{
+ __user void *data = (__user void *)(uintptr_t) katom->jc;
+ struct base_jit_alloc_info *info;
+ int ret;
+
+ /* Fail the job if there is no info structure */
+ if (!data) {
+ ret = -EINVAL;
+ goto fail;
+ }
+
+ /* Copy the information for safe access and future storage */
+ info = kzalloc(sizeof(*info), GFP_KERNEL);
+ if (!info) {
+ ret = -ENOMEM;
+ goto fail;
+ }
+
+ if (copy_from_user(info, data, sizeof(*info)) != 0) {
+ ret = -EINVAL;
+ goto free_info;
+ }
+
+ /* If the ID is zero then fail the job */
+ if (info->id == 0) {
+ ret = -EINVAL;
+ goto free_info;
+ }
+
+ /* Sanity check that the PA fits within the VA */
+ if (info->va_pages < info->commit_pages) {
+ ret = -EINVAL;
+ goto free_info;
+ }
+
+ /* Ensure the GPU address is correctly aligned */
+ if ((info->gpu_alloc_addr & 0x7) != 0) {
+ ret = -EINVAL;
+ goto free_info;
+ }
+
+ /* Replace the user pointer with our kernel allocated info structure */
+ katom->jc = (u64)(uintptr_t) info;
+
+ /*
+ * Note:
+ * The provided info->gpu_alloc_addr isn't validated here as
+ * userland can cache allocations which means that even
+ * though the region is valid it doesn't represent the
+ * same thing it used to.
+ *
+ * Complete validation of va_pages, commit_pages and extent
+ * isn't done here as it will be done during the call to
+ * kbase_mem_alloc.
+ */
+ return 0;
+
+free_info:
+ kfree(info);
+fail:
+ katom->jc = 0;
+ return ret;
+}
+
+static void kbase_jit_allocate_process(struct kbase_jd_atom *katom)
+{
+ struct kbase_context *kctx = katom->kctx;
+ struct base_jit_alloc_info *info;
+ struct kbase_va_region *reg;
+ struct kbase_vmap_struct mapping;
+ u64 *ptr;
+
+ info = (struct base_jit_alloc_info *) (uintptr_t) katom->jc;
+
+ /* The JIT ID is still in use so fail the allocation */
+ if (kctx->jit_alloc[info->id]) {
+ katom->event_code = BASE_JD_EVENT_MEM_GROWTH_FAILED;
+ return;
+ }
+
+ /*
+ * Mark the allocation so we know it's in use even if the
+ * allocation itself fails.
+ */
+ kctx->jit_alloc[info->id] = (struct kbase_va_region *) -1;
+
+ /* Create a JIT allocation */
+ reg = kbase_jit_allocate(kctx, info);
+ if (!reg) {
+ katom->event_code = BASE_JD_EVENT_MEM_GROWTH_FAILED;
+ return;
+ }
+
+ /*
+ * Write the address of the JIT allocation to the user provided
+ * GPU allocation.
+ */
+ ptr = kbase_vmap(kctx, info->gpu_alloc_addr, sizeof(*ptr),
+ &mapping);
+ if (!ptr) {
+ /*
+ * Leave the allocation "live" as the JIT free job will be
+ * submitted anyway.
+ */
+ katom->event_code = BASE_JD_EVENT_JOB_INVALID;
+ return;
+ }
+
+ *ptr = reg->start_pfn << PAGE_SHIFT;
+ kbase_vunmap(kctx, &mapping);
+
+ katom->event_code = BASE_JD_EVENT_DONE;
+
+ /*
+ * Bind it to the user provided ID. Do this last so we can check for
+ * the JIT free racing this JIT alloc job.
+ */
+ kctx->jit_alloc[info->id] = reg;
+}
+
+static void kbase_jit_allocate_finish(struct kbase_jd_atom *katom)
+{
+ struct base_jit_alloc_info *info;
+
+ info = (struct base_jit_alloc_info *) (uintptr_t) katom->jc;
+ /* Free the info structure */
+ kfree(info);
+}
+
+static void kbase_jit_free_process(struct kbase_jd_atom *katom)
+{
+ struct kbase_context *kctx = katom->kctx;
+ u8 id = (u8) katom->jc;
+
+ /*
+ * If the ID is zero or it is not in use yet then fail the job.
+ */
+ if ((id == 0) || (kctx->jit_alloc[id] == NULL)) {
+ katom->event_code = BASE_JD_EVENT_JOB_INVALID;
+ return;
+ }
+
+ /*
+ * If the ID is valid but the allocation request failed, still succeed
+ * this soft job but don't try to free the allocation.
+ */
+ if (kctx->jit_alloc[id] != (struct kbase_va_region *) -1)
+ kbase_jit_free(kctx, kctx->jit_alloc[id]);
+
+ kctx->jit_alloc[id] = NULL;
+}
+
+static int kbase_ext_res_prepare(struct kbase_jd_atom *katom)
+{
+ __user struct base_external_resource_list *user_ext_res;
+ struct base_external_resource_list *ext_res;
+ u64 count = 0;
+ size_t copy_size;
+ int ret;
+
+ user_ext_res = (__user struct base_external_resource_list *)
+ (uintptr_t) katom->jc;
+
+ /* Fail the job if there is no info structure */
+ if (!user_ext_res) {
+ ret = -EINVAL;
+ goto fail;
+ }
+
+ if (copy_from_user(&count, &user_ext_res->count, sizeof(u64)) != 0) {
+ ret = -EINVAL;
+ goto fail;
+ }
+
+ /* Is the number of external resources in range? */
+ if (!count || count > BASE_EXT_RES_COUNT_MAX) {
+ ret = -EINVAL;
+ goto fail;
+ }
+
+ /* Copy the information for safe access and future storage */
+ copy_size = sizeof(*ext_res);
+ copy_size += sizeof(struct base_external_resource) * (count - 1);
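+ /* struct base_external_resource_list already declares one ext_res entry,
+ * so only (count - 1) further entries need to be allocated.
+ */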
+ ext_res = kzalloc(copy_size, GFP_KERNEL);
+ if (!ext_res) {
+ ret = -ENOMEM;
+ goto fail;
+ }
+
+ if (copy_from_user(ext_res, user_ext_res, copy_size) != 0) {
+ ret = -EINVAL;
+ goto free_info;
+ }
+
+ /*
+ * Overwrite the count with the first value in case it was changed
+ * after the fact.
+ */
+ ext_res->count = count;
+
+ /*
+ * Replace the user pointer with our kernel allocated
+ * ext_res structure.
+ */
+ katom->jc = (u64)(uintptr_t) ext_res;
+
+ return 0;
+
+free_info:
+ kfree(ext_res);
+fail:
+ return ret;
+}
+
+static void kbase_ext_res_process(struct kbase_jd_atom *katom, bool map)
+{
+ struct base_external_resource_list *ext_res;
+ int i;
+ bool failed = false;
+
+ ext_res = (struct base_external_resource_list *) (uintptr_t) katom->jc;
+ if (!ext_res)
+ goto failed_jc;
+
+ kbase_gpu_vm_lock(katom->kctx);
+
+ for (i = 0; i < ext_res->count; i++) {
+ u64 gpu_addr;
+
+ gpu_addr = ext_res->ext_res[i].ext_resource &
+ ~BASE_EXT_RES_ACCESS_EXCLUSIVE;
+ if (map) {
+ if (!kbase_sticky_resource_acquire(katom->kctx,
+ gpu_addr))
+ goto failed_loop;
+ } else
+ if (!kbase_sticky_resource_release(katom->kctx, NULL,
+ gpu_addr))
+ failed = true;
+ }
+
+ /*
+ * In the unmap case we continue unmapping the remaining resources even
+ * on failure, but we always report failure if _any_ unmap request
+ * fails.
+ */
+ if (failed)
+ katom->event_code = BASE_JD_EVENT_JOB_INVALID;
+ else
+ katom->event_code = BASE_JD_EVENT_DONE;
+
+ kbase_gpu_vm_unlock(katom->kctx);
+
+ return;
+
+failed_loop:
+ while (--i >= 0) {
+ u64 gpu_addr;
+
+ gpu_addr = ext_res->ext_res[i].ext_resource &
+ ~BASE_EXT_RES_ACCESS_EXCLUSIVE;
+
+ kbase_sticky_resource_release(katom->kctx, NULL, gpu_addr);
+ }
+
+ katom->event_code = BASE_JD_EVENT_JOB_INVALID;
+ kbase_gpu_vm_unlock(katom->kctx);
+
+failed_jc:
+ return;
+}
+
+static void kbase_ext_res_finish(struct kbase_jd_atom *katom)
+{
+ struct base_external_resource_list *ext_res;
+
+ ext_res = (struct base_external_resource_list *) (uintptr_t) katom->jc;
+ /* Free the info structure */
+ kfree(ext_res);
+}
+
+int kbase_process_soft_job(struct kbase_jd_atom *katom)
+{
+ switch (katom->core_req & BASE_JD_REQ_SOFT_JOB_TYPE) {
+ case BASE_JD_REQ_SOFT_DUMP_CPU_GPU_TIME:
+ return kbase_dump_cpu_gpu_time(katom);
+#ifdef CONFIG_SYNC
+ case BASE_JD_REQ_SOFT_FENCE_TRIGGER:
+ KBASE_DEBUG_ASSERT(katom->fence != NULL);
+ katom->event_code = kbase_fence_trigger(katom, katom->event_code == BASE_JD_EVENT_DONE ? 0 : -EFAULT);
+ /* Release the reference as we don't need it any more */
+ sync_fence_put(katom->fence);
+ katom->fence = NULL;
+ break;
+ case BASE_JD_REQ_SOFT_FENCE_WAIT:
+ return kbase_fence_wait(katom);
+#endif /* CONFIG_SYNC */
+ case BASE_JD_REQ_SOFT_REPLAY:
+ return kbase_replay_process(katom);
+ case BASE_JD_REQ_SOFT_EVENT_WAIT:
+ return kbasep_soft_event_wait(katom);
+ case BASE_JD_REQ_SOFT_EVENT_SET:
+ kbasep_soft_event_update_locked(katom, BASE_JD_SOFT_EVENT_SET);
+ break;
+ case BASE_JD_REQ_SOFT_EVENT_RESET:
+ kbasep_soft_event_update_locked(katom, BASE_JD_SOFT_EVENT_RESET);
+ break;
+ case BASE_JD_REQ_SOFT_DEBUG_COPY:
+ {
+ int res = kbase_debug_copy(katom);
+
+ if (res)
+ katom->event_code = BASE_JD_EVENT_JOB_INVALID;
+ break;
+ }
+ case BASE_JD_REQ_SOFT_JIT_ALLOC:
+ return -EINVAL; /* Temporarily disabled */
+ kbase_jit_allocate_process(katom);
+ break;
+ case BASE_JD_REQ_SOFT_JIT_FREE:
+ return -EINVAL; /* Temporarily disabled */
+ kbase_jit_free_process(katom);
+ break;
+ case BASE_JD_REQ_SOFT_EXT_RES_MAP:
+ kbase_ext_res_process(katom, true);
+ break;
+ case BASE_JD_REQ_SOFT_EXT_RES_UNMAP:
+ kbase_ext_res_process(katom, false);
+ break;
+ }
+
+ /* Atom is complete */
+ return 0;
+}
+
+void kbase_cancel_soft_job(struct kbase_jd_atom *katom)
+{
+ switch (katom->core_req & BASE_JD_REQ_SOFT_JOB_TYPE) {
+#ifdef CONFIG_SYNC
+ case BASE_JD_REQ_SOFT_FENCE_WAIT:
+ kbase_fence_cancel_wait(katom);
+ break;
+#endif
+ case BASE_JD_REQ_SOFT_EVENT_WAIT:
+ kbasep_soft_event_cancel_job(katom);
+ break;
+ default:
+ /* This soft-job doesn't support cancellation! */
+ KBASE_DEBUG_ASSERT(0);
+ }
+}
+
+int kbase_prepare_soft_job(struct kbase_jd_atom *katom)
+{
+ switch (katom->core_req & BASE_JD_REQ_SOFT_JOB_TYPE) {
+ case BASE_JD_REQ_SOFT_DUMP_CPU_GPU_TIME:
+ {
+ if (0 != (katom->jc & KBASE_CACHE_ALIGNMENT_MASK))
+ return -EINVAL;
+ }
+ break;
+#ifdef CONFIG_SYNC
+ case BASE_JD_REQ_SOFT_FENCE_TRIGGER:
+ {
+ struct base_fence fence;
+ int fd;
+
+ if (0 != copy_from_user(&fence, (__user void *)(uintptr_t) katom->jc, sizeof(fence)))
+ return -EINVAL;
+
+ fd = kbase_stream_create_fence(fence.basep.stream_fd);
+ if (fd < 0)
+ return -EINVAL;
+
+ katom->fence = sync_fence_fdget(fd);
+
+ if (katom->fence == NULL) {
+ /* The only way the fence can be NULL is if userspace closed it for us,
+  * so we don't need to clean it up. */
+ return -EINVAL;
+ }
+ fence.basep.fd = fd;
+ if (0 != copy_to_user((__user void *)(uintptr_t) katom->jc, &fence, sizeof(fence))) {
+ katom->fence = NULL;
+ sys_close(fd);
+ return -EINVAL;
+ }
+ }
+ break;
+ case BASE_JD_REQ_SOFT_FENCE_WAIT:
+ {
+ struct base_fence fence;
+
+ if (0 != copy_from_user(&fence, (__user void *)(uintptr_t) katom->jc, sizeof(fence)))
+ return -EINVAL;
+
+ /* Get a reference to the fence object */
+ katom->fence = sync_fence_fdget(fence.basep.fd);
+ if (katom->fence == NULL)
+ return -EINVAL;
+ }
+ break;
+#endif /* CONFIG_SYNC */
+ case BASE_JD_REQ_SOFT_JIT_ALLOC:
+ return kbase_jit_allocate_prepare(katom);
+ case BASE_JD_REQ_SOFT_REPLAY:
+ case BASE_JD_REQ_SOFT_JIT_FREE:
+ break;
+ case BASE_JD_REQ_SOFT_EVENT_WAIT:
+ case BASE_JD_REQ_SOFT_EVENT_SET:
+ case BASE_JD_REQ_SOFT_EVENT_RESET:
+ if (katom->jc == 0)
+ return -EINVAL;
+ break;
+ case BASE_JD_REQ_SOFT_DEBUG_COPY:
+ return kbase_debug_copy_prepare(katom);
+ case BASE_JD_REQ_SOFT_EXT_RES_MAP:
+ return kbase_ext_res_prepare(katom);
+ case BASE_JD_REQ_SOFT_EXT_RES_UNMAP:
+ return kbase_ext_res_prepare(katom);
+ default:
+ /* Unsupported soft-job */
+ return -EINVAL;
+ }
+ return 0;
+}
+
+void kbase_finish_soft_job(struct kbase_jd_atom *katom)
+{
+ switch (katom->core_req & BASE_JD_REQ_SOFT_JOB_TYPE) {
+ case BASE_JD_REQ_SOFT_DUMP_CPU_GPU_TIME:
+ /* Nothing to do */
+ break;
+#ifdef CONFIG_SYNC
+ case BASE_JD_REQ_SOFT_FENCE_TRIGGER:
+ /* If fence has not yet been signalled, do it now */
+ if (katom->fence) {
+ kbase_fence_trigger(katom, katom->event_code ==
+ BASE_JD_EVENT_DONE ? 0 : -EFAULT);
+ sync_fence_put(katom->fence);
+ katom->fence = NULL;
+ }
+ break;
+ case BASE_JD_REQ_SOFT_FENCE_WAIT:
+ /* Release the reference to the fence object */
+ sync_fence_put(katom->fence);
+ katom->fence = NULL;
+ break;
+#endif /* CONFIG_SYNC */
+
+ case BASE_JD_REQ_SOFT_DEBUG_COPY:
+ kbase_debug_copy_finish(katom);
+ break;
+ case BASE_JD_REQ_SOFT_JIT_ALLOC:
+ kbase_jit_allocate_finish(katom);
+ break;
+ case BASE_JD_REQ_SOFT_EXT_RES_MAP:
+ kbase_ext_res_finish(katom);
+ break;
+ case BASE_JD_REQ_SOFT_EXT_RES_UNMAP:
+ kbase_ext_res_finish(katom);
+ break;
+ }
+}
+
+void kbase_resume_suspended_soft_jobs(struct kbase_device *kbdev)
+{
+ LIST_HEAD(local_suspended_soft_jobs);
+ struct kbase_jd_atom *tmp_iter;
+ struct kbase_jd_atom *katom_iter;
+ struct kbasep_js_device_data *js_devdata;
+ bool resched = false;
+
+ KBASE_DEBUG_ASSERT(kbdev);
+
+ js_devdata = &kbdev->js_data;
+
+ /* Move out the entire list */
+ mutex_lock(&js_devdata->runpool_mutex);
+ list_splice_init(&js_devdata->suspended_soft_jobs_list,
+ &local_suspended_soft_jobs);
+ mutex_unlock(&js_devdata->runpool_mutex);
+
+ /*
+ * Each atom must be detached from the list and run separately -
+ * it could be re-added to the old list, but this is unlikely
+ */
+ list_for_each_entry_safe(katom_iter, tmp_iter,
+ &local_suspended_soft_jobs, dep_item[1]) {
+ struct kbase_context *kctx = katom_iter->kctx;
+
+ mutex_lock(&kctx->jctx.lock);
+
+ /* Remove from the global list */
+ list_del(&katom_iter->dep_item[1]);
+ /* Remove from the context's list of waiting soft jobs */
+ kbasep_remove_waiting_soft_job(katom_iter);
+
+ if (kbase_process_soft_job(katom_iter) == 0) {
+ kbase_finish_soft_job(katom_iter);
+ resched |= jd_done_nolock(katom_iter, NULL);
+ } else {
+ KBASE_DEBUG_ASSERT((katom_iter->core_req &
+ BASE_JD_REQ_SOFT_JOB_TYPE)
+ != BASE_JD_REQ_SOFT_REPLAY);
+ }
+
+ mutex_unlock(&kctx->jctx.lock);
+ }
+
+ if (resched)
+ kbase_js_sched_all(kbdev);
+}
diff --git a/midgard-0.r2p0/mali_kbase_strings.c b/midgard-0.r2p0/mali_kbase_strings.c
new file mode 100755
index 0000000..c98762c
--- /dev/null
+++ b/midgard-0.r2p0/mali_kbase_strings.c
@@ -0,0 +1,23 @@
+ /*
+ *
+ * (C) COPYRIGHT 2010-2016 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained
+ * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
+ * Boston, MA 02110-1301, USA.
+ *
+ */
+
+
+#include "mali_kbase_strings.h"
+
+#define KBASE_DRV_NAME "mali"
+#define KBASE_TIMELINE_NAME KBASE_DRV_NAME ".timeline"
+
+const char kbase_drv_name[] = KBASE_DRV_NAME;
+const char kbase_timeline_name[] = KBASE_TIMELINE_NAME;
diff --git a/midgard-0.r2p0/mali_kbase_strings.h b/midgard-0.r2p0/mali_kbase_strings.h
new file mode 100755
index 0000000..41b8fdb
--- /dev/null
+++ b/midgard-0.r2p0/mali_kbase_strings.h
@@ -0,0 +1,19 @@
+/*
+ *
+ * (C) COPYRIGHT 2010-2016 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained
+ * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
+ * Boston, MA 02110-1301, USA.
+ *
+ */
+
+
+
+extern const char kbase_drv_name[];
+extern const char kbase_timeline_name[];
diff --git a/midgard-0.r2p0/mali_kbase_sync.c b/midgard-0.r2p0/mali_kbase_sync.c
new file mode 100755
index 0000000..c5fb981
--- /dev/null
+++ b/midgard-0.r2p0/mali_kbase_sync.c
@@ -0,0 +1,182 @@
+/*
+ *
+ * (C) COPYRIGHT 2012-2015 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained
+ * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
+ * Boston, MA 02110-1301, USA.
+ *
+ */
+
+
+
+#ifdef CONFIG_SYNC
+
+#include <linux/seq_file.h>
+#include "sync.h"
+#include <mali_kbase.h>
+#include <mali_kbase_sync.h>
+
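+/* Per-timeline bookkeeping: @counter hands out a monotonically increasing
+ * order to each newly allocated sync point, while @signalled records the
+ * order of the most recently signalled point (see kbase_sync_signal_pt()). */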
+struct mali_sync_timeline {
+ struct sync_timeline timeline;
+ atomic_t counter;
+ atomic_t signalled;
+};
+
+struct mali_sync_pt {
+ struct sync_pt pt;
+ int order;
+ int result;
+};
+
+static struct mali_sync_timeline *to_mali_sync_timeline(struct sync_timeline *timeline)
+{
+ return container_of(timeline, struct mali_sync_timeline, timeline);
+}
+
+static struct mali_sync_pt *to_mali_sync_pt(struct sync_pt *pt)
+{
+ return container_of(pt, struct mali_sync_pt, pt);
+}
+
+static struct sync_pt *timeline_dup(struct sync_pt *pt)
+{
+ struct mali_sync_pt *mpt = to_mali_sync_pt(pt);
+ struct mali_sync_pt *new_mpt;
+ struct sync_pt *new_pt = sync_pt_create(sync_pt_parent(pt), sizeof(struct mali_sync_pt));
+
+ if (!new_pt)
+ return NULL;
+
+ new_mpt = to_mali_sync_pt(new_pt);
+ new_mpt->order = mpt->order;
+ new_mpt->result = mpt->result;
+
+ return new_pt;
+}
+
+static int timeline_has_signaled(struct sync_pt *pt)
+{
+ struct mali_sync_pt *mpt = to_mali_sync_pt(pt);
+ struct mali_sync_timeline *mtl = to_mali_sync_timeline(sync_pt_parent(pt));
+ int result = mpt->result;
+
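+ /* A point counts as signalled once the timeline's signalled counter
+  * has reached its order; a negative result is propagated as an error. */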
+ int diff = atomic_read(&mtl->signalled) - mpt->order;
+
+ if (diff >= 0)
+ return (result < 0) ? result : 1;
+
+ return 0;
+}
+
+static int timeline_compare(struct sync_pt *a, struct sync_pt *b)
+{
+ struct mali_sync_pt *ma = container_of(a, struct mali_sync_pt, pt);
+ struct mali_sync_pt *mb = container_of(b, struct mali_sync_pt, pt);
+
+ int diff = ma->order - mb->order;
+
+ if (diff == 0)
+ return 0;
+
+ return (diff < 0) ? -1 : 1;
+}
+
+static void timeline_value_str(struct sync_timeline *timeline, char *str,
+ int size)
+{
+ struct mali_sync_timeline *mtl = to_mali_sync_timeline(timeline);
+
+ snprintf(str, size, "%d", atomic_read(&mtl->signalled));
+}
+
+static void pt_value_str(struct sync_pt *pt, char *str, int size)
+{
+ struct mali_sync_pt *mpt = to_mali_sync_pt(pt);
+
+ snprintf(str, size, "%d(%d)", mpt->order, mpt->result);
+}
+
+static struct sync_timeline_ops mali_timeline_ops = {
+ .driver_name = "Mali",
+ .dup = timeline_dup,
+ .has_signaled = timeline_has_signaled,
+ .compare = timeline_compare,
+ .timeline_value_str = timeline_value_str,
+ .pt_value_str = pt_value_str,
+};
+
+int kbase_sync_timeline_is_ours(struct sync_timeline *timeline)
+{
+ return timeline->ops == &mali_timeline_ops;
+}
+
+struct sync_timeline *kbase_sync_timeline_alloc(const char *name)
+{
+ struct sync_timeline *tl;
+ struct mali_sync_timeline *mtl;
+
+ tl = sync_timeline_create(&mali_timeline_ops, sizeof(struct mali_sync_timeline), name);
+ if (!tl)
+ return NULL;
+
+ /* Set the counter in our private struct */
+ mtl = to_mali_sync_timeline(tl);
+ atomic_set(&mtl->counter, 0);
+ atomic_set(&mtl->signalled, 0);
+
+ return tl;
+}
+
+struct sync_pt *kbase_sync_pt_alloc(struct sync_timeline *parent)
+{
+ struct sync_pt *pt = sync_pt_create(parent, sizeof(struct mali_sync_pt));
+ struct mali_sync_timeline *mtl = to_mali_sync_timeline(parent);
+ struct mali_sync_pt *mpt;
+
+ if (!pt)
+ return NULL;
+
+ mpt = to_mali_sync_pt(pt);
+ mpt->order = atomic_inc_return(&mtl->counter);
+ mpt->result = 0;
+
+ return pt;
+}
+
+void kbase_sync_signal_pt(struct sync_pt *pt, int result)
+{
+ struct mali_sync_pt *mpt = to_mali_sync_pt(pt);
+ struct mali_sync_timeline *mtl = to_mali_sync_timeline(sync_pt_parent(pt));
+ int signalled;
+ int diff;
+
+ mpt->result = result;
+
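+ /* Advance the timeline's signalled counter to this point's order with a
+  * compare-and-swap loop, retrying if another signaller races with us. */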
+ do {
+ signalled = atomic_read(&mtl->signalled);
+
+ diff = signalled - mpt->order;
+
+ if (diff > 0) {
+ /* The timeline is already at or ahead of this point.
+ * This should not happen unless userspace has been
+ * signalling fences out of order, so warn but don't
+ * violate the sync_pt API.
+ * The warning is only in debug builds to prevent
+ * a malicious user from spamming dmesg.
+ */
+#ifdef CONFIG_MALI_DEBUG
+ pr_err("Fences were triggered in a different order to allocation!");
+#endif /* CONFIG_MALI_DEBUG */
+ return;
+ }
+ } while (atomic_cmpxchg(&mtl->signalled, signalled, mpt->order) != signalled);
+}
+
+#endif /* CONFIG_SYNC */
diff --git a/midgard-0.r2p0/mali_kbase_sync.h b/midgard-0.r2p0/mali_kbase_sync.h
new file mode 100755
index 0000000..820bddc
--- /dev/null
+++ b/midgard-0.r2p0/mali_kbase_sync.h
@@ -0,0 +1,100 @@
+/*
+ *
+ * (C) COPYRIGHT 2012-2016 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained
+ * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
+ * Boston, MA 02110-1301, USA.
+ *
+ */
+
+
+
+
+
+/**
+ * @file mali_kbase_sync.h
+ *
+ */
+
+#ifndef MALI_KBASE_SYNC_H
+#define MALI_KBASE_SYNC_H
+
+#include "sync.h"
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 17, 0)
+/* For backwards compatibility with kernels before 3.17. After 3.17
+ * sync_pt_parent is included in the kernel. */
+static inline struct sync_timeline *sync_pt_parent(struct sync_pt *pt)
+{
+ return pt->parent;
+}
+#endif
+
+static inline int kbase_fence_get_status(struct sync_fence *fence)
+{
+#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 17, 0)
+ return fence->status;
+#else
+ return atomic_read(&fence->status);
+#endif
+}
+
+/*
+ * Create a stream object.
+ * Built on top of timeline object.
+ * Exposed as a file descriptor.
+ * Life-time controlled via the file descriptor:
+ * - dup to add a ref
+ * - close to remove a ref
+ */
+int kbase_stream_create(const char *name, int *const out_fd);
+
+/*
+ * Create a fence in a stream object
+ */
+int kbase_stream_create_fence(int tl_fd);
+
+/*
+ * Validate a fd to be a valid fence
+ * No reference is taken.
+ *
+ * This function is only usable to catch unintentional user errors early;
+ * it does not stop malicious code changing the fd after this function returns.
+ */
+int kbase_fence_validate(int fd);
+
+/* Returns true if the specified timeline is allocated by Mali */
+int kbase_sync_timeline_is_ours(struct sync_timeline *timeline);
+
+/* Allocates a timeline for Mali
+ *
+ * One timeline should be allocated per API context.
+ */
+struct sync_timeline *kbase_sync_timeline_alloc(const char *name);
+
+/* Allocates a sync point within the timeline.
+ *
+ * The timeline must be the one allocated by kbase_sync_timeline_alloc
+ *
+ * Sync points must be triggered in *exactly* the same order as they are allocated.
+ */
+struct sync_pt *kbase_sync_pt_alloc(struct sync_timeline *parent);
+
+/* Signals a particular sync point
+ *
+ * Sync points must be triggered in *exactly* the same order as they are allocated.
+ *
+ * If they are signalled in the wrong order then a message will be printed in
+ * debug builds, and attempts to signal out-of-order sync_pts will be ignored.
+ *
+ * result can be negative to indicate error, any other value is interpreted as success.
+ */
+void kbase_sync_signal_pt(struct sync_pt *pt, int result);
+
+#endif
diff --git a/midgard-0.r2p0/mali_kbase_sync_user.c b/midgard-0.r2p0/mali_kbase_sync_user.c
new file mode 100755
index 0000000..b9baa91
--- /dev/null
+++ b/midgard-0.r2p0/mali_kbase_sync_user.c
@@ -0,0 +1,156 @@
+/*
+ *
+ * (C) COPYRIGHT 2012-2016 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained
+ * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
+ * Boston, MA 02110-1301, USA.
+ *
+ */
+
+
+
+
+
+/**
+ * @file mali_kbase_sync_user.c
+ *
+ */
+
+#ifdef CONFIG_SYNC
+
+#include <linux/sched.h>
+#include <linux/fdtable.h>
+#include <linux/file.h>
+#include <linux/fs.h>
+#include <linux/module.h>
+#include <linux/anon_inodes.h>
+#include <linux/version.h>
+#include <linux/uaccess.h>
+#include <mali_kbase_sync.h>
+
+static int kbase_stream_close(struct inode *inode, struct file *file)
+{
+ struct sync_timeline *tl;
+
+ tl = (struct sync_timeline *)file->private_data;
+ BUG_ON(!tl);
+ sync_timeline_destroy(tl);
+ return 0;
+}
+
+static const struct file_operations stream_fops = {
+ .owner = THIS_MODULE,
+ .release = kbase_stream_close,
+};
+
+int kbase_stream_create(const char *name, int *const out_fd)
+{
+ struct sync_timeline *tl;
+
+ BUG_ON(!out_fd);
+
+ tl = kbase_sync_timeline_alloc(name);
+ if (!tl)
+ return -EINVAL;
+
+ *out_fd = anon_inode_getfd(name, &stream_fops, tl, O_RDONLY | O_CLOEXEC);
+
+ if (*out_fd < 0) {
+ sync_timeline_destroy(tl);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+int kbase_stream_create_fence(int tl_fd)
+{
+ struct sync_timeline *tl;
+ struct sync_pt *pt;
+ struct sync_fence *fence;
+#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 7, 0)
+ struct files_struct *files;
+ struct fdtable *fdt;
+#endif
+ int fd;
+ struct file *tl_file;
+
+ tl_file = fget(tl_fd);
+ if (tl_file == NULL)
+ return -EBADF;
+
+ if (tl_file->f_op != &stream_fops) {
+ fd = -EBADF;
+ goto out;
+ }
+
+ tl = tl_file->private_data;
+
+ pt = kbase_sync_pt_alloc(tl);
+ if (!pt) {
+ fd = -EFAULT;
+ goto out;
+ }
+
+ fence = sync_fence_create("mali_fence", pt);
+ if (!fence) {
+ sync_pt_free(pt);
+ fd = -EFAULT;
+ goto out;
+ }
+
+ /* from here the fence owns the sync_pt */
+
+ /* create a fd representing the fence */
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 7, 0)
+ fd = get_unused_fd_flags(O_RDWR | O_CLOEXEC);
+ if (fd < 0) {
+ sync_fence_put(fence);
+ goto out;
+ }
+#else
+ fd = get_unused_fd();
+ if (fd < 0) {
+ sync_fence_put(fence);
+ goto out;
+ }
+
+ files = current->files;
+ spin_lock(&files->file_lock);
+ fdt = files_fdtable(files);
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 4, 0)
+ __set_close_on_exec(fd, fdt);
+#else
+ FD_SET(fd, fdt->close_on_exec);
+#endif
+ spin_unlock(&files->file_lock);
+#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(3, 7, 0) */
+
+ /* bind fence to the new fd */
+ sync_fence_install(fence, fd);
+
+ out:
+ fput(tl_file);
+
+ return fd;
+}
+
+int kbase_fence_validate(int fd)
+{
+ struct sync_fence *fence;
+
+ fence = sync_fence_fdget(fd);
+ if (!fence)
+ return -EINVAL;
+
+ sync_fence_put(fence);
+ return 0;
+}
+
+#endif /* CONFIG_SYNC */
diff --git a/midgard-0.r2p0/mali_kbase_tlstream.c b/midgard-0.r2p0/mali_kbase_tlstream.c
new file mode 100755
index 0000000..4c1535f
--- /dev/null
+++ b/midgard-0.r2p0/mali_kbase_tlstream.c
@@ -0,0 +1,2342 @@
+/*
+ *
+ * (C) COPYRIGHT 2015-2016 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained
+ * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
+ * Boston, MA 02110-1301, USA.
+ *
+ */
+
+
+
+#include <linux/anon_inodes.h>
+#include <linux/atomic.h>
+#include <linux/file.h>
+#include <linux/mutex.h>
+#include <linux/poll.h>
+#include <linux/spinlock.h>
+#include <linux/string.h>
+#include <linux/stringify.h>
+#include <linux/timer.h>
+#include <linux/wait.h>
+
+#include <mali_kbase.h>
+#include <mali_kbase_jm.h>
+#include <mali_kbase_tlstream.h>
+
+/*****************************************************************************/
+
+/* The version of swtrace protocol used in timeline stream. */
+#define SWTRACE_VERSION 3
+
+/* The maximum expected length of string in tracepoint descriptor. */
+#define STRLEN_MAX 64 /* bytes */
+
+/* The number of nanoseconds in a second. */
+#define NSECS_IN_SEC 1000000000ull /* ns */
+
+/* The period of autoflush checker execution in milliseconds. */
+#define AUTOFLUSH_INTERVAL 1000 /* ms */
+
+/* The maximum size of a single packet used by timeline. */
+#define PACKET_SIZE 4096 /* bytes */
+
+/* The number of packets used by one timeline stream. */
+#define PACKET_COUNT 16
+
+/* The number of bytes reserved for the packet header.
+ * This value must be defined according to the MIPE documentation. */
+#define PACKET_HEADER_SIZE 8 /* bytes */
+
+/* The number of bytes reserved for the packet sequence number.
+ * This value must be defined according to the MIPE documentation. */
+#define PACKET_NUMBER_SIZE 4 /* bytes */
+
+/* Packet header - first word.
+ * These values must be defined according to MIPE documentation. */
+#define PACKET_STREAMID_POS 0
+#define PACKET_STREAMID_LEN 8
+#define PACKET_RSVD1_POS (PACKET_STREAMID_POS + PACKET_STREAMID_LEN)
+#define PACKET_RSVD1_LEN 8
+#define PACKET_TYPE_POS (PACKET_RSVD1_POS + PACKET_RSVD1_LEN)
+#define PACKET_TYPE_LEN 3
+#define PACKET_CLASS_POS (PACKET_TYPE_POS + PACKET_TYPE_LEN)
+#define PACKET_CLASS_LEN 7
+#define PACKET_FAMILY_POS (PACKET_CLASS_POS + PACKET_CLASS_LEN)
+#define PACKET_FAMILY_LEN 6
+
+/* Packet header - second word
+ * These values must be defined according to MIPE documentation. */
+#define PACKET_LENGTH_POS 0
+#define PACKET_LENGTH_LEN 24
+#define PACKET_SEQBIT_POS (PACKET_LENGTH_POS + PACKET_LENGTH_LEN)
+#define PACKET_SEQBIT_LEN 1
+#define PACKET_RSVD2_POS (PACKET_SEQBIT_POS + PACKET_SEQBIT_LEN)
+#define PACKET_RSVD2_LEN 7
+
+/* Types of streams generated by timeline.
+ * Order is significant! Header streams must precede respective body streams. */
+enum tl_stream_type {
+ TL_STREAM_TYPE_OBJ_HEADER,
+ TL_STREAM_TYPE_OBJ_SUMMARY,
+ TL_STREAM_TYPE_OBJ,
+ TL_STREAM_TYPE_AUX_HEADER,
+ TL_STREAM_TYPE_AUX,
+
+ TL_STREAM_TYPE_COUNT
+};
+
+/* Timeline packet family ids.
+ * Values are significant! Check MIPE documentation. */
+enum tl_packet_family {
+ TL_PACKET_FAMILY_CTRL = 0, /* control packets */
+ TL_PACKET_FAMILY_TL = 1, /* timeline packets */
+
+ TL_PACKET_FAMILY_COUNT
+};
+
+/* Packet classes used in timeline streams.
+ * Values are significant! Check MIPE documentation. */
+enum tl_packet_class {
+ TL_PACKET_CLASS_OBJ = 0, /* timeline objects packet */
+ TL_PACKET_CLASS_AUX = 1, /* auxiliary events packet */
+};
+
+/* Packet types used in timeline streams.
+ * Values are significant! Check MIPE documentation. */
+enum tl_packet_type {
+ TL_PACKET_TYPE_HEADER = 0, /* stream's header/directory */
+ TL_PACKET_TYPE_BODY = 1, /* stream's body */
+ TL_PACKET_TYPE_SUMMARY = 2, /* stream's summary */
+};
+
+/* Message ids of trace events that are recorded in the timeline stream. */
+enum tl_msg_id_obj {
+ /* Timeline object events. */
+ KBASE_TL_NEW_CTX,
+ KBASE_TL_NEW_GPU,
+ KBASE_TL_NEW_LPU,
+ KBASE_TL_NEW_ATOM,
+ KBASE_TL_NEW_AS,
+ KBASE_TL_DEL_CTX,
+ KBASE_TL_DEL_ATOM,
+ KBASE_TL_LIFELINK_LPU_GPU,
+ KBASE_TL_LIFELINK_AS_GPU,
+ KBASE_TL_RET_CTX_LPU,
+ KBASE_TL_RET_ATOM_CTX,
+ KBASE_TL_RET_ATOM_LPU,
+ KBASE_TL_NRET_CTX_LPU,
+ KBASE_TL_NRET_ATOM_CTX,
+ KBASE_TL_NRET_ATOM_LPU,
+ KBASE_TL_RET_AS_CTX,
+ KBASE_TL_NRET_AS_CTX,
+ KBASE_TL_RET_ATOM_AS,
+ KBASE_TL_NRET_ATOM_AS,
+ KBASE_TL_DEP_ATOM_ATOM,
+ KBASE_TL_NDEP_ATOM_ATOM,
+ KBASE_TL_RDEP_ATOM_ATOM,
+ KBASE_TL_ATTRIB_ATOM_CONFIG,
+ KBASE_TL_ATTRIB_ATOM_PRIORITY,
+ KBASE_TL_ATTRIB_ATOM_STATE,
+ KBASE_TL_ATTRIB_ATOM_PRIORITY_CHANGE,
+ KBASE_TL_ATTRIB_AS_CONFIG,
+ KBASE_TL_EVENT_LPU_SOFTSTOP,
+ KBASE_TL_EVENT_ATOM_SOFTSTOP_EX,
+ KBASE_TL_EVENT_ATOM_SOFTSTOP_ISSUE,
+
+ /* Job dump specific events. */
+ KBASE_JD_GPU_SOFT_RESET
+};
+
+/* Message ids of trace events that are recorded in the auxiliary stream. */
+enum tl_msg_id_aux {
+ KBASE_AUX_PM_STATE,
+ KBASE_AUX_PAGEFAULT,
+ KBASE_AUX_PAGESALLOC,
+ KBASE_AUX_DEVFREQ_TARGET
+};
+
+/*****************************************************************************/
+
+/**
+ * struct tl_stream - timeline stream structure
+ * @lock: message order lock
+ * @buffer: array of buffers
+ * @wbi: write buffer index
+ * @rbi: read buffer index
+ * @numbered: if non-zero stream's packets are sequentially numbered
+ * @autoflush_counter: counter tracking stream's autoflush state
+ *
+ * This structure holds information needed to construct proper packets in the
+ * timeline stream. Each message in a stream must bear a timestamp that is
+ * greater than the one in the previous message; for this reason the lock is
+ * held throughout message creation. Each stream contains a set of buffers,
+ * each holding one MIPE packet. If there is not enough free space to store an
+ * incoming message, the oldest buffer is discarded.
+ * Each packet in a timeline body stream has a sequence number embedded (this
+ * value must increment monotonically and is used by the packet receiver to
+ * detect buffer overflows).
+ * The autoflush counter is set to a negative number when there is no data
+ * pending for flush and is set to zero on every update of the buffer. The
+ * autoflush timer increments the counter by one on every expiry. If there is
+ * no activity on the buffer during two consecutive timer expiries, the stream
+ * buffer will be flushed.
+ */
+struct tl_stream {
+ spinlock_t lock;
+
+ struct {
+ atomic_t size; /* number of bytes in buffer */
+ char data[PACKET_SIZE]; /* buffer's data */
+ } buffer[PACKET_COUNT];
+
+ atomic_t wbi;
+ atomic_t rbi;
+
+ int numbered;
+ atomic_t autoflush_counter;
+};
+
+/**
+ * struct tp_desc - tracepoint message descriptor structure
+ * @id: tracepoint ID identifying message in stream
+ * @id_str: human readable version of tracepoint ID
+ * @name: tracepoint description
+ * @arg_types: tracepoint's arguments types declaration
+ * @arg_names: comma separated list of tracepoint's arguments names
+ */
+struct tp_desc {
+ u32 id;
+ const char *id_str;
+ const char *name;
+ const char *arg_types;
+ const char *arg_names;
+};
+
+/*****************************************************************************/
+
+/* Configuration of timeline streams generated by the kernel.
+ * The kernel emits only streams containing either timeline object events or
+ * auxiliary events. All streams have a stream id value of 1 (as opposed to
+ * user space streams, which have a value of 0). */
+static const struct {
+ enum tl_packet_family pkt_family;
+ enum tl_packet_class pkt_class;
+ enum tl_packet_type pkt_type;
+ unsigned int stream_id;
+} tl_stream_cfg[TL_STREAM_TYPE_COUNT] = {
+ {TL_PACKET_FAMILY_TL, TL_PACKET_CLASS_OBJ, TL_PACKET_TYPE_HEADER, 1},
+ {TL_PACKET_FAMILY_TL, TL_PACKET_CLASS_OBJ, TL_PACKET_TYPE_SUMMARY, 1},
+ {TL_PACKET_FAMILY_TL, TL_PACKET_CLASS_OBJ, TL_PACKET_TYPE_BODY, 1},
+ {TL_PACKET_FAMILY_TL, TL_PACKET_CLASS_AUX, TL_PACKET_TYPE_HEADER, 1},
+ {TL_PACKET_FAMILY_TL, TL_PACKET_CLASS_AUX, TL_PACKET_TYPE_BODY, 1}
+};
+
+/* The timeline streams generated by kernel. */
+static struct tl_stream *tl_stream[TL_STREAM_TYPE_COUNT];
+
+/* Autoflush timer. */
+static struct timer_list autoflush_timer;
+
+/* If non-zero autoflush timer is active. */
+static atomic_t autoflush_timer_active;
+
+/* Reader lock. Only one reader is allowed to have access to the timeline
+ * streams at any given time. */
+static DEFINE_MUTEX(tl_reader_lock);
+
+/* Timeline stream event queue. */
+static DECLARE_WAIT_QUEUE_HEAD(tl_event_queue);
+
+/* The timeline stream file operations functions. */
+static ssize_t kbasep_tlstream_read(
+ struct file *filp,
+ char __user *buffer,
+ size_t size,
+ loff_t *f_pos);
+static unsigned int kbasep_tlstream_poll(struct file *filp, poll_table *wait);
+static int kbasep_tlstream_release(struct inode *inode, struct file *filp);
+
+/* The timeline stream file operations structure. */
+static const struct file_operations kbasep_tlstream_fops = {
+ .release = kbasep_tlstream_release,
+ .read = kbasep_tlstream_read,
+ .poll = kbasep_tlstream_poll,
+};
+
+/* Descriptors of timeline messages transmitted in object events stream. */
+static const struct tp_desc tp_desc_obj[] = {
+ {
+ KBASE_TL_NEW_CTX,
+ __stringify(KBASE_TL_NEW_CTX),
+ "object ctx is created",
+ "@pII",
+ "ctx,ctx_nr,tgid"
+ },
+ {
+ KBASE_TL_NEW_GPU,
+ __stringify(KBASE_TL_NEW_GPU),
+ "object gpu is created",
+ "@pII",
+ "gpu,gpu_id,core_count"
+ },
+ {
+ KBASE_TL_NEW_LPU,
+ __stringify(KBASE_TL_NEW_LPU),
+ "object lpu is created",
+ "@pII",
+ "lpu,lpu_nr,lpu_fn"
+ },
+ {
+ KBASE_TL_NEW_ATOM,
+ __stringify(KBASE_TL_NEW_ATOM),
+ "object atom is created",
+ "@pI",
+ "atom,atom_nr"
+ },
+ {
+ KBASE_TL_NEW_AS,
+ __stringify(KBASE_TL_NEW_AS),
+ "address space object is created",
+ "@pI",
+ "address_space,as_nr"
+ },
+ {
+ KBASE_TL_DEL_CTX,
+ __stringify(KBASE_TL_DEL_CTX),
+ "context is destroyed",
+ "@p",
+ "ctx"
+ },
+ {
+ KBASE_TL_DEL_ATOM,
+ __stringify(KBASE_TL_DEL_ATOM),
+ "atom is destroyed",
+ "@p",
+ "atom"
+ },
+ {
+ KBASE_TL_LIFELINK_LPU_GPU,
+ __stringify(KBASE_TL_LIFELINK_LPU_GPU),
+ "lpu is deleted with gpu",
+ "@pp",
+ "lpu,gpu"
+ },
+ {
+ KBASE_TL_LIFELINK_AS_GPU,
+ __stringify(KBASE_TL_LIFELINK_AS_GPU),
+ "address space is deleted with gpu",
+ "@pp",
+ "address_space,gpu"
+ },
+ {
+ KBASE_TL_RET_CTX_LPU,
+ __stringify(KBASE_TL_RET_CTX_LPU),
+ "context is retained by lpu",
+ "@pp",
+ "ctx,lpu"
+ },
+ {
+ KBASE_TL_RET_ATOM_CTX,
+ __stringify(KBASE_TL_RET_ATOM_CTX),
+ "atom is retained by context",
+ "@pp",
+ "atom,ctx"
+ },
+ {
+ KBASE_TL_RET_ATOM_LPU,
+ __stringify(KBASE_TL_RET_ATOM_LPU),
+ "atom is retained by lpu",
+ "@pps",
+ "atom,lpu,attrib_match_list"
+ },
+ {
+ KBASE_TL_NRET_CTX_LPU,
+ __stringify(KBASE_TL_NRET_CTX_LPU),
+ "context is released by lpu",
+ "@pp",
+ "ctx,lpu"
+ },
+ {
+ KBASE_TL_NRET_ATOM_CTX,
+ __stringify(KBASE_TL_NRET_ATOM_CTX),
+ "atom is released by context",
+ "@pp",
+ "atom,ctx"
+ },
+ {
+ KBASE_TL_NRET_ATOM_LPU,
+ __stringify(KBASE_TL_NRET_ATOM_LPU),
+ "atom is released by lpu",
+ "@pp",
+ "atom,lpu"
+ },
+ {
+ KBASE_TL_RET_AS_CTX,
+ __stringify(KBASE_TL_RET_AS_CTX),
+ "address space is retained by context",
+ "@pp",
+ "address_space,ctx"
+ },
+ {
+ KBASE_TL_NRET_AS_CTX,
+ __stringify(KBASE_TL_NRET_AS_CTX),
+ "address space is released by context",
+ "@pp",
+ "address_space,ctx"
+ },
+ {
+ KBASE_TL_RET_ATOM_AS,
+ __stringify(KBASE_TL_RET_ATOM_AS),
+ "atom is retained by address space",
+ "@pp",
+ "atom,address_space"
+ },
+ {
+ KBASE_TL_NRET_ATOM_AS,
+ __stringify(KBASE_TL_NRET_ATOM_AS),
+ "atom is released by address space",
+ "@pp",
+ "atom,address_space"
+ },
+ {
+ KBASE_TL_DEP_ATOM_ATOM,
+ __stringify(KBASE_TL_DEP_ATOM_ATOM),
+ "atom2 depends on atom1",
+ "@pp",
+ "atom1,atom2"
+ },
+ {
+ KBASE_TL_NDEP_ATOM_ATOM,
+ __stringify(KBASE_TL_NDEP_ATOM_ATOM),
+ "atom2 no longer depends on atom1",
+ "@pp",
+ "atom1,atom2"
+ },
+ {
+ KBASE_TL_RDEP_ATOM_ATOM,
+ __stringify(KBASE_TL_RDEP_ATOM_ATOM),
+ "resolved dependecy of atom2 depending on atom1",
+ "@pp",
+ "atom1,atom2"
+ },
+ {
+ KBASE_TL_ATTRIB_ATOM_CONFIG,
+ __stringify(KBASE_TL_ATTRIB_ATOM_CONFIG),
+ "atom job slot attributes",
+ "@pLLI",
+ "atom,descriptor,affinity,config"
+ },
+ {
+ KBASE_TL_ATTRIB_ATOM_PRIORITY,
+ __stringify(KBASE_TL_ATTRIB_ATOM_PRIORITY),
+ "atom priority",
+ "@pI",
+ "atom,prio"
+ },
+ {
+ KBASE_TL_ATTRIB_ATOM_STATE,
+ __stringify(KBASE_TL_ATTRIB_ATOM_STATE),
+ "atom state",
+ "@pI",
+ "atom,state"
+ },
+ {
+ KBASE_TL_ATTRIB_ATOM_PRIORITY_CHANGE,
+ __stringify(KBASE_TL_ATTRIB_ATOM_PRIORITY_CHANGE),
+ "atom caused priority change",
+ "@p",
+ "atom"
+ },
+ {
+ KBASE_TL_ATTRIB_AS_CONFIG,
+ __stringify(KBASE_TL_ATTRIB_AS_CONFIG),
+ "address space attributes",
+ "@pLLL",
+ "address_space,transtab,memattr,transcfg"
+ },
+ {
+ KBASE_TL_EVENT_LPU_SOFTSTOP,
+ __stringify(KBASE_TL_EVENT_LPU_SOFTSTOP),
+ "softstop event on given lpu",
+ "@p",
+ "lpu"
+ },
+ {
+ KBASE_TL_EVENT_ATOM_SOFTSTOP_EX,
+ __stringify(KBASE_TL_EVENT_ATOM_SOFTSTOP_EX),
+ "atom softstopped",
+ "@p",
+ "atom"
+ },
+ {
+ KBASE_TL_EVENT_ATOM_SOFTSTOP_ISSUE,
+ __stringify(KBASE_TL_EVENT_ATOM_SOFTSTOP_ISSUE),
+ "atom softstop issued",
+ "@p",
+ "atom"
+ },
+ {
+ KBASE_JD_GPU_SOFT_RESET,
+ __stringify(KBASE_JD_GPU_SOFT_RESET),
+ "gpu soft reset",
+ "@p",
+ "gpu"
+ },
+};
+
+/* Descriptors of timeline messages transmitted in auxiliary events stream. */
+static const struct tp_desc tp_desc_aux[] = {
+ {
+ KBASE_AUX_PM_STATE,
+ __stringify(KBASE_AUX_PM_STATE),
+ "PM state",
+ "@IL",
+ "core_type,core_state_bitset"
+ },
+ {
+ KBASE_AUX_PAGEFAULT,
+ __stringify(KBASE_AUX_PAGEFAULT),
+ "Page fault",
+ "@IL",
+ "ctx_nr,page_cnt_change"
+ },
+ {
+ KBASE_AUX_PAGESALLOC,
+ __stringify(KBASE_AUX_PAGESALLOC),
+ "Total alloc pages change",
+ "@IL",
+ "ctx_nr,page_cnt"
+ },
+ {
+ KBASE_AUX_DEVFREQ_TARGET,
+ __stringify(KBASE_AUX_DEVFREQ_TARGET),
+ "New device frequency target",
+ "@L",
+ "target_freq"
+ }
+};
+
+#if MALI_UNIT_TEST
+/* Number of bytes read by user. */
+static atomic_t tlstream_bytes_collected = {0};
+
+/* Number of bytes generated by tracepoint messages. */
+static atomic_t tlstream_bytes_generated = {0};
+#endif /* MALI_UNIT_TEST */
+
+/*****************************************************************************/
+
+/* Indicator of whether the timeline stream file descriptor is used. */
+atomic_t kbase_tlstream_enabled = {0};
+
+/*****************************************************************************/
+
+/**
+ * kbasep_tlstream_get_timestamp - return timestamp
+ *
+ * Function returns timestamp value based on raw monotonic timer. Value will
+ * wrap around zero in case of overflow.
+ * Return: timestamp value
+ */
+static u64 kbasep_tlstream_get_timestamp(void)
+{
+ struct timespec ts;
+ u64 timestamp;
+
+ getrawmonotonic(&ts);
+ timestamp = (u64)ts.tv_sec * NSECS_IN_SEC + ts.tv_nsec;
+ return timestamp;
+}
+
+/**
+ * kbasep_tlstream_write_bytes - write data to message buffer
+ * @buffer: buffer where data will be written
+ * @pos: position in the buffer where to place data
+ * @bytes: pointer to buffer holding data
+ * @len: length of data to be written
+ *
+ * Return: updated position in the buffer
+ */
+static size_t kbasep_tlstream_write_bytes(
+ char *buffer,
+ size_t pos,
+ const void *bytes,
+ size_t len)
+{
+ KBASE_DEBUG_ASSERT(buffer);
+ KBASE_DEBUG_ASSERT(bytes);
+
+ memcpy(&buffer[pos], bytes, len);
+
+ return pos + len;
+}
+
+/**
+ * kbasep_tlstream_write_string - write string to message buffer
+ * @buffer: buffer where data will be written
+ * @pos: position in the buffer where to place data
+ * @string: pointer to buffer holding the source string
+ * @max_write_size: number of bytes that can be stored in buffer
+ *
+ * Return: updated position in the buffer
+ */
+static size_t kbasep_tlstream_write_string(
+ char *buffer,
+ size_t pos,
+ const char *string,
+ size_t max_write_size)
+{
+ u32 string_len;
+
+ KBASE_DEBUG_ASSERT(buffer);
+ KBASE_DEBUG_ASSERT(string);
+ /* A timeline string consists of at least the string length and a nul
+  * terminator. */
+ KBASE_DEBUG_ASSERT(max_write_size >= sizeof(string_len) + sizeof(char));
+ max_write_size -= sizeof(string_len);
+
+ string_len = strlcpy(
+ &buffer[pos + sizeof(string_len)],
+ string,
+ max_write_size);
+ string_len += sizeof(char);
+
+ /* Make sure that the source string fits into the buffer. */
+ KBASE_DEBUG_ASSERT(string_len <= max_write_size);
+
+ /* Update string length. */
+ memcpy(&buffer[pos], &string_len, sizeof(string_len));
+
+ return pos + sizeof(string_len) + string_len;
+}
+
+/**
+ * kbasep_tlstream_write_timestamp - write timestamp to message buffer
+ * @buffer: buffer where data will be written
+ * @pos: position in the buffer where to place data
+ *
+ * Return: updated position in the buffer
+ */
+static size_t kbasep_tlstream_write_timestamp(void *buffer, size_t pos)
+{
+ u64 timestamp = kbasep_tlstream_get_timestamp();
+
+ return kbasep_tlstream_write_bytes(
+ buffer, pos,
+ &timestamp, sizeof(timestamp));
+}
+
+/**
+ * kbasep_tlstream_put_bits - put bits in a word
+ * @word: pointer to the words being modified
+ * @value: value that shall be written to given position
+ * @bitpos: position where value shall be written (in bits)
+ * @bitlen: length of value (in bits)
+ */
+static void kbasep_tlstream_put_bits(
+ u32 *word,
+ u32 value,
+ unsigned int bitpos,
+ unsigned int bitlen)
+{
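+ /* Build a mask covering bitlen bits starting at bitpos, clear that field
+  * in *word and insert the new value. */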
+ const u32 mask = ((1 << bitlen) - 1) << bitpos;
+
+ KBASE_DEBUG_ASSERT(word);
+ KBASE_DEBUG_ASSERT((0 != bitlen) && (32 >= bitlen));
+ KBASE_DEBUG_ASSERT((bitpos + bitlen) <= 32);
+
+ *word &= ~mask;
+ *word |= ((value << bitpos) & mask);
+}
+
+/**
+ * kbasep_tlstream_packet_header_setup - setup the packet header
+ * @buffer: pointer to the buffer
+ * @pkt_family: packet's family
+ * @pkt_type: packet's type
+ * @pkt_class: packet's class
+ * @stream_id: stream id
+ * @numbered: non-zero if this stream is numbered
+ *
+ * Function sets up immutable part of packet header in the given buffer.
+ */
+static void kbasep_tlstream_packet_header_setup(
+ char *buffer,
+ enum tl_packet_family pkt_family,
+ enum tl_packet_class pkt_class,
+ enum tl_packet_type pkt_type,
+ unsigned int stream_id,
+ int numbered)
+{
+ u32 word0 = 0;
+ u32 word1 = 0;
+
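+ /* MIPE header: word0 carries the family, class, type and stream id bit
+  * fields; word1 carries the packet length and the sequence-number flag
+  * (the length is filled in later by kbasep_tlstream_packet_header_update()). */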
+ KBASE_DEBUG_ASSERT(buffer);
+ KBASE_DEBUG_ASSERT(pkt_family == TL_PACKET_FAMILY_TL);
+ KBASE_DEBUG_ASSERT(
+ (pkt_type == TL_PACKET_TYPE_HEADER) ||
+ (pkt_type == TL_PACKET_TYPE_SUMMARY) ||
+ (pkt_type == TL_PACKET_TYPE_BODY));
+ KBASE_DEBUG_ASSERT(
+ (pkt_class == TL_PACKET_CLASS_OBJ) ||
+ (pkt_class == TL_PACKET_CLASS_AUX));
+
+ kbasep_tlstream_put_bits(
+ &word0, pkt_family,
+ PACKET_FAMILY_POS, PACKET_FAMILY_LEN);
+ kbasep_tlstream_put_bits(
+ &word0, pkt_class,
+ PACKET_CLASS_POS, PACKET_CLASS_LEN);
+ kbasep_tlstream_put_bits(
+ &word0, pkt_type,
+ PACKET_TYPE_POS, PACKET_TYPE_LEN);
+ kbasep_tlstream_put_bits(
+ &word0, stream_id,
+ PACKET_STREAMID_POS, PACKET_STREAMID_LEN);
+
+ if (numbered)
+ kbasep_tlstream_put_bits(
+ &word1, 1,
+ PACKET_SEQBIT_POS, PACKET_SEQBIT_LEN);
+
+ memcpy(&buffer[0], &word0, sizeof(word0));
+ memcpy(&buffer[sizeof(word0)], &word1, sizeof(word1));
+}
+
+/**
+ * kbasep_tlstream_packet_header_update - update the packet header
+ * @buffer: pointer to the buffer
+ * @data_size: amount of data carried in this packet
+ *
+ * Function updates the mutable part of the packet header in the given buffer.
+ * Note that data_size must not include the size of the header.
+ */
+static void kbasep_tlstream_packet_header_update(
+ char *buffer,
+ size_t data_size)
+{
+ u32 word0;
+ u32 word1;
+
+ KBASE_DEBUG_ASSERT(buffer);
+ CSTD_UNUSED(word0);
+
+ memcpy(&word1, &buffer[sizeof(word0)], sizeof(word1));
+
+ kbasep_tlstream_put_bits(
+ &word1, data_size,
+ PACKET_LENGTH_POS, PACKET_LENGTH_LEN);
+
+ memcpy(&buffer[sizeof(word0)], &word1, sizeof(word1));
+}
+
+/**
+ * kbasep_tlstream_packet_number_update - update the packet number
+ * @buffer: pointer to the buffer
+ * @counter: value of packet counter for this packet's stream
+ *
+ * Function updates packet number embedded within the packet placed in the
+ * given buffer.
+ */
+static void kbasep_tlstream_packet_number_update(char *buffer, u32 counter)
+{
+ KBASE_DEBUG_ASSERT(buffer);
+
+ memcpy(&buffer[PACKET_HEADER_SIZE], &counter, sizeof(counter));
+}
+
+/**
+ * kbasep_timeline_stream_reset - reset stream
+ * @stream: pointer to the stream structure
+ *
+ * Function discards all pending messages and resets packet counters.
+ */
+static void kbasep_timeline_stream_reset(struct tl_stream *stream)
+{
+ unsigned int i;
+
+ for (i = 0; i < PACKET_COUNT; i++) {
+ if (stream->numbered)
+ atomic_set(
+ &stream->buffer[i].size,
+ PACKET_HEADER_SIZE +
+ PACKET_NUMBER_SIZE);
+ else
+ atomic_set(&stream->buffer[i].size, PACKET_HEADER_SIZE);
+ }
+
+ atomic_set(&stream->wbi, 0);
+ atomic_set(&stream->rbi, 0);
+}
+
+/**
+ * kbasep_timeline_stream_init - initialize timeline stream
+ * @stream: pointer to the stream structure
+ * @stream_type: stream type
+ */
+static void kbasep_timeline_stream_init(
+ struct tl_stream *stream,
+ enum tl_stream_type stream_type)
+{
+ unsigned int i;
+
+ KBASE_DEBUG_ASSERT(stream);
+ KBASE_DEBUG_ASSERT(TL_STREAM_TYPE_COUNT > stream_type);
+
+ spin_lock_init(&stream->lock);
+
+ /* All packets carrying tracepoints shall be numbered. */
+ if (TL_PACKET_TYPE_BODY == tl_stream_cfg[stream_type].pkt_type)
+ stream->numbered = 1;
+ else
+ stream->numbered = 0;
+
+ for (i = 0; i < PACKET_COUNT; i++)
+ kbasep_tlstream_packet_header_setup(
+ stream->buffer[i].data,
+ tl_stream_cfg[stream_type].pkt_family,
+ tl_stream_cfg[stream_type].pkt_class,
+ tl_stream_cfg[stream_type].pkt_type,
+ tl_stream_cfg[stream_type].stream_id,
+ stream->numbered);
+
+ kbasep_timeline_stream_reset(tl_stream[stream_type]);
+}
+
+/**
+ * kbasep_timeline_stream_term - terminate timeline stream
+ * @stream: pointer to the stream structure
+ */
+static void kbasep_timeline_stream_term(struct tl_stream *stream)
+{
+ KBASE_DEBUG_ASSERT(stream);
+}
+
+/**
+ * kbasep_tlstream_msgbuf_submit - submit packet to the user space
+ * @stream: pointer to the stream structure
+ * @wb_idx_raw: write buffer index
+ * @wb_size: length of data stored in current buffer
+ *
+ * Function updates the currently written buffer with the packet header. The
+ * write index is then incremented and the buffer is handed over to user
+ * space. The size of the newly selected write buffer is returned.
+ *
+ * Return: length of data in new buffer
+ *
+ * Warning: User must update the stream structure with returned value.
+ */
+static size_t kbasep_tlstream_msgbuf_submit(
+ struct tl_stream *stream,
+ unsigned int wb_idx_raw,
+ unsigned int wb_size)
+{
+ unsigned int rb_idx_raw = atomic_read(&stream->rbi);
+ unsigned int wb_idx = wb_idx_raw % PACKET_COUNT;
+
+ /* Set stream as flushed. */
+ atomic_set(&stream->autoflush_counter, -1);
+
+ kbasep_tlstream_packet_header_update(
+ stream->buffer[wb_idx].data,
+ wb_size - PACKET_HEADER_SIZE);
+
+ if (stream->numbered)
+ kbasep_tlstream_packet_number_update(
+ stream->buffer[wb_idx].data,
+ wb_idx_raw);
+
+ /* Increasing write buffer index will expose this packet to the reader.
+ * As stream->lock is not taken on reader side we must make sure memory
+ * is updated correctly before this will happen. */
+ smp_wmb();
+ wb_idx_raw++;
+ atomic_set(&stream->wbi, wb_idx_raw);
+
+ /* Inform user that packets are ready for reading. */
+ wake_up_interruptible(&tl_event_queue);
+
+ /* Detect and mark overflow in this stream. */
+ if (PACKET_COUNT == wb_idx_raw - rb_idx_raw) {
+ /* Reader side depends on this increment to correctly handle
+ * overflows. The value shall be updated only if it was not
+ * modified by the reader. The data holding buffer will not be
+ * updated before stream->lock is released, however size of the
+ * buffer will. Make sure this increment is globally visible
+ * before information about selected write buffer size. */
+ atomic_cmpxchg(&stream->rbi, rb_idx_raw, rb_idx_raw + 1);
+ }
+
+ wb_size = PACKET_HEADER_SIZE;
+ if (stream->numbered)
+ wb_size += PACKET_NUMBER_SIZE;
+
+ return wb_size;
+}
+
+/**
+ * kbasep_tlstream_msgbuf_acquire - lock selected stream and reserve buffer
+ * @stream_type: type of the stream that shall be locked
+ * @msg_size: message size
+ * @flags: pointer to store flags passed back on stream release
+ *
+ * Function will lock the stream and reserve the number of bytes requested
+ * in msg_size for the user.
+ *
+ * Return: pointer to the buffer where message can be stored
+ *
+ * Warning: Stream must be released with kbasep_tlstream_msgbuf_release().
+ * Only atomic operations are allowed while stream is locked
+ * (i.e. do not use any operation that may sleep).
+ */
+static char *kbasep_tlstream_msgbuf_acquire(
+ enum tl_stream_type stream_type,
+ size_t msg_size,
+ unsigned long *flags) __acquires(&stream->lock)
+{
+ struct tl_stream *stream;
+ unsigned int wb_idx_raw;
+ unsigned int wb_idx;
+ size_t wb_size;
+
+ KBASE_DEBUG_ASSERT(TL_STREAM_TYPE_COUNT > stream_type);
+ KBASE_DEBUG_ASSERT(
+ PACKET_SIZE - PACKET_HEADER_SIZE - PACKET_NUMBER_SIZE >=
+ msg_size);
+
+ stream = tl_stream[stream_type];
+
+ spin_lock_irqsave(&stream->lock, *flags);
+
+ wb_idx_raw = atomic_read(&stream->wbi);
+ wb_idx = wb_idx_raw % PACKET_COUNT;
+ wb_size = atomic_read(&stream->buffer[wb_idx].size);
+
+ /* Select next buffer if data will not fit into current one. */
+ if (PACKET_SIZE < wb_size + msg_size) {
+ wb_size = kbasep_tlstream_msgbuf_submit(
+ stream, wb_idx_raw, wb_size);
+ wb_idx = (wb_idx_raw + 1) % PACKET_COUNT;
+ }
+
+ /* Reserve space in selected buffer. */
+ atomic_set(&stream->buffer[wb_idx].size, wb_size + msg_size);
+
+#if MALI_UNIT_TEST
+ atomic_add(msg_size, &tlstream_bytes_generated);
+#endif /* MALI_UNIT_TEST */
+
+ return &stream->buffer[wb_idx].data[wb_size];
+}
+
+/**
+ * kbasep_tlstream_msgbuf_release - unlock selected stream
+ * @stream_type: type of the stream that shall be released
+ * @flags: value obtained during stream acquire
+ *
+ * Function releases stream that has been previously locked with a call to
+ * kbasep_tlstream_msgbuf_acquire().
+ */
+static void kbasep_tlstream_msgbuf_release(
+ enum tl_stream_type stream_type,
+ unsigned long flags) __releases(&stream->lock)
+{
+ struct tl_stream *stream;
+
+ KBASE_DEBUG_ASSERT(TL_STREAM_TYPE_COUNT > stream_type);
+
+ stream = tl_stream[stream_type];
+
+ /* Mark stream as containing unflushed data. */
+ atomic_set(&stream->autoflush_counter, 0);
+
+ spin_unlock_irqrestore(&stream->lock, flags);
+}
+
+/*****************************************************************************/
+
+/**
+ * kbasep_tlstream_flush_stream - flush stream
+ * @stype: type of stream to be flushed
+ *
+ * Flush pending data in timeline stream.
+ */
+static void kbasep_tlstream_flush_stream(enum tl_stream_type stype)
+{
+ struct tl_stream *stream = tl_stream[stype];
+ unsigned long flags;
+ unsigned int wb_idx_raw;
+ unsigned int wb_idx;
+ size_t wb_size;
+ size_t min_size = PACKET_HEADER_SIZE;
+
+ if (stream->numbered)
+ min_size += PACKET_NUMBER_SIZE;
+
+ spin_lock_irqsave(&stream->lock, flags);
+
+ wb_idx_raw = atomic_read(&stream->wbi);
+ wb_idx = wb_idx_raw % PACKET_COUNT;
+ wb_size = atomic_read(&stream->buffer[wb_idx].size);
+
+ if (wb_size > min_size) {
+ wb_size = kbasep_tlstream_msgbuf_submit(
+ stream, wb_idx_raw, wb_size);
+ wb_idx = (wb_idx_raw + 1) % PACKET_COUNT;
+ atomic_set(&stream->buffer[wb_idx].size, wb_size);
+ }
+ spin_unlock_irqrestore(&stream->lock, flags);
+}
+
+/**
+ * kbasep_tlstream_autoflush_timer_callback - autoflush timer callback
+ * @data: unused
+ *
+ * Timer is executed periodically to check if any of the streams contains a
+ * buffer ready to be submitted to user space.
+ */
+static void kbasep_tlstream_autoflush_timer_callback(unsigned long data)
+{
+ enum tl_stream_type stype;
+ int rcode;
+
+ CSTD_UNUSED(data);
+
+ for (stype = 0; stype < TL_STREAM_TYPE_COUNT; stype++) {
+ struct tl_stream *stream = tl_stream[stype];
+ unsigned long flags;
+ unsigned int wb_idx_raw;
+ unsigned int wb_idx;
+ size_t wb_size;
+ size_t min_size = PACKET_HEADER_SIZE;
+
+ int af_cnt = atomic_read(&stream->autoflush_counter);
+
+ /* Check if stream contains unflushed data. */
+ if (0 > af_cnt)
+ continue;
+
+ /* Check if stream should be flushed now. */
+ if (af_cnt != atomic_cmpxchg(
+ &stream->autoflush_counter,
+ af_cnt,
+ af_cnt + 1))
+ continue;
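+ /* A counter of zero means the buffer was updated since the previous
+  * expiry; give it one more interval before flushing. */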
+ if (!af_cnt)
+ continue;
+
+ /* Autoflush this stream. */
+ if (stream->numbered)
+ min_size += PACKET_NUMBER_SIZE;
+
+ spin_lock_irqsave(&stream->lock, flags);
+
+ wb_idx_raw = atomic_read(&stream->wbi);
+ wb_idx = wb_idx_raw % PACKET_COUNT;
+ wb_size = atomic_read(&stream->buffer[wb_idx].size);
+
+ if (wb_size > min_size) {
+ wb_size = kbasep_tlstream_msgbuf_submit(
+ stream, wb_idx_raw, wb_size);
+ wb_idx = (wb_idx_raw + 1) % PACKET_COUNT;
+ atomic_set(&stream->buffer[wb_idx].size,
+ wb_size);
+ }
+ spin_unlock_irqrestore(&stream->lock, flags);
+ }
+
+ if (atomic_read(&autoflush_timer_active))
+ rcode = mod_timer(
+ &autoflush_timer,
+ jiffies + msecs_to_jiffies(AUTOFLUSH_INTERVAL));
+ CSTD_UNUSED(rcode);
+}
+
+/**
+ * kbasep_tlstream_packet_pending - check timeline streams for pending packets
+ * @stype: pointer to variable where stream type will be placed
+ * @rb_idx_raw: pointer to variable where read buffer index will be placed
+ *
+ * Function checks all streams for pending packets. It stops as soon as a
+ * packet ready to be submitted to user space is detected. The variables
+ * pointed to by the parameters are updated to identify that stream and buffer.
+ *
+ * Return: non-zero if any of the timeline streams has at least one packet ready
+ */
+static int kbasep_tlstream_packet_pending(
+ enum tl_stream_type *stype,
+ unsigned int *rb_idx_raw)
+{
+ int pending = 0;
+
+ KBASE_DEBUG_ASSERT(stype);
+ KBASE_DEBUG_ASSERT(rb_idx_raw);
+
+ for (
+ *stype = 0;
+ (*stype < TL_STREAM_TYPE_COUNT) && !pending;
+ (*stype)++) {
+ if (NULL != tl_stream[*stype]) {
+ *rb_idx_raw = atomic_read(&tl_stream[*stype]->rbi);
+ /* Read buffer index may be updated by writer in case of
+ * overflow. Read and write buffer indexes must be
+ * loaded in correct order. */
+ smp_rmb();
+ if (atomic_read(&tl_stream[*stype]->wbi) != *rb_idx_raw)
+ pending = 1;
+ }
+ }
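+ /* The loop increments one step past the matching stream; step back so
+  * *stype identifies the stream with the pending packet. */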
+ (*stype)--;
+
+ return pending;
+}
+
+/**
+ * kbasep_tlstream_read - copy data from streams to buffer provided by user
+ * @filp: pointer to file structure (unused)
+ * @buffer: pointer to the buffer provided by user
+ * @size: maximum amount of data that can be stored in the buffer
+ * @f_pos: pointer to file offset (unused)
+ *
+ * Return: number of bytes stored in the buffer
+ */
+static ssize_t kbasep_tlstream_read(
+ struct file *filp,
+ char __user *buffer,
+ size_t size,
+ loff_t *f_pos)
+{
+ ssize_t copy_len = 0;
+
+ KBASE_DEBUG_ASSERT(filp);
+ KBASE_DEBUG_ASSERT(f_pos);
+
+ if (!buffer)
+ return -EINVAL;
+
+ if ((0 > *f_pos) || (PACKET_SIZE > size))
+ return -EINVAL;
+
+ mutex_lock(&tl_reader_lock);
+
+ while (copy_len < size) {
+ enum tl_stream_type stype;
+ unsigned int rb_idx_raw = 0;
+ unsigned int rb_idx;
+ size_t rb_size;
+
+ /* If we don't have any data yet, wait for a packet to be
+  * submitted. If we have already read some packets and there is
+  * no packet pending, return to the user. */
+ if (0 < copy_len) {
+ if (!kbasep_tlstream_packet_pending(
+ &stype,
+ &rb_idx_raw))
+ break;
+ } else {
+ if (wait_event_interruptible(
+ tl_event_queue,
+ kbasep_tlstream_packet_pending(
+ &stype,
+ &rb_idx_raw))) {
+ copy_len = -ERESTARTSYS;
+ break;
+ }
+ }
+
+ /* Check if this packet fits into the user buffer.
+ * If so copy its content. */
+ rb_idx = rb_idx_raw % PACKET_COUNT;
+ rb_size = atomic_read(&tl_stream[stype]->buffer[rb_idx].size);
+ if (rb_size > size - copy_len)
+ break;
+ if (copy_to_user(
+ &buffer[copy_len],
+ tl_stream[stype]->buffer[rb_idx].data,
+ rb_size)) {
+ copy_len = -EFAULT;
+ break;
+ }
+
+ /* If the rbi still points to the packet we just processed
+ * then there was no overflow so we add the copied size to
+ * copy_len and move rbi on to the next packet
+ */
+ smp_rmb();
+ if (atomic_read(&tl_stream[stype]->rbi) == rb_idx_raw) {
+ copy_len += rb_size;
+ atomic_inc(&tl_stream[stype]->rbi);
+
+#if MALI_UNIT_TEST
+ atomic_add(rb_size, &tlstream_bytes_collected);
+#endif /* MALI_UNIT_TEST */
+ }
+ }
+
+ mutex_unlock(&tl_reader_lock);
+
+ return copy_len;
+}
+
+/**
+ * kbasep_tlstream_poll - poll timeline stream for packets
+ * @filp: pointer to file structure
+ * @wait: pointer to poll table
+ * Return: POLLIN if data can be read without blocking, otherwise zero
+ */
+static unsigned int kbasep_tlstream_poll(struct file *filp, poll_table *wait)
+{
+ enum tl_stream_type stream_type;
+ unsigned int rb_idx;
+
+ KBASE_DEBUG_ASSERT(filp);
+ KBASE_DEBUG_ASSERT(wait);
+
+ poll_wait(filp, &tl_event_queue, wait);
+ if (kbasep_tlstream_packet_pending(&stream_type, &rb_idx))
+ return POLLIN;
+ return 0;
+}
+
+/**
+ * kbasep_tlstream_release - release timeline stream descriptor
+ * @inode: pointer to inode structure
+ * @filp: pointer to file structure
+ *
+ * Return: always returns zero
+ */
+static int kbasep_tlstream_release(struct inode *inode, struct file *filp)
+{
+ KBASE_DEBUG_ASSERT(inode);
+ KBASE_DEBUG_ASSERT(filp);
+ CSTD_UNUSED(inode);
+ CSTD_UNUSED(filp);
+
+ /* Stop autoflush timer before releasing access to streams. */
+ atomic_set(&autoflush_timer_active, 0);
+ del_timer_sync(&autoflush_timer);
+
+ atomic_set(&kbase_tlstream_enabled, 0);
+ return 0;
+}
+
+/**
+ * kbasep_tlstream_timeline_header - prepare timeline header stream packet
+ * @stream_type: type of the stream that will carry header data
+ * @tp_desc: pointer to array with tracepoint descriptors
+ * @tp_count: number of descriptors in the given array
+ *
+ * Function fills in information about the tracepoints stored in the body
+ * stream associated with this header stream.
+ */
+static void kbasep_tlstream_timeline_header(
+ enum tl_stream_type stream_type,
+ const struct tp_desc *tp_desc,
+ u32 tp_count)
+{
+ const u8 tv = SWTRACE_VERSION; /* protocol version */
+ const u8 ps = sizeof(void *); /* pointer size */
+ size_t msg_size = sizeof(tv) + sizeof(ps) + sizeof(tp_count);
+ char *buffer;
+ size_t pos = 0;
+ unsigned long flags;
+ unsigned int i;
+
+ KBASE_DEBUG_ASSERT(TL_STREAM_TYPE_COUNT > stream_type);
+ KBASE_DEBUG_ASSERT(tp_desc);
+
+ /* Calculate the size of the timeline message. */
+ for (i = 0; i < tp_count; i++) {
+ msg_size += sizeof(tp_desc[i].id);
+ msg_size +=
+ strnlen(tp_desc[i].id_str, STRLEN_MAX) +
+ sizeof(char) + sizeof(u32);
+ msg_size +=
+ strnlen(tp_desc[i].name, STRLEN_MAX) +
+ sizeof(char) + sizeof(u32);
+ msg_size +=
+ strnlen(tp_desc[i].arg_types, STRLEN_MAX) +
+ sizeof(char) + sizeof(u32);
+ msg_size +=
+ strnlen(tp_desc[i].arg_names, STRLEN_MAX) +
+ sizeof(char) + sizeof(u32);
+ }
+
+ KBASE_DEBUG_ASSERT(PACKET_SIZE - PACKET_HEADER_SIZE >= msg_size);
+
+ buffer = kbasep_tlstream_msgbuf_acquire(stream_type, msg_size, &flags);
+ KBASE_DEBUG_ASSERT(buffer);
+
+ pos = kbasep_tlstream_write_bytes(buffer, pos, &tv, sizeof(tv));
+ pos = kbasep_tlstream_write_bytes(buffer, pos, &ps, sizeof(ps));
+ pos = kbasep_tlstream_write_bytes(
+ buffer, pos, &tp_count, sizeof(tp_count));
+
+ for (i = 0; i < tp_count; i++) {
+ pos = kbasep_tlstream_write_bytes(
+ buffer, pos,
+ &tp_desc[i].id, sizeof(tp_desc[i].id));
+ pos = kbasep_tlstream_write_string(
+ buffer, pos,
+ tp_desc[i].id_str, msg_size - pos);
+ pos = kbasep_tlstream_write_string(
+ buffer, pos,
+ tp_desc[i].name, msg_size - pos);
+ pos = kbasep_tlstream_write_string(
+ buffer, pos,
+ tp_desc[i].arg_types, msg_size - pos);
+ pos = kbasep_tlstream_write_string(
+ buffer, pos,
+ tp_desc[i].arg_names, msg_size - pos);
+ }
+
+ KBASE_DEBUG_ASSERT(msg_size == pos);
+
+ kbasep_tlstream_msgbuf_release(stream_type, flags);
+
+ /* We don't expect any more data to be read in this stream.
+ * As header stream must be read before its associated body stream,
+ * make this packet visible to the user straightaway. */
+ kbasep_tlstream_flush_stream(stream_type);
+}
+
+/*****************************************************************************/
+
+int kbase_tlstream_init(void)
+{
+ enum tl_stream_type i;
+
+ /* Prepare stream structures. */
+ for (i = 0; i < TL_STREAM_TYPE_COUNT; i++) {
+ tl_stream[i] = kmalloc(sizeof(**tl_stream), GFP_KERNEL);
+ if (!tl_stream[i])
+ break;
+ kbasep_timeline_stream_init(tl_stream[i], i);
+ }
+ if (TL_STREAM_TYPE_COUNT > i) {
+ for (; i > 0; i--) {
+ kbasep_timeline_stream_term(tl_stream[i - 1]);
+ kfree(tl_stream[i - 1]);
+ }
+ return -ENOMEM;
+ }
+
+ /* Initialize autoflush timer. */
+ atomic_set(&autoflush_timer_active, 0);
+ setup_timer(&autoflush_timer,
+ kbasep_tlstream_autoflush_timer_callback,
+ 0);
+
+ return 0;
+}
+
+void kbase_tlstream_term(void)
+{
+ enum tl_stream_type i;
+
+ for (i = 0; i < TL_STREAM_TYPE_COUNT; i++) {
+ kbasep_timeline_stream_term(tl_stream[i]);
+ kfree(tl_stream[i]);
+ }
+}
+
+int kbase_tlstream_acquire(struct kbase_context *kctx, int *fd, u32 flags)
+{
+ u32 tlstream_enabled = TLSTREAM_ENABLED | flags;
+
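+ /* Only one timeline client may be active at a time; the cmpxchg atomically
+  * claims the descriptor and records the requested flags. */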
+ if (0 == atomic_cmpxchg(&kbase_tlstream_enabled, 0, tlstream_enabled)) {
+ int rcode;
+
+ *fd = anon_inode_getfd(
+ "[mali_tlstream]",
+ &kbasep_tlstream_fops,
+ kctx,
+ O_RDONLY | O_CLOEXEC);
+ if (0 > *fd) {
+ atomic_set(&kbase_tlstream_enabled, 0);
+ return *fd;
+ }
+
+ /* Reset and initialize header streams. */
+ kbasep_timeline_stream_reset(
+ tl_stream[TL_STREAM_TYPE_OBJ_HEADER]);
+ kbasep_timeline_stream_reset(
+ tl_stream[TL_STREAM_TYPE_OBJ_SUMMARY]);
+ kbasep_timeline_stream_reset(
+ tl_stream[TL_STREAM_TYPE_AUX_HEADER]);
+ kbasep_tlstream_timeline_header(
+ TL_STREAM_TYPE_OBJ_HEADER,
+ tp_desc_obj,
+ ARRAY_SIZE(tp_desc_obj));
+ kbasep_tlstream_timeline_header(
+ TL_STREAM_TYPE_AUX_HEADER,
+ tp_desc_aux,
+ ARRAY_SIZE(tp_desc_aux));
+
+ /* Start autoflush timer. */
+ atomic_set(&autoflush_timer_active, 1);
+ rcode = mod_timer(
+ &autoflush_timer,
+ jiffies + msecs_to_jiffies(AUTOFLUSH_INTERVAL));
+ CSTD_UNUSED(rcode);
+
+ } else {
+ *fd = -EBUSY;
+ }
+
+ return 0;
+}
+
+void kbase_tlstream_flush_streams(void)
+{
+ enum tl_stream_type stype;
+
+ for (stype = 0; stype < TL_STREAM_TYPE_COUNT; stype++)
+ kbasep_tlstream_flush_stream(stype);
+}
+
+void kbase_tlstream_reset_body_streams(void)
+{
+ kbasep_timeline_stream_reset(
+ tl_stream[TL_STREAM_TYPE_OBJ]);
+ kbasep_timeline_stream_reset(
+ tl_stream[TL_STREAM_TYPE_AUX]);
+}
+
+#if MALI_UNIT_TEST
+void kbase_tlstream_stats(u32 *bytes_collected, u32 *bytes_generated)
+{
+ KBASE_DEBUG_ASSERT(bytes_collected);
+ KBASE_DEBUG_ASSERT(bytes_generated);
+ *bytes_collected = atomic_read(&tlstream_bytes_collected);
+ *bytes_generated = atomic_read(&tlstream_bytes_generated);
+}
+#endif /* MALI_UNIT_TEST */
+
+/*****************************************************************************/
+
+void __kbase_tlstream_tl_summary_new_ctx(void *context, u32 nr, u32 tgid)
+{
+ const u32 msg_id = KBASE_TL_NEW_CTX;
+ const size_t msg_size =
+ sizeof(msg_id) + sizeof(u64) + sizeof(context) + sizeof(nr) +
+ sizeof(tgid);
+ unsigned long flags;
+ char *buffer;
+ size_t pos = 0;
+
+ buffer = kbasep_tlstream_msgbuf_acquire(
+ TL_STREAM_TYPE_OBJ_SUMMARY,
+ msg_size, &flags);
+ KBASE_DEBUG_ASSERT(buffer);
+
+ pos = kbasep_tlstream_write_bytes(buffer, pos, &msg_id, sizeof(msg_id));
+ pos = kbasep_tlstream_write_timestamp(buffer, pos);
+ pos = kbasep_tlstream_write_bytes(
+ buffer, pos, &context, sizeof(context));
+ pos = kbasep_tlstream_write_bytes(
+ buffer, pos, &nr, sizeof(nr));
+ pos = kbasep_tlstream_write_bytes(
+ buffer, pos, &tgid, sizeof(tgid));
+
+ KBASE_DEBUG_ASSERT(msg_size == pos);
+
+ kbasep_tlstream_msgbuf_release(TL_STREAM_TYPE_OBJ_SUMMARY, flags);
+}
+
+void __kbase_tlstream_tl_summary_new_gpu(void *gpu, u32 id, u32 core_count)
+{
+ const u32 msg_id = KBASE_TL_NEW_GPU;
+ const size_t msg_size =
+ sizeof(msg_id) + sizeof(u64) + sizeof(gpu) + sizeof(id) +
+ sizeof(core_count);
+ unsigned long flags;
+ char *buffer;
+ size_t pos = 0;
+
+ buffer = kbasep_tlstream_msgbuf_acquire(
+ TL_STREAM_TYPE_OBJ_SUMMARY,
+ msg_size, &flags);
+ KBASE_DEBUG_ASSERT(buffer);
+
+ pos = kbasep_tlstream_write_bytes(buffer, pos, &msg_id, sizeof(msg_id));
+ pos = kbasep_tlstream_write_timestamp(buffer, pos);
+ pos = kbasep_tlstream_write_bytes(
+ buffer, pos, &gpu, sizeof(gpu));
+ pos = kbasep_tlstream_write_bytes(
+ buffer, pos, &id, sizeof(id));
+ pos = kbasep_tlstream_write_bytes(
+ buffer, pos, &core_count, sizeof(core_count));
+ KBASE_DEBUG_ASSERT(msg_size == pos);
+
+ kbasep_tlstream_msgbuf_release(TL_STREAM_TYPE_OBJ_SUMMARY, flags);
+}
+
+void __kbase_tlstream_tl_summary_new_lpu(void *lpu, u32 nr, u32 fn)
+{
+ const u32 msg_id = KBASE_TL_NEW_LPU;
+ const size_t msg_size =
+ sizeof(msg_id) + sizeof(u64) + sizeof(lpu) + sizeof(nr) +
+ sizeof(fn);
+ unsigned long flags;
+ char *buffer;
+ size_t pos = 0;
+
+ buffer = kbasep_tlstream_msgbuf_acquire(
+ TL_STREAM_TYPE_OBJ_SUMMARY,
+ msg_size, &flags);
+ KBASE_DEBUG_ASSERT(buffer);
+
+ pos = kbasep_tlstream_write_bytes(buffer, pos, &msg_id, sizeof(msg_id));
+ pos = kbasep_tlstream_write_timestamp(buffer, pos);
+ pos = kbasep_tlstream_write_bytes(
+ buffer, pos, &lpu, sizeof(lpu));
+ pos = kbasep_tlstream_write_bytes(
+ buffer, pos, &nr, sizeof(nr));
+ pos = kbasep_tlstream_write_bytes(
+ buffer, pos, &fn, sizeof(fn));
+ KBASE_DEBUG_ASSERT(msg_size == pos);
+
+ kbasep_tlstream_msgbuf_release(TL_STREAM_TYPE_OBJ_SUMMARY, flags);
+}
+
+void __kbase_tlstream_tl_summary_lifelink_lpu_gpu(void *lpu, void *gpu)
+{
+ const u32 msg_id = KBASE_TL_LIFELINK_LPU_GPU;
+ const size_t msg_size =
+ sizeof(msg_id) + sizeof(u64) + sizeof(lpu) + sizeof(gpu);
+ unsigned long flags;
+ char *buffer;
+ size_t pos = 0;
+
+ buffer = kbasep_tlstream_msgbuf_acquire(
+ TL_STREAM_TYPE_OBJ_SUMMARY,
+ msg_size, &flags);
+ KBASE_DEBUG_ASSERT(buffer);
+
+ pos = kbasep_tlstream_write_bytes(buffer, pos, &msg_id, sizeof(msg_id));
+ pos = kbasep_tlstream_write_timestamp(buffer, pos);
+ pos = kbasep_tlstream_write_bytes(
+ buffer, pos, &lpu, sizeof(lpu));
+ pos = kbasep_tlstream_write_bytes(
+ buffer, pos, &gpu, sizeof(gpu));
+ KBASE_DEBUG_ASSERT(msg_size == pos);
+
+ kbasep_tlstream_msgbuf_release(TL_STREAM_TYPE_OBJ_SUMMARY, flags);
+}
+
+void __kbase_tlstream_tl_summary_new_as(void *as, u32 nr)
+{
+ const u32 msg_id = KBASE_TL_NEW_AS;
+ const size_t msg_size =
+ sizeof(msg_id) + sizeof(u64) + sizeof(as) + sizeof(nr);
+ unsigned long flags;
+ char *buffer;
+ size_t pos = 0;
+
+ buffer = kbasep_tlstream_msgbuf_acquire(
+ TL_STREAM_TYPE_OBJ_SUMMARY,
+ msg_size, &flags);
+ KBASE_DEBUG_ASSERT(buffer);
+
+ pos = kbasep_tlstream_write_bytes(buffer, pos, &msg_id, sizeof(msg_id));
+ pos = kbasep_tlstream_write_timestamp(buffer, pos);
+ pos = kbasep_tlstream_write_bytes(
+ buffer, pos, &as, sizeof(as));
+ pos = kbasep_tlstream_write_bytes(
+ buffer, pos, &nr, sizeof(nr));
+ KBASE_DEBUG_ASSERT(msg_size == pos);
+
+ kbasep_tlstream_msgbuf_release(TL_STREAM_TYPE_OBJ_SUMMARY, flags);
+}
+
+void __kbase_tlstream_tl_summary_lifelink_as_gpu(void *as, void *gpu)
+{
+ const u32 msg_id = KBASE_TL_LIFELINK_AS_GPU;
+ const size_t msg_size =
+ sizeof(msg_id) + sizeof(u64) + sizeof(as) + sizeof(gpu);
+ unsigned long flags;
+ char *buffer;
+ size_t pos = 0;
+
+ buffer = kbasep_tlstream_msgbuf_acquire(
+ TL_STREAM_TYPE_OBJ_SUMMARY,
+ msg_size, &flags);
+ KBASE_DEBUG_ASSERT(buffer);
+
+ pos = kbasep_tlstream_write_bytes(buffer, pos, &msg_id, sizeof(msg_id));
+ pos = kbasep_tlstream_write_timestamp(buffer, pos);
+ pos = kbasep_tlstream_write_bytes(
+ buffer, pos, &as, sizeof(as));
+ pos = kbasep_tlstream_write_bytes(
+ buffer, pos, &gpu, sizeof(gpu));
+ KBASE_DEBUG_ASSERT(msg_size == pos);
+
+ kbasep_tlstream_msgbuf_release(TL_STREAM_TYPE_OBJ_SUMMARY, flags);
+}
+
+/*****************************************************************************/
+
+void __kbase_tlstream_tl_new_ctx(void *context, u32 nr, u32 tgid)
+{
+ const u32 msg_id = KBASE_TL_NEW_CTX;
+ const size_t msg_size =
+ sizeof(msg_id) + sizeof(u64) + sizeof(context) + sizeof(nr) +
+ sizeof(tgid);
+ unsigned long flags;
+ char *buffer;
+ size_t pos = 0;
+
+ buffer = kbasep_tlstream_msgbuf_acquire(
+ TL_STREAM_TYPE_OBJ,
+ msg_size, &flags);
+ KBASE_DEBUG_ASSERT(buffer);
+
+ pos = kbasep_tlstream_write_bytes(buffer, pos, &msg_id, sizeof(msg_id));
+ pos = kbasep_tlstream_write_timestamp(buffer, pos);
+ pos = kbasep_tlstream_write_bytes(
+ buffer, pos, &context, sizeof(context));
+ pos = kbasep_tlstream_write_bytes(
+ buffer, pos, &nr, sizeof(nr));
+ pos = kbasep_tlstream_write_bytes(
+ buffer, pos, &tgid, sizeof(tgid));
+ KBASE_DEBUG_ASSERT(msg_size == pos);
+
+ kbasep_tlstream_msgbuf_release(TL_STREAM_TYPE_OBJ, flags);
+}
+
+void __kbase_tlstream_tl_new_atom(void *atom, u32 nr)
+{
+ const u32 msg_id = KBASE_TL_NEW_ATOM;
+ const size_t msg_size = sizeof(msg_id) + sizeof(u64) + sizeof(atom) +
+ sizeof(nr);
+ unsigned long flags;
+ char *buffer;
+ size_t pos = 0;
+
+ buffer = kbasep_tlstream_msgbuf_acquire(
+ TL_STREAM_TYPE_OBJ,
+ msg_size, &flags);
+ KBASE_DEBUG_ASSERT(buffer);
+
+ pos = kbasep_tlstream_write_bytes(buffer, pos, &msg_id, sizeof(msg_id));
+ pos = kbasep_tlstream_write_timestamp(buffer, pos);
+ pos = kbasep_tlstream_write_bytes(
+ buffer, pos, &atom, sizeof(atom));
+ pos = kbasep_tlstream_write_bytes(
+ buffer, pos, &nr, sizeof(nr));
+ KBASE_DEBUG_ASSERT(msg_size == pos);
+
+ kbasep_tlstream_msgbuf_release(TL_STREAM_TYPE_OBJ, flags);
+}
+
+void __kbase_tlstream_tl_del_ctx(void *context)
+{
+ const u32 msg_id = KBASE_TL_DEL_CTX;
+ const size_t msg_size =
+ sizeof(msg_id) + sizeof(u64) + sizeof(context);
+ unsigned long flags;
+ char *buffer;
+ size_t pos = 0;
+
+ buffer = kbasep_tlstream_msgbuf_acquire(
+ TL_STREAM_TYPE_OBJ,
+ msg_size, &flags);
+ KBASE_DEBUG_ASSERT(buffer);
+
+ pos = kbasep_tlstream_write_bytes(buffer, pos, &msg_id, sizeof(msg_id));
+ pos = kbasep_tlstream_write_timestamp(buffer, pos);
+ pos = kbasep_tlstream_write_bytes(
+ buffer, pos, &context, sizeof(context));
+ KBASE_DEBUG_ASSERT(msg_size == pos);
+
+ kbasep_tlstream_msgbuf_release(TL_STREAM_TYPE_OBJ, flags);
+}
+
+void __kbase_tlstream_tl_del_atom(void *atom)
+{
+ const u32 msg_id = KBASE_TL_DEL_ATOM;
+ const size_t msg_size =
+ sizeof(msg_id) + sizeof(u64) + sizeof(atom);
+ unsigned long flags;
+ char *buffer;
+ size_t pos = 0;
+
+ buffer = kbasep_tlstream_msgbuf_acquire(
+ TL_STREAM_TYPE_OBJ,
+ msg_size, &flags);
+ KBASE_DEBUG_ASSERT(buffer);
+
+ pos = kbasep_tlstream_write_bytes(buffer, pos, &msg_id, sizeof(msg_id));
+ pos = kbasep_tlstream_write_timestamp(buffer, pos);
+ pos = kbasep_tlstream_write_bytes(
+ buffer, pos, &atom, sizeof(atom));
+ KBASE_DEBUG_ASSERT(msg_size == pos);
+
+ kbasep_tlstream_msgbuf_release(TL_STREAM_TYPE_OBJ, flags);
+}
+
+void __kbase_tlstream_tl_ret_ctx_lpu(void *context, void *lpu)
+{
+ const u32 msg_id = KBASE_TL_RET_CTX_LPU;
+ const size_t msg_size =
+ sizeof(msg_id) + sizeof(u64) + sizeof(context) + sizeof(lpu);
+ unsigned long flags;
+ char *buffer;
+ size_t pos = 0;
+
+ buffer = kbasep_tlstream_msgbuf_acquire(
+ TL_STREAM_TYPE_OBJ,
+ msg_size, &flags);
+ KBASE_DEBUG_ASSERT(buffer);
+
+ pos = kbasep_tlstream_write_bytes(buffer, pos, &msg_id, sizeof(msg_id));
+ pos = kbasep_tlstream_write_timestamp(buffer, pos);
+ pos = kbasep_tlstream_write_bytes(
+ buffer, pos, &context, sizeof(context));
+ pos = kbasep_tlstream_write_bytes(
+ buffer, pos, &lpu, sizeof(lpu));
+ KBASE_DEBUG_ASSERT(msg_size == pos);
+
+ kbasep_tlstream_msgbuf_release(TL_STREAM_TYPE_OBJ, flags);
+}
+
+void __kbase_tlstream_tl_ret_atom_ctx(void *atom, void *context)
+{
+ const u32 msg_id = KBASE_TL_RET_ATOM_CTX;
+ const size_t msg_size =
+ sizeof(msg_id) + sizeof(u64) + sizeof(atom) + sizeof(context);
+ unsigned long flags;
+ char *buffer;
+ size_t pos = 0;
+
+ buffer = kbasep_tlstream_msgbuf_acquire(
+ TL_STREAM_TYPE_OBJ,
+ msg_size, &flags);
+ KBASE_DEBUG_ASSERT(buffer);
+
+ pos = kbasep_tlstream_write_bytes(buffer, pos, &msg_id, sizeof(msg_id));
+ pos = kbasep_tlstream_write_timestamp(buffer, pos);
+ pos = kbasep_tlstream_write_bytes(
+ buffer, pos, &atom, sizeof(atom));
+ pos = kbasep_tlstream_write_bytes(
+ buffer, pos, &context, sizeof(context));
+ KBASE_DEBUG_ASSERT(msg_size == pos);
+
+ kbasep_tlstream_msgbuf_release(TL_STREAM_TYPE_OBJ, flags);
+}
+
+void __kbase_tlstream_tl_ret_atom_lpu(
+ void *atom, void *lpu, const char *attrib_match_list)
+{
+ const u32 msg_id = KBASE_TL_RET_ATOM_LPU;
+ const size_t msg_s0 = sizeof(u32) + sizeof(char) +
+ strnlen(attrib_match_list, STRLEN_MAX);
+ const size_t msg_size =
+ sizeof(msg_id) + sizeof(u64) +
+ sizeof(atom) + sizeof(lpu) + msg_s0;
+ unsigned long flags;
+ char *buffer;
+ size_t pos = 0;
+
+ buffer = kbasep_tlstream_msgbuf_acquire(
+ TL_STREAM_TYPE_OBJ,
+ msg_size, &flags);
+ KBASE_DEBUG_ASSERT(buffer);
+
+ pos = kbasep_tlstream_write_bytes(buffer, pos, &msg_id, sizeof(msg_id));
+ pos = kbasep_tlstream_write_timestamp(buffer, pos);
+ pos = kbasep_tlstream_write_bytes(
+ buffer, pos, &atom, sizeof(atom));
+ pos = kbasep_tlstream_write_bytes(
+ buffer, pos, &lpu, sizeof(lpu));
+ pos = kbasep_tlstream_write_string(
+ buffer, pos, attrib_match_list, msg_s0);
+ KBASE_DEBUG_ASSERT(msg_size == pos);
+
+ kbasep_tlstream_msgbuf_release(TL_STREAM_TYPE_OBJ, flags);
+}
+
+void __kbase_tlstream_tl_nret_ctx_lpu(void *context, void *lpu)
+{
+ const u32 msg_id = KBASE_TL_NRET_CTX_LPU;
+ const size_t msg_size =
+ sizeof(msg_id) + sizeof(u64) + sizeof(context) + sizeof(lpu);
+ unsigned long flags;
+ char *buffer;
+ size_t pos = 0;
+
+ buffer = kbasep_tlstream_msgbuf_acquire(
+ TL_STREAM_TYPE_OBJ,
+ msg_size, &flags);
+ KBASE_DEBUG_ASSERT(buffer);
+
+ pos = kbasep_tlstream_write_bytes(buffer, pos, &msg_id, sizeof(msg_id));
+ pos = kbasep_tlstream_write_timestamp(buffer, pos);
+ pos = kbasep_tlstream_write_bytes(
+ buffer, pos, &context, sizeof(context));
+ pos = kbasep_tlstream_write_bytes(
+ buffer, pos, &lpu, sizeof(lpu));
+ KBASE_DEBUG_ASSERT(msg_size == pos);
+
+ kbasep_tlstream_msgbuf_release(TL_STREAM_TYPE_OBJ, flags);
+}
+
+void __kbase_tlstream_tl_nret_atom_ctx(void *atom, void *context)
+{
+ const u32 msg_id = KBASE_TL_NRET_ATOM_CTX;
+ const size_t msg_size =
+ sizeof(msg_id) + sizeof(u64) + sizeof(atom) + sizeof(context);
+ unsigned long flags;
+ char *buffer;
+ size_t pos = 0;
+
+ buffer = kbasep_tlstream_msgbuf_acquire(
+ TL_STREAM_TYPE_OBJ,
+ msg_size, &flags);
+ KBASE_DEBUG_ASSERT(buffer);
+
+ pos = kbasep_tlstream_write_bytes(buffer, pos, &msg_id, sizeof(msg_id));
+ pos = kbasep_tlstream_write_timestamp(buffer, pos);
+ pos = kbasep_tlstream_write_bytes(
+ buffer, pos, &atom, sizeof(atom));
+ pos = kbasep_tlstream_write_bytes(
+ buffer, pos, &context, sizeof(context));
+ KBASE_DEBUG_ASSERT(msg_size == pos);
+
+ kbasep_tlstream_msgbuf_release(TL_STREAM_TYPE_OBJ, flags);
+}
+
+void __kbase_tlstream_tl_dep_atom_atom(void *atom1, void *atom2)
+{
+ const u32 msg_id = KBASE_TL_DEP_ATOM_ATOM;
+ const size_t msg_size =
+ sizeof(msg_id) + sizeof(u64) + sizeof(atom1) + sizeof(atom2);
+ unsigned long flags;
+ char *buffer;
+ size_t pos = 0;
+
+ buffer = kbasep_tlstream_msgbuf_acquire(
+ TL_STREAM_TYPE_OBJ,
+ msg_size, &flags);
+ KBASE_DEBUG_ASSERT(buffer);
+
+ pos = kbasep_tlstream_write_bytes(buffer, pos, &msg_id, sizeof(msg_id));
+ pos = kbasep_tlstream_write_timestamp(buffer, pos);
+ pos = kbasep_tlstream_write_bytes(
+ buffer, pos, &atom1, sizeof(atom1));
+ pos = kbasep_tlstream_write_bytes(
+ buffer, pos, &atom2, sizeof(atom2));
+ KBASE_DEBUG_ASSERT(msg_size == pos);
+
+ kbasep_tlstream_msgbuf_release(TL_STREAM_TYPE_OBJ, flags);
+}
+
+void __kbase_tlstream_tl_ndep_atom_atom(void *atom1, void *atom2)
+{
+ const u32 msg_id = KBASE_TL_NDEP_ATOM_ATOM;
+ const size_t msg_size =
+ sizeof(msg_id) + sizeof(u64) + sizeof(atom1) + sizeof(atom2);
+ unsigned long flags;
+ char *buffer;
+ size_t pos = 0;
+
+ buffer = kbasep_tlstream_msgbuf_acquire(
+ TL_STREAM_TYPE_OBJ,
+ msg_size, &flags);
+ KBASE_DEBUG_ASSERT(buffer);
+
+ pos = kbasep_tlstream_write_bytes(buffer, pos, &msg_id, sizeof(msg_id));
+ pos = kbasep_tlstream_write_timestamp(buffer, pos);
+ pos = kbasep_tlstream_write_bytes(
+ buffer, pos, &atom1, sizeof(atom1));
+ pos = kbasep_tlstream_write_bytes(
+ buffer, pos, &atom2, sizeof(atom2));
+ KBASE_DEBUG_ASSERT(msg_size == pos);
+
+ kbasep_tlstream_msgbuf_release(TL_STREAM_TYPE_OBJ, flags);
+}
+
+void __kbase_tlstream_tl_rdep_atom_atom(void *atom1, void *atom2)
+{
+ const u32 msg_id = KBASE_TL_RDEP_ATOM_ATOM;
+ const size_t msg_size =
+ sizeof(msg_id) + sizeof(u64) + sizeof(atom1) + sizeof(atom2);
+ unsigned long flags;
+ char *buffer;
+ size_t pos = 0;
+
+ buffer = kbasep_tlstream_msgbuf_acquire(
+ TL_STREAM_TYPE_OBJ,
+ msg_size, &flags);
+ KBASE_DEBUG_ASSERT(buffer);
+
+ pos = kbasep_tlstream_write_bytes(buffer, pos, &msg_id, sizeof(msg_id));
+ pos = kbasep_tlstream_write_timestamp(buffer, pos);
+ pos = kbasep_tlstream_write_bytes(
+ buffer, pos, &atom1, sizeof(atom1));
+ pos = kbasep_tlstream_write_bytes(
+ buffer, pos, &atom2, sizeof(atom2));
+ KBASE_DEBUG_ASSERT(msg_size == pos);
+
+ kbasep_tlstream_msgbuf_release(TL_STREAM_TYPE_OBJ, flags);
+}
+
+void __kbase_tlstream_tl_nret_atom_lpu(void *atom, void *lpu)
+{
+ const u32 msg_id = KBASE_TL_NRET_ATOM_LPU;
+ const size_t msg_size =
+ sizeof(msg_id) + sizeof(u64) + sizeof(atom) + sizeof(lpu);
+ unsigned long flags;
+ char *buffer;
+ size_t pos = 0;
+
+ buffer = kbasep_tlstream_msgbuf_acquire(
+ TL_STREAM_TYPE_OBJ,
+ msg_size, &flags);
+ KBASE_DEBUG_ASSERT(buffer);
+
+ pos = kbasep_tlstream_write_bytes(buffer, pos, &msg_id, sizeof(msg_id));
+ pos = kbasep_tlstream_write_timestamp(buffer, pos);
+ pos = kbasep_tlstream_write_bytes(
+ buffer, pos, &atom, sizeof(atom));
+ pos = kbasep_tlstream_write_bytes(
+ buffer, pos, &lpu, sizeof(lpu));
+ KBASE_DEBUG_ASSERT(msg_size == pos);
+
+ kbasep_tlstream_msgbuf_release(TL_STREAM_TYPE_OBJ, flags);
+}
+
+void __kbase_tlstream_tl_ret_as_ctx(void *as, void *ctx)
+{
+ const u32 msg_id = KBASE_TL_RET_AS_CTX;
+ const size_t msg_size =
+ sizeof(msg_id) + sizeof(u64) + sizeof(as) + sizeof(ctx);
+ unsigned long flags;
+ char *buffer;
+ size_t pos = 0;
+
+ buffer = kbasep_tlstream_msgbuf_acquire(
+ TL_STREAM_TYPE_OBJ,
+ msg_size, &flags);
+ KBASE_DEBUG_ASSERT(buffer);
+
+ pos = kbasep_tlstream_write_bytes(buffer, pos, &msg_id, sizeof(msg_id));
+ pos = kbasep_tlstream_write_timestamp(buffer, pos);
+ pos = kbasep_tlstream_write_bytes(
+ buffer, pos, &as, sizeof(as));
+ pos = kbasep_tlstream_write_bytes(
+ buffer, pos, &ctx, sizeof(ctx));
+ KBASE_DEBUG_ASSERT(msg_size == pos);
+
+ kbasep_tlstream_msgbuf_release(TL_STREAM_TYPE_OBJ, flags);
+}
+
+void __kbase_tlstream_tl_nret_as_ctx(void *as, void *ctx)
+{
+ const u32 msg_id = KBASE_TL_NRET_AS_CTX;
+ const size_t msg_size =
+ sizeof(msg_id) + sizeof(u64) + sizeof(as) + sizeof(ctx);
+ unsigned long flags;
+ char *buffer;
+ size_t pos = 0;
+
+ buffer = kbasep_tlstream_msgbuf_acquire(
+ TL_STREAM_TYPE_OBJ,
+ msg_size, &flags);
+ KBASE_DEBUG_ASSERT(buffer);
+
+ pos = kbasep_tlstream_write_bytes(buffer, pos, &msg_id, sizeof(msg_id));
+ pos = kbasep_tlstream_write_timestamp(buffer, pos);
+ pos = kbasep_tlstream_write_bytes(
+ buffer, pos, &as, sizeof(as));
+ pos = kbasep_tlstream_write_bytes(
+ buffer, pos, &ctx, sizeof(ctx));
+ KBASE_DEBUG_ASSERT(msg_size == pos);
+
+ kbasep_tlstream_msgbuf_release(TL_STREAM_TYPE_OBJ, flags);
+}
+
+void __kbase_tlstream_tl_ret_atom_as(void *atom, void *as)
+{
+ const u32 msg_id = KBASE_TL_RET_ATOM_AS;
+ const size_t msg_size =
+ sizeof(msg_id) + sizeof(u64) + sizeof(atom) + sizeof(as);
+ unsigned long flags;
+ char *buffer;
+ size_t pos = 0;
+
+ buffer = kbasep_tlstream_msgbuf_acquire(
+ TL_STREAM_TYPE_OBJ,
+ msg_size, &flags);
+ KBASE_DEBUG_ASSERT(buffer);
+
+ pos = kbasep_tlstream_write_bytes(buffer, pos, &msg_id, sizeof(msg_id));
+ pos = kbasep_tlstream_write_timestamp(buffer, pos);
+ pos = kbasep_tlstream_write_bytes(
+ buffer, pos, &atom, sizeof(atom));
+ pos = kbasep_tlstream_write_bytes(
+ buffer, pos, &as, sizeof(as));
+ KBASE_DEBUG_ASSERT(msg_size == pos);
+
+ kbasep_tlstream_msgbuf_release(TL_STREAM_TYPE_OBJ, flags);
+}
+
+void __kbase_tlstream_tl_nret_atom_as(void *atom, void *as)
+{
+ const u32 msg_id = KBASE_TL_NRET_ATOM_AS;
+ const size_t msg_size =
+ sizeof(msg_id) + sizeof(u64) + sizeof(atom) + sizeof(as);
+ unsigned long flags;
+ char *buffer;
+ size_t pos = 0;
+
+ buffer = kbasep_tlstream_msgbuf_acquire(
+ TL_STREAM_TYPE_OBJ,
+ msg_size, &flags);
+ KBASE_DEBUG_ASSERT(buffer);
+
+ pos = kbasep_tlstream_write_bytes(buffer, pos, &msg_id, sizeof(msg_id));
+ pos = kbasep_tlstream_write_timestamp(buffer, pos);
+ pos = kbasep_tlstream_write_bytes(
+ buffer, pos, &atom, sizeof(atom));
+ pos = kbasep_tlstream_write_bytes(
+ buffer, pos, &as, sizeof(as));
+ KBASE_DEBUG_ASSERT(msg_size == pos);
+
+ kbasep_tlstream_msgbuf_release(TL_STREAM_TYPE_OBJ, flags);
+}
+
+void __kbase_tlstream_tl_attrib_atom_config(
+ void *atom, u64 jd, u64 affinity, u32 config)
+{
+ const u32 msg_id = KBASE_TL_ATTRIB_ATOM_CONFIG;
+ const size_t msg_size =
+ sizeof(msg_id) + sizeof(u64) + sizeof(atom) +
+ sizeof(jd) + sizeof(affinity) + sizeof(config);
+ unsigned long flags;
+ char *buffer;
+ size_t pos = 0;
+
+ buffer = kbasep_tlstream_msgbuf_acquire(
+ TL_STREAM_TYPE_OBJ,
+ msg_size, &flags);
+ KBASE_DEBUG_ASSERT(buffer);
+
+ pos = kbasep_tlstream_write_bytes(buffer, pos, &msg_id, sizeof(msg_id));
+ pos = kbasep_tlstream_write_timestamp(buffer, pos);
+ pos = kbasep_tlstream_write_bytes(
+ buffer, pos, &atom, sizeof(atom));
+ pos = kbasep_tlstream_write_bytes(
+ buffer, pos, &jd, sizeof(jd));
+ pos = kbasep_tlstream_write_bytes(
+ buffer, pos, &affinity, sizeof(affinity));
+ pos = kbasep_tlstream_write_bytes(
+ buffer, pos, &config, sizeof(config));
+ KBASE_DEBUG_ASSERT(msg_size == pos);
+
+ kbasep_tlstream_msgbuf_release(TL_STREAM_TYPE_OBJ, flags);
+}
+
+void __kbase_tlstream_tl_attrib_atom_priority(void *atom, u32 prio)
+{
+ const u32 msg_id = KBASE_TL_ATTRIB_ATOM_PRIORITY;
+ const size_t msg_size =
+ sizeof(msg_id) + sizeof(u64) + sizeof(atom) + sizeof(prio);
+ unsigned long flags;
+ char *buffer;
+ size_t pos = 0;
+
+ buffer = kbasep_tlstream_msgbuf_acquire(
+ TL_STREAM_TYPE_OBJ,
+ msg_size, &flags);
+ KBASE_DEBUG_ASSERT(buffer);
+
+ pos = kbasep_tlstream_write_bytes(buffer, pos, &msg_id, sizeof(msg_id));
+ pos = kbasep_tlstream_write_timestamp(buffer, pos);
+ pos = kbasep_tlstream_write_bytes(
+ buffer, pos, &atom, sizeof(atom));
+ pos = kbasep_tlstream_write_bytes(
+ buffer, pos, &prio, sizeof(prio));
+ KBASE_DEBUG_ASSERT(msg_size == pos);
+
+ kbasep_tlstream_msgbuf_release(TL_STREAM_TYPE_OBJ, flags);
+}
+
+void __kbase_tlstream_tl_attrib_atom_state(void *atom, u32 state)
+{
+ const u32 msg_id = KBASE_TL_ATTRIB_ATOM_STATE;
+ const size_t msg_size =
+ sizeof(msg_id) + sizeof(u64) + sizeof(atom) + sizeof(state);
+ unsigned long flags;
+ char *buffer;
+ size_t pos = 0;
+
+ buffer = kbasep_tlstream_msgbuf_acquire(
+ TL_STREAM_TYPE_OBJ,
+ msg_size, &flags);
+ KBASE_DEBUG_ASSERT(buffer);
+
+ pos = kbasep_tlstream_write_bytes(buffer, pos, &msg_id, sizeof(msg_id));
+ pos = kbasep_tlstream_write_timestamp(buffer, pos);
+ pos = kbasep_tlstream_write_bytes(
+ buffer, pos, &atom, sizeof(atom));
+ pos = kbasep_tlstream_write_bytes(
+ buffer, pos, &state, sizeof(state));
+ KBASE_DEBUG_ASSERT(msg_size == pos);
+
+ kbasep_tlstream_msgbuf_release(TL_STREAM_TYPE_OBJ, flags);
+}
+
+void __kbase_tlstream_tl_attrib_atom_priority_change(void *atom)
+{
+ const u32 msg_id = KBASE_TL_ATTRIB_ATOM_PRIORITY_CHANGE;
+ const size_t msg_size =
+ sizeof(msg_id) + sizeof(u64) + sizeof(atom);
+ unsigned long flags;
+ char *buffer;
+ size_t pos = 0;
+
+ buffer = kbasep_tlstream_msgbuf_acquire(
+ TL_STREAM_TYPE_OBJ,
+ msg_size, &flags);
+ KBASE_DEBUG_ASSERT(buffer);
+
+ pos = kbasep_tlstream_write_bytes(buffer, pos, &msg_id, sizeof(msg_id));
+ pos = kbasep_tlstream_write_timestamp(buffer, pos);
+ pos = kbasep_tlstream_write_bytes(
+ buffer, pos, &atom, sizeof(atom));
+ KBASE_DEBUG_ASSERT(msg_size == pos);
+
+ kbasep_tlstream_msgbuf_release(TL_STREAM_TYPE_OBJ, flags);
+}
+
+void __kbase_tlstream_tl_attrib_as_config(
+ void *as, u64 transtab, u64 memattr, u64 transcfg)
+{
+ const u32 msg_id = KBASE_TL_ATTRIB_AS_CONFIG;
+ const size_t msg_size =
+ sizeof(msg_id) + sizeof(u64) + sizeof(as) +
+ sizeof(transtab) + sizeof(memattr) + sizeof(transcfg);
+ unsigned long flags;
+ char *buffer;
+ size_t pos = 0;
+
+ buffer = kbasep_tlstream_msgbuf_acquire(
+ TL_STREAM_TYPE_OBJ,
+ msg_size, &flags);
+ KBASE_DEBUG_ASSERT(buffer);
+
+ pos = kbasep_tlstream_write_bytes(buffer, pos, &msg_id, sizeof(msg_id));
+ pos = kbasep_tlstream_write_timestamp(buffer, pos);
+ pos = kbasep_tlstream_write_bytes(
+ buffer, pos, &as, sizeof(as));
+ pos = kbasep_tlstream_write_bytes(
+ buffer, pos, &transtab, sizeof(transtab));
+ pos = kbasep_tlstream_write_bytes(
+ buffer, pos, &memattr, sizeof(memattr));
+ pos = kbasep_tlstream_write_bytes(
+ buffer, pos, &transcfg, sizeof(transcfg));
+ KBASE_DEBUG_ASSERT(msg_size == pos);
+
+ kbasep_tlstream_msgbuf_release(TL_STREAM_TYPE_OBJ, flags);
+}
+
+void __kbase_tlstream_tl_event_lpu_softstop(void *lpu)
+{
+ const u32 msg_id = KBASE_TL_EVENT_LPU_SOFTSTOP;
+ const size_t msg_size =
+ sizeof(msg_id) + sizeof(u64) + sizeof(lpu);
+ unsigned long flags;
+ char *buffer;
+ size_t pos = 0;
+
+ buffer = kbasep_tlstream_msgbuf_acquire(
+ TL_STREAM_TYPE_OBJ,
+ msg_size, &flags);
+ KBASE_DEBUG_ASSERT(buffer);
+
+ pos = kbasep_tlstream_write_bytes(buffer, pos, &msg_id, sizeof(msg_id));
+ pos = kbasep_tlstream_write_timestamp(buffer, pos);
+ pos = kbasep_tlstream_write_bytes(
+ buffer, pos, &lpu, sizeof(lpu));
+ KBASE_DEBUG_ASSERT(msg_size == pos);
+
+ kbasep_tlstream_msgbuf_release(TL_STREAM_TYPE_OBJ, flags);
+}
+
+void __kbase_tlstream_tl_event_atom_softstop_ex(void *atom)
+{
+ const u32 msg_id = KBASE_TL_EVENT_ATOM_SOFTSTOP_EX;
+ const size_t msg_size =
+ sizeof(msg_id) + sizeof(u64) + sizeof(atom);
+ unsigned long flags;
+ char *buffer;
+ size_t pos = 0;
+
+ buffer = kbasep_tlstream_msgbuf_acquire(
+ TL_STREAM_TYPE_OBJ,
+ msg_size, &flags);
+ KBASE_DEBUG_ASSERT(buffer);
+
+ pos = kbasep_tlstream_write_bytes(buffer, pos, &msg_id, sizeof(msg_id));
+ pos = kbasep_tlstream_write_timestamp(buffer, pos);
+ pos = kbasep_tlstream_write_bytes(
+ buffer, pos, &atom, sizeof(atom));
+ KBASE_DEBUG_ASSERT(msg_size == pos);
+
+ kbasep_tlstream_msgbuf_release(TL_STREAM_TYPE_OBJ, flags);
+}
+
+void __kbase_tlstream_tl_event_atom_softstop_issue(void *atom)
+{
+ const u32 msg_id = KBASE_TL_EVENT_ATOM_SOFTSTOP_ISSUE;
+ const size_t msg_size =
+ sizeof(msg_id) + sizeof(u64) + sizeof(atom);
+ unsigned long flags;
+ char *buffer;
+ size_t pos = 0;
+
+ buffer = kbasep_tlstream_msgbuf_acquire(
+ TL_STREAM_TYPE_OBJ,
+ msg_size, &flags);
+ KBASE_DEBUG_ASSERT(buffer);
+
+ pos = kbasep_tlstream_write_bytes(buffer, pos, &msg_id, sizeof(msg_id));
+ pos = kbasep_tlstream_write_timestamp(buffer, pos);
+ pos = kbasep_tlstream_write_bytes(
+ buffer, pos, &atom, sizeof(atom));
+ KBASE_DEBUG_ASSERT(msg_size == pos);
+
+ kbasep_tlstream_msgbuf_release(TL_STREAM_TYPE_OBJ, flags);
+}
+
+void __kbase_tlstream_jd_gpu_soft_reset(void *gpu)
+{
+ const u32 msg_id = KBASE_JD_GPU_SOFT_RESET;
+ const size_t msg_size =
+ sizeof(msg_id) + sizeof(u64) + sizeof(gpu);
+ unsigned long flags;
+ char *buffer;
+ size_t pos = 0;
+
+ buffer = kbasep_tlstream_msgbuf_acquire(
+ TL_STREAM_TYPE_OBJ,
+ msg_size, &flags);
+ KBASE_DEBUG_ASSERT(buffer);
+
+ pos = kbasep_tlstream_write_bytes(buffer, pos, &msg_id, sizeof(msg_id));
+ pos = kbasep_tlstream_write_timestamp(buffer, pos);
+ pos = kbasep_tlstream_write_bytes(
+ buffer, pos, &gpu, sizeof(gpu));
+ KBASE_DEBUG_ASSERT(msg_size == pos);
+
+ kbasep_tlstream_msgbuf_release(TL_STREAM_TYPE_OBJ, flags);
+}
+
+/*****************************************************************************/
+
+void __kbase_tlstream_aux_pm_state(u32 core_type, u64 state)
+{
+ const u32 msg_id = KBASE_AUX_PM_STATE;
+ const size_t msg_size =
+ sizeof(msg_id) + sizeof(u64) + sizeof(core_type) +
+ sizeof(state);
+ unsigned long flags;
+ char *buffer;
+ size_t pos = 0;
+
+ buffer = kbasep_tlstream_msgbuf_acquire(
+ TL_STREAM_TYPE_AUX,
+ msg_size, &flags);
+ KBASE_DEBUG_ASSERT(buffer);
+
+ pos = kbasep_tlstream_write_bytes(buffer, pos, &msg_id, sizeof(msg_id));
+ pos = kbasep_tlstream_write_timestamp(buffer, pos);
+ pos = kbasep_tlstream_write_bytes(
+ buffer, pos, &core_type, sizeof(core_type));
+ pos = kbasep_tlstream_write_bytes(buffer, pos, &state, sizeof(state));
+ KBASE_DEBUG_ASSERT(msg_size == pos);
+
+ kbasep_tlstream_msgbuf_release(TL_STREAM_TYPE_AUX, flags);
+}
+
+void __kbase_tlstream_aux_pagefault(u32 ctx_nr, u64 page_count_change)
+{
+ const u32 msg_id = KBASE_AUX_PAGEFAULT;
+ const size_t msg_size =
+ sizeof(msg_id) + sizeof(u64) + sizeof(ctx_nr) +
+ sizeof(page_count_change);
+ unsigned long flags;
+ char *buffer;
+ size_t pos = 0;
+
+ buffer = kbasep_tlstream_msgbuf_acquire(
+ TL_STREAM_TYPE_AUX, msg_size, &flags);
+ KBASE_DEBUG_ASSERT(buffer);
+
+ pos = kbasep_tlstream_write_bytes(buffer, pos, &msg_id, sizeof(msg_id));
+ pos = kbasep_tlstream_write_timestamp(buffer, pos);
+ pos = kbasep_tlstream_write_bytes(buffer, pos, &ctx_nr, sizeof(ctx_nr));
+ pos = kbasep_tlstream_write_bytes(
+ buffer, pos,
+ &page_count_change, sizeof(page_count_change));
+ KBASE_DEBUG_ASSERT(msg_size == pos);
+
+ kbasep_tlstream_msgbuf_release(TL_STREAM_TYPE_AUX, flags);
+}
+
+void __kbase_tlstream_aux_pagesalloc(u32 ctx_nr, u64 page_count)
+{
+ const u32 msg_id = KBASE_AUX_PAGESALLOC;
+ const size_t msg_size =
+ sizeof(msg_id) + sizeof(u64) + sizeof(ctx_nr) +
+ sizeof(page_count);
+ unsigned long flags;
+ char *buffer;
+ size_t pos = 0;
+
+ buffer = kbasep_tlstream_msgbuf_acquire(
+ TL_STREAM_TYPE_AUX, msg_size, &flags);
+ KBASE_DEBUG_ASSERT(buffer);
+
+ pos = kbasep_tlstream_write_bytes(buffer, pos, &msg_id, sizeof(msg_id));
+ pos = kbasep_tlstream_write_timestamp(buffer, pos);
+ pos = kbasep_tlstream_write_bytes(buffer, pos, &ctx_nr, sizeof(ctx_nr));
+ pos = kbasep_tlstream_write_bytes(
+ buffer, pos, &page_count, sizeof(page_count));
+ KBASE_DEBUG_ASSERT(msg_size == pos);
+
+ kbasep_tlstream_msgbuf_release(TL_STREAM_TYPE_AUX, flags);
+}
+
+void __kbase_tlstream_aux_devfreq_target(u64 target_freq)
+{
+ const u32 msg_id = KBASE_AUX_DEVFREQ_TARGET;
+ const size_t msg_size =
+ sizeof(msg_id) + sizeof(u64) + sizeof(target_freq);
+ unsigned long flags;
+ char *buffer;
+ size_t pos = 0;
+
+ buffer = kbasep_tlstream_msgbuf_acquire(
+ TL_STREAM_TYPE_AUX, msg_size, &flags);
+ KBASE_DEBUG_ASSERT(buffer);
+
+ pos = kbasep_tlstream_write_bytes(buffer, pos, &msg_id, sizeof(msg_id));
+ pos = kbasep_tlstream_write_timestamp(buffer, pos);
+ pos = kbasep_tlstream_write_bytes(
+ buffer, pos, &target_freq, sizeof(target_freq));
+ KBASE_DEBUG_ASSERT(msg_size == pos);
+
+ kbasep_tlstream_msgbuf_release(TL_STREAM_TYPE_AUX, flags);
+}
+
diff --git a/midgard-0.r2p0/mali_kbase_tlstream.h b/midgard-0.r2p0/mali_kbase_tlstream.h
new file mode 100755
index 0000000..e29be71
--- /dev/null
+++ b/midgard-0.r2p0/mali_kbase_tlstream.h
@@ -0,0 +1,558 @@
+/*
+ *
+ * (C) COPYRIGHT 2015-2016 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained
+ * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
+ * Boston, MA 02110-1301, USA.
+ *
+ */
+
+
+
+#if !defined(_KBASE_TLSTREAM_H)
+#define _KBASE_TLSTREAM_H
+
+#include <mali_kbase.h>
+
+/*****************************************************************************/
+
+/**
+ * kbase_tlstream_init - initialize timeline infrastructure in kernel
+ * Return: zero on success, negative number on error
+ */
+int kbase_tlstream_init(void);
+
+/**
+ * kbase_tlstream_term - terminate timeline infrastructure in kernel
+ *
+ * The timeline infrastructure must have been previously initialized with
+ * kbase_tlstream_init().
+ */
+void kbase_tlstream_term(void);
+
+/**
+ * kbase_tlstream_acquire - acquire timeline stream file descriptor
+ * @kctx: kernel common context
+ * @fd: timeline stream file descriptor
+ * @flags: timeline stream flags
+ *
+ * The descriptor is meant to be used by a userspace timeline client to gain
+ * access to the kernel timeline stream. This stream is later broadcast by
+ * user space to the timeline client.
+ * Only one entity can own the descriptor at any given time. The descriptor
+ * should be closed when it is no longer used. If the descriptor cannot be
+ * obtained (i.e. it is already in use), the fd argument will contain a
+ * negative value.
+ *
+ * Return: zero on success (which does not necessarily mean that a stream
+ * descriptor was returned), negative number on error
+ */
+int kbase_tlstream_acquire(struct kbase_context *kctx, int *fd, u32 flags);
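+
+/*
+ * Illustrative usage sketch (hypothetical caller): the -EBUSY case is
+ * reported through *fd rather than the return value, so both must be checked:
+ *
+ *	int fd;
+ *	int err = kbase_tlstream_acquire(kctx, &fd, 0);
+ *
+ *	if (err)
+ *		return err;	// stream set-up failed
+ *	if (fd < 0)
+ *		return fd;	// stream already owned (-EBUSY)
+ *	// hand fd to userspace; reading it yields timeline packets
+ */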
+
+/**
+ * kbase_tlstream_flush_streams - flush timeline streams.
+ *
+ * Function will flush pending data in all timeline streams.
+ */
+void kbase_tlstream_flush_streams(void);
+
+/**
+ * kbase_tlstream_reset_body_streams - reset timeline body streams.
+ *
+ * Function will discard pending data in all timeline body streams.
+ */
+void kbase_tlstream_reset_body_streams(void);
+
+#if MALI_UNIT_TEST
+/**
+ * kbase_tlstream_test - start timeline stream data generator
+ * @tpw_count: number of trace point writers in each context
+ * @msg_delay: time delay in milliseconds between trace points written by one
+ * writer
+ * @msg_count: number of trace points written by one writer
+ * @aux_msg: if non-zero aux messages will be included
+ *
+ * This test starts a requested number of asynchronous writers in both IRQ and
+ * thread context. Each writer will generate the required number of test
+ * tracepoints (tracepoints with embedded information about the writer that
+ * should be verified by the user space reader). Tracepoints will be emitted
+ * in all timeline body streams. If aux_msg is non-zero each writer will also
+ * generate non-testable tracepoints (tracepoints without information about
+ * the writer). These tracepoints are used to check the correctness of the
+ * remaining timeline message generating functions. Each writer will wait the
+ * requested time between generating consecutive sets of messages. This call
+ * blocks until all writers finish.
+ */
+void kbase_tlstream_test(
+ unsigned int tpw_count,
+ unsigned int msg_delay,
+ unsigned int msg_count,
+ int aux_msg);
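+
+/*
+ * Illustrative sketch (hypothetical values): a unit test might start four
+ * writers, each emitting 100 test tracepoints 10 ms apart, with aux messages
+ * included:
+ *
+ *	kbase_tlstream_test(4, 10, 100, 1);
+ */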
+
+/**
+ * kbase_tlstream_stats - read timeline stream statistics
+ * @bytes_collected: will hold number of bytes read by the user
+ * @bytes_generated: will hold number of bytes generated by trace points
+ */
+void kbase_tlstream_stats(u32 *bytes_collected, u32 *bytes_generated);
+#endif /* MALI_UNIT_TEST */
+
+/*****************************************************************************/
+
+#define TL_ATOM_STATE_IDLE 0
+#define TL_ATOM_STATE_READY 1
+#define TL_ATOM_STATE_DONE 2
+#define TL_ATOM_STATE_POSTED 3
+
+void __kbase_tlstream_tl_summary_new_ctx(void *context, u32 nr, u32 tgid);
+void __kbase_tlstream_tl_summary_new_gpu(void *gpu, u32 id, u32 core_count);
+void __kbase_tlstream_tl_summary_new_lpu(void *lpu, u32 nr, u32 fn);
+void __kbase_tlstream_tl_summary_lifelink_lpu_gpu(void *lpu, void *gpu);
+void __kbase_tlstream_tl_summary_new_as(void *as, u32 nr);
+void __kbase_tlstream_tl_summary_lifelink_as_gpu(void *as, void *gpu);
+void __kbase_tlstream_tl_new_ctx(void *context, u32 nr, u32 tgid);
+void __kbase_tlstream_tl_new_atom(void *atom, u32 nr);
+void __kbase_tlstream_tl_del_ctx(void *context);
+void __kbase_tlstream_tl_del_atom(void *atom);
+void __kbase_tlstream_tl_ret_ctx_lpu(void *context, void *lpu);
+void __kbase_tlstream_tl_ret_atom_ctx(void *atom, void *context);
+void __kbase_tlstream_tl_ret_atom_lpu(
+ void *atom, void *lpu, const char *attrib_match_list);
+void __kbase_tlstream_tl_nret_ctx_lpu(void *context, void *lpu);
+void __kbase_tlstream_tl_nret_atom_ctx(void *atom, void *context);
+void __kbase_tlstream_tl_nret_atom_lpu(void *atom, void *lpu);
+void __kbase_tlstream_tl_ret_as_ctx(void *as, void *ctx);
+void __kbase_tlstream_tl_nret_as_ctx(void *as, void *ctx);
+void __kbase_tlstream_tl_ret_atom_as(void *atom, void *as);
+void __kbase_tlstream_tl_nret_atom_as(void *atom, void *as);
+void __kbase_tlstream_tl_dep_atom_atom(void *atom1, void *atom2);
+void __kbase_tlstream_tl_ndep_atom_atom(void *atom1, void *atom2);
+void __kbase_tlstream_tl_rdep_atom_atom(void *atom1, void *atom2);
+void __kbase_tlstream_tl_attrib_atom_config(
+ void *atom, u64 jd, u64 affinity, u32 config);
+void __kbase_tlstream_tl_attrib_atom_priority(void *atom, u32 prio);
+void __kbase_tlstream_tl_attrib_atom_state(void *atom, u32 state);
+void __kbase_tlstream_tl_attrib_atom_priority_change(void *atom);
+void __kbase_tlstream_tl_attrib_as_config(
+ void *as, u64 transtab, u64 memattr, u64 transcfg);
+void __kbase_tlstream_tl_event_atom_softstop_ex(void *atom);
+void __kbase_tlstream_tl_event_lpu_softstop(void *lpu);
+void __kbase_tlstream_tl_event_atom_softstop_issue(void *atom);
+void __kbase_tlstream_jd_gpu_soft_reset(void *gpu);
+void __kbase_tlstream_aux_pm_state(u32 core_type, u64 state);
+void __kbase_tlstream_aux_pagefault(u32 ctx_nr, u64 page_count_change);
+void __kbase_tlstream_aux_pagesalloc(u32 ctx_nr, u64 page_count);
+void __kbase_tlstream_aux_devfreq_target(u64 target_freq);
+
+#define TLSTREAM_ENABLED (1 << 31)
+
+extern atomic_t kbase_tlstream_enabled;
+
+#define __TRACE_IF_ENABLED(trace_name, ...) \
+ do { \
+ int enabled = atomic_read(&kbase_tlstream_enabled); \
+ if (enabled & TLSTREAM_ENABLED) \
+ __kbase_tlstream_##trace_name(__VA_ARGS__); \
+ } while (0)
+
+#define __TRACE_IF_ENABLED_LATENCY(trace_name, ...) \
+ do { \
+ int enabled = atomic_read(&kbase_tlstream_enabled); \
+ if (enabled & BASE_TLSTREAM_ENABLE_LATENCY_TRACEPOINTS) \
+ __kbase_tlstream_##trace_name(__VA_ARGS__); \
+ } while (0)
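+
+/*
+ * Illustrative sketch: driver code emits tracepoints through the
+ * kbase_tlstream_* wrappers defined below rather than calling the
+ * __kbase_tlstream_* writers directly, for example (the call site and the
+ * atom-id variable are hypothetical):
+ *
+ *	kbase_tlstream_tl_new_atom(katom, atom_id);
+ *
+ * The wrapper always performs the atomic_read() and only calls
+ * __kbase_tlstream_tl_new_atom() while a timeline client holds the stream,
+ * i.e. while the TLSTREAM_ENABLED bit is set in kbase_tlstream_enabled.
+ */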
+
+/*****************************************************************************/
+
+/**
+ * kbase_tlstream_tl_summary_new_ctx - create context object in timeline
+ * summary
+ * @context: name of the context object
+ * @nr: context number
+ * @tgid: thread group ID
+ *
+ * Function emits a timeline message informing about context creation. The
+ * context is created with a context number attribute that can be used to link
+ * the kbase context with the userspace context.
+ * This message is directed to the timeline summary stream.
+ */
+#define kbase_tlstream_tl_summary_new_ctx(context, nr, tgid) \
+ __TRACE_IF_ENABLED(tl_summary_new_ctx, context, nr, tgid)
+
+/**
+ * kbase_tlstream_tl_summary_new_gpu - create GPU object in timeline summary
+ * @gpu: name of the GPU object
+ * @id: id value of this GPU
+ * @core_count: number of cores this GPU hosts
+ *
+ * Function emits a timeline message informing about GPU creation. GPU is
+ * created with two attributes: id and core count.
+ * This message is directed to timeline summary stream.
+ */
+#define kbase_tlstream_tl_summary_new_gpu(gpu, id, core_count) \
+ __TRACE_IF_ENABLED(tl_summary_new_gpu, gpu, id, core_count)
+
+/**
+ * kbase_tlstream_tl_summary_new_lpu - create LPU object in timeline summary
+ * @lpu: name of the Logical Processing Unit object
+ * @nr: sequential number assigned to this LPU
+ * @fn: property describing this LPU's functional abilities
+ *
+ * Function emits a timeline message informing about LPU creation. The LPU is
+ * created with two attributes: a number linking this LPU with the GPU's job
+ * slot and a function property describing this LPU's abilities.
+ * This message is directed to the timeline summary stream.
+ */
+#define kbase_tlstream_tl_summary_new_lpu(lpu, nr, fn) \
+ __TRACE_IF_ENABLED(tl_summary_new_lpu, lpu, nr, fn)
+
+/**
+ * kbase_tlstream_tl_summary_lifelink_lpu_gpu - lifelink LPU object to GPU
+ * @lpu: name of the Logical Processing Unit object
+ * @gpu: name of the GPU object
+ *
+ * Function emits a timeline message informing that LPU object shall be deleted
+ * along with GPU object.
+ * This message is directed to timeline summary stream.
+ */
+#define kbase_tlstream_tl_summary_lifelink_lpu_gpu(lpu, gpu) \
+ __TRACE_IF_ENABLED(tl_summary_lifelink_lpu_gpu, lpu, gpu)
+
+/**
+ * kbase_tlstream_tl_summary_new_as - create address space object in timeline summary
+ * @as: name of the address space object
+ * @nr: sequential number assigned to this address space
+ *
+ * Function emits a timeline message informing about address space creation.
+ * Address space is created with one attribute: number identifying this
+ * address space.
+ * This message is directed to timeline summary stream.
+ */
+#define kbase_tlstream_tl_summary_new_as(as, nr) \
+ __TRACE_IF_ENABLED(tl_summary_new_as, as, nr)
+
+/**
+ * kbase_tlstream_tl_summary_lifelink_as_gpu - lifelink address space object to GPU
+ * @as: name of the address space object
+ * @gpu: name of the GPU object
+ *
+ * Function emits a timeline message informing that address space object
+ * shall be deleted along with GPU object.
+ * This message is directed to timeline summary stream.
+ */
+#define kbase_tlstream_tl_summary_lifelink_as_gpu(as, gpu) \
+ __TRACE_IF_ENABLED(tl_summary_lifelink_as_gpu, as, gpu)
+
+/**
+ * kbase_tlstream_tl_new_ctx - create context object in timeline
+ * @context: name of the context object
+ * @nr: context number
+ * @tgid: thread group ID
+ *
+ * Function emits a timeline message informing about context creation. The
+ * context is created with a context number attribute that can be used to link
+ * the kbase context with the userspace context.
+ */
+#define kbase_tlstream_tl_new_ctx(context, nr, tgid) \
+ __TRACE_IF_ENABLED(tl_new_ctx, context, nr, tgid)
+
+/**
+ * kbase_tlstream_tl_new_atom - create atom object in timeline
+ * @atom: name of the atom object
+ * @nr: sequential number assigned to this atom
+ *
+ * Function emits a timeline message informing about atom creation. The atom
+ * is created with an atom number attribute that links it with the actual work
+ * bucket id understood by the hardware.
+ */
+#define kbase_tlstream_tl_new_atom(atom, nr) \
+ __TRACE_IF_ENABLED(tl_new_atom, atom, nr)
+
+/**
+ * kbase_tlstream_tl_del_ctx - destroy context object in timeline
+ * @context: name of the context object
+ *
+ * Function emits a timeline message informing that context object ceased to
+ * exist.
+ */
+#define kbase_tlstream_tl_del_ctx(context) \
+ __TRACE_IF_ENABLED(tl_del_ctx, context)
+
+/**
+ * kbase_tlstream_tl_del_atom - destroy atom object in timeline
+ * @atom: name of the atom object
+ *
+ * Function emits a timeline message informing that atom object ceased to
+ * exist.
+ */
+#define kbase_tlstream_tl_del_atom(atom) \
+ __TRACE_IF_ENABLED(tl_del_atom, atom)
+
+/**
+ * kbase_tlstream_tl_ret_ctx_lpu - retain context by LPU
+ * @context: name of the context object
+ * @lpu: name of the Logical Processing Unit object
+ *
+ * Function emits a timeline message informing that context is being held
+ * by LPU and must not be deleted unless it is released.
+ */
+#define kbase_tlstream_tl_ret_ctx_lpu(context, lpu) \
+ __TRACE_IF_ENABLED(tl_ret_ctx_lpu, context, lpu)
+
+/**
+ * kbase_tlstream_tl_ret_atom_ctx - retain atom by context
+ * @atom: name of the atom object
+ * @context: name of the context object
+ *
+ * Function emits a timeline message informing that atom object is being held
+ * by context and must not be deleted unless it is released.
+ */
+#define kbase_tlstream_tl_ret_atom_ctx(atom, context) \
+ __TRACE_IF_ENABLED(tl_ret_atom_ctx, atom, context)
+
+/**
+ * kbase_tlstream_tl_ret_atom_lpu - retain atom by LPU
+ * @atom: name of the atom object
+ * @lpu: name of the Logical Processing Unit object
+ * @attrib_match_list: list containing match operator attributes
+ *
+ * Function emits a timeline message informing that atom object is being held
+ * by LPU and must not be deleted unless it is released.
+ */
+#define kbase_tlstream_tl_ret_atom_lpu(atom, lpu, attrib_match_list) \
+ __TRACE_IF_ENABLED(tl_ret_atom_lpu, atom, lpu, attrib_match_list)
+
+/**
+ * kbase_tlstream_tl_nret_ctx_lpu - release context by LPU
+ * @context: name of the context object
+ * @lpu: name of the Logical Processing Unit object
+ *
+ * Function emits a timeline message informing that context is being released
+ * by LPU object.
+ */
+#define kbase_tlstream_tl_nret_ctx_lpu(context, lpu) \
+ __TRACE_IF_ENABLED(tl_nret_ctx_lpu, context, lpu)
+
+/**
+ * kbase_tlstream_tl_nret_atom_ctx - release atom by context
+ * @atom: name of the atom object
+ * @context: name of the context object
+ *
+ * Function emits a timeline message informing that atom object is being
+ * released by context.
+ */
+#define kbase_tlstream_tl_nret_atom_ctx(atom, context) \
+ __TRACE_IF_ENABLED(tl_nret_atom_ctx, atom, context)
+
+/**
+ * kbase_tlstream_tl_nret_atom_lpu - release atom by LPU
+ * @atom: name of the atom object
+ * @lpu: name of the Logical Processing Unit object
+ *
+ * Function emits a timeline message informing that atom object is being
+ * released by LPU.
+ */
+#define kbase_tlstream_tl_nret_atom_lpu(atom, lpu) \
+ __TRACE_IF_ENABLED(tl_nret_atom_lpu, atom, lpu)
+
+/**
+ * kbase_tlstream_tl_ret_as_ctx - retain address space by context
+ * @as: name of the address space object
+ * @ctx: name of the context object
+ *
+ * Function emits a timeline message informing that address space object
+ * is being held by the context object.
+ */
+#define kbase_tlstream_tl_ret_as_ctx(as, ctx) \
+ __TRACE_IF_ENABLED(tl_ret_as_ctx, as, ctx)
+
+/**
+ * kbase_tlstream_tl_nret_as_ctx - release address space by context
+ * @as: name of the address space object
+ * @ctx: name of the context object
+ *
+ * Function emits a timeline message informing that the address space object
+ * is being released by the context.
+ */
+#define kbase_tlstream_tl_nret_as_ctx(as, ctx) \
+ __TRACE_IF_ENABLED(tl_nret_as_ctx, as, ctx)
+
+/**
+ * kbase_tlstream_tl_ret_atom_as - retain atom by address space
+ * @atom: name of the atom object
+ * @as: name of the address space object
+ *
+ * Function emits a timeline message informing that atom object is being held
+ * by address space and must not be deleted unless it is released.
+ */
+#define kbase_tlstream_tl_ret_atom_as(atom, as) \
+ __TRACE_IF_ENABLED(tl_ret_atom_as, atom, as)
+
+/**
+ * kbase_tlstream_tl_nret_atom_as - release atom by address space
+ * @atom: name of the atom object
+ * @as: name of the address space object
+ *
+ * Function emits a timeline message informing that atom object is being
+ * released by address space.
+ */
+#define kbase_tlstream_tl_nret_atom_as(atom, as) \
+ __TRACE_IF_ENABLED(tl_nret_atom_as, atom, as)
+
+/**
+ * kbase_tlstream_tl_dep_atom_atom - parent atom depends on child atom
+ * @atom1: name of the child atom object
+ * @atom2: name of the parent atom object that depends on child atom
+ *
+ * Function emits a timeline message informing that the parent atom waits for
+ * the child atom object to complete before starting its own execution.
+ */
+#define kbase_tlstream_tl_dep_atom_atom(atom1, atom2) \
+ __TRACE_IF_ENABLED(tl_dep_atom_atom, atom1, atom2)
+
+/**
+ * kbase_tlstream_tl_ndep_atom_atom - dependency between atoms resolved
+ * @atom1: name of the child atom object
+ * @atom2: name of the parent atom object that depended on child atom
+ *
+ * Function emits a timeline message informing that parent atom execution
+ * dependency on child atom has been resolved.
+ */
+#define kbase_tlstream_tl_ndep_atom_atom(atom1, atom2) \
+ __TRACE_IF_ENABLED(tl_ndep_atom_atom, atom1, atom2)
+
+/**
+ * kbase_tlstream_tl_rdep_atom_atom - information about already resolved dependency between atoms
+ * @atom1: name of the child atom object
+ * @atom2: name of the parent atom object that depended on child atom
+ *
+ * Function emits a timeline message informing that parent atom execution
+ * dependency on child atom has been resolved.
+ */
+#define kbase_tlstream_tl_rdep_atom_atom(atom1, atom2) \
+ __TRACE_IF_ENABLED(tl_rdep_atom_atom, atom1, atom2)
+
+/**
+ * kbase_tlstream_tl_attrib_atom_config - atom job slot attributes
+ * @atom: name of the atom object
+ * @jd: job descriptor address
+ * @affinity: job affinity
+ * @config: job config
+ *
+ * Function emits a timeline message containing atom attributes.
+ */
+#define kbase_tlstream_tl_attrib_atom_config(atom, jd, affinity, config) \
+ __TRACE_IF_ENABLED(tl_attrib_atom_config, atom, jd, affinity, config)
+
+/**
+ * kbase_tlstream_tl_attrib_atom_priority - atom priority
+ * @atom: name of the atom object
+ * @prio: atom priority
+ *
+ * Function emits a timeline message containing atom priority.
+ */
+#define kbase_tlstream_tl_attrib_atom_priority(atom, prio) \
+ __TRACE_IF_ENABLED_LATENCY(tl_attrib_atom_priority, atom, prio)
+
+/**
+ * kbase_tlstream_tl_attrib_atom_state - atom state
+ * @atom: name of the atom object
+ * @state: atom state
+ *
+ * Function emits a timeline message containing atom state.
+ */
+#define kbase_tlstream_tl_attrib_atom_state(atom, state) \
+ __TRACE_IF_ENABLED_LATENCY(tl_attrib_atom_state, atom, state)
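+
+/*
+ * Illustrative sketch (hypothetical call sites): callers pass one of the
+ * TL_ATOM_STATE_* values defined above, e.g. when an atom becomes runnable
+ * and later completes:
+ *
+ *	kbase_tlstream_tl_attrib_atom_state(katom, TL_ATOM_STATE_READY);
+ *	...
+ *	kbase_tlstream_tl_attrib_atom_state(katom, TL_ATOM_STATE_DONE);
+ *
+ * As a latency tracepoint, it is emitted only when the stream was acquired
+ * with BASE_TLSTREAM_ENABLE_LATENCY_TRACEPOINTS set in the flags passed to
+ * kbase_tlstream_acquire().
+ */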
+
+/**
+ * kbase_tlstream_tl_attrib_atom_priority_change - atom caused priority change
+ * @atom: name of the atom object
+ *
+ * Function emits a timeline message signalling a change of the atom's
+ * priority.
+ */
+#define kbase_tlstream_tl_attrib_atom_priority_change(atom) \
+ __TRACE_IF_ENABLED_LATENCY(tl_attrib_atom_priority_change, atom)
+
+/**
+ * kbase_tlstream_tl_attrib_as_config - address space attributes
+ * @as: assigned address space
+ * @transtab: configuration of the TRANSTAB register
+ * @memattr: configuration of the MEMATTR register
+ * @transcfg: configuration of the TRANSCFG register (or zero if not present)
+ *
+ * Function emits a timeline message containing address space attributes.
+ */
+#define kbase_tlstream_tl_attrib_as_config(as, transtab, memattr, transcfg) \
+ __TRACE_IF_ENABLED(tl_attrib_as_config, as, transtab, memattr, transcfg)
+
+/**
+ * kbase_tlstream_tl_event_atom_softstop_ex
+ * @atom: atom identifier
+ */
+#define kbase_tlstream_tl_event_atom_softstop_ex(atom) \
+ __TRACE_IF_ENABLED(tl_event_atom_softstop_ex, atom)
+
+/**
+ * kbase_tlstream_tl_event_lpu_softstop
+ * @lpu: name of the LPU object
+ */
+#define kbase_tlstream_tl_event_lpu_softstop(lpu) \
+ __TRACE_IF_ENABLED(tl_event_lpu_softstop, lpu)
+
+/**
+ * kbase_tlstream_tl_event_atom_softstop_issue
+ * @atom: atom identifier
+ */
+#define kbase_tlstream_tl_event_atom_softstop_issue(atom) \
+ __TRACE_IF_ENABLED(tl_event_atom_softstop_issue, atom)
+
+/**
+ * kbase_tlstream_jd_gpu_soft_reset - The GPU is being soft reset
+ * @gpu: name of the GPU object
+ *
+ * This imperative tracepoint is specific to job dumping.
+ * Function emits a timeline message indicating GPU soft reset.
+ */
+#define kbase_tlstream_jd_gpu_soft_reset(gpu) \
+ __TRACE_IF_ENABLED(jd_gpu_soft_reset, gpu)
+
+/**
+ * kbase_tlstream_aux_pm_state - timeline message: power management state
+ * @core_type: core type (shader, tiler, l2 cache, l3 cache)
+ * @state: 64-bit bitmask reporting the power state of the cores (1-ON, 0-OFF)
+ */
+#define kbase_tlstream_aux_pm_state(core_type, state) \
+ __TRACE_IF_ENABLED(aux_pm_state, core_type, state)
+
+/**
+ * kbase_tlstream_aux_pagefault - timeline message: MMU page fault event
+ * resulting in new pages being mapped
+ * @ctx_nr: kernel context number
+ * @page_count_change: number of pages to be added
+ */
+#define kbase_tlstream_aux_pagefault(ctx_nr, page_count_change) \
+ __TRACE_IF_ENABLED(aux_pagefault, ctx_nr, page_count_change)
+
+/**
+ * kbase_tlstream_aux_pagesalloc - timeline message: total number of allocated
+ * pages is changed
+ * @ctx_nr: kernel context number
+ * @page_count: number of pages used by the context
+ */
+#define kbase_tlstream_aux_pagesalloc(ctx_nr, page_count) \
+ __TRACE_IF_ENABLED(aux_pagesalloc, ctx_nr, page_count)
+
+/**
+ * kbase_tlstream_aux_devfreq_target - timeline message: new target DVFS
+ * frequency
+ * @target_freq: new target frequency
+ */
+#define kbase_tlstream_aux_devfreq_target(target_freq) \
+ __TRACE_IF_ENABLED(aux_devfreq_target, target_freq)
+
+#endif /* _KBASE_TLSTREAM_H */
+
diff --git a/midgard-0.r2p0/mali_kbase_trace_defs.h b/midgard-0.r2p0/mali_kbase_trace_defs.h
new file mode 100755
index 0000000..e2e0544
--- /dev/null
+++ b/midgard-0.r2p0/mali_kbase_trace_defs.h
@@ -0,0 +1,264 @@
+/*
+ *
+ * (C) COPYRIGHT 2011-2014 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained
+ * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
+ * Boston, MA 02110-1301, USA.
+ *
+ */
+
+
+
+
+
+/* ***** IMPORTANT: THIS IS NOT A NORMAL HEADER FILE *****
+ * ***** DO NOT INCLUDE DIRECTLY *****
+ * ***** THE LACK OF HEADER GUARDS IS INTENTIONAL ***** */
+
+/*
+ * The purpose of this header file is just to contain a list of trace code identifiers
+ *
+ * Each identifier is wrapped in a macro, so that its string form and enum form can be created
+ *
+ * Each macro is separated with a comma, to allow insertion into an array initializer or enum definition block.
+ *
+ * This allows automatic creation of an enum and a corresponding array of strings
+ *
+ * Before #including, the includer MUST #define KBASE_TRACE_CODE_MAKE_CODE.
+ * After #including, the includer MUST #undef KBASE_TRACE_CODE_MAKE_CODE.
+ *
+ * e.g.:
+ * #define KBASE_TRACE_CODE( X ) KBASE_TRACE_CODE_ ## X
+ * typedef enum
+ * {
+ * #define KBASE_TRACE_CODE_MAKE_CODE( X ) KBASE_TRACE_CODE( X )
+ * #include "mali_kbase_trace_defs.h"
+ * #undef KBASE_TRACE_CODE_MAKE_CODE
+ * } kbase_trace_code;
+ *
+ * IMPORTANT: THIS FILE MUST NOT BE USED FOR ANY PURPOSE OTHER THAN THE ABOVE
+ *
+ *
+ * The use of the macro here is:
+ * - KBASE_TRACE_CODE_MAKE_CODE( X )
+ *
+ * Which produces:
+ * - For an enum, KBASE_TRACE_CODE_X
+ * - For a string, "X"
+ *
+ *
+ * For example:
+ * - KBASE_TRACE_CODE_MAKE_CODE( JM_JOB_COMPLETE ) expands to:
+ * - KBASE_TRACE_CODE_JM_JOB_COMPLETE for the enum
+ * - "JM_JOB_COMPLETE" for the string
+ * - To use it to trace an event, do:
+ * - KBASE_TRACE_ADD( kbdev, JM_JOB_COMPLETE, subcode, kctx, uatom, val );
+ */
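+
+/*
+ * Illustrative sketch: a matching string table can be generated by
+ * re-including this file with a stringifying definition of the macro, along
+ * the same lines as the enum example above (the array name is hypothetical):
+ *
+ *	#define KBASE_TRACE_CODE_MAKE_CODE( X ) # X
+ *	static const char * const kbase_trace_code_strings[] = {
+ *	#include "mali_kbase_trace_defs.h"
+ *	};
+ *	#undef KBASE_TRACE_CODE_MAKE_CODE
+ */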
+
+#if 0 /* Dummy section to avoid breaking formatting */
+int dummy_array[] = {
+#endif
+
+/*
+ * Core events
+ */
+ /* no info_val, no gpu_addr, no atom */
+ KBASE_TRACE_CODE_MAKE_CODE(CORE_CTX_DESTROY),
+ /* no info_val, no gpu_addr, no atom */
+ KBASE_TRACE_CODE_MAKE_CODE(CORE_CTX_HWINSTR_TERM),
+ /* info_val == GPU_IRQ_STATUS register */
+ KBASE_TRACE_CODE_MAKE_CODE(CORE_GPU_IRQ),
+ /* info_val == bits cleared */
+ KBASE_TRACE_CODE_MAKE_CODE(CORE_GPU_IRQ_CLEAR),
+ /* info_val == GPU_IRQ_STATUS register */
+ KBASE_TRACE_CODE_MAKE_CODE(CORE_GPU_IRQ_DONE),
+ KBASE_TRACE_CODE_MAKE_CODE(CORE_GPU_SOFT_RESET),
+ KBASE_TRACE_CODE_MAKE_CODE(CORE_GPU_HARD_RESET),
+ KBASE_TRACE_CODE_MAKE_CODE(CORE_GPU_PRFCNT_CLEAR),
+ /* GPU addr==dump address */
+ KBASE_TRACE_CODE_MAKE_CODE(CORE_GPU_PRFCNT_SAMPLE),
+ KBASE_TRACE_CODE_MAKE_CODE(CORE_GPU_CLEAN_INV_CACHES),
+/*
+ * Job Slot management events
+ */
+ /* info_val==irq rawstat at start */
+ KBASE_TRACE_CODE_MAKE_CODE(JM_IRQ),
+ /* info_val==jobs processed */
+ KBASE_TRACE_CODE_MAKE_CODE(JM_IRQ_END),
+/* In the following:
+ *
+ * - ctx is set if a corresponding job found (NULL otherwise, e.g. some soft-stop cases)
+ * - uatom==kernel-side mapped uatom address (for correlation with user-side)
+ */
+ /* info_val==exit code; gpu_addr==chain gpuaddr */
+ KBASE_TRACE_CODE_MAKE_CODE(JM_JOB_DONE),
+ /* gpu_addr==JS_HEAD_NEXT written, info_val==lower 32 bits of affinity */
+ KBASE_TRACE_CODE_MAKE_CODE(JM_SUBMIT),
+ /* gpu_addr is as follows:
+ * - If JS_STATUS active after soft-stop, val==gpu addr written to
+ * JS_HEAD on submit
+ * - otherwise gpu_addr==0 */
+ KBASE_TRACE_CODE_MAKE_CODE(JM_SOFTSTOP),
+ KBASE_TRACE_CODE_MAKE_CODE(JM_SOFTSTOP_0),
+ KBASE_TRACE_CODE_MAKE_CODE(JM_SOFTSTOP_1),
+ /* gpu_addr==JS_HEAD read */
+ KBASE_TRACE_CODE_MAKE_CODE(JM_HARDSTOP),
+ /* gpu_addr==JS_HEAD read */
+ KBASE_TRACE_CODE_MAKE_CODE(JM_HARDSTOP_0),
+ /* gpu_addr==JS_HEAD read */
+ KBASE_TRACE_CODE_MAKE_CODE(JM_HARDSTOP_1),
+ /* gpu_addr==JS_TAIL read */
+ KBASE_TRACE_CODE_MAKE_CODE(JM_UPDATE_HEAD),
+/* gpu_addr is as follows:
+ * - If JS_STATUS active before soft-stop, val==JS_HEAD
+ * - otherwise gpu_addr==0
+ */
+ /* gpu_addr==JS_HEAD read */
+ KBASE_TRACE_CODE_MAKE_CODE(JM_CHECK_HEAD),
+ KBASE_TRACE_CODE_MAKE_CODE(JM_FLUSH_WORKQS),
+ KBASE_TRACE_CODE_MAKE_CODE(JM_FLUSH_WORKQS_DONE),
+ /* info_val == is_scheduled */
+ KBASE_TRACE_CODE_MAKE_CODE(JM_ZAP_NON_SCHEDULED),
+ /* info_val == is_scheduled */
+ KBASE_TRACE_CODE_MAKE_CODE(JM_ZAP_SCHEDULED),
+ KBASE_TRACE_CODE_MAKE_CODE(JM_ZAP_DONE),
+ /* info_val == nr jobs submitted */
+ KBASE_TRACE_CODE_MAKE_CODE(JM_SLOT_SOFT_OR_HARD_STOP),
+ /* gpu_addr==JS_HEAD_NEXT last written */
+ KBASE_TRACE_CODE_MAKE_CODE(JM_SLOT_EVICT),
+ KBASE_TRACE_CODE_MAKE_CODE(JM_SUBMIT_AFTER_RESET),
+ KBASE_TRACE_CODE_MAKE_CODE(JM_BEGIN_RESET_WORKER),
+ KBASE_TRACE_CODE_MAKE_CODE(JM_END_RESET_WORKER),
+/*
+ * Job dispatch events
+ */
+ /* gpu_addr==value to write into JS_HEAD */
+ KBASE_TRACE_CODE_MAKE_CODE(JD_DONE),
+ /* gpu_addr==value to write into JS_HEAD */
+ KBASE_TRACE_CODE_MAKE_CODE(JD_DONE_WORKER),
+ /* gpu_addr==value to write into JS_HEAD */
+ KBASE_TRACE_CODE_MAKE_CODE(JD_DONE_WORKER_END),
+ /* gpu_addr==value to write into JS_HEAD */
+ KBASE_TRACE_CODE_MAKE_CODE(JD_DONE_TRY_RUN_NEXT_JOB),
+ /* gpu_addr==0, info_val==0, uatom==0 */
+ KBASE_TRACE_CODE_MAKE_CODE(JD_ZAP_CONTEXT),
+ /* gpu_addr==value to write into JS_HEAD */
+ KBASE_TRACE_CODE_MAKE_CODE(JD_CANCEL),
+ /* gpu_addr==value to write into JS_HEAD */
+ KBASE_TRACE_CODE_MAKE_CODE(JD_CANCEL_WORKER),
+/*
+ * Scheduler Core events
+ */
+ KBASE_TRACE_CODE_MAKE_CODE(JS_RETAIN_CTX_NOLOCK),
+ /* gpu_addr==value to write into JS_HEAD */
+ KBASE_TRACE_CODE_MAKE_CODE(JS_ADD_JOB),
+ /* gpu_addr==last value written/would be written to JS_HEAD */
+ KBASE_TRACE_CODE_MAKE_CODE(JS_REMOVE_JOB),
+ KBASE_TRACE_CODE_MAKE_CODE(JS_RETAIN_CTX),
+ KBASE_TRACE_CODE_MAKE_CODE(JS_RELEASE_CTX),
+ KBASE_TRACE_CODE_MAKE_CODE(JS_TRY_SCHEDULE_HEAD_CTX),
+ /* gpu_addr==value to write into JS_HEAD */
+ KBASE_TRACE_CODE_MAKE_CODE(JS_JOB_DONE_TRY_RUN_NEXT_JOB),
+ /* gpu_addr==value to write into JS_HEAD */
+ KBASE_TRACE_CODE_MAKE_CODE(JS_JOB_DONE_RETRY_NEEDED),
+ /* kctx is the one being evicted, info_val == kctx to put in */
+ KBASE_TRACE_CODE_MAKE_CODE(JS_FAST_START_EVICTS_CTX),
+ KBASE_TRACE_CODE_MAKE_CODE(JS_AFFINITY_SUBMIT_TO_BLOCKED),
+ /* info_val == lower 32 bits of affinity */
+ KBASE_TRACE_CODE_MAKE_CODE(JS_AFFINITY_CURRENT),
+ /* info_val == lower 32 bits of affinity */
+ KBASE_TRACE_CODE_MAKE_CODE(JS_CORE_REF_REQUEST_CORES_FAILED),
+ /* info_val == lower 32 bits of affinity */
+ KBASE_TRACE_CODE_MAKE_CODE(JS_CORE_REF_REGISTER_INUSE_FAILED),
+ /* info_val == lower 32 bits of rechecked affinity */
+ KBASE_TRACE_CODE_MAKE_CODE(JS_CORE_REF_REQUEST_ON_RECHECK_FAILED),
+ /* info_val == lower 32 bits of rechecked affinity */
+ KBASE_TRACE_CODE_MAKE_CODE(JS_CORE_REF_REGISTER_ON_RECHECK_FAILED),
+ /* info_val == lower 32 bits of affinity */
+ KBASE_TRACE_CODE_MAKE_CODE(JS_CORE_REF_AFFINITY_WOULD_VIOLATE),
+ /* info_val == the ctx attribute now on ctx */
+ KBASE_TRACE_CODE_MAKE_CODE(JS_CTX_ATTR_NOW_ON_CTX),
+ /* info_val == the ctx attribute now on runpool */
+ KBASE_TRACE_CODE_MAKE_CODE(JS_CTX_ATTR_NOW_ON_RUNPOOL),
+ /* info_val == the ctx attribute now off ctx */
+ KBASE_TRACE_CODE_MAKE_CODE(JS_CTX_ATTR_NOW_OFF_CTX),
+ /* info_val == the ctx attribute now off runpool */
+ KBASE_TRACE_CODE_MAKE_CODE(JS_CTX_ATTR_NOW_OFF_RUNPOOL),
+/*
+ * Scheduler Policy events
+ */
+ KBASE_TRACE_CODE_MAKE_CODE(JS_POLICY_INIT_CTX),
+ KBASE_TRACE_CODE_MAKE_CODE(JS_POLICY_TERM_CTX),
+ /* info_val == whether it was evicted */
+ KBASE_TRACE_CODE_MAKE_CODE(JS_POLICY_TRY_EVICT_CTX),
+ KBASE_TRACE_CODE_MAKE_CODE(JS_POLICY_FOREACH_CTX_JOBS),
+ KBASE_TRACE_CODE_MAKE_CODE(JS_POLICY_ENQUEUE_CTX),
+ KBASE_TRACE_CODE_MAKE_CODE(JS_POLICY_DEQUEUE_HEAD_CTX),
+ KBASE_TRACE_CODE_MAKE_CODE(JS_POLICY_RUNPOOL_ADD_CTX),
+ KBASE_TRACE_CODE_MAKE_CODE(JS_POLICY_RUNPOOL_REMOVE_CTX),
+ KBASE_TRACE_CODE_MAKE_CODE(JS_POLICY_DEQUEUE_JOB),
+ KBASE_TRACE_CODE_MAKE_CODE(JS_POLICY_DEQUEUE_JOB_IRQ),
+ /* gpu_addr==JS_HEAD to write if the job were run */
+ KBASE_TRACE_CODE_MAKE_CODE(JS_POLICY_ENQUEUE_JOB),
+ KBASE_TRACE_CODE_MAKE_CODE(JS_POLICY_TIMER_START),
+ KBASE_TRACE_CODE_MAKE_CODE(JS_POLICY_TIMER_END),
+/*
+ * Power Management Events
+ */
+ KBASE_TRACE_CODE_MAKE_CODE(PM_JOB_SUBMIT_AFTER_POWERING_UP),
+ KBASE_TRACE_CODE_MAKE_CODE(PM_JOB_SUBMIT_AFTER_POWERED_UP),
+ KBASE_TRACE_CODE_MAKE_CODE(PM_PWRON),
+ KBASE_TRACE_CODE_MAKE_CODE(PM_PWRON_TILER),
+ KBASE_TRACE_CODE_MAKE_CODE(PM_PWRON_L2),
+ KBASE_TRACE_CODE_MAKE_CODE(PM_PWROFF),
+ KBASE_TRACE_CODE_MAKE_CODE(PM_PWROFF_TILER),
+ KBASE_TRACE_CODE_MAKE_CODE(PM_PWROFF_L2),
+ KBASE_TRACE_CODE_MAKE_CODE(PM_CORES_POWERED),
+ KBASE_TRACE_CODE_MAKE_CODE(PM_CORES_POWERED_TILER),
+ KBASE_TRACE_CODE_MAKE_CODE(PM_CORES_POWERED_L2),
+ KBASE_TRACE_CODE_MAKE_CODE(PM_CORES_CHANGE_DESIRED),
+ KBASE_TRACE_CODE_MAKE_CODE(PM_CORES_CHANGE_DESIRED_TILER),
+ KBASE_TRACE_CODE_MAKE_CODE(PM_CORES_CHANGE_AVAILABLE),
+ KBASE_TRACE_CODE_MAKE_CODE(PM_CORES_CHANGE_AVAILABLE_TILER),
+ KBASE_TRACE_CODE_MAKE_CODE(PM_CORES_AVAILABLE),
+ KBASE_TRACE_CODE_MAKE_CODE(PM_CORES_AVAILABLE_TILER),
+ /* PM_DESIRED_REACHED: gpu_addr == pm.gpu_in_desired_state */
+ KBASE_TRACE_CODE_MAKE_CODE(PM_DESIRED_REACHED),
+ KBASE_TRACE_CODE_MAKE_CODE(PM_DESIRED_REACHED_TILER),
+ KBASE_TRACE_CODE_MAKE_CODE(PM_REGISTER_CHANGE_SHADER_INUSE),
+ KBASE_TRACE_CODE_MAKE_CODE(PM_REGISTER_CHANGE_TILER_INUSE),
+ KBASE_TRACE_CODE_MAKE_CODE(PM_REGISTER_CHANGE_SHADER_NEEDED),
+ KBASE_TRACE_CODE_MAKE_CODE(PM_REGISTER_CHANGE_TILER_NEEDED),
+ KBASE_TRACE_CODE_MAKE_CODE(PM_RELEASE_CHANGE_SHADER_INUSE),
+ KBASE_TRACE_CODE_MAKE_CODE(PM_RELEASE_CHANGE_TILER_INUSE),
+ KBASE_TRACE_CODE_MAKE_CODE(PM_UNREQUEST_CHANGE_SHADER_NEEDED),
+ KBASE_TRACE_CODE_MAKE_CODE(PM_UNREQUEST_CHANGE_TILER_NEEDED),
+ KBASE_TRACE_CODE_MAKE_CODE(PM_REQUEST_CHANGE_SHADER_NEEDED),
+ KBASE_TRACE_CODE_MAKE_CODE(PM_REQUEST_CHANGE_TILER_NEEDED),
+ KBASE_TRACE_CODE_MAKE_CODE(PM_WAKE_WAITERS),
+ KBASE_TRACE_CODE_MAKE_CODE(PM_CONTEXT_ACTIVE),
+ KBASE_TRACE_CODE_MAKE_CODE(PM_CONTEXT_IDLE),
+ KBASE_TRACE_CODE_MAKE_CODE(PM_GPU_ON),
+ KBASE_TRACE_CODE_MAKE_CODE(PM_GPU_OFF),
+ /* info_val == policy number, or -1 for "Already changing" */
+ KBASE_TRACE_CODE_MAKE_CODE(PM_SET_POLICY),
+ KBASE_TRACE_CODE_MAKE_CODE(PM_CA_SET_POLICY),
+ /* info_val == policy number */
+ KBASE_TRACE_CODE_MAKE_CODE(PM_CURRENT_POLICY_INIT),
+ /* info_val == policy number */
+ KBASE_TRACE_CODE_MAKE_CODE(PM_CURRENT_POLICY_TERM),
+/* Unused code just to make it easier to not have a comma at the end.
+ * All other codes MUST come before this */
+ KBASE_TRACE_CODE_MAKE_CODE(DUMMY)
+
+#if 0 /* Dummy section to avoid breaking formatting */
+};
+#endif
+
+/* ***** THE LACK OF HEADER GUARDS IS INTENTIONAL ***** */
diff --git a/midgard-0.r2p0/mali_kbase_trace_timeline.c b/midgard-0.r2p0/mali_kbase_trace_timeline.c
new file mode 100755
index 0000000..5830e87
--- /dev/null
+++ b/midgard-0.r2p0/mali_kbase_trace_timeline.c
@@ -0,0 +1,236 @@
+/*
+ *
+ * (C) COPYRIGHT 2012-2016 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained
+ * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
+ * Boston, MA 02110-1301, USA.
+ *
+ */
+
+
+
+
+
+#include <mali_kbase.h>
+#include <mali_kbase_jm.h>
+#include <mali_kbase_hwaccess_jm.h>
+
+#define CREATE_TRACE_POINTS
+
+#ifdef CONFIG_MALI_TRACE_TIMELINE
+#include "mali_timeline.h"
+
+#include <linux/debugfs.h>
+#include <linux/seq_file.h>
+
+EXPORT_TRACEPOINT_SYMBOL_GPL(mali_timeline_atoms_in_flight);
+EXPORT_TRACEPOINT_SYMBOL_GPL(mali_timeline_atom);
+EXPORT_TRACEPOINT_SYMBOL_GPL(mali_timeline_gpu_slot_active);
+EXPORT_TRACEPOINT_SYMBOL_GPL(mali_timeline_gpu_slot_action);
+EXPORT_TRACEPOINT_SYMBOL_GPL(mali_timeline_gpu_power_active);
+EXPORT_TRACEPOINT_SYMBOL_GPL(mali_timeline_l2_power_active);
+EXPORT_TRACEPOINT_SYMBOL_GPL(mali_timeline_pm_event);
+EXPORT_TRACEPOINT_SYMBOL_GPL(mali_timeline_slot_atom);
+EXPORT_TRACEPOINT_SYMBOL_GPL(mali_timeline_pm_checktrans);
+EXPORT_TRACEPOINT_SYMBOL_GPL(mali_timeline_context_active);
+
+struct kbase_trace_timeline_desc {
+ char *enum_str;
+ char *desc;
+ char *format;
+ char *format_desc;
+};
+
+static struct kbase_trace_timeline_desc kbase_trace_timeline_desc_table[] = {
+ #define KBASE_TIMELINE_TRACE_CODE(enum_val, desc, format, format_desc) { #enum_val, desc, format, format_desc }
+ #include "mali_kbase_trace_timeline_defs.h"
+ #undef KBASE_TIMELINE_TRACE_CODE
+};
+
+#define KBASE_NR_TRACE_CODES ARRAY_SIZE(kbase_trace_timeline_desc_table)
+
+static void *kbasep_trace_timeline_seq_start(struct seq_file *s, loff_t *pos)
+{
+ if (*pos >= KBASE_NR_TRACE_CODES)
+ return NULL;
+
+ return &kbase_trace_timeline_desc_table[*pos];
+}
+
+static void kbasep_trace_timeline_seq_stop(struct seq_file *s, void *data)
+{
+}
+
+static void *kbasep_trace_timeline_seq_next(struct seq_file *s, void *data, loff_t *pos)
+{
+ (*pos)++;
+
+ if (*pos == KBASE_NR_TRACE_CODES)
+ return NULL;
+
+ return &kbase_trace_timeline_desc_table[*pos];
+}
+
+static int kbasep_trace_timeline_seq_show(struct seq_file *s, void *data)
+{
+ struct kbase_trace_timeline_desc *trace_desc = data;
+
+ seq_printf(s, "%s#%s#%s#%s\n", trace_desc->enum_str, trace_desc->desc, trace_desc->format, trace_desc->format_desc);
+ return 0;
+}
+
+
+static const struct seq_operations kbasep_trace_timeline_seq_ops = {
+ .start = kbasep_trace_timeline_seq_start,
+ .next = kbasep_trace_timeline_seq_next,
+ .stop = kbasep_trace_timeline_seq_stop,
+ .show = kbasep_trace_timeline_seq_show,
+};
+
+static int kbasep_trace_timeline_debugfs_open(struct inode *inode, struct file *file)
+{
+ return seq_open(file, &kbasep_trace_timeline_seq_ops);
+}
+
+static const struct file_operations kbasep_trace_timeline_debugfs_fops = {
+ .open = kbasep_trace_timeline_debugfs_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = seq_release,
+};
+
+#ifdef CONFIG_DEBUG_FS
+
+void kbasep_trace_timeline_debugfs_init(struct kbase_device *kbdev)
+{
+ debugfs_create_file("mali_timeline_defs",
+ S_IRUGO, kbdev->mali_debugfs_directory, NULL,
+ &kbasep_trace_timeline_debugfs_fops);
+}
+
+#endif /* CONFIG_DEBUG_FS */
+
+void kbase_timeline_job_slot_submit(struct kbase_device *kbdev, struct kbase_context *kctx,
+ struct kbase_jd_atom *katom, int js)
+{
+ lockdep_assert_held(&kbdev->hwaccess_lock);
+
+ if (kbdev->timeline.slot_atoms_submitted[js] > 0) {
+ KBASE_TIMELINE_JOB_START_NEXT(kctx, js, 1);
+ } else {
+ base_atom_id atom_number = kbase_jd_atom_id(kctx, katom);
+
+ KBASE_TIMELINE_JOB_START_HEAD(kctx, js, 1);
+ KBASE_TIMELINE_JOB_START(kctx, js, atom_number);
+ }
+ ++kbdev->timeline.slot_atoms_submitted[js];
+
+ KBASE_TIMELINE_ATOMS_SUBMITTED(kctx, js, kbdev->timeline.slot_atoms_submitted[js]);
+}
+
+void kbase_timeline_job_slot_done(struct kbase_device *kbdev, struct kbase_context *kctx,
+ struct kbase_jd_atom *katom, int js,
+ kbasep_js_atom_done_code done_code)
+{
+ lockdep_assert_held(&kbdev->hwaccess_lock);
+
+ if (done_code & KBASE_JS_ATOM_DONE_EVICTED_FROM_NEXT) {
+ KBASE_TIMELINE_JOB_START_NEXT(kctx, js, 0);
+ } else {
+ /* Job finished in JS_HEAD */
+ base_atom_id atom_number = kbase_jd_atom_id(kctx, katom);
+
+ KBASE_TIMELINE_JOB_START_HEAD(kctx, js, 0);
+ KBASE_TIMELINE_JOB_STOP(kctx, js, atom_number);
+
+ /* see if we need to trace the job in JS_NEXT moving to JS_HEAD */
+ if (kbase_backend_nr_atoms_submitted(kbdev, js)) {
+ struct kbase_jd_atom *next_katom;
+ struct kbase_context *next_kctx;
+
+ /* Peek the next atom - note that the atom in JS_HEAD will already
+ * have been dequeued */
+ next_katom = kbase_backend_inspect_head(kbdev, js);
+ WARN_ON(!next_katom);
+ next_kctx = next_katom->kctx;
+ KBASE_TIMELINE_JOB_START_NEXT(next_kctx, js, 0);
+ KBASE_TIMELINE_JOB_START_HEAD(next_kctx, js, 1);
+ KBASE_TIMELINE_JOB_START(next_kctx, js, kbase_jd_atom_id(next_kctx, next_katom));
+ }
+ }
+
+ --kbdev->timeline.slot_atoms_submitted[js];
+
+ KBASE_TIMELINE_ATOMS_SUBMITTED(kctx, js, kbdev->timeline.slot_atoms_submitted[js]);
+}
+
+void kbase_timeline_pm_send_event(struct kbase_device *kbdev, enum kbase_timeline_pm_event event_sent)
+{
+ int uid = 0;
+ int old_uid;
+
+ /* If a producer already exists for the event, try to use their UID (multiple-producers) */
+ uid = atomic_read(&kbdev->timeline.pm_event_uid[event_sent]);
+ old_uid = uid;
+
+ /* Get a new non-zero UID if we don't have one yet */
+ while (!uid)
+ uid = atomic_inc_return(&kbdev->timeline.pm_event_uid_counter);
+
+ /* Try to use this UID */
+ if (old_uid != atomic_cmpxchg(&kbdev->timeline.pm_event_uid[event_sent], old_uid, uid))
+ /* If it changed, raced with another producer: we've lost this UID */
+ uid = 0;
+
+ KBASE_TIMELINE_PM_SEND_EVENT(kbdev, event_sent, uid);
+}
+
+void kbase_timeline_pm_check_handle_event(struct kbase_device *kbdev, enum kbase_timeline_pm_event event)
+{
+ int uid = atomic_read(&kbdev->timeline.pm_event_uid[event]);
+
+ if (uid != 0) {
+ if (uid != atomic_cmpxchg(&kbdev->timeline.pm_event_uid[event], uid, 0))
+ /* If it changed, raced with another consumer: we've lost this UID */
+ uid = 0;
+
+ KBASE_TIMELINE_PM_HANDLE_EVENT(kbdev, event, uid);
+ }
+}
+
+void kbase_timeline_pm_handle_event(struct kbase_device *kbdev, enum kbase_timeline_pm_event event)
+{
+ int uid = atomic_read(&kbdev->timeline.pm_event_uid[event]);
+
+ if (uid != atomic_cmpxchg(&kbdev->timeline.pm_event_uid[event], uid, 0))
+ /* If it changed, raced with another consumer: we've lost this UID */
+ uid = 0;
+
+ KBASE_TIMELINE_PM_HANDLE_EVENT(kbdev, event, uid);
+}
+
+void kbase_timeline_pm_l2_transition_start(struct kbase_device *kbdev)
+{
+ lockdep_assert_held(&kbdev->hwaccess_lock);
+ /* Simply log the start of the transition */
+ kbdev->timeline.l2_transitioning = true;
+ KBASE_TIMELINE_POWERING_L2(kbdev);
+}
+
+void kbase_timeline_pm_l2_transition_done(struct kbase_device *kbdev)
+{
+ lockdep_assert_held(&kbdev->hwaccess_lock);
+ /* Simply log the end of the transition */
+ if (kbdev->timeline.l2_transitioning) {
+ kbdev->timeline.l2_transitioning = false;
+ KBASE_TIMELINE_POWERED_L2(kbdev);
+ }
+}
+
+#endif /* CONFIG_MALI_TRACE_TIMELINE */
diff --git a/midgard-0.r2p0/mali_kbase_trace_timeline.h b/midgard-0.r2p0/mali_kbase_trace_timeline.h
new file mode 100755
index 0000000..619072f
--- /dev/null
+++ b/midgard-0.r2p0/mali_kbase_trace_timeline.h
@@ -0,0 +1,363 @@
+/*
+ *
+ * (C) COPYRIGHT 2012-2016 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained
+ * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
+ * Boston, MA 02110-1301, USA.
+ *
+ */
+
+
+
+
+
+#if !defined(_KBASE_TRACE_TIMELINE_H)
+#define _KBASE_TRACE_TIMELINE_H
+
+#ifdef CONFIG_MALI_TRACE_TIMELINE
+
+enum kbase_trace_timeline_code {
+ #define KBASE_TIMELINE_TRACE_CODE(enum_val, desc, format, format_desc) enum_val
+ #include "mali_kbase_trace_timeline_defs.h"
+ #undef KBASE_TIMELINE_TRACE_CODE
+};
+
+#ifdef CONFIG_DEBUG_FS
+
+/** Initialize Timeline DebugFS entries */
+void kbasep_trace_timeline_debugfs_init(struct kbase_device *kbdev);
+
+#else /* CONFIG_DEBUG_FS */
+
+#define kbasep_trace_timeline_debugfs_init CSTD_NOP
+
+#endif /* CONFIG_DEBUG_FS */
+
+/* mali_timeline.h defines kernel tracepoints used by the KBASE_TIMELINE
+ * functions.
+ * Output is timestamped by either sched_clock() (default), local_clock(), or
+ * cpu_clock(), depending on /sys/kernel/debug/tracing/trace_clock */
+#include "mali_timeline.h"
+
+/* Trace number of atoms in flight for kctx (atoms either not completed, or in
+ the process of being returned to user) */
+#define KBASE_TIMELINE_ATOMS_IN_FLIGHT(kctx, count) \
+ do { \
+ struct timespec ts; \
+ getrawmonotonic(&ts); \
+ trace_mali_timeline_atoms_in_flight(ts.tv_sec, ts.tv_nsec, \
+ (int)kctx->timeline.owner_tgid, \
+ count); \
+ } while (0)
+
+/* Trace atom_id being Ready to Run */
+#define KBASE_TIMELINE_ATOM_READY(kctx, atom_id) \
+ do { \
+ struct timespec ts; \
+ getrawmonotonic(&ts); \
+ trace_mali_timeline_atom(ts.tv_sec, ts.tv_nsec, \
+ CTX_FLOW_ATOM_READY, \
+ (int)kctx->timeline.owner_tgid, \
+ atom_id); \
+ } while (0)
+
+/* Trace number of atoms submitted to job slot js
+ *
+ * NOTE: This uses a different tracepoint to the head/next/soft-stop actions,
+ * so that those actions can be filtered out separately from this
+ *
+ * This is more useful because we can use it to calculate general
+ * utilization easily and accurately */
+#define KBASE_TIMELINE_ATOMS_SUBMITTED(kctx, js, count) \
+ do { \
+ struct timespec ts; \
+ getrawmonotonic(&ts); \
+ trace_mali_timeline_gpu_slot_active(ts.tv_sec, ts.tv_nsec, \
+ SW_SET_GPU_SLOT_ACTIVE, \
+ (int)kctx->timeline.owner_tgid, \
+ js, count); \
+ } while (0)
+
+
+/* Trace atoms present in JS_NEXT */
+#define KBASE_TIMELINE_JOB_START_NEXT(kctx, js, count) \
+ do { \
+ struct timespec ts; \
+ getrawmonotonic(&ts); \
+ trace_mali_timeline_gpu_slot_action(ts.tv_sec, ts.tv_nsec, \
+ SW_SET_GPU_SLOT_NEXT, \
+ (int)kctx->timeline.owner_tgid, \
+ js, count); \
+ } while (0)
+
+/* Trace atoms present in JS_HEAD */
+#define KBASE_TIMELINE_JOB_START_HEAD(kctx, js, count) \
+ do { \
+ struct timespec ts; \
+ getrawmonotonic(&ts); \
+ trace_mali_timeline_gpu_slot_action(ts.tv_sec, ts.tv_nsec, \
+ SW_SET_GPU_SLOT_HEAD, \
+ (int)kctx->timeline.owner_tgid, \
+ js, count); \
+ } while (0)
+
+/* Trace that a soft stop/evict from next is being attempted on a slot */
+#define KBASE_TIMELINE_TRY_SOFT_STOP(kctx, js, count) \
+ do { \
+ struct timespec ts; \
+ getrawmonotonic(&ts); \
+ trace_mali_timeline_gpu_slot_action(ts.tv_sec, ts.tv_nsec, \
+ SW_SET_GPU_SLOT_STOPPING, \
+ (kctx) ? (int)kctx->timeline.owner_tgid : 0, \
+ js, count); \
+ } while (0)
+
+
+
+/* Trace state of overall GPU power */
+#define KBASE_TIMELINE_GPU_POWER(kbdev, active) \
+ do { \
+ struct timespec ts; \
+ getrawmonotonic(&ts); \
+ trace_mali_timeline_gpu_power_active(ts.tv_sec, ts.tv_nsec, \
+ SW_SET_GPU_POWER_ACTIVE, active); \
+ } while (0)
+
+/* Trace state of tiler power */
+#define KBASE_TIMELINE_POWER_TILER(kbdev, bitmap) \
+ do { \
+ struct timespec ts; \
+ getrawmonotonic(&ts); \
+ trace_mali_timeline_gpu_power_active(ts.tv_sec, ts.tv_nsec, \
+ SW_SET_GPU_POWER_TILER_ACTIVE, \
+ hweight64(bitmap)); \
+ } while (0)
+
+/* Trace number of shaders currently powered */
+#define KBASE_TIMELINE_POWER_SHADER(kbdev, bitmap) \
+ do { \
+ struct timespec ts; \
+ getrawmonotonic(&ts); \
+ trace_mali_timeline_gpu_power_active(ts.tv_sec, ts.tv_nsec, \
+ SW_SET_GPU_POWER_SHADER_ACTIVE, \
+ hweight64(bitmap)); \
+ } while (0)
+
+/* Trace state of L2 power */
+#define KBASE_TIMELINE_POWER_L2(kbdev, bitmap) \
+ do { \
+ struct timespec ts; \
+ getrawmonotonic(&ts); \
+ trace_mali_timeline_gpu_power_active(ts.tv_sec, ts.tv_nsec, \
+ SW_SET_GPU_POWER_L2_ACTIVE, \
+ hweight64(bitmap)); \
+ } while (0)
+
+/* Trace state of L2 cache */
+#define KBASE_TIMELINE_POWERING_L2(kbdev) \
+ do { \
+ struct timespec ts; \
+ getrawmonotonic(&ts); \
+ trace_mali_timeline_l2_power_active(ts.tv_sec, ts.tv_nsec, \
+ SW_FLOW_GPU_POWER_L2_POWERING, \
+ 1); \
+ } while (0)
+
+#define KBASE_TIMELINE_POWERED_L2(kbdev) \
+ do { \
+ struct timespec ts; \
+ getrawmonotonic(&ts); \
+ trace_mali_timeline_l2_power_active(ts.tv_sec, ts.tv_nsec, \
+ SW_FLOW_GPU_POWER_L2_ACTIVE, \
+ 1); \
+ } while (0)
+
+/* Trace kbase_pm_send_event message send */
+#define KBASE_TIMELINE_PM_SEND_EVENT(kbdev, event_type, pm_event_id) \
+ do { \
+ struct timespec ts; \
+ getrawmonotonic(&ts); \
+ trace_mali_timeline_pm_event(ts.tv_sec, ts.tv_nsec, \
+ SW_FLOW_PM_SEND_EVENT, \
+ event_type, pm_event_id); \
+ } while (0)
+
+/* Trace kbase_pm_worker message receive */
+#define KBASE_TIMELINE_PM_HANDLE_EVENT(kbdev, event_type, pm_event_id) \
+ do { \
+ struct timespec ts; \
+ getrawmonotonic(&ts); \
+ trace_mali_timeline_pm_event(ts.tv_sec, ts.tv_nsec, \
+ SW_FLOW_PM_HANDLE_EVENT, \
+ event_type, pm_event_id); \
+ } while (0)
+
+
+/* Trace atom_id starting in JS_HEAD */
+#define KBASE_TIMELINE_JOB_START(kctx, js, _consumerof_atom_number) \
+ do { \
+ struct timespec ts; \
+ getrawmonotonic(&ts); \
+ trace_mali_timeline_slot_atom(ts.tv_sec, ts.tv_nsec, \
+ HW_START_GPU_JOB_CHAIN_SW_APPROX, \
+ (int)kctx->timeline.owner_tgid, \
+ js, _consumerof_atom_number); \
+ } while (0)
+
+/* Trace atom_id stopping on JS_HEAD */
+#define KBASE_TIMELINE_JOB_STOP(kctx, js, _producerof_atom_number_completed) \
+ do { \
+ struct timespec ts; \
+ getrawmonotonic(&ts); \
+ trace_mali_timeline_slot_atom(ts.tv_sec, ts.tv_nsec, \
+ HW_STOP_GPU_JOB_CHAIN_SW_APPROX, \
+ (int)kctx->timeline.owner_tgid, \
+ js, _producerof_atom_number_completed); \
+ } while (0)
+
+/** Trace beginning/end of a call to kbase_pm_check_transitions_nolock from a
+ * certain caller */
+#define KBASE_TIMELINE_PM_CHECKTRANS(kbdev, trace_code) \
+ do { \
+ struct timespec ts; \
+ getrawmonotonic(&ts); \
+ trace_mali_timeline_pm_checktrans(ts.tv_sec, ts.tv_nsec, \
+ trace_code, 1); \
+ } while (0)
+
+/* Trace number of contexts active */
+#define KBASE_TIMELINE_CONTEXT_ACTIVE(kbdev, count) \
+ do { \
+ struct timespec ts; \
+ getrawmonotonic(&ts); \
+ trace_mali_timeline_context_active(ts.tv_sec, ts.tv_nsec, \
+ count); \
+ } while (0)
+
+/* NOTE: kbase_timeline_pm_cores_func() is in mali_kbase_pm_policy.c */
+
+/**
+ * Trace that an atom is starting on a job slot
+ *
+ * The caller must be holding hwaccess_lock
+ */
+void kbase_timeline_job_slot_submit(struct kbase_device *kbdev, struct kbase_context *kctx,
+ struct kbase_jd_atom *katom, int js);
+
+/**
+ * Trace that an atom is done on a job slot
+ *
+ * 'Done' in this sense can occur either because:
+ * - the atom in JS_HEAD finished
+ * - the atom in JS_NEXT was evicted
+ *
+ * Whether the atom finished or was evicted is passed in @a done_code
+ *
+ * It is assumed that the atom has already been removed from the submit slot,
+ * with either:
+ * - kbasep_jm_dequeue_submit_slot()
+ * - kbasep_jm_dequeue_tail_submit_slot()
+ *
+ * The caller must be holding hwaccess_lock
+ */
+void kbase_timeline_job_slot_done(struct kbase_device *kbdev, struct kbase_context *kctx,
+ struct kbase_jd_atom *katom, int js,
+ kbasep_js_atom_done_code done_code);
+
+
+/** Trace a pm event starting */
+void kbase_timeline_pm_send_event(struct kbase_device *kbdev,
+ enum kbase_timeline_pm_event event_sent);
+
+/** Trace a pm event finishing */
+void kbase_timeline_pm_check_handle_event(struct kbase_device *kbdev, enum kbase_timeline_pm_event event);
+
+/** Check whether a pm event was present, and if so trace finishing it */
+void kbase_timeline_pm_handle_event(struct kbase_device *kbdev, enum kbase_timeline_pm_event event);
+
+/** Trace L2 power-up start */
+void kbase_timeline_pm_l2_transition_start(struct kbase_device *kbdev);
+
+/** Trace L2 power-up done */
+void kbase_timeline_pm_l2_transition_done(struct kbase_device *kbdev);
+
+#else
+
+#define KBASE_TIMELINE_ATOMS_IN_FLIGHT(kctx, count) CSTD_NOP()
+
+#define KBASE_TIMELINE_ATOM_READY(kctx, atom_id) CSTD_NOP()
+
+#define KBASE_TIMELINE_ATOMS_SUBMITTED(kctx, js, count) CSTD_NOP()
+
+#define KBASE_TIMELINE_JOB_START_NEXT(kctx, js, count) CSTD_NOP()
+
+#define KBASE_TIMELINE_JOB_START_HEAD(kctx, js, count) CSTD_NOP()
+
+#define KBASE_TIMELINE_TRY_SOFT_STOP(kctx, js, count) CSTD_NOP()
+
+#define KBASE_TIMELINE_GPU_POWER(kbdev, active) CSTD_NOP()
+
+#define KBASE_TIMELINE_POWER_TILER(kbdev, bitmap) CSTD_NOP()
+
+#define KBASE_TIMELINE_POWER_SHADER(kbdev, bitmap) CSTD_NOP()
+
+#define KBASE_TIMELINE_POWER_L2(kbdev, active) CSTD_NOP()
+
+#define KBASE_TIMELINE_POWERING_L2(kbdev) CSTD_NOP()
+
+#define KBASE_TIMELINE_POWERED_L2(kbdev) CSTD_NOP()
+
+#define KBASE_TIMELINE_PM_SEND_EVENT(kbdev, event_type, pm_event_id) CSTD_NOP()
+
+#define KBASE_TIMELINE_PM_HANDLE_EVENT(kbdev, event_type, pm_event_id) CSTD_NOP()
+
+#define KBASE_TIMELINE_JOB_START(kctx, js, _consumerof_atom_number) CSTD_NOP()
+
+#define KBASE_TIMELINE_JOB_STOP(kctx, js, _producerof_atom_number_completed) CSTD_NOP()
+
+#define KBASE_TIMELINE_PM_CHECKTRANS(kbdev, trace_code) CSTD_NOP()
+
+#define KBASE_TIMELINE_CONTEXT_ACTIVE(kbdev, count) CSTD_NOP()
+
+static inline void kbase_timeline_job_slot_submit(struct kbase_device *kbdev, struct kbase_context *kctx,
+ struct kbase_jd_atom *katom, int js)
+{
+ lockdep_assert_held(&kbdev->hwaccess_lock);
+}
+
+static inline void kbase_timeline_job_slot_done(struct kbase_device *kbdev, struct kbase_context *kctx,
+ struct kbase_jd_atom *katom, int js,
+ kbasep_js_atom_done_code done_code)
+{
+ lockdep_assert_held(&kbdev->hwaccess_lock);
+}
+
+static inline void kbase_timeline_pm_send_event(struct kbase_device *kbdev, enum kbase_timeline_pm_event event_sent)
+{
+}
+
+static inline void kbase_timeline_pm_check_handle_event(struct kbase_device *kbdev, enum kbase_timeline_pm_event event)
+{
+}
+
+static inline void kbase_timeline_pm_handle_event(struct kbase_device *kbdev, enum kbase_timeline_pm_event event)
+{
+}
+
+static inline void kbase_timeline_pm_l2_transition_start(struct kbase_device *kbdev)
+{
+}
+
+static inline void kbase_timeline_pm_l2_transition_done(struct kbase_device *kbdev)
+{
+}
+#endif /* CONFIG_MALI_TRACE_TIMELINE */
+
+#endif /* _KBASE_TRACE_TIMELINE_H */
+
diff --git a/midgard-0.r2p0/mali_kbase_trace_timeline_defs.h b/midgard-0.r2p0/mali_kbase_trace_timeline_defs.h
new file mode 100755
index 0000000..156a95a
--- /dev/null
+++ b/midgard-0.r2p0/mali_kbase_trace_timeline_defs.h
@@ -0,0 +1,140 @@
+/*
+ *
+ * (C) COPYRIGHT 2012-2015 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained
+ * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
+ * Boston, MA 02110-1301, USA.
+ *
+ */
+
+
+
+
+
+/* ***** IMPORTANT: THIS IS NOT A NORMAL HEADER FILE *****
+ * ***** DO NOT INCLUDE DIRECTLY *****
+ * ***** THE LACK OF HEADER GUARDS IS INTENTIONAL ***** */
+
+/*
+ * Conventions on Event Names:
+ *
+ * - The prefix determines something about how the timeline should be
+ * displayed, and is split up into various parts, separated by underscores:
+ * - 'SW' and 'HW' as the first part will be used to determine whether a
+ * timeline is to do with Software or Hardware - effectively, separate
+ * 'channels' for Software and Hardware
+ * - 'START', 'STOP', 'ENTER', 'LEAVE' can be used in the second part, and
+ * signify related pairs of events - these are optional.
+ * - 'FLOW' indicates a generic event, which can use dependencies
+ * - This gives events such as:
+ * - 'SW_ENTER_FOO'
+ * - 'SW_LEAVE_FOO'
+ * - 'SW_FLOW_BAR_1'
+ * - 'SW_FLOW_BAR_2'
+ * - 'HW_START_BAZ'
+ * - 'HW_STOP_BAZ'
+ * - And an unadorned HW event:
+ * - 'HW_BAZ_FROZBOZ'
+ */
+
+/*
+ * Conventions on parameter names:
+ * - anything with 'instance' in the name will have a separate timeline based
+ * on that instance.
+ * - underscore-prefixed parameters will be hidden by default on timelines
+ *
+ * Hence:
+ * - Different job slots have their own 'instance', based on the instance value
+ * - Per-context info (e.g. atoms on a context) have their own 'instance'
+ * (i.e. each context should be on a different timeline)
+ *
+ * Note that globally-shared resources can be tagged with a tgid, but we don't
+ * want an instance per context:
+ * - There's no point having separate Job Slot timelines for each context, that
+ * would be confusing - there are really only 3 job slots!
+ * - There's no point having separate Shader-powered timelines for each
+ * context, that would be confusing - all shader cores (whether it be 4, 8,
+ * etc) are shared in the system.
+ */
+
+ /*
+ * CTX events
+ */
+ /* Separate timelines for each context 'instance' */
+ KBASE_TIMELINE_TRACE_CODE(CTX_SET_NR_ATOMS_IN_FLIGHT, "CTX: Atoms in flight", "%d,%d", "_instance_tgid,_value_number_of_atoms"),
+ KBASE_TIMELINE_TRACE_CODE(CTX_FLOW_ATOM_READY, "CTX: Atoms Ready to Run", "%d,%d,%d", "_instance_tgid,_consumerof_atom_number,_producerof_atom_number_ready"),
+
+ /*
+ * SW Events
+ */
+ /* Separate timelines for each slot 'instance' */
+ KBASE_TIMELINE_TRACE_CODE(SW_SET_GPU_SLOT_ACTIVE, "SW: GPU slot active", "%d,%d,%d", "_tgid,_instance_slot,_value_number_of_atoms"),
+ KBASE_TIMELINE_TRACE_CODE(SW_SET_GPU_SLOT_NEXT, "SW: GPU atom in NEXT", "%d,%d,%d", "_tgid,_instance_slot,_value_is_an_atom_in_next"),
+ KBASE_TIMELINE_TRACE_CODE(SW_SET_GPU_SLOT_HEAD, "SW: GPU atom in HEAD", "%d,%d,%d", "_tgid,_instance_slot,_value_is_an_atom_in_head"),
+ KBASE_TIMELINE_TRACE_CODE(SW_SET_GPU_SLOT_STOPPING, "SW: Try Soft-Stop on GPU slot", "%d,%d,%d", "_tgid,_instance_slot,_value_is_slot_stopping"),
+ /* Shader and overall power are shared - can't have separate instances of
+ * it, just tagging with the context */
+ KBASE_TIMELINE_TRACE_CODE(SW_SET_GPU_POWER_ACTIVE, "SW: GPU power active", "%d,%d", "_tgid,_value_is_power_active"),
+ KBASE_TIMELINE_TRACE_CODE(SW_SET_GPU_POWER_TILER_ACTIVE, "SW: GPU tiler powered", "%d,%d", "_tgid,_value_number_of_tilers"),
+ KBASE_TIMELINE_TRACE_CODE(SW_SET_GPU_POWER_SHADER_ACTIVE, "SW: GPU shaders powered", "%d,%d", "_tgid,_value_number_of_shaders"),
+ KBASE_TIMELINE_TRACE_CODE(SW_SET_GPU_POWER_L2_ACTIVE, "SW: GPU L2 powered", "%d,%d", "_tgid,_value_number_of_l2"),
+
+ /* SW Power event messaging. _event_type is one from the kbase_pm_event enum */
+ KBASE_TIMELINE_TRACE_CODE(SW_FLOW_PM_SEND_EVENT, "SW: PM Send Event", "%d,%d,%d", "_tgid,_event_type,_writerof_pm_event_id"),
+ KBASE_TIMELINE_TRACE_CODE(SW_FLOW_PM_HANDLE_EVENT, "SW: PM Handle Event", "%d,%d,%d", "_tgid,_event_type,_finalconsumerof_pm_event_id"),
+ /* SW L2 power events */
+ KBASE_TIMELINE_TRACE_CODE(SW_FLOW_GPU_POWER_L2_POWERING, "SW: GPU L2 powering", "%d,%d", "_tgid,_writerof_l2_transitioning"),
+ KBASE_TIMELINE_TRACE_CODE(SW_FLOW_GPU_POWER_L2_ACTIVE, "SW: GPU L2 powering done", "%d,%d", "_tgid,_finalconsumerof_l2_transitioning"),
+
+ KBASE_TIMELINE_TRACE_CODE(SW_SET_CONTEXT_ACTIVE, "SW: Context Active", "%d,%d", "_tgid,_value_active"),
+
+ /*
+ * BEGIN: Significant SW Functions that call kbase_pm_check_transitions_nolock()
+ */
+ KBASE_TIMELINE_TRACE_CODE(SW_FLOW_PM_CHECKTRANS_PM_DO_POWEROFF_START, "SW: PM CheckTrans from kbase_pm_do_poweroff", "%d,%d", "_tgid,_writerof_pm_checktrans_pm_do_poweroff"),
+ KBASE_TIMELINE_TRACE_CODE(SW_FLOW_PM_CHECKTRANS_PM_DO_POWEROFF_END, "SW: PM CheckTrans from kbase_pm_do_poweroff", "%d,%d", "_tgid,_finalconsumerof_pm_checktrans_pm_do_poweroff"),
+ KBASE_TIMELINE_TRACE_CODE(SW_FLOW_PM_CHECKTRANS_PM_DO_POWERON_START, "SW: PM CheckTrans from kbase_pm_do_poweron", "%d,%d", "_tgid,_writerof_pm_checktrans_pm_do_poweron"),
+ KBASE_TIMELINE_TRACE_CODE(SW_FLOW_PM_CHECKTRANS_PM_DO_POWERON_END, "SW: PM CheckTrans from kbase_pm_do_poweron", "%d,%d", "_tgid,_finalconsumerof_pm_checktrans_pm_do_poweron"),
+ KBASE_TIMELINE_TRACE_CODE(SW_FLOW_PM_CHECKTRANS_GPU_INTERRUPT_START, "SW: PM CheckTrans from kbase_gpu_interrupt", "%d,%d", "_tgid,_writerof_pm_checktrans_gpu_interrupt"),
+ KBASE_TIMELINE_TRACE_CODE(SW_FLOW_PM_CHECKTRANS_GPU_INTERRUPT_END, "SW: PM CheckTrans from kbase_gpu_interrupt", "%d,%d", "_tgid,_finalconsumerof_pm_checktrans_gpu_interrupt"),
+
+ /*
+ * Significant Indirect callers of kbase_pm_check_transitions_nolock()
+ */
+ /* kbase_pm_request_cores */
+ KBASE_TIMELINE_TRACE_CODE(SW_FLOW_PM_CHECKTRANS_PM_REQUEST_CORES_SHADER_START, "SW: PM CheckTrans from kbase_pm_request_cores(shader)", "%d,%d", "_tgid,_writerof_pm_checktrans_pm_request_cores_shader"),
+ KBASE_TIMELINE_TRACE_CODE(SW_FLOW_PM_CHECKTRANS_PM_REQUEST_CORES_SHADER_END, "SW: PM CheckTrans from kbase_pm_request_cores(shader)", "%d,%d", "_tgid,_finalconsumerof_pm_checktrans_pm_request_cores_shader"),
+ KBASE_TIMELINE_TRACE_CODE(SW_FLOW_PM_CHECKTRANS_PM_REQUEST_CORES_TILER_START, "SW: PM CheckTrans from kbase_pm_request_cores(tiler)", "%d,%d", "_tgid,_writerof_pm_checktrans_pm_request_cores_tiler"),
+ KBASE_TIMELINE_TRACE_CODE(SW_FLOW_PM_CHECKTRANS_PM_REQUEST_CORES_TILER_END, "SW: PM CheckTrans from kbase_pm_request_cores(tiler)", "%d,%d", "_tgid,_finalconsumerof_pm_checktrans_pm_request_cores_tiler"),
+ KBASE_TIMELINE_TRACE_CODE(SW_FLOW_PM_CHECKTRANS_PM_REQUEST_CORES_SHADER_TILER_START, "SW: PM CheckTrans from kbase_pm_request_cores(shader+tiler)", "%d,%d", "_tgid,_writerof_pm_checktrans_pm_request_cores_shader_tiler"),
+ KBASE_TIMELINE_TRACE_CODE(SW_FLOW_PM_CHECKTRANS_PM_REQUEST_CORES_SHADER_TILER_END, "SW: PM CheckTrans from kbase_pm_request_cores(shader+tiler)", "%d,%d", "_tgid,_finalconsumerof_pm_checktrans_pm_request_cores_shader_tiler"),
+ /* kbase_pm_release_cores */
+ KBASE_TIMELINE_TRACE_CODE(SW_FLOW_PM_CHECKTRANS_PM_RELEASE_CORES_SHADER_START, "SW: PM CheckTrans from kbase_pm_release_cores(shader)", "%d,%d", "_tgid,_writerof_pm_checktrans_pm_release_cores_shader"),
+ KBASE_TIMELINE_TRACE_CODE(SW_FLOW_PM_CHECKTRANS_PM_RELEASE_CORES_SHADER_END, "SW: PM CheckTrans from kbase_pm_release_cores(shader)", "%d,%d", "_tgid,_finalconsumerof_pm_checktrans_pm_release_cores_shader"),
+ KBASE_TIMELINE_TRACE_CODE(SW_FLOW_PM_CHECKTRANS_PM_RELEASE_CORES_TILER_START, "SW: PM CheckTrans from kbase_pm_release_cores(tiler)", "%d,%d", "_tgid,_writerof_pm_checktrans_pm_release_cores_tiler"),
+ KBASE_TIMELINE_TRACE_CODE(SW_FLOW_PM_CHECKTRANS_PM_RELEASE_CORES_TILER_END, "SW: PM CheckTrans from kbase_pm_release_cores(tiler)", "%d,%d", "_tgid,_finalconsumerof_pm_checktrans_pm_release_cores_tiler"),
+ KBASE_TIMELINE_TRACE_CODE(SW_FLOW_PM_CHECKTRANS_PM_RELEASE_CORES_SHADER_TILER_START, "SW: PM CheckTrans from kbase_pm_release_cores(shader+tiler)", "%d,%d", "_tgid,_writerof_pm_checktrans_pm_release_cores_shader_tiler"),
+ KBASE_TIMELINE_TRACE_CODE(SW_FLOW_PM_CHECKTRANS_PM_RELEASE_CORES_SHADER_TILER_END, "SW: PM CheckTrans from kbase_pm_release_cores(shader+tiler)", "%d,%d", "_tgid,_finalconsumerof_pm_checktrans_pm_release_cores_shader_tiler"),
+ KBASE_TIMELINE_TRACE_CODE(SW_FLOW_PM_CHECKTRANS_PM_RELEASE_CORES_DEFERRED_START, "SW: PM CheckTrans from kbasep_pm_do_shader_poweroff_callback", "%d,%d", "_tgid,_writerof_pm_checktrans_pm_do_shader_poweroff_callback"),
+ KBASE_TIMELINE_TRACE_CODE(SW_FLOW_PM_CHECKTRANS_PM_RELEASE_CORES_DEFERRED_END, "SW: PM CheckTrans from kbasep_pm_do_shader_poweroff_callback", "%d,%d", "_tgid,_finalconsumerof_pm_checktrans_pm_do_shader_poweroff_callback"),
+ /*
+ * END: SW Functions that call kbase_pm_check_transitions_nolock()
+ */
+
+ /*
+ * HW Events
+ */
+ KBASE_TIMELINE_TRACE_CODE(HW_MMU_FAULT,
+"HW: MMU Fault", "%d,%d,%d", "_tgid,fault_type,fault_stage,asid"),
+ KBASE_TIMELINE_TRACE_CODE(HW_START_GPU_JOB_CHAIN_SW_APPROX,
+"HW: Job Chain start (SW approximated)", "%d,%d,%d",
+"_tgid,job_slot,_consumerof_atom_number_ready"),
+ KBASE_TIMELINE_TRACE_CODE(HW_STOP_GPU_JOB_CHAIN_SW_APPROX,
+"HW: Job Chain stop (SW approximated)", "%d,%d,%d",
+"_tgid,job_slot,_producerof_atom_number_completed")
diff --git a/midgard-0.r2p0/mali_kbase_uku.h b/midgard-0.r2p0/mali_kbase_uku.h
new file mode 100755
index 0000000..0d3e45d
--- /dev/null
+++ b/midgard-0.r2p0/mali_kbase_uku.h
@@ -0,0 +1,551 @@
+/*
+ *
+ * (C) COPYRIGHT 2008-2016 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained
+ * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
+ * Boston, MA 02110-1301, USA.
+ *
+ */
+
+
+
+
+
+#ifndef _KBASE_UKU_H_
+#define _KBASE_UKU_H_
+
+#include "mali_uk.h"
+#include "mali_base_kernel.h"
+
+/* This file needs to support being included from kernel and userside (which use different defines) */
+#if defined(CONFIG_MALI_ERROR_INJECT) || MALI_ERROR_INJECT_ON
+#define SUPPORT_MALI_ERROR_INJECT
+#endif /* defined(CONFIG_MALI_ERROR_INJECT) || MALI_ERROR_INJECT_ON */
+#if defined(CONFIG_MALI_NO_MALI)
+#define SUPPORT_MALI_NO_MALI
+#elif defined(MALI_NO_MALI)
+#if MALI_NO_MALI
+#define SUPPORT_MALI_NO_MALI
+#endif
+#endif
+
+#if defined(SUPPORT_MALI_NO_MALI) || defined(SUPPORT_MALI_ERROR_INJECT)
+#include "backend/gpu/mali_kbase_model_dummy.h"
+#endif
+
+#include "mali_kbase_gpuprops_types.h"
+
+/*
+ * 10.1:
+ * - Do mmap in kernel for SAME_VA memory allocations rather than
+ * calling back into the kernel as a 2nd stage of the allocation request.
+ *
+ * 10.2:
+ * - Add KBASE_FUNC_MEM_JIT_INIT which allows clients to request a custom VA
+ * region for use with JIT (ignored on 32-bit platforms)
+ *
+ * 10.3:
+ * - base_jd_core_req typedef-ed to u32 (instead of to u16)
+ * - two flags added: BASE_JD_REQ_SKIP_CACHE_START / _END
+ *
+ * 10.4:
+ * - Removed KBASE_FUNC_EXT_BUFFER_LOCK used only in internal tests
+ *
+ * 10.5:
+ * - Reverted to performing mmap in user space so that tools like valgrind work.
+ *
+ * 10.6:
+ * - Add flags input variable to KBASE_FUNC_TLSTREAM_ACQUIRE
+ */
+#define BASE_UK_VERSION_MAJOR 10
+#define BASE_UK_VERSION_MINOR 6
+
+struct kbase_uk_mem_alloc {
+ union uk_header header;
+ /* IN */
+ u64 va_pages;
+ u64 commit_pages;
+ u64 extent;
+ /* IN/OUT */
+ u64 flags;
+ /* OUT */
+ u64 gpu_va;
+ u16 va_alignment;
+ u8 padding[6];
+};
+
+struct kbase_uk_mem_free {
+ union uk_header header;
+ /* IN */
+ u64 gpu_addr;
+ /* OUT */
+};
+
+struct kbase_uk_mem_alias {
+ union uk_header header;
+ /* IN/OUT */
+ u64 flags;
+ /* IN */
+ u64 stride;
+ u64 nents;
+ union kbase_pointer ai;
+ /* OUT */
+ u64 gpu_va;
+ u64 va_pages;
+};
+
+struct kbase_uk_mem_import {
+ union uk_header header;
+ /* IN */
+ union kbase_pointer phandle;
+ u32 type;
+ u32 padding;
+ /* IN/OUT */
+ u64 flags;
+ /* OUT */
+ u64 gpu_va;
+ u64 va_pages;
+};
+
+struct kbase_uk_mem_flags_change {
+ union uk_header header;
+ /* IN */
+ u64 gpu_va;
+ u64 flags;
+ u64 mask;
+};
+
+struct kbase_uk_job_submit {
+ union uk_header header;
+ /* IN */
+ union kbase_pointer addr;
+ u32 nr_atoms;
+ u32 stride; /* bytes between atoms, i.e. sizeof(base_jd_atom_v2) */
+ /* OUT */
+};
+
+struct kbase_uk_post_term {
+ union uk_header header;
+};
+
+struct kbase_uk_sync_now {
+ union uk_header header;
+
+ /* IN */
+ struct base_syncset sset;
+
+ /* OUT */
+};
+
+struct kbase_uk_hwcnt_setup {
+ union uk_header header;
+
+ /* IN */
+ u64 dump_buffer;
+ u32 jm_bm;
+ u32 shader_bm;
+ u32 tiler_bm;
+ u32 unused_1; /* keep for backwards compatibility */
+ u32 mmu_l2_bm;
+ u32 padding;
+ /* OUT */
+};
+
+/**
+ * struct kbase_uk_hwcnt_reader_setup - User/Kernel space data exchange structure
+ * @header: UK structure header
+ * @buffer_count: requested number of dumping buffers
+ * @jm_bm: counters selection bitmask (JM)
+ * @shader_bm: counters selection bitmask (Shader)
+ * @tiler_bm: counters selection bitmask (Tiler)
+ * @mmu_l2_bm: counters selection bitmask (MMU_L2)
+ * @fd: dumping notification file descriptor
+ *
+ * This structure sets up HWC dumper/reader for this context.
+ * Multiple instances can be created for a single context.
+ */
+struct kbase_uk_hwcnt_reader_setup {
+ union uk_header header;
+
+ /* IN */
+ u32 buffer_count;
+ u32 jm_bm;
+ u32 shader_bm;
+ u32 tiler_bm;
+ u32 mmu_l2_bm;
+
+ /* OUT */
+ s32 fd;
+};
+
+struct kbase_uk_hwcnt_dump {
+ union uk_header header;
+};
+
+struct kbase_uk_hwcnt_clear {
+ union uk_header header;
+};
+
+struct kbase_uk_fence_validate {
+ union uk_header header;
+ /* IN */
+ s32 fd;
+ u32 padding;
+ /* OUT */
+};
+
+struct kbase_uk_stream_create {
+ union uk_header header;
+ /* IN */
+ char name[32];
+ /* OUT */
+ s32 fd;
+ u32 padding;
+};
+
+struct kbase_uk_gpuprops {
+ union uk_header header;
+
+ /* IN */
+ struct mali_base_gpu_props props;
+ /* OUT */
+};
+
+struct kbase_uk_mem_query {
+ union uk_header header;
+ /* IN */
+ u64 gpu_addr;
+#define KBASE_MEM_QUERY_COMMIT_SIZE 1
+#define KBASE_MEM_QUERY_VA_SIZE 2
+#define KBASE_MEM_QUERY_FLAGS 3
+ u64 query;
+ /* OUT */
+ u64 value;
+};
+
+struct kbase_uk_mem_commit {
+ union uk_header header;
+ /* IN */
+ u64 gpu_addr;
+ u64 pages;
+ /* OUT */
+ u32 result_subcode;
+ u32 padding;
+};
+
+struct kbase_uk_find_cpu_offset {
+ union uk_header header;
+ /* IN */
+ u64 gpu_addr;
+ u64 cpu_addr;
+ u64 size;
+ /* OUT */
+ u64 offset;
+};
+
+#define KBASE_GET_VERSION_BUFFER_SIZE 64
+struct kbase_uk_get_ddk_version {
+ union uk_header header;
+ /* OUT */
+ char version_buffer[KBASE_GET_VERSION_BUFFER_SIZE];
+ u32 version_string_size;
+ u32 padding;
+};
+
+struct kbase_uk_disjoint_query {
+ union uk_header header;
+ /* OUT */
+ u32 counter;
+ u32 padding;
+};
+
+struct kbase_uk_set_flags {
+ union uk_header header;
+ /* IN */
+ u32 create_flags;
+ u32 padding;
+};
+
+#if MALI_UNIT_TEST
+#define TEST_ADDR_COUNT 4
+#define KBASE_TEST_BUFFER_SIZE 128
+struct kbase_exported_test_data {
+ u64 test_addr[TEST_ADDR_COUNT]; /**< memory address */
+ u32 test_addr_pages[TEST_ADDR_COUNT]; /**< memory size in pages */
+ union kbase_pointer kctx; /**< base context created by process */
+ union kbase_pointer mm; /**< pointer to process address space */
+ u8 buffer1[KBASE_TEST_BUFFER_SIZE]; /**< unit test defined parameter */
+ u8 buffer2[KBASE_TEST_BUFFER_SIZE]; /**< unit test defined parameter */
+};
+
+struct kbase_uk_set_test_data {
+ union uk_header header;
+ /* IN */
+ struct kbase_exported_test_data test_data;
+};
+
+#endif /* MALI_UNIT_TEST */
+
+#ifdef SUPPORT_MALI_ERROR_INJECT
+struct kbase_uk_error_params {
+ union uk_header header;
+ /* IN */
+ struct kbase_error_params params;
+};
+#endif /* SUPPORT_MALI_ERROR_INJECT */
+
+#ifdef SUPPORT_MALI_NO_MALI
+struct kbase_uk_model_control_params {
+ union uk_header header;
+ /* IN */
+ struct kbase_model_control_params params;
+};
+#endif /* SUPPORT_MALI_NO_MALI */
+
+#ifdef BASE_LEGACY_UK8_SUPPORT
+struct kbase_uk_keep_gpu_powered {
+ union uk_header header;
+ u32 enabled;
+ u32 padding;
+};
+#endif /* BASE_LEGACY_UK8_SUPPORT */
+
+struct kbase_uk_profiling_controls {
+ union uk_header header;
+ u32 profiling_controls[FBDUMP_CONTROL_MAX];
+};
+
+struct kbase_uk_debugfs_mem_profile_add {
+ union uk_header header;
+ u32 len;
+ u32 padding;
+ union kbase_pointer buf;
+};
+
+struct kbase_uk_context_id {
+ union uk_header header;
+ /* OUT */
+ int id;
+};
+
+#if (defined(MALI_MIPE_ENABLED) && MALI_MIPE_ENABLED) || \
+ !defined(MALI_MIPE_ENABLED)
+/**
+ * struct kbase_uk_tlstream_acquire - User/Kernel space data exchange structure
+ * @header: UK structure header
+ * @flags: timeline stream flags
+ * @fd: timeline stream file descriptor
+ *
+ * This structure is used when performing a call to acquire the kernel-side
+ * timeline stream file descriptor.
+ */
+struct kbase_uk_tlstream_acquire {
+ union uk_header header;
+ /* IN */
+ u32 flags;
+ /* OUT */
+ s32 fd;
+};
+
+/**
+ * struct kbase_uk_tlstream_acquire_v10_4 - User/Kernel space data exchange
+ * structure
+ * @header: UK structure header
+ * @fd: timeline stream file descriptor
+ *
+ * This structure is used when performing a call to acquire the kernel-side
+ * timeline stream file descriptor.
+ */
+struct kbase_uk_tlstream_acquire_v10_4 {
+ union uk_header header;
+ /* IN */
+ /* OUT */
+ s32 fd;
+};
+
+/**
+ * struct kbase_uk_tlstream_flush - User/Kernel space data exchange structure
+ * @header: UK structure header
+ *
+ * This structure is used when performing a call to flush kernel side
+ * timeline streams.
+ */
+struct kbase_uk_tlstream_flush {
+ union uk_header header;
+ /* IN */
+ /* OUT */
+};
+
+#if MALI_UNIT_TEST
+/**
+ * struct kbase_uk_tlstream_test - User/Kernel space data exchange structure
+ * @header: UK structure header
+ * @tpw_count: number of trace point writers in each context
+ * @msg_delay: time delay between tracepoints from one writer in milliseconds
+ * @msg_count: number of trace points written by one writer
+ * @aux_msg: if non-zero aux messages will be included
+ *
+ * This structure is used when performing a call to start timeline stream test
+ * embedded in kernel.
+ */
+struct kbase_uk_tlstream_test {
+ union uk_header header;
+ /* IN */
+ u32 tpw_count;
+ u32 msg_delay;
+ u32 msg_count;
+ u32 aux_msg;
+ /* OUT */
+};
+
+/**
+ * struct kbase_uk_tlstream_stats - User/Kernel space data exchange structure
+ * @header: UK structure header
+ * @bytes_collected: number of bytes read by user
+ * @bytes_generated: number of bytes generated by tracepoints
+ *
+ * This structure is used when performing a call to obtain timeline stream
+ * statistics.
+ */
+struct kbase_uk_tlstream_stats {
+ union uk_header header; /**< UK structure header. */
+ /* IN */
+ /* OUT */
+ u32 bytes_collected;
+ u32 bytes_generated;
+};
+#endif /* MALI_UNIT_TEST */
+#endif /* MALI_MIPE_ENABLED */
+
+/**
+ * struct kbase_uk_prfcnt_values - data for the KBASE_FUNC_SET_PRFCNT_VALUES ioctl
+ * @header: UK structure header
+ * @data: Counter samples for the dummy model
+ * @size: Size of the counter sample data
+ */
+struct kbase_uk_prfcnt_values {
+ union uk_header header;
+ /* IN */
+ u32 *data;
+ u32 size;
+};
+
+/**
+ * struct kbase_uk_soft_event_update - User/Kernel space data exchange structure
+ * @header: UK structure header
+ * @evt: the GPU address containing the event
+ * @new_status: the new event status, must be either BASE_JD_SOFT_EVENT_SET or
+ * BASE_JD_SOFT_EVENT_RESET
+ * @flags: reserved for future uses, must be set to 0
+ *
+ * This structure is used to update the status of a software event. If the
+ * event's status is set to BASE_JD_SOFT_EVENT_SET, any job currently waiting
+ * on this event will complete.
+ */
+struct kbase_uk_soft_event_update {
+ union uk_header header;
+ /* IN */
+ u64 evt;
+ u32 new_status;
+ u32 flags;
+};
+
+/**
+ * struct kbase_uk_mem_jit_init - User/Kernel space data exchange structure
+ * @header: UK structure header
+ * @va_pages: Number of virtual pages required for JIT
+ *
+ * This structure is used when requesting initialization of JIT.
+ */
+struct kbase_uk_mem_jit_init {
+ union uk_header header;
+ /* IN */
+ u64 va_pages;
+};
+
+enum kbase_uk_function_id {
+ KBASE_FUNC_MEM_ALLOC = (UK_FUNC_ID + 0),
+ KBASE_FUNC_MEM_IMPORT = (UK_FUNC_ID + 1),
+ KBASE_FUNC_MEM_COMMIT = (UK_FUNC_ID + 2),
+ KBASE_FUNC_MEM_QUERY = (UK_FUNC_ID + 3),
+ KBASE_FUNC_MEM_FREE = (UK_FUNC_ID + 4),
+ KBASE_FUNC_MEM_FLAGS_CHANGE = (UK_FUNC_ID + 5),
+ KBASE_FUNC_MEM_ALIAS = (UK_FUNC_ID + 6),
+
+#ifdef BASE_LEGACY_UK6_SUPPORT
+ KBASE_FUNC_JOB_SUBMIT_UK6 = (UK_FUNC_ID + 7),
+#endif /* BASE_LEGACY_UK6_SUPPORT */
+
+ KBASE_FUNC_SYNC = (UK_FUNC_ID + 8),
+
+ KBASE_FUNC_POST_TERM = (UK_FUNC_ID + 9),
+
+ KBASE_FUNC_HWCNT_SETUP = (UK_FUNC_ID + 10),
+ KBASE_FUNC_HWCNT_DUMP = (UK_FUNC_ID + 11),
+ KBASE_FUNC_HWCNT_CLEAR = (UK_FUNC_ID + 12),
+
+ KBASE_FUNC_GPU_PROPS_REG_DUMP = (UK_FUNC_ID + 14),
+
+ KBASE_FUNC_FIND_CPU_OFFSET = (UK_FUNC_ID + 15),
+
+ KBASE_FUNC_GET_VERSION = (UK_FUNC_ID + 16),
+ KBASE_FUNC_SET_FLAGS = (UK_FUNC_ID + 18),
+
+ KBASE_FUNC_SET_TEST_DATA = (UK_FUNC_ID + 19),
+ KBASE_FUNC_INJECT_ERROR = (UK_FUNC_ID + 20),
+ KBASE_FUNC_MODEL_CONTROL = (UK_FUNC_ID + 21),
+
+#ifdef BASE_LEGACY_UK8_SUPPORT
+ KBASE_FUNC_KEEP_GPU_POWERED = (UK_FUNC_ID + 22),
+#endif /* BASE_LEGACY_UK8_SUPPORT */
+
+ KBASE_FUNC_FENCE_VALIDATE = (UK_FUNC_ID + 23),
+ KBASE_FUNC_STREAM_CREATE = (UK_FUNC_ID + 24),
+ KBASE_FUNC_GET_PROFILING_CONTROLS = (UK_FUNC_ID + 25),
+ KBASE_FUNC_SET_PROFILING_CONTROLS = (UK_FUNC_ID + 26),
+ /* to be used only for testing
+ * purposes, otherwise these controls
+ * are set through gator API */
+
+ KBASE_FUNC_DEBUGFS_MEM_PROFILE_ADD = (UK_FUNC_ID + 27),
+ KBASE_FUNC_JOB_SUBMIT = (UK_FUNC_ID + 28),
+ KBASE_FUNC_DISJOINT_QUERY = (UK_FUNC_ID + 29),
+
+ KBASE_FUNC_GET_CONTEXT_ID = (UK_FUNC_ID + 31),
+
+#if (defined(MALI_MIPE_ENABLED) && MALI_MIPE_ENABLED) || \
+ !defined(MALI_MIPE_ENABLED)
+ KBASE_FUNC_TLSTREAM_ACQUIRE_V10_4 = (UK_FUNC_ID + 32),
+#if MALI_UNIT_TEST
+ KBASE_FUNC_TLSTREAM_TEST = (UK_FUNC_ID + 33),
+ KBASE_FUNC_TLSTREAM_STATS = (UK_FUNC_ID + 34),
+#endif /* MALI_UNIT_TEST */
+ KBASE_FUNC_TLSTREAM_FLUSH = (UK_FUNC_ID + 35),
+#endif /* MALI_MIPE_ENABLED */
+
+ KBASE_FUNC_HWCNT_READER_SETUP = (UK_FUNC_ID + 36),
+
+#ifdef SUPPORT_MALI_NO_MALI
+ KBASE_FUNC_SET_PRFCNT_VALUES = (UK_FUNC_ID + 37),
+#endif
+
+ KBASE_FUNC_SOFT_EVENT_UPDATE = (UK_FUNC_ID + 38),
+
+ KBASE_FUNC_MEM_JIT_INIT = (UK_FUNC_ID + 39),
+
+#if (defined(MALI_MIPE_ENABLED) && MALI_MIPE_ENABLED) || \
+ !defined(MALI_MIPE_ENABLED)
+ KBASE_FUNC_TLSTREAM_ACQUIRE = (UK_FUNC_ID + 40),
+#endif /* MALI_MIPE_ENABLED */
+
+ KBASE_FUNC_MAX
+};
+
+#endif /* _KBASE_UKU_H_ */
+
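
The structures above follow a consistent convention: user space fills the fields marked /* IN */ and the kernel writes back the fields marked /* OUT */ once it has serviced the function identified by enum kbase_uk_function_id. The sketch below shows that shape for a KBASE_FUNC_MEM_QUERY request. It is illustrative only: union uk_header and the actual dispatch path live in mali_uk.h and the UK client code, which are outside this file, so the header setup and the call into the driver are left as comments.

/*
 * Illustrative sketch of the IN/OUT convention, using only fields and
 * constants defined in mali_kbase_uku.h above. The demo_* name is
 * invented; header setup and dispatch are deliberately omitted because
 * union uk_header and the UK call path are defined elsewhere.
 */
#include "mali_kbase_uku.h"

static u64 demo_query_commit_pages(u64 gpu_addr)
{
	struct kbase_uk_mem_query query = { 0 };

	/* IN: which allocation and which property to query */
	query.gpu_addr = gpu_addr;
	query.query = KBASE_MEM_QUERY_COMMIT_SIZE;

	/* header id (KBASE_FUNC_MEM_QUERY) and the call into the driver
	 * are not shown here; see mali_uk.h and the UK client code */

	/* OUT: filled in by the kernel on success */
	return query.value;
}
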
diff --git a/midgard-0.r2p0/mali_kbase_utility.c b/midgard-0.r2p0/mali_kbase_utility.c
new file mode 100755
index 0000000..be474ff
--- /dev/null
+++ b/midgard-0.r2p0/mali_kbase_utility.c
@@ -0,0 +1,33 @@
+/*
+ *
+ * (C) COPYRIGHT 2012-2013, 2015 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained
+ * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
+ * Boston, MA 02110-1301, USA.
+ *
+ */
+
+
+
+
+
+#include <mali_kbase.h>
+
+bool kbasep_list_member_of(const struct list_head *base, struct list_head *entry)
+{
+ struct list_head *pos = base->next;
+
+ while (pos != base) {
+ if (pos == entry)
+ return true;
+
+ pos = pos->next;
+ }
+ return false;
+}
diff --git a/midgard-0.r2p0/mali_kbase_utility.h b/midgard-0.r2p0/mali_kbase_utility.h
new file mode 100755
index 0000000..fd7252d
--- /dev/null
+++ b/midgard-0.r2p0/mali_kbase_utility.h
@@ -0,0 +1,37 @@
+/*
+ *
+ * (C) COPYRIGHT 2012-2013, 2015 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained
+ * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
+ * Boston, MA 02110-1301, USA.
+ *
+ */
+
+
+
+
+
+#ifndef _KBASE_UTILITY_H
+#define _KBASE_UTILITY_H
+
+#ifndef _KBASE_H_
+#error "Don't include this file directly, use mali_kbase.h instead"
+#endif
+
+/** Test whether the given list entry is a member of the given list.
+ *
+ * @param base The head of the list to be tested
+ * @param entry The list entry to be tested
+ *
+ * @return true if entry is a member of base
+ * false otherwise
+ */
+bool kbasep_list_member_of(const struct list_head *base, struct list_head *entry);
+
+#endif /* _KBASE_UTILITY_H */
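
kbasep_list_member_of() is a plain linear walk over a standard struct list_head list. A typical use, sketched below under the assumption of ordinary <linux/list.h> semantics, is to check membership before unlinking an entry whose current queue is not known for certain; the demo_* function name and the scenario are illustrative only, not driver code.

/*
 * Hedged usage sketch: only unlink the entry if it is actually on this
 * queue. list_del() is the standard <linux/list.h> primitive, pulled in
 * via mali_kbase.h (which the utility header asks callers to include).
 */
#include <mali_kbase.h>

static void demo_remove_if_member(struct list_head *queue,
				  struct list_head *entry)
{
	if (kbasep_list_member_of(queue, entry))
		list_del(entry);
}
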
diff --git a/midgard-0.r2p0/mali_kbase_vinstr.c b/midgard-0.r2p0/mali_kbase_vinstr.c
new file mode 100755
index 0000000..3adb06d
--- /dev/null
+++ b/midgard-0.r2p0/mali_kbase_vinstr.c
@@ -0,0 +1,2040 @@
+/*
+ *
+ * (C) COPYRIGHT 2011-2016 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained
+ * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
+ * Boston, MA 02110-1301, USA.
+ *
+ */
+
+
+
+#include <linux/anon_inodes.h>
+#include <linux/atomic.h>
+#include <linux/hrtimer.h>
+#include <linux/jiffies.h>
+#include <linux/kthread.h>
+#include <linux/list.h>
+#include <linux/mm.h>
+#include <linux/poll.h>
+#include <linux/preempt.h>
+#include <linux/slab.h>
+#include <linux/wait.h>
+
+#include <mali_kbase.h>
+#include <mali_kbase_hwaccess_instr.h>
+#include <mali_kbase_hwaccess_jm.h>
+#include <mali_kbase_hwcnt_reader.h>
+#include <mali_kbase_mem_linux.h>
+#include <mali_kbase_tlstream.h>
+
+/*****************************************************************************/
+
+/* Hwcnt reader API version */
+#define HWCNT_READER_API 1
+
+/* The number of nanoseconds in a second. */
+#define NSECS_IN_SEC 1000000000ull /* ns */
+
+/* The time resolution of dumping service. */
+#define DUMPING_RESOLUTION 500000ull /* ns */
+
+/* The maximal supported number of dumping buffers. */
+#define MAX_BUFFER_COUNT 32
+
+/* Size and number of hw counters blocks. */
+#define NR_CNT_BLOCKS_PER_GROUP 8
+#define NR_CNT_PER_BLOCK 64
+#define NR_BYTES_PER_CNT 4
+#define NR_BYTES_PER_HDR 16
+#define PRFCNT_EN_MASK_OFFSET 0x8
+
+/*****************************************************************************/
+
+enum {
+ SHADER_HWCNT_BM,
+ TILER_HWCNT_BM,
+ MMU_L2_HWCNT_BM,
+ JM_HWCNT_BM
+};
+
+enum vinstr_state {
+ VINSTR_IDLE,
+ VINSTR_DUMPING,
+ VINSTR_SUSPENDING,
+ VINSTR_SUSPENDED,
+ VINSTR_RESUMING
+};
+
+/**
+ * struct kbase_vinstr_context - vinstr context per device
+ * @lock: protects the entire vinstr context
+ * @kbdev: pointer to kbase device
+ * @kctx: pointer to kbase context
+ * @vmap: vinstr vmap for mapping hwcnt dump buffer
+ * @gpu_va: GPU hwcnt dump buffer address
+ * @cpu_va: the CPU side mapping of the hwcnt dump buffer
+ * @dump_size: size of the dump buffer in bytes
+ * @bitmap: current set of counters monitored, not always in sync
+ * with hardware
+ * @reprogram: when true, reprogram hwcnt block with the new set of
+ * counters
+ * @state: vinstr state
+ * @state_lock: protects information about vinstr state
+ * @suspend_waitq: notification queue to trigger state re-validation
+ * @suspend_cnt: reference counter of vinstr's suspend state
+ * @suspend_work: worker to execute on entering suspended state
+ * @resume_work: worker to execute on leaving suspended state
+ * @nclients: number of attached clients, pending or otherwise
+ * @waiting_clients: head of list of clients being periodically sampled
+ * @idle_clients: head of list of clients being idle
+ * @suspended_clients: head of list of clients being suspended
+ * @thread: periodic sampling thread
+ * @waitq: notification queue of sampling thread
+ * @request_pending: request for action for sampling thread
+ */
+struct kbase_vinstr_context {
+ struct mutex lock;
+ struct kbase_device *kbdev;
+ struct kbase_context *kctx;
+
+ struct kbase_vmap_struct vmap;
+ u64 gpu_va;
+ void *cpu_va;
+ size_t dump_size;
+ u32 bitmap[4];
+ bool reprogram;
+
+ enum vinstr_state state;
+ struct spinlock state_lock;
+ wait_queue_head_t suspend_waitq;
+ unsigned int suspend_cnt;
+ struct work_struct suspend_work;
+ struct work_struct resume_work;
+
+ u32 nclients;
+ struct list_head waiting_clients;
+ struct list_head idle_clients;
+ struct list_head suspended_clients;
+
+ struct task_struct *thread;
+ wait_queue_head_t waitq;
+ atomic_t request_pending;
+};
+
+/**
+ * struct kbase_vinstr_client - a vinstr client attached to a vinstr context
+ * @vinstr_ctx: vinstr context client is attached to
+ * @list: node used to attach this client to list in vinstr context
+ * @buffer_count: number of buffers this client is using
+ * @event_mask: events this client reacts to
+ * @dump_size: size of one dump buffer in bytes
+ * @bitmap: bitmap request for JM, TILER, SHADER and MMU counters
+ * @legacy_buffer: userspace hwcnt dump buffer (legacy interface)
+ * @kernel_buffer: kernel hwcnt dump buffer (kernel client interface)
+ * @accum_buffer: temporary accumulation buffer for preserving counters
+ * @dump_time: next time this client shall request hwcnt dump
+ * @dump_interval: interval between periodic hwcnt dumps
+ * @dump_buffers: kernel hwcnt dump buffers allocated by this client
+ * @dump_buffers_meta: metadata of dump buffers
+ * @meta_idx: index of metadata being accessed by userspace
+ * @read_idx: index of buffer read by userspace
+ * @write_idx: index of buffer being written by dumping service
+ * @waitq: client's notification queue
+ * @pending: when true, client has attached but hwcnt not yet updated
+ */
+struct kbase_vinstr_client {
+ struct kbase_vinstr_context *vinstr_ctx;
+ struct list_head list;
+ unsigned int buffer_count;
+ u32 event_mask;
+ size_t dump_size;
+ u32 bitmap[4];
+ void __user *legacy_buffer;
+ void *kernel_buffer;
+ void *accum_buffer;
+ u64 dump_time;
+ u32 dump_interval;
+ char *dump_buffers;
+ struct kbase_hwcnt_reader_metadata *dump_buffers_meta;
+ atomic_t meta_idx;
+ atomic_t read_idx;
+ atomic_t write_idx;
+ wait_queue_head_t waitq;
+ bool pending;
+};
+
+/**
+ * struct kbasep_vinstr_wake_up_timer - vinstr service thread wake up timer
+ * @hrtimer: high resolution timer
+ * @vinstr_ctx: vinstr context
+ */
+struct kbasep_vinstr_wake_up_timer {
+ struct hrtimer hrtimer;
+ struct kbase_vinstr_context *vinstr_ctx;
+};
+
+/*****************************************************************************/
+
+static int kbasep_vinstr_service_task(void *data);
+
+static unsigned int kbasep_vinstr_hwcnt_reader_poll(
+ struct file *filp,
+ poll_table *wait);
+static long kbasep_vinstr_hwcnt_reader_ioctl(
+ struct file *filp,
+ unsigned int cmd,
+ unsigned long arg);
+static int kbasep_vinstr_hwcnt_reader_mmap(
+ struct file *filp,
+ struct vm_area_struct *vma);
+static int kbasep_vinstr_hwcnt_reader_release(
+ struct inode *inode,
+ struct file *filp);
+
+/* The hwcnt reader client file operations structure. */
+static const struct file_operations vinstr_client_fops = {
+ .poll = kbasep_vinstr_hwcnt_reader_poll,
+ .unlocked_ioctl = kbasep_vinstr_hwcnt_reader_ioctl,
+ .compat_ioctl = kbasep_vinstr_hwcnt_reader_ioctl,
+ .mmap = kbasep_vinstr_hwcnt_reader_mmap,
+ .release = kbasep_vinstr_hwcnt_reader_release,
+};
+
+/*****************************************************************************/
+
+static int enable_hwcnt(struct kbase_vinstr_context *vinstr_ctx)
+{
+ struct kbase_context *kctx = vinstr_ctx->kctx;
+ struct kbase_device *kbdev = kctx->kbdev;
+ struct kbase_uk_hwcnt_setup setup;
+ int err;
+
+ setup.dump_buffer = vinstr_ctx->gpu_va;
+ setup.jm_bm = vinstr_ctx->bitmap[JM_HWCNT_BM];
+ setup.tiler_bm = vinstr_ctx->bitmap[TILER_HWCNT_BM];
+ setup.shader_bm = vinstr_ctx->bitmap[SHADER_HWCNT_BM];
+ setup.mmu_l2_bm = vinstr_ctx->bitmap[MMU_L2_HWCNT_BM];
+
+ /* Mark the context as active so the GPU is kept turned on */
+ /* A suspend won't happen here, because we're in a syscall from a
+ * userspace thread. */
+ kbase_pm_context_active(kbdev);
+
+ /* Schedule the context in */
+ kbasep_js_schedule_privileged_ctx(kbdev, kctx);
+ err = kbase_instr_hwcnt_enable_internal(kbdev, kctx, &setup);
+ if (err) {
+ /* Release the context. This had its own Power Manager Active
+ * reference */
+ kbasep_js_release_privileged_ctx(kbdev, kctx);
+
+ /* Also release our Power Manager Active reference */
+ kbase_pm_context_idle(kbdev);
+ }
+
+ return err;
+}
+
+static void disable_hwcnt(struct kbase_vinstr_context *vinstr_ctx)
+{
+ struct kbase_context *kctx = vinstr_ctx->kctx;
+ struct kbase_device *kbdev = kctx->kbdev;
+ int err;
+
+ err = kbase_instr_hwcnt_disable_internal(kctx);
+ if (err) {
+ dev_warn(kbdev->dev, "Failed to disable HW counters (ctx:%p)",
+ kctx);
+ return;
+ }
+
+ /* Release the context. This had its own Power Manager Active reference. */
+ kbasep_js_release_privileged_ctx(kbdev, kctx);
+
+ /* Also release our Power Manager Active reference. */
+ kbase_pm_context_idle(kbdev);
+
+ dev_dbg(kbdev->dev, "HW counters dumping disabled for context %p", kctx);
+}
+
+static int reprogram_hwcnt(struct kbase_vinstr_context *vinstr_ctx)
+{
+ disable_hwcnt(vinstr_ctx);
+ return enable_hwcnt(vinstr_ctx);
+}
+
+static void hwcnt_bitmap_set(u32 dst[4], u32 src[4])
+{
+ dst[JM_HWCNT_BM] = src[JM_HWCNT_BM];
+ dst[TILER_HWCNT_BM] = src[TILER_HWCNT_BM];
+ dst[SHADER_HWCNT_BM] = src[SHADER_HWCNT_BM];
+ dst[MMU_L2_HWCNT_BM] = src[MMU_L2_HWCNT_BM];
+}
+
+static void hwcnt_bitmap_union(u32 dst[4], u32 src[4])
+{
+ dst[JM_HWCNT_BM] |= src[JM_HWCNT_BM];
+ dst[TILER_HWCNT_BM] |= src[TILER_HWCNT_BM];
+ dst[SHADER_HWCNT_BM] |= src[SHADER_HWCNT_BM];
+ dst[MMU_L2_HWCNT_BM] |= src[MMU_L2_HWCNT_BM];
+}
+
+size_t kbase_vinstr_dump_size(struct kbase_device *kbdev)
+{
+ size_t dump_size;
+
+#ifndef CONFIG_MALI_NO_MALI
+ if (kbase_hw_has_feature(kbdev, BASE_HW_FEATURE_V4)) {
+ u32 nr_cg;
+
+ nr_cg = kbdev->gpu_props.num_core_groups;
+ dump_size = nr_cg * NR_CNT_BLOCKS_PER_GROUP *
+ NR_CNT_PER_BLOCK *
+ NR_BYTES_PER_CNT;
+ } else
+#endif /* CONFIG_MALI_NO_MALI */
+ {
+ /* assume v5 for now */
+ base_gpu_props *props = &kbdev->gpu_props.props;
+ u32 nr_l2 = props->l2_props.num_l2_slices;
+ u64 core_mask = props->coherency_info.group[0].core_mask;
+ u32 nr_blocks = fls64(core_mask);
+
+ /* JM and tiler counter blocks are always present */
+ dump_size = (2 + nr_l2 + nr_blocks) *
+ NR_CNT_PER_BLOCK *
+ NR_BYTES_PER_CNT;
+ }
+ return dump_size;
+}
+KBASE_EXPORT_TEST_API(kbase_vinstr_dump_size);
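+
+/* Worked example for the v5 layout above (illustrative only; the exact
+ * figures depend on how NR_CNT_PER_BLOCK and NR_BYTES_PER_CNT are defined
+ * for this driver): with NR_CNT_PER_BLOCK = 64 counters per block and
+ * NR_BYTES_PER_CNT = 4, a GPU with one L2 slice (nr_l2 = 1) and a core
+ * mask of 0xF (nr_blocks = fls64(0xF) = 4) needs
+ *
+ *   (2 + 1 + 4) * 64 * 4 = 1792 bytes
+ *
+ * per dump: one JM block, one tiler block, one MMU/L2 block and four
+ * shader core blocks. */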
+
+static size_t kbasep_vinstr_dump_size_ctx(
+ struct kbase_vinstr_context *vinstr_ctx)
+{
+ return kbase_vinstr_dump_size(vinstr_ctx->kctx->kbdev);
+}
+
+static int kbasep_vinstr_map_kernel_dump_buffer(
+ struct kbase_vinstr_context *vinstr_ctx)
+{
+ struct kbase_va_region *reg;
+ struct kbase_context *kctx = vinstr_ctx->kctx;
+ u64 flags, nr_pages;
+ u16 va_align = 0;
+
+ flags = BASE_MEM_PROT_CPU_RD | BASE_MEM_PROT_GPU_WR;
+ vinstr_ctx->dump_size = kbasep_vinstr_dump_size_ctx(vinstr_ctx);
+ nr_pages = PFN_UP(vinstr_ctx->dump_size);
+
+ reg = kbase_mem_alloc(kctx, nr_pages, nr_pages, 0, &flags,
+ &vinstr_ctx->gpu_va, &va_align);
+ if (!reg)
+ return -ENOMEM;
+
+ vinstr_ctx->cpu_va = kbase_vmap(
+ kctx,
+ vinstr_ctx->gpu_va,
+ vinstr_ctx->dump_size,
+ &vinstr_ctx->vmap);
+ if (!vinstr_ctx->cpu_va) {
+ kbase_mem_free(kctx, vinstr_ctx->gpu_va);
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
+static void kbasep_vinstr_unmap_kernel_dump_buffer(
+ struct kbase_vinstr_context *vinstr_ctx)
+{
+ struct kbase_context *kctx = vinstr_ctx->kctx;
+
+ kbase_vunmap(kctx, &vinstr_ctx->vmap);
+ kbase_mem_free(kctx, vinstr_ctx->gpu_va);
+}
+
+/**
+ * kbasep_vinstr_create_kctx - create kernel context for vinstr
+ * @vinstr_ctx: vinstr context
+ * Return: zero on success
+ */
+static int kbasep_vinstr_create_kctx(struct kbase_vinstr_context *vinstr_ctx)
+{
+ struct kbase_device *kbdev = vinstr_ctx->kbdev;
+ struct kbasep_kctx_list_element *element;
+ unsigned long flags;
+ bool enable_backend = false;
+ int err;
+
+ vinstr_ctx->kctx = kbase_create_context(vinstr_ctx->kbdev, true);
+ if (!vinstr_ctx->kctx)
+ return -ENOMEM;
+
+ /* Map the master kernel dump buffer. The HW dumps the counters
+ * into this memory region. */
+ err = kbasep_vinstr_map_kernel_dump_buffer(vinstr_ctx);
+ if (err) {
+ kbase_destroy_context(vinstr_ctx->kctx);
+ vinstr_ctx->kctx = NULL;
+ return err;
+ }
+
+ /* Add kernel context to list of contexts associated with device. */
+ element = kzalloc(sizeof(*element), GFP_KERNEL);
+ if (element) {
+ element->kctx = vinstr_ctx->kctx;
+ mutex_lock(&kbdev->kctx_list_lock);
+ list_add(&element->link, &kbdev->kctx_list);
+
+ /* Inform the timeline client about the new context.
+ * Do this while holding the lock to avoid the tracepoint
+ * being created in both the body and summary streams. */
+ kbase_tlstream_tl_new_ctx(
+ vinstr_ctx->kctx,
+ (u32)(vinstr_ctx->kctx->id),
+ (u32)(vinstr_ctx->kctx->tgid));
+
+ mutex_unlock(&kbdev->kctx_list_lock);
+ } else {
+ /* Don't treat this as a fail - just warn about it. */
+ dev_warn(kbdev->dev,
+ "couldn't add kctx to kctx_list\n");
+ }
+
+ /* Don't enable hardware counters if vinstr is suspended.
+ * Note that the vinstr resume code runs under the vinstr context lock;
+ * the lower layer will be re-enabled as needed on resume. */
+ spin_lock_irqsave(&vinstr_ctx->state_lock, flags);
+ if (VINSTR_IDLE == vinstr_ctx->state)
+ enable_backend = true;
+ spin_unlock_irqrestore(&vinstr_ctx->state_lock, flags);
+ if (enable_backend)
+ err = enable_hwcnt(vinstr_ctx);
+
+ if (err) {
+ kbasep_vinstr_unmap_kernel_dump_buffer(vinstr_ctx);
+ kbase_destroy_context(vinstr_ctx->kctx);
+ if (element) {
+ mutex_lock(&kbdev->kctx_list_lock);
+ list_del(&element->link);
+ kfree(element);
+ mutex_unlock(&kbdev->kctx_list_lock);
+ }
+ kbase_tlstream_tl_del_ctx(vinstr_ctx->kctx);
+ vinstr_ctx->kctx = NULL;
+ return err;
+ }
+
+ vinstr_ctx->thread = kthread_run(
+ kbasep_vinstr_service_task,
+ vinstr_ctx,
+ "mali_vinstr_service");
+ if (IS_ERR(vinstr_ctx->thread)) {
+ /* kthread_run() returns an ERR_PTR() on failure, never NULL. */
+ vinstr_ctx->thread = NULL;
+ disable_hwcnt(vinstr_ctx);
+ kbasep_vinstr_unmap_kernel_dump_buffer(vinstr_ctx);
+ kbase_destroy_context(vinstr_ctx->kctx);
+ if (element) {
+ mutex_lock(&kbdev->kctx_list_lock);
+ list_del(&element->link);
+ kfree(element);
+ mutex_unlock(&kbdev->kctx_list_lock);
+ }
+ kbase_tlstream_tl_del_ctx(vinstr_ctx->kctx);
+ vinstr_ctx->kctx = NULL;
+ return -EFAULT;
+ }
+
+ return 0;
+}
+
+/**
+ * kbasep_vinstr_destroy_kctx - destroy vinstr's kernel context
+ * @vinstr_ctx: vinstr context
+ */
+static void kbasep_vinstr_destroy_kctx(struct kbase_vinstr_context *vinstr_ctx)
+{
+ struct kbase_device *kbdev = vinstr_ctx->kbdev;
+ struct kbasep_kctx_list_element *element;
+ struct kbasep_kctx_list_element *tmp;
+ bool found = false;
+
+ /* Release hw counters dumping resources. */
+ vinstr_ctx->thread = NULL;
+ disable_hwcnt(vinstr_ctx);
+ kbasep_vinstr_unmap_kernel_dump_buffer(vinstr_ctx);
+ kbase_destroy_context(vinstr_ctx->kctx);
+
+ /* Remove kernel context from the device's contexts list. */
+ mutex_lock(&kbdev->kctx_list_lock);
+ list_for_each_entry_safe(element, tmp, &kbdev->kctx_list, link) {
+ if (element->kctx == vinstr_ctx->kctx) {
+ list_del(&element->link);
+ kfree(element);
+ found = true;
+ }
+ }
+ mutex_unlock(&kbdev->kctx_list_lock);
+
+ if (!found)
+ dev_warn(kbdev->dev, "kctx not in kctx_list\n");
+
+ /* Inform timeline client about context destruction. */
+ kbase_tlstream_tl_del_ctx(vinstr_ctx->kctx);
+
+ vinstr_ctx->kctx = NULL;
+}
+
+/**
+ * kbasep_vinstr_attach_client - Attach a client to the vinstr core
+ * @vinstr_ctx: vinstr context
+ * @buffer_count: requested number of dump buffers
+ * @bitmap: bitmaps describing which counters should be enabled
+ * @argp: pointer where notification descriptor shall be stored
+ * @kernel_buffer: pointer to kernel side buffer
+ *
+ * Return: vinstr opaque client handle or NULL on failure
+ */
+static struct kbase_vinstr_client *kbasep_vinstr_attach_client(
+ struct kbase_vinstr_context *vinstr_ctx, u32 buffer_count,
+ u32 bitmap[4], void *argp, void *kernel_buffer)
+{
+ struct task_struct *thread = NULL;
+ struct kbase_vinstr_client *cli;
+
+ KBASE_DEBUG_ASSERT(vinstr_ctx);
+
+ if (buffer_count > MAX_BUFFER_COUNT
+ || (buffer_count & (buffer_count - 1)))
+ return NULL;
+
+ cli = kzalloc(sizeof(*cli), GFP_KERNEL);
+ if (!cli)
+ return NULL;
+
+ cli->vinstr_ctx = vinstr_ctx;
+ cli->buffer_count = buffer_count;
+ cli->event_mask =
+ (1 << BASE_HWCNT_READER_EVENT_MANUAL) |
+ (1 << BASE_HWCNT_READER_EVENT_PERIODIC);
+ cli->pending = true;
+
+ hwcnt_bitmap_set(cli->bitmap, bitmap);
+
+ mutex_lock(&vinstr_ctx->lock);
+
+ hwcnt_bitmap_union(vinstr_ctx->bitmap, cli->bitmap);
+ vinstr_ctx->reprogram = true;
+
+ /* If this is the first client, create the vinstr kbase
+ * context. This context remains resident until the
+ * last client exits. */
+ if (!vinstr_ctx->nclients) {
+ hwcnt_bitmap_set(vinstr_ctx->bitmap, cli->bitmap);
+ if (kbasep_vinstr_create_kctx(vinstr_ctx) < 0)
+ goto error;
+
+ vinstr_ctx->reprogram = false;
+ cli->pending = false;
+ }
+
+ /* The GPU resets the counter block every time there is a request
+ * to dump it. We need a per-client kernel buffer for accumulating
+ * the counters. */
+ cli->dump_size = kbasep_vinstr_dump_size_ctx(vinstr_ctx);
+ cli->accum_buffer = kzalloc(cli->dump_size, GFP_KERNEL);
+ if (!cli->accum_buffer)
+ goto error;
+
+ /* Prepare buffers. */
+ if (cli->buffer_count) {
+ int *fd = (int *)argp;
+ size_t tmp;
+
+ /* Allocate area for buffers metadata storage. */
+ tmp = sizeof(struct kbase_hwcnt_reader_metadata) *
+ cli->buffer_count;
+ cli->dump_buffers_meta = kmalloc(tmp, GFP_KERNEL);
+ if (!cli->dump_buffers_meta)
+ goto error;
+
+ /* Allocate required number of dumping buffers. */
+ cli->dump_buffers = (char *)__get_free_pages(
+ GFP_KERNEL | __GFP_ZERO,
+ get_order(cli->dump_size * cli->buffer_count));
+ if (!cli->dump_buffers)
+ goto error;
+
+ /* Create descriptor for user-kernel data exchange. */
+ *fd = anon_inode_getfd(
+ "[mali_vinstr_desc]",
+ &vinstr_client_fops,
+ cli,
+ O_RDONLY | O_CLOEXEC);
+ if (0 > *fd)
+ goto error;
+ } else if (kernel_buffer) {
+ cli->kernel_buffer = kernel_buffer;
+ } else {
+ cli->legacy_buffer = (void __user *)argp;
+ }
+
+ atomic_set(&cli->read_idx, 0);
+ atomic_set(&cli->meta_idx, 0);
+ atomic_set(&cli->write_idx, 0);
+ init_waitqueue_head(&cli->waitq);
+
+ vinstr_ctx->nclients++;
+ list_add(&cli->list, &vinstr_ctx->idle_clients);
+
+ mutex_unlock(&vinstr_ctx->lock);
+
+ return cli;
+
+error:
+ kfree(cli->dump_buffers_meta);
+ if (cli->dump_buffers)
+ free_pages(
+ (unsigned long)cli->dump_buffers,
+ get_order(cli->dump_size * cli->buffer_count));
+ kfree(cli->accum_buffer);
+ if (!vinstr_ctx->nclients && vinstr_ctx->kctx) {
+ thread = vinstr_ctx->thread;
+ kbasep_vinstr_destroy_kctx(vinstr_ctx);
+ }
+ kfree(cli);
+
+ mutex_unlock(&vinstr_ctx->lock);
+
+ /* Thread must be stopped after lock is released. */
+ if (thread)
+ kthread_stop(thread);
+
+ return NULL;
+}
+
+void kbase_vinstr_detach_client(struct kbase_vinstr_client *cli)
+{
+ struct kbase_vinstr_context *vinstr_ctx;
+ struct kbase_vinstr_client *iter, *tmp;
+ struct task_struct *thread = NULL;
+ u32 zerobitmap[4] = { 0 };
+ int cli_found = 0;
+
+ KBASE_DEBUG_ASSERT(cli);
+ vinstr_ctx = cli->vinstr_ctx;
+ KBASE_DEBUG_ASSERT(vinstr_ctx);
+
+ mutex_lock(&vinstr_ctx->lock);
+
+ list_for_each_entry_safe(iter, tmp, &vinstr_ctx->idle_clients, list) {
+ if (iter == cli) {
+ vinstr_ctx->reprogram = true;
+ cli_found = 1;
+ list_del(&iter->list);
+ break;
+ }
+ }
+ if (!cli_found) {
+ list_for_each_entry_safe(
+ iter, tmp, &vinstr_ctx->waiting_clients, list) {
+ if (iter == cli) {
+ vinstr_ctx->reprogram = true;
+ cli_found = 1;
+ list_del(&iter->list);
+ break;
+ }
+ }
+ }
+ KBASE_DEBUG_ASSERT(cli_found);
+
+ kfree(cli->dump_buffers_meta);
+ free_pages(
+ (unsigned long)cli->dump_buffers,
+ get_order(cli->dump_size * cli->buffer_count));
+ kfree(cli->accum_buffer);
+ kfree(cli);
+
+ vinstr_ctx->nclients--;
+ if (!vinstr_ctx->nclients) {
+ thread = vinstr_ctx->thread;
+ kbasep_vinstr_destroy_kctx(vinstr_ctx);
+ }
+
+ /* Rebuild context bitmap now that the client has detached */
+ hwcnt_bitmap_set(vinstr_ctx->bitmap, zerobitmap);
+ list_for_each_entry(iter, &vinstr_ctx->idle_clients, list)
+ hwcnt_bitmap_union(vinstr_ctx->bitmap, iter->bitmap);
+ list_for_each_entry(iter, &vinstr_ctx->waiting_clients, list)
+ hwcnt_bitmap_union(vinstr_ctx->bitmap, iter->bitmap);
+
+ mutex_unlock(&vinstr_ctx->lock);
+
+ /* Thread must be stopped after lock is released. */
+ if (thread)
+ kthread_stop(thread);
+}
+KBASE_EXPORT_TEST_API(kbase_vinstr_detach_client);
+
+/* Accumulate counters in the dump buffer */
+static void accum_dump_buffer(void *dst, void *src, size_t dump_size)
+{
+ size_t block_size = NR_CNT_PER_BLOCK * NR_BYTES_PER_CNT;
+ u32 *d = dst;
+ u32 *s = src;
+ size_t i, j;
+
+ for (i = 0; i < dump_size; i += block_size) {
+ /* skip over the header block */
+ d += NR_BYTES_PER_HDR / sizeof(u32);
+ s += NR_BYTES_PER_HDR / sizeof(u32);
+ for (j = 0; j < (block_size - NR_BYTES_PER_HDR) / sizeof(u32); j++) {
+ /* saturate result if addition would result in wraparound */
+ if (U32_MAX - *d < *s)
+ *d = U32_MAX;
+ else
+ *d += *s;
+ d++;
+ s++;
+ }
+ }
+}
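+
+/* Example of the saturating add above (illustrative): if an accumulated
+ * counter already holds 0xFFFFFFF0 and the new sample adds 0x100, a plain
+ * addition would wrap to 0xF0; instead the value is clamped to U32_MAX so
+ * long-running counters never appear to go backwards. */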
+
+/* This is the Midgard v4 patch function. It copies the headers for each
+ * of the defined blocks from the master kernel buffer and then patches up
+ * the performance counter enable mask for each of the blocks to exclude
+ * counters that were not requested by the client. */
+static void patch_dump_buffer_hdr_v4(
+ struct kbase_vinstr_context *vinstr_ctx,
+ struct kbase_vinstr_client *cli)
+{
+ u32 *mask;
+ u8 *dst = cli->accum_buffer;
+ u8 *src = vinstr_ctx->cpu_va;
+ u32 nr_cg = vinstr_ctx->kctx->kbdev->gpu_props.num_core_groups;
+ size_t i, group_size, group;
+ enum {
+ SC0_BASE = 0 * NR_CNT_PER_BLOCK * NR_BYTES_PER_CNT,
+ SC1_BASE = 1 * NR_CNT_PER_BLOCK * NR_BYTES_PER_CNT,
+ SC2_BASE = 2 * NR_CNT_PER_BLOCK * NR_BYTES_PER_CNT,
+ SC3_BASE = 3 * NR_CNT_PER_BLOCK * NR_BYTES_PER_CNT,
+ TILER_BASE = 4 * NR_CNT_PER_BLOCK * NR_BYTES_PER_CNT,
+ MMU_L2_BASE = 5 * NR_CNT_PER_BLOCK * NR_BYTES_PER_CNT,
+ JM_BASE = 7 * NR_CNT_PER_BLOCK * NR_BYTES_PER_CNT
+ };
+
+ group_size = NR_CNT_BLOCKS_PER_GROUP *
+ NR_CNT_PER_BLOCK *
+ NR_BYTES_PER_CNT;
+ for (i = 0; i < nr_cg; i++) {
+ group = i * group_size;
+ /* copy shader core headers */
+ memcpy(&dst[group + SC0_BASE], &src[group + SC0_BASE],
+ NR_BYTES_PER_HDR);
+ memcpy(&dst[group + SC1_BASE], &src[group + SC1_BASE],
+ NR_BYTES_PER_HDR);
+ memcpy(&dst[group + SC2_BASE], &src[group + SC2_BASE],
+ NR_BYTES_PER_HDR);
+ memcpy(&dst[group + SC3_BASE], &src[group + SC3_BASE],
+ NR_BYTES_PER_HDR);
+
+ /* copy tiler header */
+ memcpy(&dst[group + TILER_BASE], &src[group + TILER_BASE],
+ NR_BYTES_PER_HDR);
+
+ /* copy mmu header */
+ memcpy(&dst[group + MMU_L2_BASE], &src[group + MMU_L2_BASE],
+ NR_BYTES_PER_HDR);
+
+ /* copy job manager header */
+ memcpy(&dst[group + JM_BASE], &src[group + JM_BASE],
+ NR_BYTES_PER_HDR);
+
+ /* patch the shader core enable mask */
+ mask = (u32 *)&dst[group + SC0_BASE + PRFCNT_EN_MASK_OFFSET];
+ *mask &= cli->bitmap[SHADER_HWCNT_BM];
+ mask = (u32 *)&dst[group + SC1_BASE + PRFCNT_EN_MASK_OFFSET];
+ *mask &= cli->bitmap[SHADER_HWCNT_BM];
+ mask = (u32 *)&dst[group + SC2_BASE + PRFCNT_EN_MASK_OFFSET];
+ *mask &= cli->bitmap[SHADER_HWCNT_BM];
+ mask = (u32 *)&dst[group + SC3_BASE + PRFCNT_EN_MASK_OFFSET];
+ *mask &= cli->bitmap[SHADER_HWCNT_BM];
+
+ /* patch the tiler core enable mask */
+ mask = (u32 *)&dst[group + TILER_BASE + PRFCNT_EN_MASK_OFFSET];
+ *mask &= cli->bitmap[TILER_HWCNT_BM];
+
+ /* patch the mmu core enable mask */
+ mask = (u32 *)&dst[group + MMU_L2_BASE + PRFCNT_EN_MASK_OFFSET];
+ *mask &= cli->bitmap[MMU_L2_HWCNT_BM];
+
+ /* patch the job manager enable mask */
+ mask = (u32 *)&dst[group + JM_BASE + PRFCNT_EN_MASK_OFFSET];
+ *mask &= cli->bitmap[JM_HWCNT_BM];
+ }
+}
+
+/* This is the Midgard v5 patch function. It copies the headers for each
+ * of the defined blocks from the master kernel buffer and then patches up
+ * the performance counter enable mask for each of the blocks to exclude
+ * counters that were not requested by the client. */
+static void patch_dump_buffer_hdr_v5(
+ struct kbase_vinstr_context *vinstr_ctx,
+ struct kbase_vinstr_client *cli)
+{
+ struct kbase_device *kbdev = vinstr_ctx->kctx->kbdev;
+ u32 i, nr_l2;
+ u64 core_mask;
+ u32 *mask;
+ u8 *dst = cli->accum_buffer;
+ u8 *src = vinstr_ctx->cpu_va;
+ size_t block_size = NR_CNT_PER_BLOCK * NR_BYTES_PER_CNT;
+
+ /* copy and patch job manager header */
+ memcpy(dst, src, NR_BYTES_PER_HDR);
+ mask = (u32 *)&dst[PRFCNT_EN_MASK_OFFSET];
+ *mask &= cli->bitmap[JM_HWCNT_BM];
+ dst += block_size;
+ src += block_size;
+
+ /* copy and patch tiler header */
+ memcpy(dst, src, NR_BYTES_PER_HDR);
+ mask = (u32 *)&dst[PRFCNT_EN_MASK_OFFSET];
+ *mask &= cli->bitmap[TILER_HWCNT_BM];
+ dst += block_size;
+ src += block_size;
+
+ /* copy and patch MMU/L2C headers */
+ nr_l2 = kbdev->gpu_props.props.l2_props.num_l2_slices;
+ for (i = 0; i < nr_l2; i++) {
+ memcpy(dst, src, NR_BYTES_PER_HDR);
+ mask = (u32 *)&dst[PRFCNT_EN_MASK_OFFSET];
+ *mask &= cli->bitmap[MMU_L2_HWCNT_BM];
+ dst += block_size;
+ src += block_size;
+ }
+
+ /* copy and patch shader core headers */
+ core_mask = kbdev->gpu_props.props.coherency_info.group[0].core_mask;
+ while (0ull != core_mask) {
+ memcpy(dst, src, NR_BYTES_PER_HDR);
+ if (0ull != (core_mask & 1ull)) {
+ /* if the block is not reserved, update the header */
+ mask = (u32 *)&dst[PRFCNT_EN_MASK_OFFSET];
+ *mask &= cli->bitmap[SHADER_HWCNT_BM];
+ }
+ dst += block_size;
+ src += block_size;
+
+ core_mask >>= 1;
+ }
+}
+
+/**
+ * accum_clients - accumulate dumped hw counters for all known clients
+ * @vinstr_ctx: vinstr context
+ */
+static void accum_clients(struct kbase_vinstr_context *vinstr_ctx)
+{
+ struct kbase_vinstr_client *iter;
+ int v4 = 0;
+
+#ifndef CONFIG_MALI_NO_MALI
+ v4 = kbase_hw_has_feature(vinstr_ctx->kbdev, BASE_HW_FEATURE_V4);
+#endif
+
+ list_for_each_entry(iter, &vinstr_ctx->idle_clients, list) {
+ /* Don't bother accumulating clients whose hwcnt requests
+ * have not yet been honoured. */
+ if (iter->pending)
+ continue;
+ if (v4)
+ patch_dump_buffer_hdr_v4(vinstr_ctx, iter);
+ else
+ patch_dump_buffer_hdr_v5(vinstr_ctx, iter);
+ accum_dump_buffer(
+ iter->accum_buffer,
+ vinstr_ctx->cpu_va,
+ iter->dump_size);
+ }
+ list_for_each_entry(iter, &vinstr_ctx->waiting_clients, list) {
+ /* Don't bother accumulating clients whose hwcnt requests
+ * have not yet been honoured. */
+ if (iter->pending)
+ continue;
+ if (v4)
+ patch_dump_buffer_hdr_v4(vinstr_ctx, iter);
+ else
+ patch_dump_buffer_hdr_v5(vinstr_ctx, iter);
+ accum_dump_buffer(
+ iter->accum_buffer,
+ vinstr_ctx->cpu_va,
+ iter->dump_size);
+ }
+}
+
+/*****************************************************************************/
+
+/**
+ * kbasep_vinstr_get_timestamp - return timestamp
+ *
+ * Function returns a timestamp based on the raw monotonic clock. The value
+ * wraps around zero in case of overflow.
+ *
+ * Return: timestamp value
+ */
+static u64 kbasep_vinstr_get_timestamp(void)
+{
+ struct timespec ts;
+
+ getrawmonotonic(&ts);
+ return (u64)ts.tv_sec * NSECS_IN_SEC + ts.tv_nsec;
+}
+
+/**
+ * kbasep_vinstr_add_dump_request - register client's dumping request
+ * @cli: requesting client
+ * @waiting_clients: list of pending dumping requests
+ */
+static void kbasep_vinstr_add_dump_request(
+ struct kbase_vinstr_client *cli,
+ struct list_head *waiting_clients)
+{
+ struct kbase_vinstr_client *tmp;
+
+ if (list_empty(waiting_clients)) {
+ list_add(&cli->list, waiting_clients);
+ return;
+ }
+ list_for_each_entry(tmp, waiting_clients, list) {
+ if (tmp->dump_time > cli->dump_time) {
+ list_add_tail(&cli->list, &tmp->list);
+ return;
+ }
+ }
+ list_add_tail(&cli->list, waiting_clients);
+}
+
+/**
+ * kbasep_vinstr_collect_and_accumulate - collect hw counters via low level
+ * dump and accumulate them for known
+ * clients
+ * @vinstr_ctx: vinstr context
+ * @timestamp: pointer where collection timestamp will be recorded
+ *
+ * Return: zero on success
+ */
+static int kbasep_vinstr_collect_and_accumulate(
+ struct kbase_vinstr_context *vinstr_ctx, u64 *timestamp)
+{
+ unsigned long flags;
+ int rcode;
+
+#ifdef CONFIG_MALI_NO_MALI
+ /* The dummy model needs the CPU mapping. */
+ gpu_model_set_dummy_prfcnt_base_cpu(vinstr_ctx->cpu_va);
+#endif
+
+ spin_lock_irqsave(&vinstr_ctx->state_lock, flags);
+ if (VINSTR_IDLE != vinstr_ctx->state) {
+ spin_unlock_irqrestore(&vinstr_ctx->state_lock, flags);
+ return -EAGAIN;
+ } else {
+ vinstr_ctx->state = VINSTR_DUMPING;
+ }
+ spin_unlock_irqrestore(&vinstr_ctx->state_lock, flags);
+
+ /* Request HW counters dump.
+ * Disable preemption to make dump timestamp more accurate. */
+ preempt_disable();
+ *timestamp = kbasep_vinstr_get_timestamp();
+ rcode = kbase_instr_hwcnt_request_dump(vinstr_ctx->kctx);
+ preempt_enable();
+
+ if (!rcode)
+ rcode = kbase_instr_hwcnt_wait_for_dump(vinstr_ctx->kctx);
+ WARN_ON(rcode);
+
+ spin_lock_irqsave(&vinstr_ctx->state_lock, flags);
+ switch (vinstr_ctx->state) {
+ case VINSTR_SUSPENDING:
+ schedule_work(&vinstr_ctx->suspend_work);
+ break;
+ case VINSTR_DUMPING:
+ vinstr_ctx->state = VINSTR_IDLE;
+ wake_up_all(&vinstr_ctx->suspend_waitq);
+ break;
+ default:
+ break;
+ }
+ spin_unlock_irqrestore(&vinstr_ctx->state_lock, flags);
+
+ /* Accumulate values of collected counters. */
+ if (!rcode)
+ accum_clients(vinstr_ctx);
+
+ return rcode;
+}
+
+/**
+ * kbasep_vinstr_fill_dump_buffer - copy accumulated counters to empty kernel
+ * buffer
+ * @cli: requesting client
+ * @timestamp: timestamp when counters were collected
+ * @event_id: id of the event that triggered counter collection
+ *
+ * Return: zero on success
+ */
+static int kbasep_vinstr_fill_dump_buffer(
+ struct kbase_vinstr_client *cli, u64 timestamp,
+ enum base_hwcnt_reader_event event_id)
+{
+ unsigned int write_idx = atomic_read(&cli->write_idx);
+ unsigned int read_idx = atomic_read(&cli->read_idx);
+
+ struct kbase_hwcnt_reader_metadata *meta;
+ void *buffer;
+
+ /* Check if there is a place to copy HWC block into. */
+ if (write_idx - read_idx == cli->buffer_count)
+ return -1;
+ write_idx %= cli->buffer_count;
+
+ /* Fill in dump buffer and its metadata. */
+ buffer = &cli->dump_buffers[write_idx * cli->dump_size];
+ meta = &cli->dump_buffers_meta[write_idx];
+ meta->timestamp = timestamp;
+ meta->event_id = event_id;
+ meta->buffer_idx = write_idx;
+ memcpy(buffer, cli->accum_buffer, cli->dump_size);
+ return 0;
+}
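+
+/* Ring buffer bookkeeping (illustrative example): write_idx, meta_idx and
+ * read_idx are free-running counters and buffer_count is a power of two,
+ * so "% cli->buffer_count" selects the slot. With buffer_count = 4,
+ * write_idx = 6 and read_idx = 2 the ring is full (6 - 2 == 4) and the
+ * dump above is dropped until userspace returns a buffer via PUT_BUFFER. */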
+
+/**
+ * kbasep_vinstr_fill_dump_buffer_legacy - copy accumulated counters to buffer
+ * allocated in userspace
+ * @cli: requesting client
+ *
+ * Return: zero on success
+ *
+ * This is part of the legacy ioctl interface.
+ */
+static int kbasep_vinstr_fill_dump_buffer_legacy(
+ struct kbase_vinstr_client *cli)
+{
+ void __user *buffer = cli->legacy_buffer;
+ int rcode;
+
+ /* Copy data to user buffer. */
+ rcode = copy_to_user(buffer, cli->accum_buffer, cli->dump_size);
+ if (rcode)
+ pr_warn("error while copying buffer to user\n");
+ return rcode;
+}
+
+/**
+ * kbasep_vinstr_fill_dump_buffer_kernel - copy accumulated counters to buffer
+ * allocated in kernel space
+ * @cli: requesting client
+ *
+ * Return: zero on success
+ *
+ * This is part of the kernel client interface.
+ */
+static int kbasep_vinstr_fill_dump_buffer_kernel(
+ struct kbase_vinstr_client *cli)
+{
+ memcpy(cli->kernel_buffer, cli->accum_buffer, cli->dump_size);
+
+ return 0;
+}
+
+/**
+ * kbasep_vinstr_reprogram - reprogram the hwcnt set collected by the instrumentation
+ * @vinstr_ctx: vinstr context
+ */
+static void kbasep_vinstr_reprogram(
+ struct kbase_vinstr_context *vinstr_ctx)
+{
+ unsigned long flags;
+ bool suspended = false;
+
+ /* Don't enable hardware counters if vinstr is suspended. */
+ spin_lock_irqsave(&vinstr_ctx->state_lock, flags);
+ if (VINSTR_IDLE != vinstr_ctx->state)
+ suspended = true;
+ spin_unlock_irqrestore(&vinstr_ctx->state_lock, flags);
+ if (suspended)
+ return;
+
+ /* The change to the suspended state is done while holding the vinstr
+ * context lock. The code below will then not re-enable the
+ * instrumentation. */
+
+ if (vinstr_ctx->reprogram) {
+ struct kbase_vinstr_client *iter;
+
+ if (!reprogram_hwcnt(vinstr_ctx)) {
+ vinstr_ctx->reprogram = false;
+ list_for_each_entry(
+ iter,
+ &vinstr_ctx->idle_clients,
+ list)
+ iter->pending = false;
+ list_for_each_entry(
+ iter,
+ &vinstr_ctx->waiting_clients,
+ list)
+ iter->pending = false;
+ }
+ }
+}
+
+/**
+ * kbasep_vinstr_update_client - copy accumulated counters to user readable
+ * buffer and notify the user
+ * @cli: requesting client
+ * @timestamp: timestamp when counters were collected
+ * @event_id: id of the event that triggered counter collection
+ *
+ * Return: zero on success
+ */
+static int kbasep_vinstr_update_client(
+ struct kbase_vinstr_client *cli, u64 timestamp,
+ enum base_hwcnt_reader_event event_id)
+{
+ int rcode = 0;
+
+ /* Copy collected counters to user readable buffer. */
+ if (cli->buffer_count)
+ rcode = kbasep_vinstr_fill_dump_buffer(
+ cli, timestamp, event_id);
+ else if (cli->kernel_buffer)
+ rcode = kbasep_vinstr_fill_dump_buffer_kernel(cli);
+ else
+ rcode = kbasep_vinstr_fill_dump_buffer_legacy(cli);
+
+ if (rcode)
+ goto exit;
+
+ /* Notify client. Make sure all changes to memory are visible. */
+ wmb();
+ atomic_inc(&cli->write_idx);
+ wake_up_interruptible(&cli->waitq);
+
+ /* Prepare for next request. */
+ memset(cli->accum_buffer, 0, cli->dump_size);
+
+exit:
+ return rcode;
+}
+
+/**
+ * kbasep_vinstr_wake_up_callback - vinstr wake up timer wake up function
+ *
+ * @hrtimer: high resolution timer
+ *
+ * Return: High resolution timer restart enum.
+ */
+static enum hrtimer_restart kbasep_vinstr_wake_up_callback(
+ struct hrtimer *hrtimer)
+{
+ struct kbasep_vinstr_wake_up_timer *timer =
+ container_of(
+ hrtimer,
+ struct kbasep_vinstr_wake_up_timer,
+ hrtimer);
+
+ KBASE_DEBUG_ASSERT(timer);
+
+ atomic_set(&timer->vinstr_ctx->request_pending, 1);
+ wake_up_all(&timer->vinstr_ctx->waitq);
+
+ return HRTIMER_NORESTART;
+}
+
+/**
+ * kbasep_vinstr_service_task - HWC dumping service thread
+ *
+ * @data: Pointer to vinstr context structure.
+ *
+ * Return: Always returns zero.
+ */
+static int kbasep_vinstr_service_task(void *data)
+{
+ struct kbase_vinstr_context *vinstr_ctx = data;
+ struct kbasep_vinstr_wake_up_timer timer;
+
+ KBASE_DEBUG_ASSERT(vinstr_ctx);
+
+ hrtimer_init(&timer.hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
+ timer.hrtimer.function = kbasep_vinstr_wake_up_callback;
+ timer.vinstr_ctx = vinstr_ctx;
+
+ while (!kthread_should_stop()) {
+ struct kbase_vinstr_client *cli = NULL;
+ struct kbase_vinstr_client *tmp;
+ int rcode;
+
+ u64 timestamp = kbasep_vinstr_get_timestamp();
+ u64 dump_time = 0;
+ struct list_head expired_requests;
+
+ /* Hold lock while performing operations on lists of clients. */
+ mutex_lock(&vinstr_ctx->lock);
+
+ /* Closing thread must not interact with client requests. */
+ if (current == vinstr_ctx->thread) {
+ atomic_set(&vinstr_ctx->request_pending, 0);
+
+ if (!list_empty(&vinstr_ctx->waiting_clients)) {
+ cli = list_first_entry(
+ &vinstr_ctx->waiting_clients,
+ struct kbase_vinstr_client,
+ list);
+ dump_time = cli->dump_time;
+ }
+ }
+
+ if (!cli || ((s64)timestamp - (s64)dump_time < 0ll)) {
+ mutex_unlock(&vinstr_ctx->lock);
+
+ /* Sleep until next dumping event or service request. */
+ if (cli) {
+ u64 diff = dump_time - timestamp;
+
+ hrtimer_start(
+ &timer.hrtimer,
+ ns_to_ktime(diff),
+ HRTIMER_MODE_REL);
+ }
+ wait_event(
+ vinstr_ctx->waitq,
+ atomic_read(
+ &vinstr_ctx->request_pending) ||
+ kthread_should_stop());
+ hrtimer_cancel(&timer.hrtimer);
+ continue;
+ }
+
+ rcode = kbasep_vinstr_collect_and_accumulate(vinstr_ctx,
+ &timestamp);
+
+ INIT_LIST_HEAD(&expired_requests);
+
+ /* Find all expired requests. */
+ list_for_each_entry_safe(
+ cli,
+ tmp,
+ &vinstr_ctx->waiting_clients,
+ list) {
+ s64 tdiff =
+ (s64)(timestamp + DUMPING_RESOLUTION) -
+ (s64)cli->dump_time;
+ if (tdiff >= 0ll) {
+ list_del(&cli->list);
+ list_add(&cli->list, &expired_requests);
+ } else {
+ break;
+ }
+ }
+
+ /* Fill data for each request found. */
+ list_for_each_entry_safe(cli, tmp, &expired_requests, list) {
+ /* Ensure that legacy buffer will not be used from
+ * this kthread context. */
+ BUG_ON(0 == cli->buffer_count);
+ /* Expect only periodically sampled clients. */
+ BUG_ON(0 == cli->dump_interval);
+
+ if (!rcode)
+ kbasep_vinstr_update_client(
+ cli,
+ timestamp,
+ BASE_HWCNT_READER_EVENT_PERIODIC);
+
+ /* Set new dumping time. Drop missed probing times. */
+ do {
+ cli->dump_time += cli->dump_interval;
+ } while (cli->dump_time < timestamp);
+
+ list_del(&cli->list);
+ kbasep_vinstr_add_dump_request(
+ cli,
+ &vinstr_ctx->waiting_clients);
+ }
+
+ /* Reprogram counters set if required. */
+ kbasep_vinstr_reprogram(vinstr_ctx);
+
+ mutex_unlock(&vinstr_ctx->lock);
+ }
+
+ return 0;
+}
+
+/*****************************************************************************/
+
+/**
+ * kbasep_vinstr_hwcnt_reader_buffer_ready - check if client has ready buffers
+ * @cli: pointer to vinstr client structure
+ *
+ * Return: non-zero if the client has at least one filled dump buffer that
+ * has not yet been notified to userspace
+ */
+static int kbasep_vinstr_hwcnt_reader_buffer_ready(
+ struct kbase_vinstr_client *cli)
+{
+ KBASE_DEBUG_ASSERT(cli);
+ return atomic_read(&cli->write_idx) != atomic_read(&cli->meta_idx);
+}
+
+/**
+ * kbasep_vinstr_hwcnt_reader_ioctl_get_buffer - hwcnt reader's ioctl command
+ * @cli: pointer to vinstr client structure
+ * @buffer: pointer to userspace buffer
+ * @size: size of buffer
+ *
+ * Return: zero on success
+ */
+static long kbasep_vinstr_hwcnt_reader_ioctl_get_buffer(
+ struct kbase_vinstr_client *cli, void __user *buffer,
+ size_t size)
+{
+ unsigned int meta_idx = atomic_read(&cli->meta_idx);
+ unsigned int idx = meta_idx % cli->buffer_count;
+
+ struct kbase_hwcnt_reader_metadata *meta = &cli->dump_buffers_meta[idx];
+
+ /* Metadata sanity check. */
+ KBASE_DEBUG_ASSERT(idx == meta->buffer_idx);
+
+ if (sizeof(struct kbase_hwcnt_reader_metadata) != size)
+ return -EINVAL;
+
+ /* Check if there is any buffer available. */
+ if (atomic_read(&cli->write_idx) == meta_idx)
+ return -EAGAIN;
+
+ /* Check if previously taken buffer was put back. */
+ if (atomic_read(&cli->read_idx) != meta_idx)
+ return -EBUSY;
+
+ /* Copy next available buffer's metadata to user. */
+ if (copy_to_user(buffer, meta, size))
+ return -EFAULT;
+
+ atomic_inc(&cli->meta_idx);
+
+ return 0;
+}
+
+/**
+ * kbasep_vinstr_hwcnt_reader_ioctl_put_buffer - hwcnt reader's ioctl command
+ * @cli: pointer to vinstr client structure
+ * @buffer: pointer to userspace buffer
+ * @size: size of buffer
+ *
+ * Return: zero on success
+ */
+static long kbasep_vinstr_hwcnt_reader_ioctl_put_buffer(
+ struct kbase_vinstr_client *cli, void __user *buffer,
+ size_t size)
+{
+ unsigned int read_idx = atomic_read(&cli->read_idx);
+ unsigned int idx = read_idx % cli->buffer_count;
+
+ struct kbase_hwcnt_reader_metadata meta;
+
+ if (sizeof(struct kbase_hwcnt_reader_metadata) != size)
+ return -EINVAL;
+
+ /* Check if any buffer was taken. */
+ if (atomic_read(&cli->meta_idx) == read_idx)
+ return -EPERM;
+
+ /* Check if correct buffer is put back. */
+ if (copy_from_user(&meta, buffer, size))
+ return -EFAULT;
+ if (idx != meta.buffer_idx)
+ return -EINVAL;
+
+ atomic_inc(&cli->read_idx);
+
+ return 0;
+}
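+
+/* Expected userspace handshake for the two ioctls above (illustrative
+ * sketch, not part of the driver): a reader first issues GET_BUFFER to
+ * receive a struct kbase_hwcnt_reader_metadata, reads the dump data at
+ * offset meta.buffer_idx * buffer_size in the mmap'ed region, and finally
+ * returns the same metadata via PUT_BUFFER so the slot can be reused.
+ * GET_BUFFER advances meta_idx, PUT_BUFFER advances read_idx, and the
+ * dumping thread advances write_idx. */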
+
+/**
+ * kbasep_vinstr_hwcnt_reader_ioctl_set_interval - hwcnt reader's ioctl command
+ * @cli: pointer to vinstr client structure
+ * @interval: periodic dumping interval (disable periodic dumping if zero)
+ *
+ * Return: zero on success
+ */
+static long kbasep_vinstr_hwcnt_reader_ioctl_set_interval(
+ struct kbase_vinstr_client *cli, u32 interval)
+{
+ struct kbase_vinstr_context *vinstr_ctx = cli->vinstr_ctx;
+
+ KBASE_DEBUG_ASSERT(vinstr_ctx);
+
+ mutex_lock(&vinstr_ctx->lock);
+
+ list_del(&cli->list);
+
+ cli->dump_interval = interval;
+
+ /* If interval is non-zero, enable periodic dumping for this client. */
+ if (cli->dump_interval) {
+ if (DUMPING_RESOLUTION > cli->dump_interval)
+ cli->dump_interval = DUMPING_RESOLUTION;
+ cli->dump_time =
+ kbasep_vinstr_get_timestamp() + cli->dump_interval;
+
+ kbasep_vinstr_add_dump_request(
+ cli, &vinstr_ctx->waiting_clients);
+
+ atomic_set(&vinstr_ctx->request_pending, 1);
+ wake_up_all(&vinstr_ctx->waitq);
+ } else {
+ list_add(&cli->list, &vinstr_ctx->idle_clients);
+ }
+
+ mutex_unlock(&vinstr_ctx->lock);
+
+ return 0;
+}
+
+/**
+ * kbasep_vinstr_hwcnt_reader_event_mask - return event mask for event id
+ * @event_id: id of event
+ * Return: event_mask, or zero if the event is not supported or not maskable
+ */
+static u32 kbasep_vinstr_hwcnt_reader_event_mask(
+ enum base_hwcnt_reader_event event_id)
+{
+ u32 event_mask = 0;
+
+ switch (event_id) {
+ case BASE_HWCNT_READER_EVENT_PREJOB:
+ case BASE_HWCNT_READER_EVENT_POSTJOB:
+ /* These events are maskable. */
+ event_mask = (1 << event_id);
+ break;
+
+ case BASE_HWCNT_READER_EVENT_MANUAL:
+ case BASE_HWCNT_READER_EVENT_PERIODIC:
+ /* These events are non-maskable. */
+ default:
+ /* Other events are not supported. */
+ break;
+ }
+
+ return event_mask;
+}
+
+/**
+ * kbasep_vinstr_hwcnt_reader_ioctl_enable_event - hwcnt reader's ioctl command
+ * @cli: pointer to vinstr client structure
+ * @event_id: id of event to enable
+ *
+ * Return: zero on success
+ */
+static long kbasep_vinstr_hwcnt_reader_ioctl_enable_event(
+ struct kbase_vinstr_client *cli,
+ enum base_hwcnt_reader_event event_id)
+{
+ struct kbase_vinstr_context *vinstr_ctx = cli->vinstr_ctx;
+ u32 event_mask;
+
+ KBASE_DEBUG_ASSERT(vinstr_ctx);
+
+ event_mask = kbasep_vinstr_hwcnt_reader_event_mask(event_id);
+ if (!event_mask)
+ return -EINVAL;
+
+ mutex_lock(&vinstr_ctx->lock);
+ cli->event_mask |= event_mask;
+ mutex_unlock(&vinstr_ctx->lock);
+
+ return 0;
+}
+
+/**
+ * kbasep_vinstr_hwcnt_reader_ioctl_disable_event - hwcnt reader's ioctl command
+ * @cli: pointer to vinstr client structure
+ * @event_id: id of event to disable
+ *
+ * Return: zero on success
+ */
+static long kbasep_vinstr_hwcnt_reader_ioctl_disable_event(
+ struct kbase_vinstr_client *cli,
+ enum base_hwcnt_reader_event event_id)
+{
+ struct kbase_vinstr_context *vinstr_ctx = cli->vinstr_ctx;
+ u32 event_mask;
+
+ KBASE_DEBUG_ASSERT(vinstr_ctx);
+
+ event_mask = kbasep_vinstr_hwcnt_reader_event_mask(event_id);
+ if (!event_mask)
+ return -EINVAL;
+
+ mutex_lock(&vinstr_ctx->lock);
+ cli->event_mask &= ~event_mask;
+ mutex_unlock(&vinstr_ctx->lock);
+
+ return 0;
+}
+
+/**
+ * kbasep_vinstr_hwcnt_reader_ioctl_get_hwver - hwcnt reader's ioctl command
+ * @cli: pointer to vinstr client structure
+ * @hwver: pointer to user buffer where hw version will be stored
+ *
+ * Return: zero on success
+ */
+static long kbasep_vinstr_hwcnt_reader_ioctl_get_hwver(
+ struct kbase_vinstr_client *cli, u32 __user *hwver)
+{
+#ifndef CONFIG_MALI_NO_MALI
+ struct kbase_vinstr_context *vinstr_ctx = cli->vinstr_ctx;
+#endif
+
+ u32 ver = 5;
+
+#ifndef CONFIG_MALI_NO_MALI
+ KBASE_DEBUG_ASSERT(vinstr_ctx);
+ if (kbase_hw_has_feature(vinstr_ctx->kbdev, BASE_HW_FEATURE_V4))
+ ver = 4;
+#endif
+
+ return put_user(ver, hwver);
+}
+
+/**
+ * kbasep_vinstr_hwcnt_reader_ioctl - hwcnt reader's ioctl
+ * @filp: pointer to file structure
+ * @cmd: user command
+ * @arg: command's argument
+ *
+ * Return: zero on success
+ */
+static long kbasep_vinstr_hwcnt_reader_ioctl(struct file *filp,
+ unsigned int cmd, unsigned long arg)
+{
+ long rcode = 0;
+ struct kbase_vinstr_client *cli;
+
+ KBASE_DEBUG_ASSERT(filp);
+
+ cli = filp->private_data;
+ KBASE_DEBUG_ASSERT(cli);
+
+ if (unlikely(KBASE_HWCNT_READER != _IOC_TYPE(cmd)))
+ return -EINVAL;
+
+ switch (cmd) {
+ case KBASE_HWCNT_READER_GET_API_VERSION:
+ rcode = put_user(HWCNT_READER_API, (u32 __user *)arg);
+ break;
+ case KBASE_HWCNT_READER_GET_HWVER:
+ rcode = kbasep_vinstr_hwcnt_reader_ioctl_get_hwver(
+ cli, (u32 __user *)arg);
+ break;
+ case KBASE_HWCNT_READER_GET_BUFFER_SIZE:
+ KBASE_DEBUG_ASSERT(cli->vinstr_ctx);
+ rcode = put_user(
+ (u32)cli->vinstr_ctx->dump_size,
+ (u32 __user *)arg);
+ break;
+ case KBASE_HWCNT_READER_DUMP:
+ rcode = kbase_vinstr_hwc_dump(
+ cli, BASE_HWCNT_READER_EVENT_MANUAL);
+ break;
+ case KBASE_HWCNT_READER_CLEAR:
+ rcode = kbase_vinstr_hwc_clear(cli);
+ break;
+ case KBASE_HWCNT_READER_GET_BUFFER:
+ rcode = kbasep_vinstr_hwcnt_reader_ioctl_get_buffer(
+ cli, (void __user *)arg, _IOC_SIZE(cmd));
+ break;
+ case KBASE_HWCNT_READER_PUT_BUFFER:
+ rcode = kbasep_vinstr_hwcnt_reader_ioctl_put_buffer(
+ cli, (void __user *)arg, _IOC_SIZE(cmd));
+ break;
+ case KBASE_HWCNT_READER_SET_INTERVAL:
+ rcode = kbasep_vinstr_hwcnt_reader_ioctl_set_interval(
+ cli, (u32)arg);
+ break;
+ case KBASE_HWCNT_READER_ENABLE_EVENT:
+ rcode = kbasep_vinstr_hwcnt_reader_ioctl_enable_event(
+ cli, (enum base_hwcnt_reader_event)arg);
+ break;
+ case KBASE_HWCNT_READER_DISABLE_EVENT:
+ rcode = kbasep_vinstr_hwcnt_reader_ioctl_disable_event(
+ cli, (enum base_hwcnt_reader_event)arg);
+ break;
+ default:
+ rcode = -EINVAL;
+ break;
+ }
+
+ return rcode;
+}
+
+/**
+ * kbasep_vinstr_hwcnt_reader_poll - hwcnt reader's poll
+ * @filp: pointer to file structure
+ * @wait: pointer to poll table
+ * Return: POLLIN if data can be read without blocking, otherwise zero
+ */
+static unsigned int kbasep_vinstr_hwcnt_reader_poll(struct file *filp,
+ poll_table *wait)
+{
+ struct kbase_vinstr_client *cli;
+
+ KBASE_DEBUG_ASSERT(filp);
+ KBASE_DEBUG_ASSERT(wait);
+
+ cli = filp->private_data;
+ KBASE_DEBUG_ASSERT(cli);
+
+ poll_wait(filp, &cli->waitq, wait);
+ if (kbasep_vinstr_hwcnt_reader_buffer_ready(cli))
+ return POLLIN;
+ return 0;
+}
+
+/**
+ * kbasep_vinstr_hwcnt_reader_mmap - hwcnt reader's mmap
+ * @filp: pointer to file structure
+ * @vma: pointer to vma structure
+ * Return: zero on success
+ */
+static int kbasep_vinstr_hwcnt_reader_mmap(struct file *filp,
+ struct vm_area_struct *vma)
+{
+ struct kbase_vinstr_client *cli;
+ unsigned long size, addr, pfn, offset;
+ unsigned long vm_size = vma->vm_end - vma->vm_start;
+
+ KBASE_DEBUG_ASSERT(filp);
+ KBASE_DEBUG_ASSERT(vma);
+
+ cli = filp->private_data;
+ KBASE_DEBUG_ASSERT(cli);
+
+ size = cli->buffer_count * cli->dump_size;
+
+ if (vma->vm_pgoff > (size >> PAGE_SHIFT))
+ return -EINVAL;
+
+ offset = vma->vm_pgoff << PAGE_SHIFT;
+ if (vm_size > size - offset)
+ return -EINVAL;
+
+ addr = __pa((unsigned long)cli->dump_buffers + offset);
+ pfn = addr >> PAGE_SHIFT;
+
+ return remap_pfn_range(
+ vma,
+ vma->vm_start,
+ pfn,
+ vm_size,
+ vma->vm_page_prot);
+}
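+
+/* Minimal userspace sketch of the mapping set up above (illustrative only;
+ * error handling omitted, the fd comes from the hwcnt reader setup, and the
+ * ioctl request macros are defined in mali_kbase_hwcnt_reader.h). buffer_count
+ * is the value the client passed at setup time:
+ *
+ *   uint32_t buf_size;
+ *   ioctl(fd, KBASE_HWCNT_READER_GET_BUFFER_SIZE, &buf_size);
+ *   void *base = mmap(NULL, buffer_count * buf_size, PROT_READ,
+ *                     MAP_SHARED, fd, 0);
+ *   struct kbase_hwcnt_reader_metadata meta;
+ *   ioctl(fd, KBASE_HWCNT_READER_GET_BUFFER, &meta);
+ *   void *dump = (char *)base + meta.buffer_idx * buf_size;
+ *   ... consume counters ...
+ *   ioctl(fd, KBASE_HWCNT_READER_PUT_BUFFER, &meta);
+ */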
+
+/**
+ * kbasep_vinstr_hwcnt_reader_release - hwcnt reader's release
+ * @inode: pointer to inode structure
+ * @filp: pointer to file structure
+ * Return: always returns zero
+ */
+static int kbasep_vinstr_hwcnt_reader_release(struct inode *inode,
+ struct file *filp)
+{
+ struct kbase_vinstr_client *cli;
+
+ KBASE_DEBUG_ASSERT(inode);
+ KBASE_DEBUG_ASSERT(filp);
+
+ cli = filp->private_data;
+ KBASE_DEBUG_ASSERT(cli);
+
+ kbase_vinstr_detach_client(cli);
+ return 0;
+}
+
+/*****************************************************************************/
+
+/**
+ * kbasep_vinstr_kick_scheduler - trigger scheduler cycle
+ * @kbdev: pointer to kbase device structure
+ */
+static void kbasep_vinstr_kick_scheduler(struct kbase_device *kbdev)
+{
+ struct kbasep_js_device_data *js_devdata = &kbdev->js_data;
+ unsigned long flags;
+
+ down(&js_devdata->schedule_sem);
+ spin_lock_irqsave(&kbdev->hwaccess_lock, flags);
+ kbase_backend_slot_update(kbdev);
+ spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);
+ up(&js_devdata->schedule_sem);
+}
+
+/**
+ * kbasep_vinstr_suspend_worker - worker suspending vinstr module
+ * @data: pointer to work structure
+ */
+static void kbasep_vinstr_suspend_worker(struct work_struct *data)
+{
+ struct kbase_vinstr_context *vinstr_ctx;
+ unsigned long flags;
+
+ vinstr_ctx = container_of(data, struct kbase_vinstr_context,
+ suspend_work);
+
+ mutex_lock(&vinstr_ctx->lock);
+
+ if (vinstr_ctx->kctx)
+ disable_hwcnt(vinstr_ctx);
+
+ spin_lock_irqsave(&vinstr_ctx->state_lock, flags);
+ vinstr_ctx->state = VINSTR_SUSPENDED;
+ wake_up_all(&vinstr_ctx->suspend_waitq);
+ spin_unlock_irqrestore(&vinstr_ctx->state_lock, flags);
+
+ mutex_unlock(&vinstr_ctx->lock);
+
+ /* Kick GPU scheduler to allow entering protected mode.
+ * This must happen after vinstr was suspended. */
+ kbasep_vinstr_kick_scheduler(vinstr_ctx->kbdev);
+}
+
+/**
+ * kbasep_vinstr_resume_worker - worker resuming vinstr module
+ * @data: pointer to work structure
+ */
+static void kbasep_vinstr_resume_worker(struct work_struct *data)
+{
+ struct kbase_vinstr_context *vinstr_ctx;
+ unsigned long flags;
+
+ vinstr_ctx = container_of(data, struct kbase_vinstr_context,
+ resume_work);
+
+ mutex_lock(&vinstr_ctx->lock);
+
+ if (vinstr_ctx->kctx)
+ enable_hwcnt(vinstr_ctx);
+
+ spin_lock_irqsave(&vinstr_ctx->state_lock, flags);
+ vinstr_ctx->state = VINSTR_IDLE;
+ wake_up_all(&vinstr_ctx->suspend_waitq);
+ spin_unlock_irqrestore(&vinstr_ctx->state_lock, flags);
+
+ mutex_unlock(&vinstr_ctx->lock);
+
+ /* Kick GPU scheduler to allow entering protected mode.
+ * Note that the scheduler state machine might have requested re-entry to
+ * protected mode before vinstr was resumed.
+ * This must happen after vinstr has been resumed. */
+ kbasep_vinstr_kick_scheduler(vinstr_ctx->kbdev);
+}
+
+/*****************************************************************************/
+
+struct kbase_vinstr_context *kbase_vinstr_init(struct kbase_device *kbdev)
+{
+ struct kbase_vinstr_context *vinstr_ctx;
+
+ vinstr_ctx = kzalloc(sizeof(*vinstr_ctx), GFP_KERNEL);
+ if (!vinstr_ctx)
+ return NULL;
+
+ INIT_LIST_HEAD(&vinstr_ctx->idle_clients);
+ INIT_LIST_HEAD(&vinstr_ctx->waiting_clients);
+ mutex_init(&vinstr_ctx->lock);
+ spin_lock_init(&vinstr_ctx->state_lock);
+ vinstr_ctx->kbdev = kbdev;
+ vinstr_ctx->thread = NULL;
+ vinstr_ctx->state = VINSTR_IDLE;
+ vinstr_ctx->suspend_cnt = 0;
+ INIT_WORK(&vinstr_ctx->suspend_work, kbasep_vinstr_suspend_worker);
+ INIT_WORK(&vinstr_ctx->resume_work, kbasep_vinstr_resume_worker);
+ init_waitqueue_head(&vinstr_ctx->suspend_waitq);
+
+ atomic_set(&vinstr_ctx->request_pending, 0);
+ init_waitqueue_head(&vinstr_ctx->waitq);
+
+ return vinstr_ctx;
+}
+
+void kbase_vinstr_term(struct kbase_vinstr_context *vinstr_ctx)
+{
+ struct kbase_vinstr_client *cli;
+
+ /* Stop service thread first. */
+ if (vinstr_ctx->thread)
+ kthread_stop(vinstr_ctx->thread);
+
+ /* Wait for workers. */
+ flush_work(&vinstr_ctx->suspend_work);
+ flush_work(&vinstr_ctx->resume_work);
+
+ while (1) {
+ struct list_head *list = &vinstr_ctx->idle_clients;
+
+ if (list_empty(list)) {
+ list = &vinstr_ctx->waiting_clients;
+ if (list_empty(list))
+ break;
+ }
+
+ cli = list_first_entry(list, struct kbase_vinstr_client, list);
+ list_del(&cli->list);
+ kfree(cli->accum_buffer);
+ kfree(cli);
+ vinstr_ctx->nclients--;
+ }
+ KBASE_DEBUG_ASSERT(!vinstr_ctx->nclients);
+ if (vinstr_ctx->kctx)
+ kbasep_vinstr_destroy_kctx(vinstr_ctx);
+ kfree(vinstr_ctx);
+}
+
+int kbase_vinstr_hwcnt_reader_setup(struct kbase_vinstr_context *vinstr_ctx,
+ struct kbase_uk_hwcnt_reader_setup *setup)
+{
+ struct kbase_vinstr_client *cli;
+ u32 bitmap[4];
+
+ KBASE_DEBUG_ASSERT(vinstr_ctx);
+ KBASE_DEBUG_ASSERT(setup);
+ KBASE_DEBUG_ASSERT(setup->buffer_count);
+
+ bitmap[SHADER_HWCNT_BM] = setup->shader_bm;
+ bitmap[TILER_HWCNT_BM] = setup->tiler_bm;
+ bitmap[MMU_L2_HWCNT_BM] = setup->mmu_l2_bm;
+ bitmap[JM_HWCNT_BM] = setup->jm_bm;
+
+ cli = kbasep_vinstr_attach_client(
+ vinstr_ctx,
+ setup->buffer_count,
+ bitmap,
+ &setup->fd,
+ NULL);
+
+ if (!cli)
+ return -ENOMEM;
+
+ return 0;
+}
+
+int kbase_vinstr_legacy_hwc_setup(
+ struct kbase_vinstr_context *vinstr_ctx,
+ struct kbase_vinstr_client **cli,
+ struct kbase_uk_hwcnt_setup *setup)
+{
+ KBASE_DEBUG_ASSERT(vinstr_ctx);
+ KBASE_DEBUG_ASSERT(setup);
+ KBASE_DEBUG_ASSERT(cli);
+
+ if (setup->dump_buffer) {
+ u32 bitmap[4];
+
+ bitmap[SHADER_HWCNT_BM] = setup->shader_bm;
+ bitmap[TILER_HWCNT_BM] = setup->tiler_bm;
+ bitmap[MMU_L2_HWCNT_BM] = setup->mmu_l2_bm;
+ bitmap[JM_HWCNT_BM] = setup->jm_bm;
+
+ if (*cli)
+ return -EBUSY;
+
+ *cli = kbasep_vinstr_attach_client(
+ vinstr_ctx,
+ 0,
+ bitmap,
+ (void *)(long)setup->dump_buffer,
+ NULL);
+
+ if (!(*cli))
+ return -ENOMEM;
+ } else {
+ if (!*cli)
+ return -EINVAL;
+
+ kbase_vinstr_detach_client(*cli);
+ *cli = NULL;
+ }
+
+ return 0;
+}
+
+struct kbase_vinstr_client *kbase_vinstr_hwcnt_kernel_setup(
+ struct kbase_vinstr_context *vinstr_ctx,
+ struct kbase_uk_hwcnt_reader_setup *setup,
+ void *kernel_buffer)
+{
+ u32 bitmap[4];
+
+ if (!vinstr_ctx || !setup || !kernel_buffer)
+ return NULL;
+
+ bitmap[SHADER_HWCNT_BM] = setup->shader_bm;
+ bitmap[TILER_HWCNT_BM] = setup->tiler_bm;
+ bitmap[MMU_L2_HWCNT_BM] = setup->mmu_l2_bm;
+ bitmap[JM_HWCNT_BM] = setup->jm_bm;
+
+ return kbasep_vinstr_attach_client(
+ vinstr_ctx,
+ 0,
+ bitmap,
+ NULL,
+ kernel_buffer);
+}
+KBASE_EXPORT_TEST_API(kbase_vinstr_hwcnt_kernel_setup);
+
+int kbase_vinstr_hwc_dump(struct kbase_vinstr_client *cli,
+ enum base_hwcnt_reader_event event_id)
+{
+ int rcode = 0;
+ struct kbase_vinstr_context *vinstr_ctx;
+ u64 timestamp;
+ u32 event_mask;
+
+ if (!cli)
+ return -EINVAL;
+
+ vinstr_ctx = cli->vinstr_ctx;
+ KBASE_DEBUG_ASSERT(vinstr_ctx);
+
+ KBASE_DEBUG_ASSERT(event_id < BASE_HWCNT_READER_EVENT_COUNT);
+ event_mask = 1 << event_id;
+
+ mutex_lock(&vinstr_ctx->lock);
+
+ if (event_mask & cli->event_mask) {
+ rcode = kbasep_vinstr_collect_and_accumulate(
+ vinstr_ctx,
+ &timestamp);
+ if (rcode)
+ goto exit;
+
+ rcode = kbasep_vinstr_update_client(cli, timestamp, event_id);
+ if (rcode)
+ goto exit;
+
+ kbasep_vinstr_reprogram(vinstr_ctx);
+ }
+
+exit:
+ mutex_unlock(&vinstr_ctx->lock);
+
+ return rcode;
+}
+KBASE_EXPORT_TEST_API(kbase_vinstr_hwc_dump);
+
+int kbase_vinstr_hwc_clear(struct kbase_vinstr_client *cli)
+{
+ struct kbase_vinstr_context *vinstr_ctx;
+ int rcode;
+ u64 unused;
+
+ if (!cli)
+ return -EINVAL;
+
+ vinstr_ctx = cli->vinstr_ctx;
+ KBASE_DEBUG_ASSERT(vinstr_ctx);
+
+ mutex_lock(&vinstr_ctx->lock);
+
+ rcode = kbasep_vinstr_collect_and_accumulate(vinstr_ctx, &unused);
+ if (rcode)
+ goto exit;
+ rcode = kbase_instr_hwcnt_clear(vinstr_ctx->kctx);
+ if (rcode)
+ goto exit;
+ memset(cli->accum_buffer, 0, cli->dump_size);
+
+ kbasep_vinstr_reprogram(vinstr_ctx);
+
+exit:
+ mutex_unlock(&vinstr_ctx->lock);
+
+ return rcode;
+}
+
+int kbase_vinstr_try_suspend(struct kbase_vinstr_context *vinstr_ctx)
+{
+ unsigned long flags;
+ int ret = -EAGAIN;
+
+ KBASE_DEBUG_ASSERT(vinstr_ctx);
+
+ spin_lock_irqsave(&vinstr_ctx->state_lock, flags);
+ switch (vinstr_ctx->state) {
+ case VINSTR_SUSPENDED:
+ vinstr_ctx->suspend_cnt++;
+ /* overflow shall not happen */
+ BUG_ON(0 == vinstr_ctx->suspend_cnt);
+ ret = 0;
+ break;
+
+ case VINSTR_IDLE:
+ vinstr_ctx->state = VINSTR_SUSPENDING;
+ schedule_work(&vinstr_ctx->suspend_work);
+ break;
+
+ case VINSTR_DUMPING:
+ vinstr_ctx->state = VINSTR_SUSPENDING;
+ break;
+
+ case VINSTR_SUSPENDING:
+ /* fall through */
+ case VINSTR_RESUMING:
+ break;
+
+ default:
+ BUG();
+ break;
+ }
+ spin_unlock_irqrestore(&vinstr_ctx->state_lock, flags);
+
+ return ret;
+}
+
+void kbase_vinstr_suspend(struct kbase_vinstr_context *vinstr_ctx)
+{
+ wait_event(vinstr_ctx->suspend_waitq,
+ (0 == kbase_vinstr_try_suspend(vinstr_ctx)));
+}
+
+void kbase_vinstr_resume(struct kbase_vinstr_context *vinstr_ctx)
+{
+ unsigned long flags;
+
+ KBASE_DEBUG_ASSERT(vinstr_ctx);
+
+ spin_lock_irqsave(&vinstr_ctx->state_lock, flags);
+ BUG_ON(VINSTR_SUSPENDING == vinstr_ctx->state);
+ if (VINSTR_SUSPENDED == vinstr_ctx->state) {
+ BUG_ON(0 == vinstr_ctx->suspend_cnt);
+ vinstr_ctx->suspend_cnt--;
+ if (0 == vinstr_ctx->suspend_cnt) {
+ vinstr_ctx->state = VINSTR_RESUMING;
+ schedule_work(&vinstr_ctx->resume_work);
+ }
+ }
+ spin_unlock_irqrestore(&vinstr_ctx->state_lock, flags);
+}
diff --git a/midgard-0.r2p0/mali_kbase_vinstr.h b/midgard-0.r2p0/mali_kbase_vinstr.h
new file mode 100755
index 0000000..6207d25
--- /dev/null
+++ b/midgard-0.r2p0/mali_kbase_vinstr.h
@@ -0,0 +1,155 @@
+/*
+ *
+ * (C) COPYRIGHT 2015-2016 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained
+ * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
+ * Boston, MA 02110-1301, USA.
+ *
+ */
+
+
+
+#ifndef _KBASE_VINSTR_H_
+#define _KBASE_VINSTR_H_
+
+#include <mali_kbase.h>
+#include <mali_kbase_hwcnt_reader.h>
+
+/*****************************************************************************/
+
+struct kbase_vinstr_context;
+struct kbase_vinstr_client;
+
+/*****************************************************************************/
+
+/**
+ * kbase_vinstr_init() - initialize the vinstr core
+ * @kbdev: kbase device
+ *
+ * Return: pointer to the vinstr context on success or NULL on failure
+ */
+struct kbase_vinstr_context *kbase_vinstr_init(struct kbase_device *kbdev);
+
+/**
+ * kbase_vinstr_term() - terminate the vinstr core
+ * @vinstr_ctx: vinstr context
+ */
+void kbase_vinstr_term(struct kbase_vinstr_context *vinstr_ctx);
+
+/**
+ * kbase_vinstr_hwcnt_reader_setup - configure hw counters reader
+ * @vinstr_ctx: vinstr context
+ * @setup: reader's configuration
+ *
+ * Return: zero on success
+ */
+int kbase_vinstr_hwcnt_reader_setup(
+ struct kbase_vinstr_context *vinstr_ctx,
+ struct kbase_uk_hwcnt_reader_setup *setup);
+
+/**
+ * kbase_vinstr_legacy_hwc_setup - configure hw counters for dumping
+ * @vinstr_ctx: vinstr context
+ * @cli: pointer where to store pointer to new vinstr client structure
+ * @setup: hwc configuration
+ *
+ * Return: zero on success
+ */
+int kbase_vinstr_legacy_hwc_setup(
+ struct kbase_vinstr_context *vinstr_ctx,
+ struct kbase_vinstr_client **cli,
+ struct kbase_uk_hwcnt_setup *setup);
+
+/**
+ * kbase_vinstr_hwcnt_kernel_setup - configure hw counters for kernel side
+ * client
+ * @vinstr_ctx: vinstr context
+ * @setup: reader's configuration
+ * @kernel_buffer: pointer to dump buffer
+ *
+ * setup->buffer_count and setup->fd are not used for kernel side clients.
+ *
+ * Return: pointer to client structure, or NULL on failure
+ */
+struct kbase_vinstr_client *kbase_vinstr_hwcnt_kernel_setup(
+ struct kbase_vinstr_context *vinstr_ctx,
+ struct kbase_uk_hwcnt_reader_setup *setup,
+ void *kernel_buffer);
+
+/**
+ * kbase_vinstr_hwc_dump - issue counter dump for vinstr client
+ * @cli: pointer to vinstr client
+ * @event_id: id of event that triggered hwcnt dump
+ *
+ * Return: zero on success
+ */
+int kbase_vinstr_hwc_dump(
+ struct kbase_vinstr_client *cli,
+ enum base_hwcnt_reader_event event_id);
+
+/**
+ * kbase_vinstr_hwc_clear - performs a reset of the hardware counters for
+ * a given kbase context
+ * @cli: pointer to vinstr client
+ *
+ * Return: zero on success
+ */
+int kbase_vinstr_hwc_clear(struct kbase_vinstr_client *cli);
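+
+/* Example of an in-kernel client (illustrative sketch only, with error
+ * handling omitted; "setup" is a kbase_uk_hwcnt_reader_setup holding the
+ * counter enable bitmaps): allocate a dump buffer of kbase_vinstr_dump_size()
+ * bytes, attach with kbase_vinstr_hwcnt_kernel_setup(), then trigger manual
+ * dumps into that buffer:
+ *
+ *   size_t size = kbase_vinstr_dump_size(kbdev);
+ *   void *buf = kzalloc(size, GFP_KERNEL);
+ *   struct kbase_vinstr_client *cli =
+ *           kbase_vinstr_hwcnt_kernel_setup(vinstr_ctx, &setup, buf);
+ *   kbase_vinstr_hwc_dump(cli, BASE_HWCNT_READER_EVENT_MANUAL);
+ *   ... read counters from buf ...
+ *   kbase_vinstr_detach_client(cli);
+ *   kfree(buf);
+ */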
+
+/**
+ * kbase_vinstr_try_suspend - try suspending operation of a given vinstr context
+ * @vinstr_ctx: vinstr context
+ *
+ * Return: 0 on success, or negative if state change is in progress
+ *
+ * Warning: This API call is not generic. It is meant to be used only by the
+ * job scheduler state machine.
+ *
+ * The function initiates the vinstr switch to the suspended state. Once it
+ * has been called, vinstr enters the suspending state. If the function
+ * returns a non-zero value, the state switch is not complete and the
+ * function must be called again. When the state switch completes, vinstr
+ * triggers a job scheduler state machine cycle.
+ */
+int kbase_vinstr_try_suspend(struct kbase_vinstr_context *vinstr_ctx);
+
+/**
+ * kbase_vinstr_suspend - suspends operation of a given vinstr context
+ * @vinstr_ctx: vinstr context
+ *
+ * Function initiates the vinstr switch to the suspended state, then blocks
+ * until the operation is complete.
+ */
+void kbase_vinstr_suspend(struct kbase_vinstr_context *vinstr_ctx);
+
+/**
+ * kbase_vinstr_resume - resumes operation of a given vinstr context
+ * @vinstr_ctx: vinstr context
+ *
+ * This function may only be called after a preceding successful call to
+ * kbase_vinstr_suspend().
+ */
+void kbase_vinstr_resume(struct kbase_vinstr_context *vinstr_ctx);
+
+/**
+ * kbase_vinstr_dump_size - Return required size of dump buffer
+ * @kbdev: device pointer
+ *
+ * Return: buffer size in bytes
+ */
+size_t kbase_vinstr_dump_size(struct kbase_device *kbdev);
+
+/**
+ * kbase_vinstr_detach_client - Detach a client from the vinstr core
+ * @cli: pointer to vinstr client
+ */
+void kbase_vinstr_detach_client(struct kbase_vinstr_client *cli);
+
+#endif /* _KBASE_VINSTR_H_ */
+
diff --git a/midgard-0.r2p0/mali_linux_kbase_trace.h b/midgard-0.r2p0/mali_linux_kbase_trace.h
new file mode 100755
index 0000000..5d6b402
--- /dev/null
+++ b/midgard-0.r2p0/mali_linux_kbase_trace.h
@@ -0,0 +1,201 @@
+/*
+ *
+ * (C) COPYRIGHT 2014 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained
+ * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
+ * Boston, MA 02110-1301, USA.
+ *
+ */
+
+
+
+
+#if !defined(_TRACE_MALI_KBASE_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_MALI_KBASE_H
+
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM mali
+
+#include <linux/tracepoint.h>
+
+DECLARE_EVENT_CLASS(mali_slot_template,
+ TP_PROTO(int jobslot, unsigned int info_val),
+ TP_ARGS(jobslot, info_val),
+ TP_STRUCT__entry(
+ __field(unsigned int, jobslot)
+ __field(unsigned int, info_val)
+ ),
+ TP_fast_assign(
+ __entry->jobslot = jobslot;
+ __entry->info_val = info_val;
+ ),
+ TP_printk("jobslot=%u info=%u", __entry->jobslot, __entry->info_val)
+);
+
+#define DEFINE_MALI_SLOT_EVENT(name) \
+DEFINE_EVENT(mali_slot_template, mali_##name, \
+ TP_PROTO(int jobslot, unsigned int info_val), \
+ TP_ARGS(jobslot, info_val))
+DEFINE_MALI_SLOT_EVENT(JM_SUBMIT);
+DEFINE_MALI_SLOT_EVENT(JM_JOB_DONE);
+DEFINE_MALI_SLOT_EVENT(JM_UPDATE_HEAD);
+DEFINE_MALI_SLOT_EVENT(JM_CHECK_HEAD);
+DEFINE_MALI_SLOT_EVENT(JM_SOFTSTOP);
+DEFINE_MALI_SLOT_EVENT(JM_SOFTSTOP_0);
+DEFINE_MALI_SLOT_EVENT(JM_SOFTSTOP_1);
+DEFINE_MALI_SLOT_EVENT(JM_HARDSTOP);
+DEFINE_MALI_SLOT_EVENT(JM_HARDSTOP_0);
+DEFINE_MALI_SLOT_EVENT(JM_HARDSTOP_1);
+DEFINE_MALI_SLOT_EVENT(JM_SLOT_SOFT_OR_HARD_STOP);
+DEFINE_MALI_SLOT_EVENT(JM_SLOT_EVICT);
+DEFINE_MALI_SLOT_EVENT(JM_BEGIN_RESET_WORKER);
+DEFINE_MALI_SLOT_EVENT(JM_END_RESET_WORKER);
+DEFINE_MALI_SLOT_EVENT(JS_CORE_REF_REGISTER_ON_RECHECK_FAILED);
+DEFINE_MALI_SLOT_EVENT(JS_AFFINITY_SUBMIT_TO_BLOCKED);
+DEFINE_MALI_SLOT_EVENT(JS_AFFINITY_CURRENT);
+DEFINE_MALI_SLOT_EVENT(JD_DONE_TRY_RUN_NEXT_JOB);
+DEFINE_MALI_SLOT_EVENT(JS_CORE_REF_REQUEST_CORES_FAILED);
+DEFINE_MALI_SLOT_EVENT(JS_CORE_REF_REGISTER_INUSE_FAILED);
+DEFINE_MALI_SLOT_EVENT(JS_CORE_REF_REQUEST_ON_RECHECK_FAILED);
+DEFINE_MALI_SLOT_EVENT(JS_CORE_REF_AFFINITY_WOULD_VIOLATE);
+DEFINE_MALI_SLOT_EVENT(JS_JOB_DONE_TRY_RUN_NEXT_JOB);
+DEFINE_MALI_SLOT_EVENT(JS_JOB_DONE_RETRY_NEEDED);
+DEFINE_MALI_SLOT_EVENT(JS_POLICY_DEQUEUE_JOB);
+DEFINE_MALI_SLOT_EVENT(JS_POLICY_DEQUEUE_JOB_IRQ);
+#undef DEFINE_MALI_SLOT_EVENT
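+
+/* Each DEFINE_MALI_SLOT_EVENT(name) above expands, via DEFINE_EVENT, into a
+ * tracepoint the driver can emit as trace_mali_<name>(), taking a job slot
+ * index and an implementation-defined info value, for example (illustrative
+ * only):
+ *
+ *   trace_mali_JM_SUBMIT(js, info_val);
+ */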
+
+DECLARE_EVENT_CLASS(mali_refcount_template,
+ TP_PROTO(int refcount, unsigned int info_val),
+ TP_ARGS(refcount, info_val),
+ TP_STRUCT__entry(
+ __field(unsigned int, refcount)
+ __field(unsigned int, info_val)
+ ),
+ TP_fast_assign(
+ __entry->refcount = refcount;
+ __entry->info_val = info_val;
+ ),
+ TP_printk("refcount=%u info=%u", __entry->refcount, __entry->info_val)
+);
+
+#define DEFINE_MALI_REFCOUNT_EVENT(name) \
+DEFINE_EVENT(mali_refcount_template, mali_##name, \
+ TP_PROTO(int refcount, unsigned int info_val), \
+ TP_ARGS(refcount, info_val))
+DEFINE_MALI_REFCOUNT_EVENT(JS_RETAIN_CTX_NOLOCK);
+DEFINE_MALI_REFCOUNT_EVENT(JS_ADD_JOB);
+DEFINE_MALI_REFCOUNT_EVENT(JS_REMOVE_JOB);
+DEFINE_MALI_REFCOUNT_EVENT(JS_RETAIN_CTX);
+DEFINE_MALI_REFCOUNT_EVENT(JS_RELEASE_CTX);
+DEFINE_MALI_REFCOUNT_EVENT(JS_TRY_SCHEDULE_HEAD_CTX);
+DEFINE_MALI_REFCOUNT_EVENT(JS_POLICY_INIT_CTX);
+DEFINE_MALI_REFCOUNT_EVENT(JS_POLICY_TERM_CTX);
+DEFINE_MALI_REFCOUNT_EVENT(JS_POLICY_ENQUEUE_CTX);
+DEFINE_MALI_REFCOUNT_EVENT(JS_POLICY_DEQUEUE_HEAD_CTX);
+DEFINE_MALI_REFCOUNT_EVENT(JS_POLICY_TRY_EVICT_CTX);
+DEFINE_MALI_REFCOUNT_EVENT(JS_POLICY_RUNPOOL_ADD_CTX);
+DEFINE_MALI_REFCOUNT_EVENT(JS_POLICY_RUNPOOL_REMOVE_CTX);
+DEFINE_MALI_REFCOUNT_EVENT(JS_POLICY_FOREACH_CTX_JOBS);
+DEFINE_MALI_REFCOUNT_EVENT(PM_CONTEXT_ACTIVE);
+DEFINE_MALI_REFCOUNT_EVENT(PM_CONTEXT_IDLE);
+#undef DEFINE_MALI_REFCOUNT_EVENT
+
+DECLARE_EVENT_CLASS(mali_add_template,
+ TP_PROTO(int gpu_addr, unsigned int info_val),
+ TP_ARGS(gpu_addr, info_val),
+ TP_STRUCT__entry(
+ __field(unsigned int, gpu_addr)
+ __field(unsigned int, info_val)
+ ),
+ TP_fast_assign(
+ __entry->gpu_addr = gpu_addr;
+ __entry->info_val = info_val;
+ ),
+ TP_printk("gpu_addr=%u info=%u", __entry->gpu_addr, __entry->info_val)
+);
+
+#define DEFINE_MALI_ADD_EVENT(name) \
+DEFINE_EVENT(mali_add_template, mali_##name, \
+ TP_PROTO(int gpu_addr, unsigned int info_val), \
+ TP_ARGS(gpu_addr, info_val))
+DEFINE_MALI_ADD_EVENT(CORE_CTX_DESTROY);
+DEFINE_MALI_ADD_EVENT(CORE_CTX_HWINSTR_TERM);
+DEFINE_MALI_ADD_EVENT(CORE_GPU_IRQ);
+DEFINE_MALI_ADD_EVENT(CORE_GPU_IRQ_CLEAR);
+DEFINE_MALI_ADD_EVENT(CORE_GPU_IRQ_DONE);
+DEFINE_MALI_ADD_EVENT(CORE_GPU_SOFT_RESET);
+DEFINE_MALI_ADD_EVENT(CORE_GPU_HARD_RESET);
+DEFINE_MALI_ADD_EVENT(CORE_GPU_PRFCNT_SAMPLE);
+DEFINE_MALI_ADD_EVENT(CORE_GPU_PRFCNT_CLEAR);
+DEFINE_MALI_ADD_EVENT(CORE_GPU_CLEAN_INV_CACHES);
+DEFINE_MALI_ADD_EVENT(JD_DONE_WORKER);
+DEFINE_MALI_ADD_EVENT(JD_DONE_WORKER_END);
+DEFINE_MALI_ADD_EVENT(JD_CANCEL_WORKER);
+DEFINE_MALI_ADD_EVENT(JD_DONE);
+DEFINE_MALI_ADD_EVENT(JD_CANCEL);
+DEFINE_MALI_ADD_EVENT(JD_ZAP_CONTEXT);
+DEFINE_MALI_ADD_EVENT(JM_IRQ);
+DEFINE_MALI_ADD_EVENT(JM_IRQ_END);
+DEFINE_MALI_ADD_EVENT(JM_FLUSH_WORKQS);
+DEFINE_MALI_ADD_EVENT(JM_FLUSH_WORKQS_DONE);
+DEFINE_MALI_ADD_EVENT(JM_ZAP_NON_SCHEDULED);
+DEFINE_MALI_ADD_EVENT(JM_ZAP_SCHEDULED);
+DEFINE_MALI_ADD_EVENT(JM_ZAP_DONE);
+DEFINE_MALI_ADD_EVENT(JM_SUBMIT_AFTER_RESET);
+DEFINE_MALI_ADD_EVENT(JM_JOB_COMPLETE);
+DEFINE_MALI_ADD_EVENT(JS_FAST_START_EVICTS_CTX);
+DEFINE_MALI_ADD_EVENT(JS_CTX_ATTR_NOW_ON_RUNPOOL);
+DEFINE_MALI_ADD_EVENT(JS_CTX_ATTR_NOW_OFF_RUNPOOL);
+DEFINE_MALI_ADD_EVENT(JS_CTX_ATTR_NOW_ON_CTX);
+DEFINE_MALI_ADD_EVENT(JS_CTX_ATTR_NOW_OFF_CTX);
+DEFINE_MALI_ADD_EVENT(JS_POLICY_TIMER_END);
+DEFINE_MALI_ADD_EVENT(JS_POLICY_TIMER_START);
+DEFINE_MALI_ADD_EVENT(JS_POLICY_ENQUEUE_JOB);
+DEFINE_MALI_ADD_EVENT(PM_CORES_CHANGE_DESIRED);
+DEFINE_MALI_ADD_EVENT(PM_JOB_SUBMIT_AFTER_POWERING_UP);
+DEFINE_MALI_ADD_EVENT(PM_JOB_SUBMIT_AFTER_POWERED_UP);
+DEFINE_MALI_ADD_EVENT(PM_PWRON);
+DEFINE_MALI_ADD_EVENT(PM_PWRON_TILER);
+DEFINE_MALI_ADD_EVENT(PM_PWRON_L2);
+DEFINE_MALI_ADD_EVENT(PM_PWROFF);
+DEFINE_MALI_ADD_EVENT(PM_PWROFF_TILER);
+DEFINE_MALI_ADD_EVENT(PM_PWROFF_L2);
+DEFINE_MALI_ADD_EVENT(PM_CORES_POWERED);
+DEFINE_MALI_ADD_EVENT(PM_CORES_POWERED_TILER);
+DEFINE_MALI_ADD_EVENT(PM_CORES_POWERED_L2);
+DEFINE_MALI_ADD_EVENT(PM_DESIRED_REACHED);
+DEFINE_MALI_ADD_EVENT(PM_DESIRED_REACHED_TILER);
+DEFINE_MALI_ADD_EVENT(PM_UNREQUEST_CHANGE_SHADER_NEEDED);
+DEFINE_MALI_ADD_EVENT(PM_REQUEST_CHANGE_SHADER_NEEDED);
+DEFINE_MALI_ADD_EVENT(PM_REGISTER_CHANGE_SHADER_NEEDED);
+DEFINE_MALI_ADD_EVENT(PM_REGISTER_CHANGE_SHADER_INUSE);
+DEFINE_MALI_ADD_EVENT(PM_RELEASE_CHANGE_SHADER_INUSE);
+DEFINE_MALI_ADD_EVENT(PM_CORES_AVAILABLE);
+DEFINE_MALI_ADD_EVENT(PM_CORES_AVAILABLE_TILER);
+DEFINE_MALI_ADD_EVENT(PM_CORES_CHANGE_AVAILABLE);
+DEFINE_MALI_ADD_EVENT(PM_CORES_CHANGE_AVAILABLE_TILER);
+DEFINE_MALI_ADD_EVENT(PM_GPU_ON);
+DEFINE_MALI_ADD_EVENT(PM_GPU_OFF);
+DEFINE_MALI_ADD_EVENT(PM_SET_POLICY);
+DEFINE_MALI_ADD_EVENT(PM_CURRENT_POLICY_INIT);
+DEFINE_MALI_ADD_EVENT(PM_CURRENT_POLICY_TERM);
+DEFINE_MALI_ADD_EVENT(PM_CA_SET_POLICY);
+DEFINE_MALI_ADD_EVENT(PM_WAKE_WAITERS);
+#undef DEFINE_MALI_ADD_EVENT
+
+#endif /* _TRACE_MALI_KBASE_H */
+
+#undef TRACE_INCLUDE_PATH
+#undef linux
+#define TRACE_INCLUDE_PATH .
+#undef TRACE_INCLUDE_FILE
+#define TRACE_INCLUDE_FILE mali_linux_kbase_trace
+
+/* This part must be outside protection */
+#include <trace/define_trace.h>
diff --git a/midgard-0.r2p0/mali_linux_trace.h b/midgard-0.r2p0/mali_linux_trace.h
new file mode 100755
index 0000000..2be06a5
--- /dev/null
+++ b/midgard-0.r2p0/mali_linux_trace.h
@@ -0,0 +1,189 @@
+/*
+ *
+ * (C) COPYRIGHT 2011-2016 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained
+ * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
+ * Boston, MA 02110-1301, USA.
+ *
+ */
+
+
+
+#if !defined(_TRACE_MALI_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_MALI_H
+
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM mali
+#define TRACE_INCLUDE_FILE mali_linux_trace
+
+#include <linux/tracepoint.h>
+
+#define MALI_JOB_SLOTS_EVENT_CHANGED
+
+/**
+ * mali_job_slots_event - Called from mali_kbase_core_linux.c
+ * @event_id: ORed together bitfields representing a type of event, made with the GATOR_MAKE_EVENT() macro.
+ */
+TRACE_EVENT(mali_job_slots_event,
+ TP_PROTO(unsigned int event_id, unsigned int tgid, unsigned int pid,
+ unsigned char job_id),
+ TP_ARGS(event_id, tgid, pid, job_id),
+ TP_STRUCT__entry(
+ __field(unsigned int, event_id)
+ __field(unsigned int, tgid)
+ __field(unsigned int, pid)
+ __field(unsigned char, job_id)
+ ),
+ TP_fast_assign(
+ __entry->event_id = event_id;
+ __entry->tgid = tgid;
+ __entry->pid = pid;
+ __entry->job_id = job_id;
+ ),
+ TP_printk("event=%u tgid=%u pid=%u job_id=%u",
+ __entry->event_id, __entry->tgid, __entry->pid, __entry->job_id)
+);
+
+/**
+ * mali_pm_status - Called by mali_kbase_pm_driver.c
+ * @event_id: core type (shader, tiler, l2 cache)
+ * @value: 64-bit bitmask reporting the power status of the cores (1=ON, 0=OFF)
+ */
+TRACE_EVENT(mali_pm_status,
+ TP_PROTO(unsigned int event_id, unsigned long long value),
+ TP_ARGS(event_id, value),
+ TP_STRUCT__entry(
+ __field(unsigned int, event_id)
+ __field(unsigned long long, value)
+ ),
+ TP_fast_assign(
+ __entry->event_id = event_id;
+ __entry->value = value;
+ ),
+ TP_printk("event %u = %llu", __entry->event_id, __entry->value)
+);
+
+/**
+ * mali_pm_power_on - Called by mali_kbase_pm_driver.c
+ * @event_id: core type (shader, tiler, l2 cache)
+ * @value: 64-bit bitmask reporting the cores to power up
+ */
+TRACE_EVENT(mali_pm_power_on,
+ TP_PROTO(unsigned int event_id, unsigned long long value),
+ TP_ARGS(event_id, value),
+ TP_STRUCT__entry(
+ __field(unsigned int, event_id)
+ __field(unsigned long long, value)
+ ),
+ TP_fast_assign(
+ __entry->event_id = event_id;
+ __entry->value = value;
+ ),
+ TP_printk("event %u = %llu", __entry->event_id, __entry->value)
+);
+
+/**
+ * mali_pm_power_off - Called by mali_kbase_pm_driver.c
+ * @event_id: core type (shader, tiler, l2 cache)
+ * @value: 64-bit bitmask reporting the cores to power down
+ */
+TRACE_EVENT(mali_pm_power_off,
+ TP_PROTO(unsigned int event_id, unsigned long long value),
+ TP_ARGS(event_id, value),
+ TP_STRUCT__entry(
+ __field(unsigned int, event_id)
+ __field(unsigned long long, value)
+ ),
+ TP_fast_assign(
+ __entry->event_id = event_id;
+ __entry->value = value;
+ ),
+ TP_printk("event %u = %llu", __entry->event_id, __entry->value)
+);
+
+/**
+ * mali_page_fault_insert_pages - Called by page_fault_worker();
+ * it reports an MMU page fault resulting in new pages being mapped.
+ * @event_id: MMU address space number.
+ * @value: number of newly allocated pages
+ */
+TRACE_EVENT(mali_page_fault_insert_pages,
+ TP_PROTO(int event_id, unsigned long value),
+ TP_ARGS(event_id, value),
+ TP_STRUCT__entry(
+ __field(int, event_id)
+ __field(unsigned long, value)
+ ),
+ TP_fast_assign(
+ __entry->event_id = event_id;
+ __entry->value = value;
+ ),
+ TP_printk("event %d = %lu", __entry->event_id, __entry->value)
+);
+
+/**
+ * mali_mmu_as_in_use - Called by assign_and_activate_kctx_addr_space();
+ * it reports that a certain MMU address space is now in use.
+ * @event_id: MMU address space number.
+ */
+TRACE_EVENT(mali_mmu_as_in_use,
+ TP_PROTO(int event_id),
+ TP_ARGS(event_id),
+ TP_STRUCT__entry(
+ __field(int, event_id)
+ ),
+ TP_fast_assign(
+ __entry->event_id = event_id;
+ ),
+ TP_printk("event=%d", __entry->event_id)
+);
+
+/**
+ * mali_mmu_as_released - Called by kbasep_js_runpool_release_ctx_internal();
+ * it reports that a certain MMU address space has just been released.
+ * @event_id: MMU address space number.
+ */
+TRACE_EVENT(mali_mmu_as_released,
+ TP_PROTO(int event_id),
+ TP_ARGS(event_id),
+ TP_STRUCT__entry(
+ __field(int, event_id)
+ ),
+ TP_fast_assign(
+ __entry->event_id = event_id;
+ ),
+ TP_printk("event=%d", __entry->event_id)
+);
+
+/**
+ * mali_total_alloc_pages_change - Called by kbase_atomic_add_pages()
+ * and by kbase_atomic_sub_pages();
+ * it reports that the total number of allocated pages has changed.
+ * @event_id: number of pages to be added or subtracted (according to the sign).
+ */
+TRACE_EVENT(mali_total_alloc_pages_change,
+ TP_PROTO(long long int event_id),
+ TP_ARGS(event_id),
+ TP_STRUCT__entry(
+ __field(long long int, event_id)
+ ),
+ TP_fast_assign(
+ __entry->event_id = event_id;
+ ),
+ TP_printk("event=%lld", __entry->event_id)
+);
+
+#endif /* _TRACE_MALI_H */
+
+#undef TRACE_INCLUDE_PATH
+#undef linux
+#define TRACE_INCLUDE_PATH .
+
+/* This part must be outside protection */
+#include <trace/define_trace.h>
diff --git a/midgard-0.r2p0/mali_malisw.h b/midgard-0.r2p0/mali_malisw.h
new file mode 100755
index 0000000..9945293
--- /dev/null
+++ b/midgard-0.r2p0/mali_malisw.h
@@ -0,0 +1,131 @@
+/*
+ *
+ * (C) COPYRIGHT 2014-2015 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained
+ * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
+ * Boston, MA 02110-1301, USA.
+ *
+ */
+
+
+
+/**
+ * Kernel-wide include for common macros and types.
+ */
+
+#ifndef _MALISW_H_
+#define _MALISW_H_
+
+#include <linux/version.h>
+#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 14, 0)
+#define U8_MAX ((u8)~0U)
+#define S8_MAX ((s8)(U8_MAX>>1))
+#define S8_MIN ((s8)(-S8_MAX - 1))
+#define U16_MAX ((u16)~0U)
+#define S16_MAX ((s16)(U16_MAX>>1))
+#define S16_MIN ((s16)(-S16_MAX - 1))
+#define U32_MAX ((u32)~0U)
+#define S32_MAX ((s32)(U32_MAX>>1))
+#define S32_MIN ((s32)(-S32_MAX - 1))
+#define U64_MAX ((u64)~0ULL)
+#define S64_MAX ((s64)(U64_MAX>>1))
+#define S64_MIN ((s64)(-S64_MAX - 1))
+#endif /* LINUX_VERSION_CODE */
+#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 5, 0)
+#define SIZE_MAX (~(size_t)0)
+#endif /* LINUX_VERSION_CODE */
+
+/**
+ * MIN - Return the lesser of two values.
+ *
+ * As a macro it may evaluate its arguments more than once.
+ * Refer to the MAX macro for more details.
+ */
+#define MIN(x, y) ((x) < (y) ? (x) : (y))
+
+/**
+ * MAX - Return the greater of two values.
+ *
+ * As a macro it may evaluate its arguments more than once.
+ * If called on the same two arguments as MIN it is guaranteed to return
+ * the one that MIN didn't return. This is significant for types where not
+ * all values are comparable e.g. NaNs in floating-point types. But if you want
+ * to retrieve the min and max of two values, consider using a conditional swap
+ * instead.
+ */
+#define MAX(x, y) ((x) < (y) ? (y) : (x))
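+
+/*
+ * Usage note (illustrative): because MIN and MAX are plain macros, an
+ * argument with side effects may be evaluated twice. For instance, with
+ * a = 1 and b = 5, MIN(a++, b) expands to ((a++) < (b) ? (a++) : (b)) and
+ * leaves a == 3 while returning 2.
+ */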
+
+/**
+ * @hideinitializer
+ * Function-like macro for suppressing unused variable warnings. Where possible
+ * such variables should be removed; this macro is present for cases where we
+ * must support API backwards compatibility.
+ */
+#define CSTD_UNUSED(x) ((void)(x))
+
+/**
+ * @hideinitializer
+ * Function-like macro for use where "no behavior" is desired. This is useful
+ * when compile-time macros turn a function-like macro into a no-op, but
+ * where having no statement is otherwise invalid.
+ */
+#define CSTD_NOP(...) ((void)#__VA_ARGS__)
+
+/**
+ * Function-like macro for converting a pointer into a u64 for storing into
+ * an external data structure. This is commonly used when pairing a 32-bit
+ * CPU with a 64-bit peripheral, such as a Midgard GPU. C's type promotion
+ * is complex and a straight cast does not work reliably, as pointers are
+ * often considered signed.
+ */
+#define PTR_TO_U64(x) ((uint64_t)((uintptr_t)(x)))
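+
+/*
+ * Usage sketch (illustrative, hypothetical variable names): the double cast
+ * goes via uintptr_t, so a 32-bit pointer is zero-extended rather than
+ * sign-extended:
+ *
+ *   void *cpu_ptr = some_buffer;
+ *   u64 gpu_descriptor_addr = PTR_TO_U64(cpu_ptr);
+ */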
+
+/**
+ * @hideinitializer
+ * Function-like macro for stringizing a single level macro.
+ * @code
+ * #define MY_MACRO 32
+ * CSTD_STR1( MY_MACRO )
+ * > "MY_MACRO"
+ * @endcode
+ */
+#define CSTD_STR1(x) #x
+
+/**
+ * @hideinitializer
+ * Function-like macro for stringizing a macro's value. This should not be used
+ * if the macro is defined in a way which may have no value; the
+ * alternative @c CSTD_STR2N macro should be used instead.
+ * @code
+ * #define MY_MACRO 32
+ * CSTD_STR2( MY_MACRO )
+ * > "32"
+ * @endcode
+ */
+#define CSTD_STR2(x) CSTD_STR1(x)
+
+/**
+ * Specify an assertion value which is evaluated at compile time. Recommended
+ * usage is specification of a @c static @c INLINE function containing all of
+ * the assertions thus:
+ *
+ * @code
+ * static INLINE void [module]_compile_time_assertions(void)
+ * {
+ * COMPILE_TIME_ASSERT( sizeof(uintptr_t) == sizeof(intptr_t) );
+ * }
+ * @endcode
+ *
+ * @note Use @c static not @c STATIC. We never want to turn off this @c static
+ * specification for testing purposes.
+ */
+#define CSTD_COMPILE_TIME_ASSERT(expr) \
+ do { switch (0) { case 0: case (expr):; } } while (false)
+
+#endif /* _MALISW_H_ */
diff --git a/midgard-0.r2p0/mali_midg_coherency.h b/midgard-0.r2p0/mali_midg_coherency.h
new file mode 100755
index 0000000..a509cbd
--- /dev/null
+++ b/midgard-0.r2p0/mali_midg_coherency.h
@@ -0,0 +1,26 @@
+/*
+ *
+ * (C) COPYRIGHT 2015 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained
+ * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
+ * Boston, MA 02110-1301, USA.
+ *
+ */
+
+
+
+#ifndef _MIDG_COHERENCY_H_
+#define _MIDG_COHERENCY_H_
+
+#define COHERENCY_ACE_LITE 0
+#define COHERENCY_ACE 1
+#define COHERENCY_NONE 31
+#define COHERENCY_FEATURE_BIT(x) (1 << (x))
+
+#endif /* _MIDG_COHERENCY_H_ */
diff --git a/midgard-0.r2p0/mali_midg_regmap.h b/midgard-0.r2p0/mali_midg_regmap.h
new file mode 100755
index 0000000..de3053b
--- /dev/null
+++ b/midgard-0.r2p0/mali_midg_regmap.h
@@ -0,0 +1,580 @@
+/*
+ *
+ * (C) COPYRIGHT 2010-2016 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained
+ * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
+ * Boston, MA 02110-1301, USA.
+ *
+ */
+
+
+
+#ifndef _MIDGARD_REGMAP_H_
+#define _MIDGARD_REGMAP_H_
+
+#include "mali_midg_coherency.h"
+#include "mali_kbase_gpu_id.h"
+
+/*
+ * Begin Register Offsets
+ */
+
+#define GPU_CONTROL_BASE 0x0000
+#define GPU_CONTROL_REG(r) (GPU_CONTROL_BASE + (r))
+#define GPU_ID 0x000 /* (RO) GPU and revision identifier */
+#define L2_FEATURES 0x004 /* (RO) Level 2 cache features */
+#define SUSPEND_SIZE 0x008 /* (RO) Fixed-function suspend buffer
+ size */
+#define TILER_FEATURES 0x00C /* (RO) Tiler Features */
+#define MEM_FEATURES 0x010 /* (RO) Memory system features */
+#define MMU_FEATURES 0x014 /* (RO) MMU features */
+#define AS_PRESENT 0x018 /* (RO) Address space slots present */
+#define JS_PRESENT 0x01C /* (RO) Job slots present */
+#define GPU_IRQ_RAWSTAT 0x020 /* (RW) */
+#define GPU_IRQ_CLEAR 0x024 /* (WO) */
+#define GPU_IRQ_MASK 0x028 /* (RW) */
+#define GPU_IRQ_STATUS 0x02C /* (RO) */
+
+/* IRQ flags */
+#define GPU_FAULT (1 << 0) /* A GPU Fault has occurred */
+#define MULTIPLE_GPU_FAULTS (1 << 7) /* More than one GPU Fault occurred. */
+#define RESET_COMPLETED (1 << 8) /* Set when a reset has completed. Intended for use with SOFT_RESET
+ commands which may take time. */
+#define POWER_CHANGED_SINGLE (1 << 9) /* Set when a single core has finished powering up or down. */
+#define POWER_CHANGED_ALL (1 << 10) /* Set when all cores have finished powering up or down
+ and the power manager is idle. */
+
+#define PRFCNT_SAMPLE_COMPLETED (1 << 16) /* Set when a performance count sample has completed. */
+#define CLEAN_CACHES_COMPLETED (1 << 17) /* Set when a cache clean operation has completed. */
+
+#define GPU_IRQ_REG_ALL (GPU_FAULT | MULTIPLE_GPU_FAULTS | RESET_COMPLETED \
+ | POWER_CHANGED_ALL | PRFCNT_SAMPLE_COMPLETED)
+
+#define GPU_COMMAND 0x030 /* (WO) */
+#define GPU_STATUS 0x034 /* (RO) */
+#define LATEST_FLUSH 0x038 /* (RO) */
+
+#define GROUPS_L2_COHERENT (1 << 0) /* Cores groups are l2 coherent */
+#define GPU_DBGEN (1 << 8) /* DBGEN wire status */
+
+#define GPU_FAULTSTATUS 0x03C /* (RO) GPU exception type and fault status */
+#define GPU_FAULTADDRESS_LO 0x040 /* (RO) GPU exception fault address, low word */
+#define GPU_FAULTADDRESS_HI 0x044 /* (RO) GPU exception fault address, high word */
+
+#define PWR_KEY 0x050 /* (WO) Power manager key register */
+#define PWR_OVERRIDE0 0x054 /* (RW) Power manager override settings */
+#define PWR_OVERRIDE1 0x058 /* (RW) Power manager override settings */
+
+#define PRFCNT_BASE_LO 0x060 /* (RW) Performance counter memory region base address, low word */
+#define PRFCNT_BASE_HI 0x064 /* (RW) Performance counter memory region base address, high word */
+#define PRFCNT_CONFIG 0x068 /* (RW) Performance counter configuration */
+#define PRFCNT_JM_EN 0x06C /* (RW) Performance counter enable flags for Job Manager */
+#define PRFCNT_SHADER_EN 0x070 /* (RW) Performance counter enable flags for shader cores */
+#define PRFCNT_TILER_EN 0x074 /* (RW) Performance counter enable flags for tiler */
+#define PRFCNT_MMU_L2_EN 0x07C /* (RW) Performance counter enable flags for MMU/L2 cache */
+
+#define CYCLE_COUNT_LO 0x090 /* (RO) Cycle counter, low word */
+#define CYCLE_COUNT_HI 0x094 /* (RO) Cycle counter, high word */
+#define TIMESTAMP_LO 0x098 /* (RO) Global time stamp counter, low word */
+#define TIMESTAMP_HI 0x09C /* (RO) Global time stamp counter, high word */
+
+#define THREAD_MAX_THREADS 0x0A0 /* (RO) Maximum number of threads per core */
+#define THREAD_MAX_WORKGROUP_SIZE 0x0A4 /* (RO) Maximum workgroup size */
+#define THREAD_MAX_BARRIER_SIZE 0x0A8 /* (RO) Maximum threads waiting at a barrier */
+#define THREAD_FEATURES 0x0AC /* (RO) Thread features */
+
+#define TEXTURE_FEATURES_0 0x0B0 /* (RO) Support flags for indexed texture formats 0..31 */
+#define TEXTURE_FEATURES_1 0x0B4 /* (RO) Support flags for indexed texture formats 32..63 */
+#define TEXTURE_FEATURES_2 0x0B8 /* (RO) Support flags for indexed texture formats 64..95 */
+
+#define TEXTURE_FEATURES_REG(n) GPU_CONTROL_REG(TEXTURE_FEATURES_0 + ((n) << 2))
+
+#define JS0_FEATURES 0x0C0 /* (RO) Features of job slot 0 */
+#define JS1_FEATURES 0x0C4 /* (RO) Features of job slot 1 */
+#define JS2_FEATURES 0x0C8 /* (RO) Features of job slot 2 */
+#define JS3_FEATURES 0x0CC /* (RO) Features of job slot 3 */
+#define JS4_FEATURES 0x0D0 /* (RO) Features of job slot 4 */
+#define JS5_FEATURES 0x0D4 /* (RO) Features of job slot 5 */
+#define JS6_FEATURES 0x0D8 /* (RO) Features of job slot 6 */
+#define JS7_FEATURES 0x0DC /* (RO) Features of job slot 7 */
+#define JS8_FEATURES 0x0E0 /* (RO) Features of job slot 8 */
+#define JS9_FEATURES 0x0E4 /* (RO) Features of job slot 9 */
+#define JS10_FEATURES 0x0E8 /* (RO) Features of job slot 10 */
+#define JS11_FEATURES 0x0EC /* (RO) Features of job slot 11 */
+#define JS12_FEATURES 0x0F0 /* (RO) Features of job slot 12 */
+#define JS13_FEATURES 0x0F4 /* (RO) Features of job slot 13 */
+#define JS14_FEATURES 0x0F8 /* (RO) Features of job slot 14 */
+#define JS15_FEATURES 0x0FC /* (RO) Features of job slot 15 */
+
+#define JS_FEATURES_REG(n) GPU_CONTROL_REG(JS0_FEATURES + ((n) << 2))
+
+#define SHADER_PRESENT_LO 0x100 /* (RO) Shader core present bitmap, low word */
+#define SHADER_PRESENT_HI 0x104 /* (RO) Shader core present bitmap, high word */
+
+#define TILER_PRESENT_LO 0x110 /* (RO) Tiler core present bitmap, low word */
+#define TILER_PRESENT_HI 0x114 /* (RO) Tiler core present bitmap, high word */
+
+#define L2_PRESENT_LO 0x120 /* (RO) Level 2 cache present bitmap, low word */
+#define L2_PRESENT_HI 0x124 /* (RO) Level 2 cache present bitmap, high word */
+
+
+#define SHADER_READY_LO 0x140 /* (RO) Shader core ready bitmap, low word */
+#define SHADER_READY_HI 0x144 /* (RO) Shader core ready bitmap, high word */
+
+#define TILER_READY_LO 0x150 /* (RO) Tiler core ready bitmap, low word */
+#define TILER_READY_HI 0x154 /* (RO) Tiler core ready bitmap, high word */
+
+#define L2_READY_LO 0x160 /* (RO) Level 2 cache ready bitmap, low word */
+#define L2_READY_HI 0x164 /* (RO) Level 2 cache ready bitmap, high word */
+
+
+#define SHADER_PWRON_LO 0x180 /* (WO) Shader core power on bitmap, low word */
+#define SHADER_PWRON_HI 0x184 /* (WO) Shader core power on bitmap, high word */
+
+#define TILER_PWRON_LO 0x190 /* (WO) Tiler core power on bitmap, low word */
+#define TILER_PWRON_HI 0x194 /* (WO) Tiler core power on bitmap, high word */
+
+#define L2_PWRON_LO 0x1A0 /* (WO) Level 2 cache power on bitmap, low word */
+#define L2_PWRON_HI 0x1A4 /* (WO) Level 2 cache power on bitmap, high word */
+
+#define SHADER_PWROFF_LO 0x1C0 /* (WO) Shader core power off bitmap, low word */
+#define SHADER_PWROFF_HI 0x1C4 /* (WO) Shader core power off bitmap, high word */
+
+#define TILER_PWROFF_LO 0x1D0 /* (WO) Tiler core power off bitmap, low word */
+#define TILER_PWROFF_HI 0x1D4 /* (WO) Tiler core power off bitmap, high word */
+
+#define L2_PWROFF_LO 0x1E0 /* (WO) Level 2 cache power off bitmap, low word */
+#define L2_PWROFF_HI 0x1E4 /* (WO) Level 2 cache power off bitmap, high word */
+
+#define SHADER_PWRTRANS_LO 0x200 /* (RO) Shader core power transition bitmap, low word */
+#define SHADER_PWRTRANS_HI 0x204 /* (RO) Shader core power transition bitmap, high word */
+
+#define TILER_PWRTRANS_LO 0x210 /* (RO) Tiler core power transition bitmap, low word */
+#define TILER_PWRTRANS_HI 0x214 /* (RO) Tiler core power transition bitmap, high word */
+
+#define L2_PWRTRANS_LO 0x220 /* (RO) Level 2 cache power transition bitmap, low word */
+#define L2_PWRTRANS_HI 0x224 /* (RO) Level 2 cache power transition bitmap, high word */
+
+#define SHADER_PWRACTIVE_LO 0x240 /* (RO) Shader core active bitmap, low word */
+#define SHADER_PWRACTIVE_HI 0x244 /* (RO) Shader core active bitmap, high word */
+
+#define TILER_PWRACTIVE_LO 0x250 /* (RO) Tiler core active bitmap, low word */
+#define TILER_PWRACTIVE_HI 0x254 /* (RO) Tiler core active bitmap, high word */
+
+#define L2_PWRACTIVE_LO 0x260 /* (RO) Level 2 cache active bitmap, low word */
+#define L2_PWRACTIVE_HI 0x264 /* (RO) Level 2 cache active bitmap, high word */
+
+#define COHERENCY_FEATURES 0x300 /* (RO) Coherency features present */
+#define COHERENCY_ENABLE 0x304 /* (RW) Coherency enable */
+
+#define JM_CONFIG 0xF00 /* (RW) Job Manager configuration register (Implementation specific register) */
+#define SHADER_CONFIG 0xF04 /* (RW) Shader core configuration settings (Implementation specific register) */
+#define TILER_CONFIG 0xF08 /* (RW) Tiler core configuration settings (Implementation specific register) */
+#define L2_MMU_CONFIG 0xF0C /* (RW) Configuration of the L2 cache and MMU (Implementation specific register) */
+
+#define JOB_CONTROL_BASE 0x1000
+
+#define JOB_CONTROL_REG(r) (JOB_CONTROL_BASE + (r))
+
+#define JOB_IRQ_RAWSTAT 0x000 /* Raw interrupt status register */
+#define JOB_IRQ_CLEAR 0x004 /* Interrupt clear register */
+#define JOB_IRQ_MASK 0x008 /* Interrupt mask register */
+#define JOB_IRQ_STATUS 0x00C /* Interrupt status register */
+#define JOB_IRQ_JS_STATE 0x010 /* status==active and _next == busy snapshot from last JOB_IRQ_CLEAR */
+#define JOB_IRQ_THROTTLE 0x014 /* cycles to delay delivering an interrupt externally. The JOB_IRQ_STATUS is NOT affected by this, just the delivery of the interrupt. */
+
+#define JOB_SLOT0 0x800 /* Configuration registers for job slot 0 */
+#define JOB_SLOT1 0x880 /* Configuration registers for job slot 1 */
+#define JOB_SLOT2 0x900 /* Configuration registers for job slot 2 */
+#define JOB_SLOT3 0x980 /* Configuration registers for job slot 3 */
+#define JOB_SLOT4 0xA00 /* Configuration registers for job slot 4 */
+#define JOB_SLOT5 0xA80 /* Configuration registers for job slot 5 */
+#define JOB_SLOT6 0xB00 /* Configuration registers for job slot 6 */
+#define JOB_SLOT7 0xB80 /* Configuration registers for job slot 7 */
+#define JOB_SLOT8 0xC00 /* Configuration registers for job slot 8 */
+#define JOB_SLOT9 0xC80 /* Configuration registers for job slot 9 */
+#define JOB_SLOT10 0xD00 /* Configuration registers for job slot 10 */
+#define JOB_SLOT11 0xD80 /* Configuration registers for job slot 11 */
+#define JOB_SLOT12 0xE00 /* Configuration registers for job slot 12 */
+#define JOB_SLOT13 0xE80 /* Configuration registers for job slot 13 */
+#define JOB_SLOT14 0xF00 /* Configuration registers for job slot 14 */
+#define JOB_SLOT15 0xF80 /* Configuration registers for job slot 15 */
+
+#define JOB_SLOT_REG(n, r) (JOB_CONTROL_REG(JOB_SLOT0 + ((n) << 7)) + (r))
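+
+/*
+ * Worked example (illustrative): each job slot occupies a 0x80-byte window,
+ * so JOB_SLOT_REG(1, JS_COMMAND_NEXT) resolves to
+ * 0x1000 + 0x800 + (1 << 7) + 0x60 = 0x18E0.
+ */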
+
+#define JS_HEAD_LO 0x00 /* (RO) Job queue head pointer for job slot n, low word */
+#define JS_HEAD_HI 0x04 /* (RO) Job queue head pointer for job slot n, high word */
+#define JS_TAIL_LO 0x08 /* (RO) Job queue tail pointer for job slot n, low word */
+#define JS_TAIL_HI 0x0C /* (RO) Job queue tail pointer for job slot n, high word */
+#define JS_AFFINITY_LO 0x10 /* (RO) Core affinity mask for job slot n, low word */
+#define JS_AFFINITY_HI 0x14 /* (RO) Core affinity mask for job slot n, high word */
+#define JS_CONFIG 0x18 /* (RO) Configuration settings for job slot n */
+#define JS_XAFFINITY 0x1C /* (RO) Extended affinity mask for job
+ slot n */
+
+#define JS_COMMAND 0x20 /* (WO) Command register for job slot n */
+#define JS_STATUS 0x24 /* (RO) Status register for job slot n */
+
+#define JS_HEAD_NEXT_LO 0x40 /* (RW) Next job queue head pointer for job slot n, low word */
+#define JS_HEAD_NEXT_HI 0x44 /* (RW) Next job queue head pointer for job slot n, high word */
+
+#define JS_AFFINITY_NEXT_LO 0x50 /* (RW) Next core affinity mask for job slot n, low word */
+#define JS_AFFINITY_NEXT_HI 0x54 /* (RW) Next core affinity mask for job slot n, high word */
+#define JS_CONFIG_NEXT 0x58 /* (RW) Next configuration settings for job slot n */
+#define JS_XAFFINITY_NEXT 0x5C /* (RW) Next extended affinity mask for
+ job slot n */
+
+#define JS_COMMAND_NEXT 0x60 /* (RW) Next command register for job slot n */
+
+#define JS_FLUSH_ID_NEXT 0x70 /* (RW) Next job slot n cache flush ID */
+
+#define MEMORY_MANAGEMENT_BASE 0x2000
+#define MMU_REG(r) (MEMORY_MANAGEMENT_BASE + (r))
+
+#define MMU_IRQ_RAWSTAT 0x000 /* (RW) Raw interrupt status register */
+#define MMU_IRQ_CLEAR 0x004 /* (WO) Interrupt clear register */
+#define MMU_IRQ_MASK 0x008 /* (RW) Interrupt mask register */
+#define MMU_IRQ_STATUS 0x00C /* (RO) Interrupt status register */
+
+#define MMU_AS0 0x400 /* Configuration registers for address space 0 */
+#define MMU_AS1 0x440 /* Configuration registers for address space 1 */
+#define MMU_AS2 0x480 /* Configuration registers for address space 2 */
+#define MMU_AS3 0x4C0 /* Configuration registers for address space 3 */
+#define MMU_AS4 0x500 /* Configuration registers for address space 4 */
+#define MMU_AS5 0x540 /* Configuration registers for address space 5 */
+#define MMU_AS6 0x580 /* Configuration registers for address space 6 */
+#define MMU_AS7 0x5C0 /* Configuration registers for address space 7 */
+#define MMU_AS8 0x600 /* Configuration registers for address space 8 */
+#define MMU_AS9 0x640 /* Configuration registers for address space 9 */
+#define MMU_AS10 0x680 /* Configuration registers for address space 10 */
+#define MMU_AS11 0x6C0 /* Configuration registers for address space 11 */
+#define MMU_AS12 0x700 /* Configuration registers for address space 12 */
+#define MMU_AS13 0x740 /* Configuration registers for address space 13 */
+#define MMU_AS14 0x780 /* Configuration registers for address space 14 */
+#define MMU_AS15 0x7C0 /* Configuration registers for address space 15 */
+
+#define MMU_AS_REG(n, r) (MMU_REG(MMU_AS0 + ((n) << 6)) + (r))
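+
+/*
+ * Worked example (illustrative): each address space occupies a 0x40-byte
+ * window, so MMU_AS_REG(3, AS_COMMAND) resolves to
+ * 0x2000 + 0x400 + (3 << 6) + 0x18 = 0x24D8.
+ */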
+
+#define AS_TRANSTAB_LO 0x00 /* (RW) Translation Table Base Address for address space n, low word */
+#define AS_TRANSTAB_HI 0x04 /* (RW) Translation Table Base Address for address space n, high word */
+#define AS_MEMATTR_LO 0x08 /* (RW) Memory attributes for address space n, low word. */
+#define AS_MEMATTR_HI 0x0C /* (RW) Memory attributes for address space n, high word. */
+#define AS_LOCKADDR_LO 0x10 /* (RW) Lock region address for address space n, low word */
+#define AS_LOCKADDR_HI 0x14 /* (RW) Lock region address for address space n, high word */
+#define AS_COMMAND 0x18 /* (WO) MMU command register for address space n */
+#define AS_FAULTSTATUS 0x1C /* (RO) MMU fault status register for address space n */
+#define AS_FAULTADDRESS_LO 0x20 /* (RO) Fault Address for address space n, low word */
+#define AS_FAULTADDRESS_HI 0x24 /* (RO) Fault Address for address space n, high word */
+#define AS_STATUS 0x28 /* (RO) Status flags for address space n */
+
+
+/* (RW) Translation table configuration for address space n, low word */
+#define AS_TRANSCFG_LO 0x30
+/* (RW) Translation table configuration for address space n, high word */
+#define AS_TRANSCFG_HI 0x34
+/* (RO) Secondary fault address for address space n, low word */
+#define AS_FAULTEXTRA_LO 0x38
+/* (RO) Secondary fault address for address space n, high word */
+#define AS_FAULTEXTRA_HI 0x3C
+
+/* End Register Offsets */
+
+/*
+ * MMU_IRQ_RAWSTAT register values. The values are also valid for the
+ * MMU_IRQ_CLEAR, MMU_IRQ_MASK and MMU_IRQ_STATUS registers.
+ */
+
+#define MMU_PAGE_FAULT_FLAGS 16
+
+/* Macros returning a bitmask to retrieve page fault or bus error flags from
+ * MMU registers */
+#define MMU_PAGE_FAULT(n) (1UL << (n))
+#define MMU_BUS_ERROR(n) (1UL << ((n) + MMU_PAGE_FAULT_FLAGS))
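+
+/*
+ * Illustrative example: for address space 2, MMU_PAGE_FAULT(2) selects bit 2
+ * and MMU_BUS_ERROR(2) selects bit 18 (2 + MMU_PAGE_FAULT_FLAGS) of the
+ * MMU_IRQ_* registers.
+ */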
+
+/*
+ * Begin LPAE MMU TRANSTAB register values
+ */
+#define AS_TRANSTAB_LPAE_ADDR_SPACE_MASK 0xfffff000
+#define AS_TRANSTAB_LPAE_ADRMODE_UNMAPPED (0u << 0)
+#define AS_TRANSTAB_LPAE_ADRMODE_IDENTITY (1u << 1)
+#define AS_TRANSTAB_LPAE_ADRMODE_TABLE (3u << 0)
+#define AS_TRANSTAB_LPAE_READ_INNER (1u << 2)
+#define AS_TRANSTAB_LPAE_SHARE_OUTER (1u << 4)
+
+#define AS_TRANSTAB_LPAE_ADRMODE_MASK 0x00000003
+
+/*
+ * Begin AARCH64 MMU TRANSTAB register values
+ */
+#define MMU_HW_OUTA_BITS 40
+#define AS_TRANSTAB_BASE_MASK ((1ULL << MMU_HW_OUTA_BITS) - (1ULL << 4))
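+
+/*
+ * Illustrative note: with MMU_HW_OUTA_BITS = 40 the mask evaluates to
+ * 0x000000FFFFFFFFF0, i.e. bits [39:4] of the translation table base
+ * address are significant and the low four bits are ignored.
+ */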
+
+/*
+ * Begin MMU STATUS register values
+ */
+#define AS_STATUS_AS_ACTIVE 0x01
+
+#define AS_FAULTSTATUS_EXCEPTION_CODE_MASK (0x7<<3)
+#define AS_FAULTSTATUS_EXCEPTION_CODE_TRANSLATION_FAULT (0x0<<3)
+#define AS_FAULTSTATUS_EXCEPTION_CODE_PERMISSION_FAULT (0x1<<3)
+#define AS_FAULTSTATUS_EXCEPTION_CODE_TRANSTAB_BUS_FAULT (0x2<<3)
+#define AS_FAULTSTATUS_EXCEPTION_CODE_ACCESS_FLAG (0x3<<3)
+
+#define AS_FAULTSTATUS_EXCEPTION_CODE_ADDRESS_SIZE_FAULT (0x4<<3)
+#define AS_FAULTSTATUS_EXCEPTION_CODE_MEMORY_ATTRIBUTES_FAULT (0x5<<3)
+
+#define AS_FAULTSTATUS_ACCESS_TYPE_MASK (0x3<<8)
+#define AS_FAULTSTATUS_ACCESS_TYPE_ATOMIC (0x0<<8)
+#define AS_FAULTSTATUS_ACCESS_TYPE_EX (0x1<<8)
+#define AS_FAULTSTATUS_ACCESS_TYPE_READ (0x2<<8)
+#define AS_FAULTSTATUS_ACCESS_TYPE_WRITE (0x3<<8)
+
+/*
+ * Begin MMU TRANSCFG register values
+ */
+
+#define AS_TRANSCFG_ADRMODE_LEGACY 0
+#define AS_TRANSCFG_ADRMODE_UNMAPPED 1
+#define AS_TRANSCFG_ADRMODE_IDENTITY 2
+#define AS_TRANSCFG_ADRMODE_AARCH64_4K 6
+#define AS_TRANSCFG_ADRMODE_AARCH64_64K 8
+
+#define AS_TRANSCFG_ADRMODE_MASK 0xF
+
+
+/*
+ * Begin TRANSCFG register values
+ */
+#define AS_TRANSCFG_PTW_MEMATTR_MASK (3 << 24)
+#define AS_TRANSCFG_PTW_MEMATTR_NON_CACHEABLE (1 << 24)
+#define AS_TRANSCFG_PTW_MEMATTR_WRITE_BACK (2 << 24)
+
+#define AS_TRANSCFG_PTW_SH_MASK ((3 << 28))
+#define AS_TRANSCFG_PTW_SH_OS (2 << 28)
+#define AS_TRANSCFG_PTW_SH_IS (3 << 28)
+
+/*
+ * Begin Command Values
+ */
+
+/* JS_COMMAND register commands */
+#define JS_COMMAND_NOP 0x00 /* NOP Operation. Writing this value is ignored */
+#define JS_COMMAND_START 0x01 /* Start processing a job chain. Writing this value is ignored */
+#define JS_COMMAND_SOFT_STOP 0x02 /* Gently stop processing a job chain */
+#define JS_COMMAND_HARD_STOP 0x03 /* Rudely stop processing a job chain */
+#define JS_COMMAND_SOFT_STOP_0 0x04 /* Execute SOFT_STOP if JOB_CHAIN_FLAG is 0 */
+#define JS_COMMAND_HARD_STOP_0 0x05 /* Execute HARD_STOP if JOB_CHAIN_FLAG is 0 */
+#define JS_COMMAND_SOFT_STOP_1 0x06 /* Execute SOFT_STOP if JOB_CHAIN_FLAG is 1 */
+#define JS_COMMAND_HARD_STOP_1 0x07 /* Execute HARD_STOP if JOB_CHAIN_FLAG is 1 */
+
+#define JS_COMMAND_MASK 0x07 /* Mask of bits currently in use by the HW */
+
+/* AS_COMMAND register commands */
+#define AS_COMMAND_NOP 0x00 /* NOP Operation */
+#define AS_COMMAND_UPDATE 0x01 /* Broadcasts the values in AS_TRANSTAB and ASn_MEMATTR to all MMUs */
+#define AS_COMMAND_LOCK 0x02 /* Issue a lock region command to all MMUs */
+#define AS_COMMAND_UNLOCK 0x03 /* Issue a flush region command to all MMUs */
+#define AS_COMMAND_FLUSH 0x04 /* Flush all L2 caches then issue a flush region command to all MMUs
+ (deprecated - only for use with T60x) */
+#define AS_COMMAND_FLUSH_PT 0x04 /* Flush all L2 caches then issue a flush region command to all MMUs */
+#define AS_COMMAND_FLUSH_MEM 0x05 /* Wait for memory accesses to complete, flush all the L1 caches then
+ flush all L2 caches then issue a flush region command to all MMUs */
+
+/* Possible values of JS_CONFIG and JS_CONFIG_NEXT registers */
+#define JS_CONFIG_START_FLUSH_NO_ACTION (0u << 0)
+#define JS_CONFIG_START_FLUSH_CLEAN (1u << 8)
+#define JS_CONFIG_START_FLUSH_CLEAN_INVALIDATE (3u << 8)
+#define JS_CONFIG_START_MMU (1u << 10)
+#define JS_CONFIG_JOB_CHAIN_FLAG (1u << 11)
+#define JS_CONFIG_END_FLUSH_NO_ACTION JS_CONFIG_START_FLUSH_NO_ACTION
+#define JS_CONFIG_END_FLUSH_CLEAN (1u << 12)
+#define JS_CONFIG_END_FLUSH_CLEAN_INVALIDATE (3u << 12)
+#define JS_CONFIG_ENABLE_FLUSH_REDUCTION (1u << 14)
+#define JS_CONFIG_DISABLE_DESCRIPTOR_WR_BK (1u << 15)
+#define JS_CONFIG_THREAD_PRI(n) ((n) << 16)
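+
+/*
+ * Usage sketch (illustrative, hypothetical priority value): a job-slot
+ * configuration is built by OR-ing these flags together, e.g.
+ *
+ *   u32 cfg = JS_CONFIG_START_FLUSH_CLEAN_INVALIDATE |
+ *             JS_CONFIG_END_FLUSH_CLEAN_INVALIDATE |
+ *             JS_CONFIG_THREAD_PRI(8);
+ */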
+
+/* JS_XAFFINITY register values */
+#define JS_XAFFINITY_XAFFINITY_ENABLE (1u << 0)
+#define JS_XAFFINITY_TILER_ENABLE (1u << 8)
+#define JS_XAFFINITY_CACHE_ENABLE (1u << 16)
+
+/* JS_STATUS register values */
+
+/* NOTE: Please keep these values in sync with enum base_jd_event_code in mali_base_kernel.h.
+ * The values are kept separate to avoid a dependency between userspace and kernel code.
+ */
+
+/* Group of values representing the job status instead of a particular fault */
+#define JS_STATUS_NO_EXCEPTION_BASE 0x00
+#define JS_STATUS_INTERRUPTED (JS_STATUS_NO_EXCEPTION_BASE + 0x02) /* 0x02 means INTERRUPTED */
+#define JS_STATUS_STOPPED (JS_STATUS_NO_EXCEPTION_BASE + 0x03) /* 0x03 means STOPPED */
+#define JS_STATUS_TERMINATED (JS_STATUS_NO_EXCEPTION_BASE + 0x04) /* 0x04 means TERMINATED */
+
+/* General fault values */
+#define JS_STATUS_FAULT_BASE 0x40
+#define JS_STATUS_CONFIG_FAULT (JS_STATUS_FAULT_BASE) /* 0x40 means CONFIG FAULT */
+#define JS_STATUS_POWER_FAULT (JS_STATUS_FAULT_BASE + 0x01) /* 0x41 means POWER FAULT */
+#define JS_STATUS_READ_FAULT (JS_STATUS_FAULT_BASE + 0x02) /* 0x42 means READ FAULT */
+#define JS_STATUS_WRITE_FAULT (JS_STATUS_FAULT_BASE + 0x03) /* 0x43 means WRITE FAULT */
+#define JS_STATUS_AFFINITY_FAULT (JS_STATUS_FAULT_BASE + 0x04) /* 0x44 means AFFINITY FAULT */
+#define JS_STATUS_BUS_FAULT (JS_STATUS_FAULT_BASE + 0x08) /* 0x48 means BUS FAULT */
+
+/* Instruction or data faults */
+#define JS_STATUS_INSTRUCTION_FAULT_BASE 0x50
+#define JS_STATUS_INSTR_INVALID_PC (JS_STATUS_INSTRUCTION_FAULT_BASE) /* 0x50 means INSTR INVALID PC */
+#define JS_STATUS_INSTR_INVALID_ENC (JS_STATUS_INSTRUCTION_FAULT_BASE + 0x01) /* 0x51 means INSTR INVALID ENC */
+#define JS_STATUS_INSTR_TYPE_MISMATCH (JS_STATUS_INSTRUCTION_FAULT_BASE + 0x02) /* 0x52 means INSTR TYPE MISMATCH */
+#define JS_STATUS_INSTR_OPERAND_FAULT (JS_STATUS_INSTRUCTION_FAULT_BASE + 0x03) /* 0x53 means INSTR OPERAND FAULT */
+#define JS_STATUS_INSTR_TLS_FAULT (JS_STATUS_INSTRUCTION_FAULT_BASE + 0x04) /* 0x54 means INSTR TLS FAULT */
+#define JS_STATUS_INSTR_BARRIER_FAULT (JS_STATUS_INSTRUCTION_FAULT_BASE + 0x05) /* 0x55 means INSTR BARRIER FAULT */
+#define JS_STATUS_INSTR_ALIGN_FAULT (JS_STATUS_INSTRUCTION_FAULT_BASE + 0x06) /* 0x56 means INSTR ALIGN FAULT */
+/* NOTE: No fault with 0x57 code defined in spec. */
+#define JS_STATUS_DATA_INVALID_FAULT (JS_STATUS_INSTRUCTION_FAULT_BASE + 0x08) /* 0x58 means DATA INVALID FAULT */
+#define JS_STATUS_TILE_RANGE_FAULT (JS_STATUS_INSTRUCTION_FAULT_BASE + 0x09) /* 0x59 means TILE RANGE FAULT */
+#define JS_STATUS_ADDRESS_RANGE_FAULT (JS_STATUS_INSTRUCTION_FAULT_BASE + 0x0A) /* 0x5A means ADDRESS RANGE FAULT */
+
+/* Other faults */
+#define JS_STATUS_MEMORY_FAULT_BASE 0x60
+#define JS_STATUS_OUT_OF_MEMORY (JS_STATUS_MEMORY_FAULT_BASE) /* 0x60 means OUT OF MEMORY */
+#define JS_STATUS_UNKNOWN 0x7F /* 0x7F means UNKNOWN */
+
+/* GPU_COMMAND values */
+#define GPU_COMMAND_NOP 0x00 /* No operation, nothing happens */
+#define GPU_COMMAND_SOFT_RESET 0x01 /* Stop all external bus interfaces, and then reset the entire GPU. */
+#define GPU_COMMAND_HARD_RESET 0x02 /* Immediately reset the entire GPU. */
+#define GPU_COMMAND_PRFCNT_CLEAR 0x03 /* Clear all performance counters, setting them all to zero. */
+#define GPU_COMMAND_PRFCNT_SAMPLE 0x04 /* Sample all performance counters, writing them out to memory */
+#define GPU_COMMAND_CYCLE_COUNT_START 0x05 /* Starts the cycle counter, and system timestamp propagation */
+#define GPU_COMMAND_CYCLE_COUNT_STOP 0x06 /* Stops the cycle counter, and system timestamp propagation */
+#define GPU_COMMAND_CLEAN_CACHES 0x07 /* Clean all caches */
+#define GPU_COMMAND_CLEAN_INV_CACHES 0x08 /* Clean and invalidate all caches */
+#define GPU_COMMAND_SET_PROTECTED_MODE 0x09 /* Places the GPU in protected mode */
+
+/* End Command Values */
+
+/* GPU_STATUS values */
+#define GPU_STATUS_PRFCNT_ACTIVE (1 << 2) /* Set if the performance counters are active. */
+#define GPU_STATUS_PROTECTED_MODE_ACTIVE (1 << 7) /* Set if protected mode is active */
+
+/* PRFCNT_CONFIG register values */
+#define PRFCNT_CONFIG_MODE_SHIFT 0 /* Counter mode position. */
+#define PRFCNT_CONFIG_AS_SHIFT 4 /* Address space bitmap position. */
+#define PRFCNT_CONFIG_SETSELECT_SHIFT 8 /* Set select position. */
+
+#define PRFCNT_CONFIG_MODE_OFF 0 /* The performance counters are disabled. */
+#define PRFCNT_CONFIG_MODE_MANUAL 1 /* The performance counters are enabled, but are only written out when a PRFCNT_SAMPLE command is issued using the GPU_COMMAND register. */
+#define PRFCNT_CONFIG_MODE_TILE 2 /* The performance counters are enabled, and are written out each time a tile finishes rendering. */
+
+/* AS<n>_MEMATTR values: */
+/* Use GPU implementation-defined caching policy. */
+#define AS_MEMATTR_IMPL_DEF_CACHE_POLICY 0x88ull
+/* The attribute set to force all resources to be cached. */
+#define AS_MEMATTR_FORCE_TO_CACHE_ALL 0x8Full
+/* Inner write-alloc cache setup, no outer caching */
+#define AS_MEMATTR_WRITE_ALLOC 0x8Dull
+
+/* Set to implementation defined, outer caching */
+#define AS_MEMATTR_AARCH64_OUTER_IMPL_DEF 0x88ull
+/* Set to write back memory, outer caching */
+#define AS_MEMATTR_AARCH64_OUTER_WA 0x8Dull
+
+/* Use GPU implementation-defined caching policy. */
+#define AS_MEMATTR_LPAE_IMPL_DEF_CACHE_POLICY 0x48ull
+/* The attribute set to force all resources to be cached. */
+#define AS_MEMATTR_LPAE_FORCE_TO_CACHE_ALL 0x4Full
+/* Inner write-alloc cache setup, no outer caching */
+#define AS_MEMATTR_LPAE_WRITE_ALLOC 0x4Dull
+/* Set to implementation defined, outer caching */
+#define AS_MEMATTR_LPAE_OUTER_IMPL_DEF 0x88ull
+/* Set to write back memory, outer caching */
+#define AS_MEMATTR_LPAE_OUTER_WA 0x8Dull
+
+/* Symbol for default MEMATTR to use */
+
+/* Default is - HW implementation defined caching */
+#define AS_MEMATTR_INDEX_DEFAULT 0
+#define AS_MEMATTR_INDEX_DEFAULT_ACE 3
+
+/* HW implementation defined caching */
+#define AS_MEMATTR_INDEX_IMPL_DEF_CACHE_POLICY 0
+/* Force cache on */
+#define AS_MEMATTR_INDEX_FORCE_TO_CACHE_ALL 1
+/* Write-alloc */
+#define AS_MEMATTR_INDEX_WRITE_ALLOC 2
+/* Outer coherent, inner implementation defined policy */
+#define AS_MEMATTR_INDEX_OUTER_IMPL_DEF 3
+/* Outer coherent, write alloc inner */
+#define AS_MEMATTR_INDEX_OUTER_WA 4
+
+/* JS<n>_FEATURES register */
+
+#define JS_FEATURE_NULL_JOB (1u << 1)
+#define JS_FEATURE_SET_VALUE_JOB (1u << 2)
+#define JS_FEATURE_CACHE_FLUSH_JOB (1u << 3)
+#define JS_FEATURE_COMPUTE_JOB (1u << 4)
+#define JS_FEATURE_VERTEX_JOB (1u << 5)
+#define JS_FEATURE_GEOMETRY_JOB (1u << 6)
+#define JS_FEATURE_TILER_JOB (1u << 7)
+#define JS_FEATURE_FUSED_JOB (1u << 8)
+#define JS_FEATURE_FRAGMENT_JOB (1u << 9)
+
+/* End JS<n>_FEATURES register */
+
+/* L2_MMU_CONFIG register */
+#define L2_MMU_CONFIG_ALLOW_SNOOP_DISPARITY_SHIFT (23)
+#define L2_MMU_CONFIG_ALLOW_SNOOP_DISPARITY (0x1 << L2_MMU_CONFIG_ALLOW_SNOOP_DISPARITY_SHIFT)
+#define L2_MMU_CONFIG_LIMIT_EXTERNAL_READS_SHIFT (24)
+#define L2_MMU_CONFIG_LIMIT_EXTERNAL_READS (0x3 << L2_MMU_CONFIG_LIMIT_EXTERNAL_READS_SHIFT)
+#define L2_MMU_CONFIG_LIMIT_EXTERNAL_READS_OCTANT (0x1 << L2_MMU_CONFIG_LIMIT_EXTERNAL_READS_SHIFT)
+#define L2_MMU_CONFIG_LIMIT_EXTERNAL_READS_QUARTER (0x2 << L2_MMU_CONFIG_LIMIT_EXTERNAL_READS_SHIFT)
+#define L2_MMU_CONFIG_LIMIT_EXTERNAL_READS_HALF (0x3 << L2_MMU_CONFIG_LIMIT_EXTERNAL_READS_SHIFT)
+
+#define L2_MMU_CONFIG_LIMIT_EXTERNAL_WRITES_SHIFT (26)
+#define L2_MMU_CONFIG_LIMIT_EXTERNAL_WRITES (0x3 << L2_MMU_CONFIG_LIMIT_EXTERNAL_WRITES_SHIFT)
+#define L2_MMU_CONFIG_LIMIT_EXTERNAL_WRITES_OCTANT (0x1 << L2_MMU_CONFIG_LIMIT_EXTERNAL_WRITES_SHIFT)
+#define L2_MMU_CONFIG_LIMIT_EXTERNAL_WRITES_QUARTER (0x2 << L2_MMU_CONFIG_LIMIT_EXTERNAL_WRITES_SHIFT)
+#define L2_MMU_CONFIG_LIMIT_EXTERNAL_WRITES_HALF (0x3 << L2_MMU_CONFIG_LIMIT_EXTERNAL_WRITES_SHIFT)
+/* End L2_MMU_CONFIG register */
+
+/* THREAD_* registers */
+
+/* THREAD_FEATURES IMPLEMENTATION_TECHNOLOGY values */
+#define IMPLEMENTATION_UNSPECIFIED 0
+#define IMPLEMENTATION_SILICON 1
+#define IMPLEMENTATION_FPGA 2
+#define IMPLEMENTATION_MODEL 3
+
+/* Default values when registers are not supported by the implemented hardware */
+#define THREAD_MT_DEFAULT 256
+#define THREAD_MWS_DEFAULT 256
+#define THREAD_MBS_DEFAULT 256
+#define THREAD_MR_DEFAULT 1024
+#define THREAD_MTQ_DEFAULT 4
+#define THREAD_MTGS_DEFAULT 10
+
+/* End THREAD_* registers */
+
+/* SHADER_CONFIG register */
+
+#define SC_ALT_COUNTERS (1ul << 3)
+#define SC_OVERRIDE_FWD_PIXEL_KILL (1ul << 4)
+#define SC_SDC_DISABLE_OQ_DISCARD (1ul << 6)
+#define SC_LS_ALLOW_ATTR_TYPES (1ul << 16)
+#define SC_LS_PAUSEBUFFER_DISABLE (1ul << 16)
+#define SC_LS_ATTR_CHECK_DISABLE (1ul << 18)
+#define SC_ENABLE_TEXGRD_FLAGS (1ul << 25)
+/* End SHADER_CONFIG register */
+
+/* TILER_CONFIG register */
+
+#define TC_CLOCK_GATE_OVERRIDE (1ul << 0)
+
+/* End TILER_CONFIG register */
+
+#endif /* _MIDGARD_REGMAP_H_ */
diff --git a/midgard-0.r2p0/mali_timeline.h b/midgard-0.r2p0/mali_timeline.h
new file mode 100755
index 0000000..bd5f661
--- /dev/null
+++ b/midgard-0.r2p0/mali_timeline.h
@@ -0,0 +1,396 @@
+/*
+ *
+ * (C) COPYRIGHT 2012-2016 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained
+ * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
+ * Boston, MA 02110-1301, USA.
+ *
+ */
+
+
+
+
+
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM mali_timeline
+
+#if !defined(_MALI_TIMELINE_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _MALI_TIMELINE_H
+
+#include <linux/tracepoint.h>
+
+TRACE_EVENT(mali_timeline_atoms_in_flight,
+
+ TP_PROTO(u64 ts_sec,
+ u32 ts_nsec,
+ int tgid,
+ int count),
+
+ TP_ARGS(ts_sec,
+ ts_nsec,
+ tgid,
+ count),
+
+ TP_STRUCT__entry(
+ __field(u64, ts_sec)
+ __field(u32, ts_nsec)
+ __field(int, tgid)
+ __field(int, count)
+ ),
+
+ TP_fast_assign(
+ __entry->ts_sec = ts_sec;
+ __entry->ts_nsec = ts_nsec;
+ __entry->tgid = tgid;
+ __entry->count = count;
+ ),
+
+ TP_printk("%i,%i.%.9i,%i,%i", CTX_SET_NR_ATOMS_IN_FLIGHT,
+ (int)__entry->ts_sec,
+ (int)__entry->ts_nsec,
+ __entry->tgid,
+ __entry->count)
+);
+
+
+TRACE_EVENT(mali_timeline_atom,
+
+ TP_PROTO(u64 ts_sec,
+ u32 ts_nsec,
+ int event_type,
+ int tgid,
+ int atom_id),
+
+ TP_ARGS(ts_sec,
+ ts_nsec,
+ event_type,
+ tgid,
+ atom_id),
+
+ TP_STRUCT__entry(
+ __field(u64, ts_sec)
+ __field(u32, ts_nsec)
+ __field(int, event_type)
+ __field(int, tgid)
+ __field(int, atom_id)
+ ),
+
+ TP_fast_assign(
+ __entry->ts_sec = ts_sec;
+ __entry->ts_nsec = ts_nsec;
+ __entry->event_type = event_type;
+ __entry->tgid = tgid;
+ __entry->atom_id = atom_id;
+ ),
+
+ TP_printk("%i,%i.%.9i,%i,%i,%i", __entry->event_type,
+ (int)__entry->ts_sec,
+ (int)__entry->ts_nsec,
+ __entry->tgid,
+ __entry->atom_id,
+ __entry->atom_id)
+);
+
+TRACE_EVENT(mali_timeline_gpu_slot_active,
+
+ TP_PROTO(u64 ts_sec,
+ u32 ts_nsec,
+ int event_type,
+ int tgid,
+ int js,
+ int count),
+
+ TP_ARGS(ts_sec,
+ ts_nsec,
+ event_type,
+ tgid,
+ js,
+ count),
+
+ TP_STRUCT__entry(
+ __field(u64, ts_sec)
+ __field(u32, ts_nsec)
+ __field(int, event_type)
+ __field(int, tgid)
+ __field(int, js)
+ __field(int, count)
+ ),
+
+ TP_fast_assign(
+ __entry->ts_sec = ts_sec;
+ __entry->ts_nsec = ts_nsec;
+ __entry->event_type = event_type;
+ __entry->tgid = tgid;
+ __entry->js = js;
+ __entry->count = count;
+ ),
+
+ TP_printk("%i,%i.%.9i,%i,%i,%i", __entry->event_type,
+ (int)__entry->ts_sec,
+ (int)__entry->ts_nsec,
+ __entry->tgid,
+ __entry->js,
+ __entry->count)
+);
+
+TRACE_EVENT(mali_timeline_gpu_slot_action,
+
+ TP_PROTO(u64 ts_sec,
+ u32 ts_nsec,
+ int event_type,
+ int tgid,
+ int js,
+ int count),
+
+ TP_ARGS(ts_sec,
+ ts_nsec,
+ event_type,
+ tgid,
+ js,
+ count),
+
+ TP_STRUCT__entry(
+ __field(u64, ts_sec)
+ __field(u32, ts_nsec)
+ __field(int, event_type)
+ __field(int, tgid)
+ __field(int, js)
+ __field(int, count)
+ ),
+
+ TP_fast_assign(
+ __entry->ts_sec = ts_sec;
+ __entry->ts_nsec = ts_nsec;
+ __entry->event_type = event_type;
+ __entry->tgid = tgid;
+ __entry->js = js;
+ __entry->count = count;
+ ),
+
+ TP_printk("%i,%i.%.9i,%i,%i,%i", __entry->event_type,
+ (int)__entry->ts_sec,
+ (int)__entry->ts_nsec,
+ __entry->tgid,
+ __entry->js,
+ __entry->count)
+);
+
+TRACE_EVENT(mali_timeline_gpu_power_active,
+
+ TP_PROTO(u64 ts_sec,
+ u32 ts_nsec,
+ int event_type,
+ int active),
+
+ TP_ARGS(ts_sec,
+ ts_nsec,
+ event_type,
+ active),
+
+ TP_STRUCT__entry(
+ __field(u64, ts_sec)
+ __field(u32, ts_nsec)
+ __field(int, event_type)
+ __field(int, active)
+ ),
+
+ TP_fast_assign(
+ __entry->ts_sec = ts_sec;
+ __entry->ts_nsec = ts_nsec;
+ __entry->event_type = event_type;
+ __entry->active = active;
+ ),
+
+ TP_printk("%i,%i.%.9i,0,%i", __entry->event_type,
+ (int)__entry->ts_sec,
+ (int)__entry->ts_nsec,
+ __entry->active)
+
+);
+
+TRACE_EVENT(mali_timeline_l2_power_active,
+
+ TP_PROTO(u64 ts_sec,
+ u32 ts_nsec,
+ int event_type,
+ int state),
+
+ TP_ARGS(ts_sec,
+ ts_nsec,
+ event_type,
+ state),
+
+ TP_STRUCT__entry(
+ __field(u64, ts_sec)
+ __field(u32, ts_nsec)
+ __field(int, event_type)
+ __field(int, state)
+ ),
+
+ TP_fast_assign(
+ __entry->ts_sec = ts_sec;
+ __entry->ts_nsec = ts_nsec;
+ __entry->event_type = event_type;
+ __entry->state = state;
+ ),
+
+ TP_printk("%i,%i.%.9i,0,%i", __entry->event_type,
+ (int)__entry->ts_sec,
+ (int)__entry->ts_nsec,
+ __entry->state)
+
+);
+TRACE_EVENT(mali_timeline_pm_event,
+
+ TP_PROTO(u64 ts_sec,
+ u32 ts_nsec,
+ int event_type,
+ int pm_event_type,
+ unsigned int pm_event_id),
+
+ TP_ARGS(ts_sec,
+ ts_nsec,
+ event_type,
+ pm_event_type,
+ pm_event_id),
+
+ TP_STRUCT__entry(
+ __field(u64, ts_sec)
+ __field(u32, ts_nsec)
+ __field(int, event_type)
+ __field(int, pm_event_type)
+ __field(unsigned int, pm_event_id)
+ ),
+
+ TP_fast_assign(
+ __entry->ts_sec = ts_sec;
+ __entry->ts_nsec = ts_nsec;
+ __entry->event_type = event_type;
+ __entry->pm_event_type = pm_event_type;
+ __entry->pm_event_id = pm_event_id;
+ ),
+
+ TP_printk("%i,%i.%.9i,0,%i,%u", __entry->event_type,
+ (int)__entry->ts_sec,
+ (int)__entry->ts_nsec,
+ __entry->pm_event_type, __entry->pm_event_id)
+
+);
+
+TRACE_EVENT(mali_timeline_slot_atom,
+
+ TP_PROTO(u64 ts_sec,
+ u32 ts_nsec,
+ int event_type,
+ int tgid,
+ int js,
+ int atom_id),
+
+ TP_ARGS(ts_sec,
+ ts_nsec,
+ event_type,
+ tgid,
+ js,
+ atom_id),
+
+ TP_STRUCT__entry(
+ __field(u64, ts_sec)
+ __field(u32, ts_nsec)
+ __field(int, event_type)
+ __field(int, tgid)
+ __field(int, js)
+ __field(int, atom_id)
+ ),
+
+ TP_fast_assign(
+ __entry->ts_sec = ts_sec;
+ __entry->ts_nsec = ts_nsec;
+ __entry->event_type = event_type;
+ __entry->tgid = tgid;
+ __entry->js = js;
+ __entry->atom_id = atom_id;
+ ),
+
+ TP_printk("%i,%i.%.9i,%i,%i,%i", __entry->event_type,
+ (int)__entry->ts_sec,
+ (int)__entry->ts_nsec,
+ __entry->tgid,
+ __entry->js,
+ __entry->atom_id)
+);
+
+TRACE_EVENT(mali_timeline_pm_checktrans,
+
+ TP_PROTO(u64 ts_sec,
+ u32 ts_nsec,
+ int trans_code,
+ int trans_id),
+
+ TP_ARGS(ts_sec,
+ ts_nsec,
+ trans_code,
+ trans_id),
+
+ TP_STRUCT__entry(
+ __field(u64, ts_sec)
+ __field(u32, ts_nsec)
+ __field(int, trans_code)
+ __field(int, trans_id)
+ ),
+
+ TP_fast_assign(
+ __entry->ts_sec = ts_sec;
+ __entry->ts_nsec = ts_nsec;
+ __entry->trans_code = trans_code;
+ __entry->trans_id = trans_id;
+ ),
+
+ TP_printk("%i,%i.%.9i,0,%i", __entry->trans_code,
+ (int)__entry->ts_sec,
+ (int)__entry->ts_nsec,
+ __entry->trans_id)
+
+);
+
+TRACE_EVENT(mali_timeline_context_active,
+
+ TP_PROTO(u64 ts_sec,
+ u32 ts_nsec,
+ int count),
+
+ TP_ARGS(ts_sec,
+ ts_nsec,
+ count),
+
+ TP_STRUCT__entry(
+ __field(u64, ts_sec)
+ __field(u32, ts_nsec)
+ __field(int, count)
+ ),
+
+ TP_fast_assign(
+ __entry->ts_sec = ts_sec;
+ __entry->ts_nsec = ts_nsec;
+ __entry->count = count;
+ ),
+
+ TP_printk("%i,%i.%.9i,0,%i", SW_SET_CONTEXT_ACTIVE,
+ (int)__entry->ts_sec,
+ (int)__entry->ts_nsec,
+ __entry->count)
+);
+
+#endif /* _MALI_TIMELINE_H */
+
+#undef TRACE_INCLUDE_PATH
+#define TRACE_INCLUDE_PATH .
+
+/* This part must be outside protection */
+#include <trace/define_trace.h>
+
diff --git a/midgard-0.r2p0/mali_uk.h b/midgard-0.r2p0/mali_uk.h
new file mode 100755
index 0000000..841d03f
--- /dev/null
+++ b/midgard-0.r2p0/mali_uk.h
@@ -0,0 +1,141 @@
+/*
+ *
+ * (C) COPYRIGHT 2010, 2012-2015 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained
+ * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
+ * Boston, MA 02110-1301, USA.
+ *
+ */
+
+
+
+
+
+/**
+ * @file mali_uk.h
+ * Types and definitions that are common across OSs for both the user
+ * and kernel side of the User-Kernel interface.
+ */
+
+#ifndef _UK_H_
+#define _UK_H_
+
+#ifdef __cplusplus
+extern "C" {
+#endif /* __cplusplus */
+
+/**
+ * @addtogroup base_api
+ * @{
+ */
+
+/**
+ * @defgroup uk_api User-Kernel Interface API
+ *
+ * The User-Kernel Interface abstracts the communication mechanism between the user and kernel-side code of device
+ * drivers developed as part of the Midgard DDK. Currently that includes the Base driver and the UMP driver.
+ *
+ * It exposes an OS-independent API to user-side code (UKU) which routes function calls to an OS-independent
+ * kernel-side API (UKK) via an OS-specific communication mechanism.
+ *
+ * This API is internal to the Midgard DDK and is not exposed to any applications.
+ *
+ * @{
+ */
+
+/**
+ * These are identifiers for kernel-side drivers implementing a UK interface, aka UKK clients. The
+ * UK module maps this to an OS-specific device name, e.g. "gpu_base" -> "GPU0:". Pass this
+ * identifier to the uku_open() function to select a UKK client.
+ *
+ * When a new UKK client driver is created, a new identifier needs to be added to the uk_client_id
+ * enumeration and the uku_open() implementations for the various OS ports need to be updated to
+ * provide a mapping of the identifier to the OS-specific device name.
+ *
+ */
+enum uk_client_id {
+ /**
+ * Value used to identify the Base driver UK client.
+ */
+ UK_CLIENT_MALI_T600_BASE,
+
+ /** The number of uk clients supported. This must be the last member of the enum */
+ UK_CLIENT_COUNT
+};
+
+/**
+ * Each function callable through the UK interface has a unique number.
+ * Functions provided by UK clients start from number UK_FUNC_ID.
+ * Numbers below UK_FUNC_ID are used for internal UK functions.
+ */
+enum uk_func {
+ UKP_FUNC_ID_CHECK_VERSION, /**< UKK Core internal function */
+ /**
+ * Each UK client numbers the functions they provide starting from
+ * number UK_FUNC_ID. This number is then eventually assigned to the
+ * id field of the union uk_header structure when preparing to make a
+ * UK call. See your UK client for a list of their function numbers.
+ */
+ UK_FUNC_ID = 512
+};
+
+/**
+ * Arguments for a UK call are stored in a structure. This structure consists
+ * of a fixed-size header and a payload. The header carries a 32-bit number
+ * identifying the UK function to be called (see uk_func). When the UKK client
+ * receives this header and has executed the requested UK function, it will use
+ * the same header to store the result of the function in the form of an
+ * int return code. The size of this structure is such that the
+ * first member of the payload following the header can be accessed efficiently
+ * on both 32-bit and 64-bit kernels, and the structure has the same size
+ * regardless of whether the kernel is 32-bit or 64-bit. The uk_kernel_size_type
+ * type should be defined accordingly in the OS-specific mali_uk_os.h header file.
+ */
+union uk_header {
+ /**
+ * 32-bit number identifying the UK function to be called.
+ * Also see uk_func.
+ */
+ u32 id;
+ /**
+ * The int return code returned by the called UK function.
+ * See the specification of the particular UK function you are
+ * calling for the meaning of the error codes returned. All
+ * UK functions return 0 on success.
+ */
+ u32 ret;
+ /*
+ * Used to ensure 64-bit alignment of this union. Do not remove.
+ * This field is used for padding and does not need to be initialized.
+ */
+ u64 sizer;
+};
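+
+/*
+ * Layout sketch (illustrative, hypothetical call structure): a UK call
+ * argument structure embeds the header as its first member, and the caller
+ * sets header.id to UK_FUNC_ID + n before issuing the call, e.g.
+ *
+ *   struct ukk_example_args {
+ *       union uk_header header;
+ *       u64 payload_gpu_va;
+ *   };
+ */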
+
+/**
+ * This structure carries a 16-bit major and minor number and is sent along with an internal UK call
+ * used during uku_open to identify the versions of the UK module in use on the user side and the kernel side.
+ */
+struct uku_version_check_args {
+ union uk_header header;
+ /**< UK call header */
+ u16 major;
+ /**< This field carries the user-side major version on input and the kernel-side major version on output */
+ u16 minor;
+ /**< This field carries the user-side minor version on input and the kernel-side minor version on output. */
+ u8 padding[4];
+};
+
+/** @} end group uk_api */
+
+/** @} *//* end group base_api */
+
+#ifdef __cplusplus
+}
+#endif /* __cplusplus */
+#endif /* _UK_H_ */
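To make the calling convention described above concrete, the following is a minimal user-side sketch. It is illustrative only and not part of the driver sources: the argument structure, the MY_CLIENT_FUNC_FOO number, the uk_ctx handle and the uku_call() dispatch routine are invented for the example, the u32 type is assumed to come from the OS-specific headers the DDK already relies on, and only union uk_header, UK_FUNC_ID and the uku_open() flow described above are taken from this header.

    #include <string.h>

    #include "uk.h"	/* union uk_header, enum uk_func, enum uk_client_id */

    /* Hypothetical function number for this client's first UK function */
    #define MY_CLIENT_FUNC_FOO (UK_FUNC_ID + 0)

    /* Hypothetical per-call argument structure; the header must come first
     * so the kernel side can locate the id/ret word. */
    struct my_client_foo_args {
    	union uk_header header;
    	u32 in_value;	/* consumed by the kernel-side UKK client */
    	u32 out_value;	/* filled in by the kernel-side UKK client */
    };

    static int my_client_call_foo(void *uk_ctx, u32 in_value, u32 *out_value)
    {
    	struct my_client_foo_args args;

    	memset(&args, 0, sizeof(args));
    	args.header.id = MY_CLIENT_FUNC_FOO;	/* select the UK function to run */
    	args.in_value = in_value;

    	/* uku_call() stands in for the OS-specific user-side dispatch routine */
    	if (uku_call(uk_ctx, &args, sizeof(args)) != 0)
    		return -1;

    	/* the kernel side reuses the same header word for the int return code */
    	if (args.header.ret != 0)
    		return (int)args.header.ret;

    	*out_value = args.out_value;
    	return 0;
    }

Because id and ret share storage in union uk_header, the return code is only meaningful after the call has completed, and the 64-bit sizer member exists purely so that the first payload member sits at the same offset on 32-bit and 64-bit kernels.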
diff --git a/midgard-0.r2p0/patches/arm64-standalone-headers.patch b/midgard-0.r2p0/patches/arm64-standalone-headers.patch
new file mode 100644
index 0000000..969e387
--- /dev/null
+++ b/midgard-0.r2p0/patches/arm64-standalone-headers.patch
@@ -0,0 +1,532 @@
+diff -urN linux-headers-4.8.0-2-common-unpatched/arch/arm64/include/asm/opcodes.h linux-headers-4.8.0-2-common/arch/arm64/include/asm/opcodes.h
+--- linux-headers-4.8.0-2-common-unpatched/arch/arm64/include/asm/opcodes.h 2016-12-15 15:29:37.330706881 +0000
++++ linux-headers-4.8.0-2-common/arch/arm64/include/asm/opcodes.h 1970-01-01 00:00:00.000000000 +0000
+@@ -1,5 +0,0 @@
+-#ifdef CONFIG_CPU_BIG_ENDIAN
+-#define CONFIG_CPU_ENDIAN_BE8 CONFIG_CPU_BIG_ENDIAN
+-#endif
+-
+-#include <../../arm/include/asm/opcodes.h>
+diff -urN linux-headers-4.8.0-2-common-unpatched/arch/arm64/include/asm/sysreg.h linux-headers-4.8.0-2-common/arch/arm64/include/asm/sysreg.h
+--- linux-headers-4.8.0-2-common-unpatched/arch/arm64/include/asm/sysreg.h 2016-12-15 15:29:37.334706963 +0000
++++ linux-headers-4.8.0-2-common/arch/arm64/include/asm/sysreg.h 2016-12-29 17:53:10.380735552 +0000
+@@ -22,8 +22,6 @@
+
+ #include <linux/stringify.h>
+
+-#include <asm/opcodes.h>
+-
+ /*
+ * ARMv8 ARM reserves the following encoding for system registers:
+ * (Ref: ARMv8 ARM, Section: "System instruction class encoding overview",
+@@ -37,6 +35,14 @@
+ #define sys_reg(op0, op1, crn, crm, op2) \
+ ((((op0)&3)<<19)|((op1)<<16)|((crn)<<12)|((crm)<<8)|((op2)<<5))
+
++#ifdef __ASSEMBLY__
++#define ___emit_inst(x) .inst x
++#else
++#define ___emit_inst(x) ".inst " __stringify(x) "\n\t"
++#endif
++
++#define __emit_inst(x) ___emit_inst((x))
++
+ #define SYS_MIDR_EL1 sys_reg(3, 0, 0, 0, 0)
+ #define SYS_MPIDR_EL1 sys_reg(3, 0, 0, 0, 5)
+ #define SYS_REVIDR_EL1 sys_reg(3, 0, 0, 0, 6)
+@@ -81,10 +87,10 @@
+ #define REG_PSTATE_PAN_IMM sys_reg(0, 0, 4, 0, 4)
+ #define REG_PSTATE_UAO_IMM sys_reg(0, 0, 4, 0, 3)
+
+-#define SET_PSTATE_PAN(x) __inst_arm(0xd5000000 | REG_PSTATE_PAN_IMM |\
+- (!!x)<<8 | 0x1f)
+-#define SET_PSTATE_UAO(x) __inst_arm(0xd5000000 | REG_PSTATE_UAO_IMM |\
+- (!!x)<<8 | 0x1f)
++#define SET_PSTATE_PAN(x) __emit_inst(0xd5000000 | REG_PSTATE_PAN_IMM | \
++ (!!x)<<8 | 0x1f)
++#define SET_PSTATE_UAO(x) __emit_inst(0xd5000000 | REG_PSTATE_UAO_IMM | \
++ (!!x)<<8 | 0x1f)
+
+ /* Common SCTLR_ELx flags. */
+ #define SCTLR_ELx_EE (1 << 25)
+diff -urN linux-headers-4.8.0-2-common-unpatched/arch/arm64/include/asm/xen/hypercall.h linux-headers-4.8.0-2-common/arch/arm64/include/asm/xen/hypercall.h
+--- linux-headers-4.8.0-2-common-unpatched/arch/arm64/include/asm/xen/hypercall.h 2016-12-15 15:29:37.334706963 +0000
++++ linux-headers-4.8.0-2-common/arch/arm64/include/asm/xen/hypercall.h 2016-12-29 18:50:39.875430462 +0000
+@@ -1 +1 @@
+-#include <../../arm/include/asm/xen/hypercall.h>
++#include <xen/arm/hypercall.h>
+diff -urN linux-headers-4.8.0-2-common-unpatched/arch/arm64/include/asm/xen/hypervisor.h linux-headers-4.8.0-2-common/arch/arm64/include/asm/xen/hypervisor.h
+--- linux-headers-4.8.0-2-common-unpatched/arch/arm64/include/asm/xen/hypervisor.h 2016-12-15 15:29:37.334706963 +0000
++++ linux-headers-4.8.0-2-common/arch/arm64/include/asm/xen/hypervisor.h 2016-12-29 18:50:56.119697557 +0000
+@@ -1 +1 @@
+-#include <../../arm/include/asm/xen/hypervisor.h>
++#include <xen/arm/hypervisor.h>
+diff -urN linux-headers-4.8.0-2-common-unpatched/arch/arm64/include/asm/xen/interface.h linux-headers-4.8.0-2-common/arch/arm64/include/asm/xen/interface.h
+--- linux-headers-4.8.0-2-common-unpatched/arch/arm64/include/asm/xen/interface.h 2016-12-15 15:29:37.334706963 +0000
++++ linux-headers-4.8.0-2-common/arch/arm64/include/asm/xen/interface.h 2016-12-29 18:50:33.559326677 +0000
+@@ -1 +1 @@
+-#include <../../arm/include/asm/xen/interface.h>
++#include <xen/arm/interface.h>
+diff -urN linux-headers-4.8.0-2-common-unpatched/arch/arm64/include/asm/xen/page-coherent.h linux-headers-4.8.0-2-common/arch/arm64/include/asm/xen/page-coherent.h
+--- linux-headers-4.8.0-2-common-unpatched/arch/arm64/include/asm/xen/page-coherent.h 2016-12-15 15:29:37.334706963 +0000
++++ linux-headers-4.8.0-2-common/arch/arm64/include/asm/xen/page-coherent.h 2016-12-29 18:50:48.011564209 +0000
+@@ -1 +1 @@
+-#include <../../arm/include/asm/xen/page-coherent.h>
++#include <xen/arm/page-coherent.h>
+diff -urN linux-headers-4.8.0-2-common-unpatched/arch/arm64/include/asm/xen/page.h linux-headers-4.8.0-2-common/arch/arm64/include/asm/xen/page.h
+--- linux-headers-4.8.0-2-common-unpatched/arch/arm64/include/asm/xen/page.h 2016-12-15 15:29:37.334706963 +0000
++++ linux-headers-4.8.0-2-common/arch/arm64/include/asm/xen/page.h 2016-12-29 18:50:28.155237909 +0000
+@@ -1 +1 @@
+-#include <../../arm/include/asm/xen/page.h>
++#include <xen/arm/page.h>
+diff -urN linux-headers-4.8.0-2-common-unpatched/include/xen/arm/hypercall.h linux-headers-4.8.0-2-common/include/xen/arm/hypercall.h
+--- linux-headers-4.8.0-2-common-unpatched/include/xen/arm/hypercall.h 1970-01-01 00:00:00.000000000 +0000
++++ linux-headers-4.8.0-2-common/include/xen/arm/hypercall.h 2016-12-29 18:32:54.781934916 +0000
+@@ -0,0 +1,87 @@
++/******************************************************************************
++ * hypercall.h
++ *
++ * Linux-specific hypervisor handling.
++ *
++ * Stefano Stabellini <stefano.stabellini@eu.citrix.com>, Citrix, 2012
++ *
++ * This program is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU General Public License version 2
++ * as published by the Free Software Foundation; or, when distributed
++ * separately from the Linux kernel or incorporated into other
++ * software packages, subject to the following license:
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a copy
++ * of this source file (the "Software"), to deal in the Software without
++ * restriction, including without limitation the rights to use, copy, modify,
++ * merge, publish, distribute, sublicense, and/or sell copies of the Software,
++ * and to permit persons to whom the Software is furnished to do so, subject to
++ * the following conditions:
++ *
++ * The above copyright notice and this permission notice shall be included in
++ * all copies or substantial portions of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
++ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
++ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
++ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
++ * IN THE SOFTWARE.
++ */
++
++#ifndef _ASM_ARM_XEN_HYPERCALL_H
++#define _ASM_ARM_XEN_HYPERCALL_H
++
++#include <linux/bug.h>
++
++#include <xen/interface/xen.h>
++#include <xen/interface/sched.h>
++#include <xen/interface/platform.h>
++
++long privcmd_call(unsigned call, unsigned long a1,
++ unsigned long a2, unsigned long a3,
++ unsigned long a4, unsigned long a5);
++int HYPERVISOR_xen_version(int cmd, void *arg);
++int HYPERVISOR_console_io(int cmd, int count, char *str);
++int HYPERVISOR_grant_table_op(unsigned int cmd, void *uop, unsigned int count);
++int HYPERVISOR_sched_op(int cmd, void *arg);
++int HYPERVISOR_event_channel_op(int cmd, void *arg);
++unsigned long HYPERVISOR_hvm_op(int op, void *arg);
++int HYPERVISOR_memory_op(unsigned int cmd, void *arg);
++int HYPERVISOR_physdev_op(int cmd, void *arg);
++int HYPERVISOR_vcpu_op(int cmd, int vcpuid, void *extra_args);
++int HYPERVISOR_tmem_op(void *arg);
++int HYPERVISOR_vm_assist(unsigned int cmd, unsigned int type);
++int HYPERVISOR_platform_op_raw(void *arg);
++static inline int HYPERVISOR_platform_op(struct xen_platform_op *op)
++{
++ op->interface_version = XENPF_INTERFACE_VERSION;
++ return HYPERVISOR_platform_op_raw(op);
++}
++int HYPERVISOR_multicall(struct multicall_entry *calls, uint32_t nr);
++
++static inline int
++HYPERVISOR_suspend(unsigned long start_info_mfn)
++{
++ struct sched_shutdown r = { .reason = SHUTDOWN_suspend };
++
++ /* start_info_mfn is unused on ARM */
++ return HYPERVISOR_sched_op(SCHEDOP_shutdown, &r);
++}
++
++static inline void
++MULTI_update_va_mapping(struct multicall_entry *mcl, unsigned long va,
++ unsigned int new_val, unsigned long flags)
++{
++ BUG();
++}
++
++static inline void
++MULTI_mmu_update(struct multicall_entry *mcl, struct mmu_update *req,
++ int count, int *success_count, domid_t domid)
++{
++ BUG();
++}
++
++#endif /* _ASM_ARM_XEN_HYPERCALL_H */
+diff -urN linux-headers-4.8.0-2-common-unpatched/include/xen/arm/hypervisor.h linux-headers-4.8.0-2-common/include/xen/arm/hypervisor.h
+--- linux-headers-4.8.0-2-common-unpatched/include/xen/arm/hypervisor.h 1970-01-01 00:00:00.000000000 +0000
++++ linux-headers-4.8.0-2-common/include/xen/arm/hypervisor.h 2016-12-29 18:38:24.175590765 +0000
+@@ -0,0 +1,39 @@
++#ifndef _ASM_ARM_XEN_HYPERVISOR_H
++#define _ASM_ARM_XEN_HYPERVISOR_H
++
++#include <linux/init.h>
++
++extern struct shared_info *HYPERVISOR_shared_info;
++extern struct start_info *xen_start_info;
++
++/* Lazy mode for batching updates / context switch */
++enum paravirt_lazy_mode {
++ PARAVIRT_LAZY_NONE,
++ PARAVIRT_LAZY_MMU,
++ PARAVIRT_LAZY_CPU,
++};
++
++static inline enum paravirt_lazy_mode paravirt_get_lazy_mode(void)
++{
++ return PARAVIRT_LAZY_NONE;
++}
++
++extern struct dma_map_ops *xen_dma_ops;
++
++#ifdef CONFIG_XEN
++void __init xen_early_init(void);
++#else
++static inline void xen_early_init(void) { return; }
++#endif
++
++#ifdef CONFIG_HOTPLUG_CPU
++static inline void xen_arch_register_cpu(int num)
++{
++}
++
++static inline void xen_arch_unregister_cpu(int num)
++{
++}
++#endif
++
++#endif /* _ASM_ARM_XEN_HYPERVISOR_H */
+diff -urN linux-headers-4.8.0-2-common-unpatched/include/xen/arm/interface.h linux-headers-4.8.0-2-common/include/xen/arm/interface.h
+--- linux-headers-4.8.0-2-common-unpatched/include/xen/arm/interface.h 1970-01-01 00:00:00.000000000 +0000
++++ linux-headers-4.8.0-2-common/include/xen/arm/interface.h 2016-12-29 18:45:12.422113087 +0000
+@@ -0,0 +1,85 @@
++/******************************************************************************
++ * Guest OS interface to ARM Xen.
++ *
++ * Stefano Stabellini <stefano.stabellini@eu.citrix.com>, Citrix, 2012
++ */
++
++#ifndef _ASM_ARM_XEN_INTERFACE_H
++#define _ASM_ARM_XEN_INTERFACE_H
++
++#include <linux/types.h>
++
++#define uint64_aligned_t uint64_t __attribute__((aligned(8)))
++
++#define __DEFINE_GUEST_HANDLE(name, type) \
++ typedef struct { union { type *p; uint64_aligned_t q; }; } \
++ __guest_handle_ ## name
++
++#define DEFINE_GUEST_HANDLE_STRUCT(name) \
++ __DEFINE_GUEST_HANDLE(name, struct name)
++#define DEFINE_GUEST_HANDLE(name) __DEFINE_GUEST_HANDLE(name, name)
++#define GUEST_HANDLE(name) __guest_handle_ ## name
++
++#define set_xen_guest_handle(hnd, val) \
++ do { \
++ if (sizeof(hnd) == 8) \
++ *(uint64_t *)&(hnd) = 0; \
++ (hnd).p = val; \
++ } while (0)
++
++#define __HYPERVISOR_platform_op_raw __HYPERVISOR_platform_op
++
++#ifndef __ASSEMBLY__
++/* Explicitly size integers that represent pfns in the interface with
++ * Xen so that we can have one ABI that works for 32 and 64 bit guests.
++ * Note that this means that the xen_pfn_t type may be capable of
++ * representing pfn's which the guest cannot represent in its own pfn
++ * type. However since pfn space is controlled by the guest this is
++ * fine since it simply wouldn't be able to create any sure pfns in
++ * the first place.
++ */
++typedef uint64_t xen_pfn_t;
++#define PRI_xen_pfn "llx"
++typedef uint64_t xen_ulong_t;
++#define PRI_xen_ulong "llx"
++typedef int64_t xen_long_t;
++#define PRI_xen_long "llx"
++/* Guest handles for primitive C types. */
++__DEFINE_GUEST_HANDLE(uchar, unsigned char);
++__DEFINE_GUEST_HANDLE(uint, unsigned int);
++DEFINE_GUEST_HANDLE(char);
++DEFINE_GUEST_HANDLE(int);
++DEFINE_GUEST_HANDLE(void);
++DEFINE_GUEST_HANDLE(uint64_t);
++DEFINE_GUEST_HANDLE(uint32_t);
++DEFINE_GUEST_HANDLE(xen_pfn_t);
++DEFINE_GUEST_HANDLE(xen_ulong_t);
++
++/* Maximum number of virtual CPUs in multi-processor guests. */
++#define MAX_VIRT_CPUS 1
++
++struct arch_vcpu_info { };
++struct arch_shared_info { };
++
++/* TODO: Move pvclock definitions some place arch independent */
++struct pvclock_vcpu_time_info {
++ u32 version;
++ u32 pad0;
++ u64 tsc_timestamp;
++ u64 system_time;
++ u32 tsc_to_system_mul;
++ s8 tsc_shift;
++ u8 flags;
++ u8 pad[2];
++} __attribute__((__packed__)); /* 32 bytes */
++
++/* It is OK to have a 12 bytes struct with no padding because it is packed */
++struct pvclock_wall_clock {
++ u32 version;
++ u32 sec;
++ u32 nsec;
++ u32 sec_hi;
++} __attribute__((__packed__));
++#endif
++
++#endif /* _ASM_ARM_XEN_INTERFACE_H */
+diff -urN linux-headers-4.8.0-2-common-unpatched/include/xen/arm/page-coherent.h linux-headers-4.8.0-2-common/include/xen/arm/page-coherent.h
+--- linux-headers-4.8.0-2-common-unpatched/include/xen/arm/page-coherent.h 1970-01-01 00:00:00.000000000 +0000
++++ linux-headers-4.8.0-2-common/include/xen/arm/page-coherent.h 2016-12-29 18:45:54.666790395 +0000
+@@ -0,0 +1,98 @@
++#ifndef _ASM_ARM_XEN_PAGE_COHERENT_H
++#define _ASM_ARM_XEN_PAGE_COHERENT_H
++
++#include <asm/page.h>
++#include <linux/dma-mapping.h>
++
++void __xen_dma_map_page(struct device *hwdev, struct page *page,
++ dma_addr_t dev_addr, unsigned long offset, size_t size,
++ enum dma_data_direction dir, unsigned long attrs);
++void __xen_dma_unmap_page(struct device *hwdev, dma_addr_t handle,
++ size_t size, enum dma_data_direction dir,
++ unsigned long attrs);
++void __xen_dma_sync_single_for_cpu(struct device *hwdev,
++ dma_addr_t handle, size_t size, enum dma_data_direction dir);
++
++void __xen_dma_sync_single_for_device(struct device *hwdev,
++ dma_addr_t handle, size_t size, enum dma_data_direction dir);
++
++static inline void *xen_alloc_coherent_pages(struct device *hwdev, size_t size,
++ dma_addr_t *dma_handle, gfp_t flags, unsigned long attrs)
++{
++ return __generic_dma_ops(hwdev)->alloc(hwdev, size, dma_handle, flags, attrs);
++}
++
++static inline void xen_free_coherent_pages(struct device *hwdev, size_t size,
++ void *cpu_addr, dma_addr_t dma_handle, unsigned long attrs)
++{
++ __generic_dma_ops(hwdev)->free(hwdev, size, cpu_addr, dma_handle, attrs);
++}
++
++static inline void xen_dma_map_page(struct device *hwdev, struct page *page,
++ dma_addr_t dev_addr, unsigned long offset, size_t size,
++ enum dma_data_direction dir, unsigned long attrs)
++{
++ unsigned long page_pfn = page_to_xen_pfn(page);
++ unsigned long dev_pfn = XEN_PFN_DOWN(dev_addr);
++ unsigned long compound_pages =
++ (1<<compound_order(page)) * XEN_PFN_PER_PAGE;
++ bool local = (page_pfn <= dev_pfn) &&
++ (dev_pfn - page_pfn < compound_pages);
++
++ /*
++ * Dom0 is mapped 1:1, while the Linux page can span across
++ * multiple Xen pages, it's not possible for it to contain a
++ * mix of local and foreign Xen pages. So if the first xen_pfn
++ * == mfn the page is local otherwise it's a foreign page
++ * grant-mapped in dom0. If the page is local we can safely
++ * call the native dma_ops function, otherwise we call the xen
++ * specific function.
++ */
++ if (local)
++ __generic_dma_ops(hwdev)->map_page(hwdev, page, offset, size, dir, attrs);
++ else
++ __xen_dma_map_page(hwdev, page, dev_addr, offset, size, dir, attrs);
++}
++
++static inline void xen_dma_unmap_page(struct device *hwdev, dma_addr_t handle,
++ size_t size, enum dma_data_direction dir, unsigned long attrs)
++{
++ unsigned long pfn = PFN_DOWN(handle);
++ /*
++ * Dom0 is mapped 1:1, while the Linux page can be spanned accross
++ * multiple Xen page, it's not possible to have a mix of local and
++ * foreign Xen page. Dom0 is mapped 1:1, so calling pfn_valid on a
++ * foreign mfn will always return false. If the page is local we can
++ * safely call the native dma_ops function, otherwise we call the xen
++ * specific function.
++ */
++ if (pfn_valid(pfn)) {
++ if (__generic_dma_ops(hwdev)->unmap_page)
++ __generic_dma_ops(hwdev)->unmap_page(hwdev, handle, size, dir, attrs);
++ } else
++ __xen_dma_unmap_page(hwdev, handle, size, dir, attrs);
++}
++
++static inline void xen_dma_sync_single_for_cpu(struct device *hwdev,
++ dma_addr_t handle, size_t size, enum dma_data_direction dir)
++{
++ unsigned long pfn = PFN_DOWN(handle);
++ if (pfn_valid(pfn)) {
++ if (__generic_dma_ops(hwdev)->sync_single_for_cpu)
++ __generic_dma_ops(hwdev)->sync_single_for_cpu(hwdev, handle, size, dir);
++ } else
++ __xen_dma_sync_single_for_cpu(hwdev, handle, size, dir);
++}
++
++static inline void xen_dma_sync_single_for_device(struct device *hwdev,
++ dma_addr_t handle, size_t size, enum dma_data_direction dir)
++{
++ unsigned long pfn = PFN_DOWN(handle);
++ if (pfn_valid(pfn)) {
++ if (__generic_dma_ops(hwdev)->sync_single_for_device)
++ __generic_dma_ops(hwdev)->sync_single_for_device(hwdev, handle, size, dir);
++ } else
++ __xen_dma_sync_single_for_device(hwdev, handle, size, dir);
++}
++
++#endif /* _ASM_ARM_XEN_PAGE_COHERENT_H */
+diff -urN linux-headers-4.8.0-2-common-unpatched/include/xen/arm/page.h linux-headers-4.8.0-2-common/include/xen/arm/page.h
+--- linux-headers-4.8.0-2-common-unpatched/include/xen/arm/page.h 1970-01-01 00:00:00.000000000 +0000
++++ linux-headers-4.8.0-2-common/include/xen/arm/page.h 2016-12-29 18:46:47.371639580 +0000
+@@ -0,0 +1,122 @@
++#ifndef _ASM_ARM_XEN_PAGE_H
++#define _ASM_ARM_XEN_PAGE_H
++
++#include <asm/page.h>
++#include <asm/pgtable.h>
++
++#include <linux/pfn.h>
++#include <linux/types.h>
++#include <linux/dma-mapping.h>
++
++#include <xen/xen.h>
++#include <xen/interface/grant_table.h>
++
++#define phys_to_machine_mapping_valid(pfn) (1)
++
++/* Xen machine address */
++typedef struct xmaddr {
++ phys_addr_t maddr;
++} xmaddr_t;
++
++/* Xen pseudo-physical address */
++typedef struct xpaddr {
++ phys_addr_t paddr;
++} xpaddr_t;
++
++#define XMADDR(x) ((xmaddr_t) { .maddr = (x) })
++#define XPADDR(x) ((xpaddr_t) { .paddr = (x) })
++
++#define INVALID_P2M_ENTRY (~0UL)
++
++/*
++ * The pseudo-physical frame (pfn) used in all the helpers is always based
++ * on Xen page granularity (i.e 4KB).
++ *
++ * A Linux page may be split across multiple non-contiguous Xen page so we
++ * have to keep track with frame based on 4KB page granularity.
++ *
++ * PV drivers should never make a direct usage of those helpers (particularly
++ * pfn_to_gfn and gfn_to_pfn).
++ */
++
++unsigned long __pfn_to_mfn(unsigned long pfn);
++extern struct rb_root phys_to_mach;
++
++/* Pseudo-physical <-> Guest conversion */
++static inline unsigned long pfn_to_gfn(unsigned long pfn)
++{
++ return pfn;
++}
++
++static inline unsigned long gfn_to_pfn(unsigned long gfn)
++{
++ return gfn;
++}
++
++/* Pseudo-physical <-> BUS conversion */
++static inline unsigned long pfn_to_bfn(unsigned long pfn)
++{
++ unsigned long mfn;
++
++ if (phys_to_mach.rb_node != NULL) {
++ mfn = __pfn_to_mfn(pfn);
++ if (mfn != INVALID_P2M_ENTRY)
++ return mfn;
++ }
++
++ return pfn;
++}
++
++static inline unsigned long bfn_to_pfn(unsigned long bfn)
++{
++ return bfn;
++}
++
++#define bfn_to_local_pfn(bfn) bfn_to_pfn(bfn)
++
++/* VIRT <-> GUEST conversion */
++#define virt_to_gfn(v) (pfn_to_gfn(virt_to_phys(v) >> XEN_PAGE_SHIFT))
++#define gfn_to_virt(m) (__va(gfn_to_pfn(m) << XEN_PAGE_SHIFT))
++
++/* Only used in PV code. But ARM guests are always HVM. */
++static inline xmaddr_t arbitrary_virt_to_machine(void *vaddr)
++{
++ BUG();
++}
++
++/* TODO: this shouldn't be here but it is because the frontend drivers
++ * are using it (its rolled in headers) even though we won't hit the code path.
++ * So for right now just punt with this.
++ */
++static inline pte_t *lookup_address(unsigned long address, unsigned int *level)
++{
++ BUG();
++ return NULL;
++}
++
++extern int set_foreign_p2m_mapping(struct gnttab_map_grant_ref *map_ops,
++ struct gnttab_map_grant_ref *kmap_ops,
++ struct page **pages, unsigned int count);
++
++extern int clear_foreign_p2m_mapping(struct gnttab_unmap_grant_ref *unmap_ops,
++ struct gnttab_unmap_grant_ref *kunmap_ops,
++ struct page **pages, unsigned int count);
++
++bool __set_phys_to_machine(unsigned long pfn, unsigned long mfn);
++bool __set_phys_to_machine_multi(unsigned long pfn, unsigned long mfn,
++ unsigned long nr_pages);
++
++static inline bool set_phys_to_machine(unsigned long pfn, unsigned long mfn)
++{
++ return __set_phys_to_machine(pfn, mfn);
++}
++
++#define xen_remap(cookie, size) ioremap_cache((cookie), (size))
++#define xen_unmap(cookie) iounmap((cookie))
++
++bool xen_arch_need_swiotlb(struct device *dev,
++ phys_addr_t phys,
++ dma_addr_t dev_addr);
++unsigned long xen_get_swiotlb_free_pages(unsigned int order);
++
++#endif /* _ASM_ARM_XEN_PAGE_H */
diff --git a/midgard-0.r2p0/platform/Kbuild b/midgard-0.r2p0/platform/Kbuild
new file mode 100755
index 0000000..558657b
--- /dev/null
+++ b/midgard-0.r2p0/platform/Kbuild
@@ -0,0 +1,21 @@
+#
+# (C) COPYRIGHT 2012 ARM Limited. All rights reserved.
+#
+# This program is free software and is provided to you under the terms of the
+# GNU General Public License version 2 as published by the Free Software
+# Foundation, and any use by you of this program is subject to the terms
+# of such GNU licence.
+#
+# A copy of the licence is included with the program, and can also be obtained
+# from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
+# Boston, MA 02110-1301, USA.
+#
+#
+
+
+
+ifeq ($(CONFIG_MALI_PLATFORM_THIRDPARTY),y)
+# remove begin and end quotes from the Kconfig string type
+ platform_name := $(shell echo $(CONFIG_MALI_PLATFORM_THIRDPARTY_NAME))
+ obj-y += $(platform_name)/
+endif
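For example, building with CONFIG_MALI_PLATFORM_THIRDPARTY=y and CONFIG_MALI_PLATFORM_THIRDPARTY_NAME="xyz" (a hypothetical platform name) makes the fragment above descend into platform/xyz/, which is then expected to supply its own Kbuild listing that platform's objects and, optionally, a Kconfig hooked in as described in platform/Kconfig below.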
diff --git a/midgard-0.r2p0/platform/Kconfig b/midgard-0.r2p0/platform/Kconfig
new file mode 100755
index 0000000..8fb4e91
--- /dev/null
+++ b/midgard-0.r2p0/platform/Kconfig
@@ -0,0 +1,24 @@
+#
+# (C) COPYRIGHT 2012 ARM Limited. All rights reserved.
+#
+# This program is free software and is provided to you under the terms of the
+# GNU General Public License version 2 as published by the Free Software
+# Foundation, and any use by you of this program is subject to the terms
+# of such GNU licence.
+#
+# A copy of the licence is included with the program, and can also be obtained
+# from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
+# Boston, MA 02110-1301, USA.
+#
+#
+
+
+
+
+# Add your platform specific Kconfig file here
+#
+# "drivers/gpu/arm/midgard/platform/xxx/Kconfig"
+#
+# Where xxx is the platform name set in MALI_PLATFORM_THIRDPARTY_NAME
+#
+
diff --git a/midgard-0.r2p0/platform/devicetree/Kbuild b/midgard-0.r2p0/platform/devicetree/Kbuild
new file mode 100755
index 0000000..b5a49f3
--- /dev/null
+++ b/midgard-0.r2p0/platform/devicetree/Kbuild
@@ -0,0 +1,22 @@
+#
+# (C) COPYRIGHT 2012-2013 ARM Limited. All rights reserved.
+#
+# This program is free software and is provided to you under the terms of the
+# GNU General Public License version 2 as published by the Free Software
+# Foundation, and any use by you of this program is subject to the terms
+# of such GNU licence.
+#
+# A copy of the licence is included with the program, and can also be obtained
+# from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
+# Boston, MA 02110-1301, USA.
+#
+#
+
+
+ifeq ($(CONFIG_MALI_MIDGARD),y)
+obj-y += mali_kbase_runtime_pm.c
+obj-y += mali_kbase_config_devicetree.c
+else ifeq ($(CONFIG_MALI_MIDGARD),m)
+SRC += platform/devicetree/mali_kbase_runtime_pm.c
+SRC += platform/devicetree/mali_kbase_config_devicetree.c
+endif
diff --git a/midgard-0.r2p0/platform/devicetree/mali_kbase_config_devicetree.c b/midgard-0.r2p0/platform/devicetree/mali_kbase_config_devicetree.c
new file mode 100755
index 0000000..b2a7c93
--- /dev/null
+++ b/midgard-0.r2p0/platform/devicetree/mali_kbase_config_devicetree.c
@@ -0,0 +1,31 @@
+/*
+ *
+ * (C) COPYRIGHT 2015 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained
+ * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
+ * Boston, MA 02110-1301, USA.
+ *
+ */
+
+
+
+#include <mali_kbase_config.h>
+
+int kbase_platform_early_init(void)
+{
+ /* Nothing needed at this stage */
+ return 0;
+}
+
+static struct kbase_platform_config dummy_platform_config;
+
+struct kbase_platform_config *kbase_get_platform_config(void)
+{
+ return &dummy_platform_config;
+}
diff --git a/midgard-0.r2p0/platform/devicetree/mali_kbase_config_platform.h b/midgard-0.r2p0/platform/devicetree/mali_kbase_config_platform.h
new file mode 100755
index 0000000..34f6d57
--- /dev/null
+++ b/midgard-0.r2p0/platform/devicetree/mali_kbase_config_platform.h
@@ -0,0 +1,80 @@
+/*
+ *
+ * (C) COPYRIGHT 2014-2016 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained
+ * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
+ * Boston, MA 02110-1301, USA.
+ *
+ */
+
+
+
+/**
+ * Maximum frequency GPU will be clocked at. Given in kHz.
+ * This must be specified as there is no default value.
+ *
+ * Attached value: number in kHz
+ * Default value: NA
+ */
+#define GPU_FREQ_KHZ_MAX (5000)
+/**
+ * Minimum frequency GPU will be clocked at. Given in kHz.
+ * This must be specified as there is no default value.
+ *
+ * Attached value: number in kHz
+ * Default value: NA
+ */
+#define GPU_FREQ_KHZ_MIN (5000)
+
+/**
+ * CPU_SPEED_FUNC - A pointer to a function that calculates the CPU clock
+ *
+ * CPU clock speed of the platform is in MHz - see kbase_cpu_clk_speed_func
+ * for the function prototype.
+ *
+ * Attached value: A kbase_cpu_clk_speed_func.
+ * Default Value: NA
+ */
+#define CPU_SPEED_FUNC (NULL)
+
+/**
+ * GPU_SPEED_FUNC - A pointer to a function that calculates the GPU clock
+ *
+ * GPU clock speed of the platform in MHz - see kbase_gpu_clk_speed_func
+ * for the function prototype.
+ *
+ * Attached value: A kbase_gpu_clk_speed_func.
+ * Default Value: NA
+ */
+#define GPU_SPEED_FUNC (NULL)
+
+/**
+ * Power management configuration
+ *
+ * Attached value: pointer to @ref kbase_pm_callback_conf
+ * Default value: See @ref kbase_pm_callback_conf
+ */
+#define POWER_MANAGEMENT_CALLBACKS (&pm_callbacks)
+
+/**
+ * Platform specific configuration functions
+ *
+ * Attached value: pointer to @ref kbase_platform_funcs_conf
+ * Default value: See @ref kbase_platform_funcs_conf
+ */
+#define PLATFORM_FUNCS (NULL)
+
+extern struct kbase_pm_callback_conf pm_callbacks;
+
+/**
+ * Protected mode switch
+ *
+ * Attached value: pointer to @ref kbase_protected_ops
+ */
+#define PROTECTED_CALLBACKS (NULL)
diff --git a/midgard-0.r2p0/platform/devicetree/mali_kbase_runtime_pm.c b/midgard-0.r2p0/platform/devicetree/mali_kbase_runtime_pm.c
new file mode 100755
index 0000000..aa4376a
--- /dev/null
+++ b/midgard-0.r2p0/platform/devicetree/mali_kbase_runtime_pm.c
@@ -0,0 +1,100 @@
+/*
+ *
+ * (C) COPYRIGHT 2015 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained
+ * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
+ * Boston, MA 02110-1301, USA.
+ *
+ */
+
+
+
+#include <mali_kbase.h>
+#include <mali_kbase_defs.h>
+#include <linux/pm_runtime.h>
+#include <linux/suspend.h>
+
+static int pm_callback_power_on(struct kbase_device *kbdev)
+{
+ int ret;
+
+ dev_dbg(kbdev->dev, "pm_callback_power_on %p\n",
+ (void *)kbdev->dev->pm_domain);
+
+ ret = pm_runtime_get_sync(kbdev->dev);
+
+ dev_dbg(kbdev->dev, "pm_runtime_get returned %d\n", ret);
+
+ return 1;
+}
+
+static void pm_callback_power_off(struct kbase_device *kbdev)
+{
+ dev_dbg(kbdev->dev, "pm_callback_power_off\n");
+
+ pm_runtime_put_autosuspend(kbdev->dev);
+}
+
+int kbase_device_runtime_init(struct kbase_device *kbdev)
+{
+ dev_dbg(kbdev->dev, "kbase_device_runtime_init\n");
+ pm_runtime_enable(kbdev->dev);
+
+ return 0;
+}
+
+void kbase_device_runtime_disable(struct kbase_device *kbdev)
+{
+ dev_dbg(kbdev->dev, "kbase_device_runtime_disable\n");
+ pm_runtime_disable(kbdev->dev);
+}
+
+static int pm_callback_runtime_on(struct kbase_device *kbdev)
+{
+ dev_dbg(kbdev->dev, "pm_callback_runtime_on\n");
+
+ return 0;
+}
+
+static void pm_callback_runtime_off(struct kbase_device *kbdev)
+{
+ dev_dbg(kbdev->dev, "pm_callback_runtime_off\n");
+}
+
+static void pm_callback_resume(struct kbase_device *kbdev)
+{
+ int ret = pm_callback_runtime_on(kbdev);
+
+ WARN_ON(ret);
+}
+
+static void pm_callback_suspend(struct kbase_device *kbdev)
+{
+ pm_callback_runtime_off(kbdev);
+}
+
+struct kbase_pm_callback_conf pm_callbacks = {
+ .power_on_callback = pm_callback_power_on,
+ .power_off_callback = pm_callback_power_off,
+ .power_suspend_callback = pm_callback_suspend,
+ .power_resume_callback = pm_callback_resume,
+#ifdef KBASE_PM_RUNTIME
+ .power_runtime_init_callback = kbase_device_runtime_init,
+ .power_runtime_term_callback = kbase_device_runtime_disable,
+ .power_runtime_on_callback = pm_callback_runtime_on,
+ .power_runtime_off_callback = pm_callback_runtime_off,
+#else /* KBASE_PM_RUNTIME */
+ .power_runtime_init_callback = NULL,
+ .power_runtime_term_callback = NULL,
+ .power_runtime_on_callback = NULL,
+ .power_runtime_off_callback = NULL,
+#endif /* KBASE_PM_RUNTIME */
+};
+
+
diff --git a/midgard-0.r2p0/platform/juno_soc/Kbuild b/midgard-0.r2p0/platform/juno_soc/Kbuild
new file mode 100755
index 0000000..5b6ef37
--- /dev/null
+++ b/midgard-0.r2p0/platform/juno_soc/Kbuild
@@ -0,0 +1,19 @@
+#
+# (C) COPYRIGHT 2013-2014 ARM Limited. All rights reserved.
+#
+# This program is free software and is provided to you under the terms of the
+# GNU General Public License version 2 as published by the Free Software
+# Foundation, and any use by you of this program is subject to the terms
+# of such GNU licence.
+#
+# A copy of the licence is included with the program, and can also be obtained
+# from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
+# Boston, MA 02110-1301, USA.
+#
+#
+
+
+obj-y += mali_kbase_config_juno_soc.o
+
+
+obj-m += juno_mali_opp.o
diff --git a/midgard-0.r2p0/platform/juno_soc/juno_mali_opp.c b/midgard-0.r2p0/platform/juno_soc/juno_mali_opp.c
new file mode 100755
index 0000000..ccfd8cc
--- /dev/null
+++ b/midgard-0.r2p0/platform/juno_soc/juno_mali_opp.c
@@ -0,0 +1,84 @@
+/*
+ *
+ * (C) COPYRIGHT 2014 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained
+ * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
+ * Boston, MA 02110-1301, USA.
+ *
+ */
+
+
+
+#include <linux/module.h>
+#include <linux/of_platform.h>
+#include <linux/platform_device.h>
+#include <linux/scpi_protocol.h>
+#include <linux/version.h>
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 13, 0)
+#include <linux/pm_opp.h>
+#else /* Linux >= 3.13 */
+/* In 3.13 the OPP include header file, types, and functions were all
+ * renamed. Use the old filename for the include, and define the new names to
+ * the old, when an old kernel is detected.
+ */
+#include <linux/opp.h>
+#define dev_pm_opp_add opp_add
+#endif /* Linux >= 3.13 */
+
+
+static int init_juno_opps_from_scpi(struct device *dev)
+{
+ struct scpi_opp *sopp;
+ int i;
+
+ /* Hard coded for Juno. 2 is GPU domain */
+ sopp = scpi_dvfs_get_opps(2);
+ if (IS_ERR_OR_NULL(sopp))
+ return PTR_ERR(sopp);
+
+ for (i = 0; i < sopp->count; i++) {
+ struct scpi_opp_entry *e = &sopp->opp[i];
+
+ dev_info(dev, "Mali OPP from SCPI: %u Hz @ %u mV\n",
+ e->freq_hz, e->volt_mv);
+
+ dev_pm_opp_add(dev, e->freq_hz, e->volt_mv * 1000);
+ }
+
+ return 0;
+}
+
+static int juno_setup_opps(void)
+{
+ struct device_node *np;
+ struct platform_device *pdev;
+ int err;
+
+ np = of_find_node_by_name(NULL, "gpu");
+ if (!np) {
+ pr_err("Failed to find DT entry for Mali\n");
+ return -EFAULT;
+ }
+
+ pdev = of_find_device_by_node(np);
+ if (!pdev) {
+ pr_err("Failed to find device for Mali\n");
+ of_node_put(np);
+ return -EFAULT;
+ }
+
+ err = init_juno_opps_from_scpi(&pdev->dev);
+
+ of_node_put(np);
+
+ return err;
+}
+
+module_init(juno_setup_opps);
+MODULE_LICENSE("GPL");
diff --git a/midgard-0.r2p0/platform/juno_soc/mali_kbase_config_juno_soc.c b/midgard-0.r2p0/platform/juno_soc/mali_kbase_config_juno_soc.c
new file mode 100755
index 0000000..c654818
--- /dev/null
+++ b/midgard-0.r2p0/platform/juno_soc/mali_kbase_config_juno_soc.c
@@ -0,0 +1,156 @@
+/*
+ *
+ * (C) COPYRIGHT 2011-2016 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained
+ * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
+ * Boston, MA 02110-1301, USA.
+ *
+ */
+
+
+
+#include <linux/ioport.h>
+#include <linux/thermal.h>
+#include <mali_kbase.h>
+#include <mali_kbase_defs.h>
+#include <mali_kbase_config.h>
+#include <mali_kbase_smc.h>
+
+/* Versatile Express (VE) Juno Development Platform */
+
+#define HARD_RESET_AT_POWER_OFF 0
+
+#ifndef CONFIG_OF
+static struct kbase_io_resources io_resources = {
+ .job_irq_number = 65,
+ .mmu_irq_number = 66,
+ .gpu_irq_number = 64,
+ .io_memory_region = {
+ .start = 0x2D000000,
+ .end = 0x2D000000 + (4096 * 4) - 1}
+};
+#endif
+
+static int pm_callback_power_on(struct kbase_device *kbdev)
+{
+ /* Nothing is needed on VExpress, but we may have destroyed GPU state (if the below HARD_RESET code is active) */
+ return 1;
+}
+
+static void pm_callback_power_off(struct kbase_device *kbdev)
+{
+#if HARD_RESET_AT_POWER_OFF
+ /* Cause a GPU hard reset to test whether we have actually idled the GPU
+ * and that we properly reconfigure the GPU on power up.
+ * Usually this would be dangerous, but if the GPU is working correctly it should
+ * be completely safe as the GPU should not be active at this point.
+ * However this is disabled normally because it will most likely interfere with
+ * bus logging etc.
+ */
+ KBASE_TRACE_ADD(kbdev, CORE_GPU_HARD_RESET, NULL, NULL, 0u, 0);
+ kbase_os_reg_write(kbdev, GPU_CONTROL_REG(GPU_COMMAND), GPU_COMMAND_HARD_RESET);
+#endif
+}
+
+struct kbase_pm_callback_conf pm_callbacks = {
+ .power_on_callback = pm_callback_power_on,
+ .power_off_callback = pm_callback_power_off,
+ .power_suspend_callback = NULL,
+ .power_resume_callback = NULL
+};
+
+/*
+ * Juno Protected Mode integration
+ */
+
+/* SMC Function Numbers */
+#define JUNO_SMC_PROTECTED_ENTER_FUNC 0xff06
+#define JUNO_SMC_PROTECTED_RESET_FUNC 0xff07
+
+static int juno_protected_mode_enter(struct kbase_device *kbdev)
+{
+ /* T62X in SoC detected */
+ u64 ret = kbase_invoke_smc(SMC_OEN_SIP,
+ JUNO_SMC_PROTECTED_ENTER_FUNC, false,
+ 0, 0, 0);
+ return ret;
+}
+
+/* TODO: Remove these externs, reset should be done by the firmware */
+extern void kbase_reg_write(struct kbase_device *kbdev, u16 offset, u32 value,
+ struct kbase_context *kctx);
+
+extern u32 kbase_reg_read(struct kbase_device *kbdev, u16 offset,
+ struct kbase_context *kctx);
+
+static int juno_protected_mode_reset(struct kbase_device *kbdev)
+{
+
+ /* T62X in SoC detected */
+ u64 ret = kbase_invoke_smc(SMC_OEN_SIP,
+ JUNO_SMC_PROTECTED_RESET_FUNC, false,
+ 0, 0, 0);
+
+ /* TODO: Remove this reset, it should be done by the firmware */
+ kbase_reg_write(kbdev, GPU_CONTROL_REG(GPU_COMMAND),
+ GPU_COMMAND_HARD_RESET, NULL);
+
+ while ((kbase_reg_read(kbdev, GPU_CONTROL_REG(GPU_IRQ_RAWSTAT), NULL)
+ & RESET_COMPLETED) != RESET_COMPLETED)
+ ;
+
+ return ret;
+}
+
+static bool juno_protected_mode_supported(struct kbase_device *kbdev)
+{
+ u32 gpu_id = kbdev->gpu_props.props.raw_props.gpu_id;
+
+ /*
+ * Protected mode is only supported for the built-in GPU
+ * _and_ only if the right firmware is running.
+ *
+ * Given that at init time the GPU is not powered up, the
+ * juno_protected_mode_reset function can't be used as
+ * it needs to access GPU registers.
+ * However, although we don't want the GPU to boot into
+ * protected mode, we know a GPU reset will be done after
+ * this function is called, so although we set the GPU to
+ * protected mode here it will exit protected mode before the
+ * driver is ready to run work.
+ */
+ if (gpu_id == GPU_ID_MAKE(GPU_ID_PI_T62X, 0, 1, 0) &&
+ (kbdev->reg_start == 0x2d000000))
+ return juno_protected_mode_enter(kbdev) == 0;
+
+ return false;
+}
+
+struct kbase_protected_ops juno_protected_ops = {
+ .protected_mode_enter = juno_protected_mode_enter,
+ .protected_mode_reset = juno_protected_mode_reset,
+ .protected_mode_supported = juno_protected_mode_supported,
+};
+
+static struct kbase_platform_config versatile_platform_config = {
+#ifndef CONFIG_OF
+ .io_resources = &io_resources
+#endif
+};
+
+struct kbase_platform_config *kbase_get_platform_config(void)
+{
+ return &versatile_platform_config;
+}
+
+int kbase_platform_early_init(void)
+{
+ /* Nothing needed at this stage */
+ return 0;
+}
diff --git a/midgard-0.r2p0/platform/juno_soc/mali_kbase_config_platform.h b/midgard-0.r2p0/platform/juno_soc/mali_kbase_config_platform.h
new file mode 100755
index 0000000..ab29e9d
--- /dev/null
+++ b/midgard-0.r2p0/platform/juno_soc/mali_kbase_config_platform.h
@@ -0,0 +1,84 @@
+/*
+ *
+ * (C) COPYRIGHT 2014-2016 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained
+ * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
+ * Boston, MA 02110-1301, USA.
+ *
+ */
+
+
+
+/**
+ * Maximum frequency GPU will be clocked at. Given in kHz.
+ * This must be specified as there is no default value.
+ *
+ * Attached value: number in kHz
+ * Default value: NA
+ */
+#define GPU_FREQ_KHZ_MAX 600000
+/**
+ * Minimum frequency GPU will be clocked at. Given in kHz.
+ * This must be specified as there is no default value.
+ *
+ * Attached value: number in kHz
+ * Default value: NA
+ */
+#define GPU_FREQ_KHZ_MIN 600000
+
+/**
+ * CPU_SPEED_FUNC - A pointer to a function that calculates the CPU clock
+ *
+ * CPU clock speed of the platform is in MHz - see kbase_cpu_clk_speed_func
+ * for the function prototype.
+ *
+ * Attached value: A kbase_cpu_clk_speed_func.
+ * Default Value: NA
+ */
+#define CPU_SPEED_FUNC (&kbase_cpuprops_get_default_clock_speed)
+
+/**
+ * GPU_SPEED_FUNC - A pointer to a function that calculates the GPU clock
+ *
+ * GPU clock speed of the platform in MHz - see kbase_gpu_clk_speed_func
+ * for the function prototype.
+ *
+ * Attached value: A kbase_gpu_clk_speed_func.
+ * Default Value: NA
+ */
+#define GPU_SPEED_FUNC (NULL)
+
+/**
+ * Power management configuration
+ *
+ * Attached value: pointer to @ref kbase_pm_callback_conf
+ * Default value: See @ref kbase_pm_callback_conf
+ */
+#define POWER_MANAGEMENT_CALLBACKS (&pm_callbacks)
+
+/**
+ * Platform specific configuration functions
+ *
+ * Attached value: pointer to @ref kbase_platform_funcs_conf
+ * Default value: See @ref kbase_platform_funcs_conf
+ */
+#define PLATFORM_FUNCS (NULL)
+
+/**
+ * Protected mode switch
+ *
+ * Attached value: pointer to @ref kbase_protected_ops
+ */
+#define PROTECTED_CALLBACKS (&juno_protected_ops)
+
+extern struct kbase_pm_callback_conf pm_callbacks;
+#ifdef CONFIG_DEVFREQ_THERMAL
+extern struct devfreq_cooling_ops juno_model_ops;
+#endif
+extern struct kbase_protected_ops juno_protected_ops;
diff --git a/midgard-0.r2p0/platform/mali_kbase_platform_common.h b/midgard-0.r2p0/platform/mali_kbase_platform_common.h
new file mode 100755
index 0000000..7cb3be7
--- /dev/null
+++ b/midgard-0.r2p0/platform/mali_kbase_platform_common.h
@@ -0,0 +1,26 @@
+/*
+ *
+ * (C) COPYRIGHT 2010-2013 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained
+ * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
+ * Boston, MA 02110-1301, USA.
+ *
+ */
+
+
+
+/**
+ * @brief Entry point to transfer control to a platform for early initialization
+ *
+ * This function is called early on in the initialization during execution of
+ * @ref kbase_driver_init.
+ *
+ * @return Zero to indicate success, non-zero for failure.
+ */
+int kbase_platform_early_init(void);
diff --git a/midgard-0.r2p0/platform/mali_kbase_platform_fake.h b/midgard-0.r2p0/platform/mali_kbase_platform_fake.h
new file mode 100755
index 0000000..01f9dfc
--- /dev/null
+++ b/midgard-0.r2p0/platform/mali_kbase_platform_fake.h
@@ -0,0 +1,38 @@
+/*
+ *
+ * (C) COPYRIGHT 2010-2014 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained
+ * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
+ * Boston, MA 02110-1301, USA.
+ *
+ */
+
+
+
+#ifdef CONFIG_MALI_PLATFORM_FAKE
+
+/**
+ * kbase_platform_fake_register - Entry point for fake platform registration
+ *
+ * This function is called early on in the initialization during execution of
+ * kbase_driver_init.
+ *
+ * Return: 0 to indicate success, non-zero for failure.
+ */
+int kbase_platform_fake_register(void);
+
+/**
+ * kbase_platform_fake_unregister - Entry point for fake platform unregistration
+ *
+ * This function is called in the termination during execution of
+ * kbase_driver_exit.
+ */
+void kbase_platform_fake_unregister(void);
+
+#endif /* CONFIG_MALI_PLATFORM_FAKE */
diff --git a/midgard-0.r2p0/platform/vexpress/Kbuild b/midgard-0.r2p0/platform/vexpress/Kbuild
new file mode 100755
index 0000000..084a156
--- /dev/null
+++ b/midgard-0.r2p0/platform/vexpress/Kbuild
@@ -0,0 +1,18 @@
+#
+# (C) COPYRIGHT 2012-2013 ARM Limited. All rights reserved.
+#
+# This program is free software and is provided to you under the terms of the
+# GNU General Public License version 2 as published by the Free Software
+# Foundation, and any use by you of this program is subject to the terms
+# of such GNU licence.
+#
+# A copy of the licence is included with the program, and can also be obtained
+# from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
+# Boston, MA 02110-1301, USA.
+#
+#
+
+
+
+obj-y += mali_kbase_config_vexpress.o
+obj-y += mali_kbase_cpu_vexpress.o
diff --git a/midgard-0.r2p0/platform/vexpress/mali_kbase_config_platform.h b/midgard-0.r2p0/platform/vexpress/mali_kbase_config_platform.h
new file mode 100755
index 0000000..dc4471b
--- /dev/null
+++ b/midgard-0.r2p0/platform/vexpress/mali_kbase_config_platform.h
@@ -0,0 +1,82 @@
+/*
+ *
+ * (C) COPYRIGHT 2014-2016 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained
+ * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
+ * Boston, MA 02110-1301, USA.
+ *
+ */
+
+
+
+#include "mali_kbase_cpu_vexpress.h"
+
+/**
+ * Maximum frequency GPU will be clocked at. Given in kHz.
+ * This must be specified as there is no default value.
+ *
+ * Attached value: number in kHz
+ * Default value: NA
+ */
+#define GPU_FREQ_KHZ_MAX kbase_get_platform_max_freq()
+/**
+ * Minimum frequency GPU will be clocked at. Given in kHz.
+ * This must be specified as there is no default value.
+ *
+ * Attached value: number in kHz
+ * Default value: NA
+ */
+#define GPU_FREQ_KHZ_MIN kbase_get_platform_min_freq()
+
+/**
+ * CPU_SPEED_FUNC - A pointer to a function that calculates the CPU clock
+ *
+ * CPU clock speed of the platform is in MHz - see kbase_cpu_clk_speed_func
+ * for the function prototype.
+ *
+ * Attached value: A kbase_cpu_clk_speed_func.
+ * Default Value: NA
+ */
+#define CPU_SPEED_FUNC (&kbase_get_vexpress_cpu_clock_speed)
+
+/**
+ * GPU_SPEED_FUNC - A pointer to a function that calculates the GPU clock
+ *
+ * GPU clock speed of the platform in MHz - see kbase_gpu_clk_speed_func
+ * for the function prototype.
+ *
+ * Attached value: A kbase_gpu_clk_speed_func.
+ * Default Value: NA
+ */
+#define GPU_SPEED_FUNC (NULL)
+
+/**
+ * Power management configuration
+ *
+ * Attached value: pointer to @ref kbase_pm_callback_conf
+ * Default value: See @ref kbase_pm_callback_conf
+ */
+#define POWER_MANAGEMENT_CALLBACKS (&pm_callbacks)
+
+/**
+ * Platform specific configuration functions
+ *
+ * Attached value: pointer to @ref kbase_platform_funcs_conf
+ * Default value: See @ref kbase_platform_funcs_conf
+ */
+#define PLATFORM_FUNCS (NULL)
+
+/**
+ * Protected mode switch
+ *
+ * Attached value: pointer to @ref kbase_protected_ops
+ */
+#define PROTECTED_CALLBACKS (NULL)
+
+extern struct kbase_pm_callback_conf pm_callbacks;
diff --git a/midgard-0.r2p0/platform/vexpress/mali_kbase_config_vexpress.c b/midgard-0.r2p0/platform/vexpress/mali_kbase_config_vexpress.c
new file mode 100755
index 0000000..15ce2bc
--- /dev/null
+++ b/midgard-0.r2p0/platform/vexpress/mali_kbase_config_vexpress.c
@@ -0,0 +1,85 @@
+/*
+ *
+ * (C) COPYRIGHT 2011-2016 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained
+ * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
+ * Boston, MA 02110-1301, USA.
+ *
+ */
+
+
+
+
+
+#include <linux/ioport.h>
+#include <mali_kbase.h>
+#include <mali_kbase_defs.h>
+#include <mali_kbase_config.h>
+#include "mali_kbase_cpu_vexpress.h"
+#include "mali_kbase_config_platform.h"
+
+#define HARD_RESET_AT_POWER_OFF 0
+
+#ifndef CONFIG_OF
+static struct kbase_io_resources io_resources = {
+ .job_irq_number = 68,
+ .mmu_irq_number = 69,
+ .gpu_irq_number = 70,
+ .io_memory_region = {
+ .start = 0xFC010000,
+ .end = 0xFC010000 + (4096 * 4) - 1
+ }
+};
+#endif /* CONFIG_OF */
+
+static int pm_callback_power_on(struct kbase_device *kbdev)
+{
+ /* Nothing is needed on VExpress, but we may have destroyed GPU state (if the below HARD_RESET code is active) */
+ return 1;
+}
+
+static void pm_callback_power_off(struct kbase_device *kbdev)
+{
+#if HARD_RESET_AT_POWER_OFF
+ /* Cause a GPU hard reset to test whether we have actually idled the GPU
+ * and that we properly reconfigure the GPU on power up.
+ * Usually this would be dangerous, but if the GPU is working correctly it should
+ * be completely safe as the GPU should not be active at this point.
+ * However this is disabled normally because it will most likely interfere with
+ * bus logging etc.
+ */
+ KBASE_TRACE_ADD(kbdev, CORE_GPU_HARD_RESET, NULL, NULL, 0u, 0);
+ kbase_os_reg_write(kbdev, GPU_CONTROL_REG(GPU_COMMAND), GPU_COMMAND_HARD_RESET);
+#endif
+}
+
+struct kbase_pm_callback_conf pm_callbacks = {
+ .power_on_callback = pm_callback_power_on,
+ .power_off_callback = pm_callback_power_off,
+ .power_suspend_callback = NULL,
+ .power_resume_callback = NULL
+};
+
+static struct kbase_platform_config versatile_platform_config = {
+#ifndef CONFIG_OF
+ .io_resources = &io_resources
+#endif
+};
+
+struct kbase_platform_config *kbase_get_platform_config(void)
+{
+ return &versatile_platform_config;
+}
+
+
+int kbase_platform_early_init(void)
+{
+ /* Nothing needed at this stage */
+ return 0;
+}
diff --git a/midgard-0.r2p0/platform/vexpress/mali_kbase_cpu_vexpress.c b/midgard-0.r2p0/platform/vexpress/mali_kbase_cpu_vexpress.c
new file mode 100755
index 0000000..4665f98
--- /dev/null
+++ b/midgard-0.r2p0/platform/vexpress/mali_kbase_cpu_vexpress.c
@@ -0,0 +1,279 @@
+/*
+ *
+ * (C) COPYRIGHT 2011-2016 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained
+ * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
+ * Boston, MA 02110-1301, USA.
+ *
+ */
+
+
+
+#include <linux/io.h>
+#include <mali_kbase.h>
+#include "mali_kbase_cpu_vexpress.h"
+
+#define HZ_IN_MHZ (1000000)
+
+#define CORETILE_EXPRESS_A9X4_SCC_START (0x100E2000)
+#define MOTHERBOARD_SYS_CFG_START (0x10000000)
+#define SYS_CFGDATA_OFFSET (0x000000A0)
+#define SYS_CFGCTRL_OFFSET (0x000000A4)
+#define SYS_CFGSTAT_OFFSET (0x000000A8)
+
+#define SYS_CFGCTRL_START_BIT_VALUE (1 << 31)
+#define READ_REG_BIT_VALUE (0 << 30)
+#define DCC_DEFAULT_BIT_VALUE (0 << 26)
+#define SYS_CFG_OSC_FUNC_BIT_VALUE (1 << 20)
+#define SITE_DEFAULT_BIT_VALUE (1 << 16)
+#define BOARD_STACK_POS_DEFAULT_BIT_VALUE (0 << 12)
+#define DEVICE_DEFAULT_BIT_VALUE (2 << 0)
+#define SYS_CFG_COMPLETE_BIT_VALUE (1 << 0)
+#define SYS_CFG_ERROR_BIT_VALUE (1 << 1)
+
+#define FEED_REG_BIT_MASK (0x0F)
+#define FCLK_PA_DIVIDE_BIT_SHIFT (0x03)
+#define FCLK_PB_DIVIDE_BIT_SHIFT (0x07)
+#define FCLK_PC_DIVIDE_BIT_SHIFT (0x0B)
+#define AXICLK_PA_DIVIDE_BIT_SHIFT (0x0F)
+#define AXICLK_PB_DIVIDE_BIT_SHIFT (0x13)
+
+/* The following three values are used for reading the
+ * HBI value of the LogicTile daughterboard */
+#define VE_MOTHERBOARD_PERIPHERALS_SMB_CS7 (0x10000000)
+#define VE_SYS_PROC_ID1_OFFSET (0x00000088)
+#define VE_LOGIC_TILE_HBI_MASK (0x00000FFF)
+
+#define IS_SINGLE_BIT_SET(val, pos) (val&(1<<pos))
+
+/**
+ * Values used for determining the GPU frequency based on the LogicTile type
+ * reported by the function kbase_get_platform_logic_tile_type
+ */
+#define VE_VIRTEX6_GPU_FREQ_MIN 5000
+#define VE_VIRTEX6_GPU_FREQ_MAX 5000
+#define VE_VIRTEX7_GPU_FREQ_MIN 40000
+#define VE_VIRTEX7_GPU_FREQ_MAX 40000
+#define VE_DEFAULT_GPU_FREQ_MIN 5000
+#define VE_DEFAULT_GPU_FREQ_MAX 5000
+
+
+#define CPU_CLOCK_SPEED_UNDEFINED (0)
+
+static u32 cpu_clock_speed = CPU_CLOCK_SPEED_UNDEFINED;
+
+static DEFINE_RAW_SPINLOCK(syscfg_lock);
+/**
+ * kbase_get_vexpress_cpu_clock_speed - Retrieves the CPU clock speed
+ * @cpu_clock: the value of the CPU clock speed in MHz
+ *
+ * Returns 0 on success, error code otherwise.
+ *
+ * The implementation is platform specific.
+ */
+int kbase_get_vexpress_cpu_clock_speed(u32 *cpu_clock)
+{
+ int err = 0;
+ u32 reg_val = 0;
+ u32 osc2_value = 0;
+ u32 pa_divide = 0;
+ u32 pb_divide = 0;
+ u32 pc_divide = 0;
+ void __iomem *syscfg_reg = NULL;
+ void __iomem *scc_reg = NULL;
+
+ if (CPU_CLOCK_SPEED_UNDEFINED != cpu_clock_speed) {
+ *cpu_clock = cpu_clock_speed;
+ return 0;
+ }
+
+ /* Init the value in case something goes wrong */
+ *cpu_clock = 0;
+
+ /* Map CPU register into virtual memory */
+ syscfg_reg = ioremap(MOTHERBOARD_SYS_CFG_START, 0x1000);
+ if (syscfg_reg == NULL) {
+ err = -EIO;
+ goto syscfg_reg_map_failed;
+ }
+
+ scc_reg = ioremap(CORETILE_EXPRESS_A9X4_SCC_START, 0x1000);
+ if (scc_reg == NULL) {
+ err = -EIO;
+ goto scc_reg_map_failed;
+ }
+
+ raw_spin_lock(&syscfg_lock);
+
+ /* Read SYS regs - OSC2 */
+ reg_val = readl(syscfg_reg + SYS_CFGCTRL_OFFSET);
+
+ /* Check if there is any other ongoing request */
+ if (reg_val & SYS_CFGCTRL_START_BIT_VALUE) {
+ err = -EBUSY;
+ goto ongoing_request;
+ }
+ /* Reset the SYS_CFGSTAT reg */
+ writel(0, (syscfg_reg + SYS_CFGSTAT_OFFSET));
+
+ writel(SYS_CFGCTRL_START_BIT_VALUE | READ_REG_BIT_VALUE |
+ DCC_DEFAULT_BIT_VALUE |
+ SYS_CFG_OSC_FUNC_BIT_VALUE |
+ SITE_DEFAULT_BIT_VALUE |
+ BOARD_STACK_POS_DEFAULT_BIT_VALUE |
+ DEVICE_DEFAULT_BIT_VALUE,
+ (syscfg_reg + SYS_CFGCTRL_OFFSET));
+ /* Wait for the transaction to complete */
+ while (!(readl(syscfg_reg + SYS_CFGSTAT_OFFSET) &
+ SYS_CFG_COMPLETE_BIT_VALUE))
+ ;
+ /* Read SYS_CFGSTAT Register to get the status of submitted
+ * transaction */
+ reg_val = readl(syscfg_reg + SYS_CFGSTAT_OFFSET);
+
+ if (reg_val & SYS_CFG_ERROR_BIT_VALUE) {
+ /* Error while setting register */
+ err = -EIO;
+ goto set_reg_error;
+ }
+
+ osc2_value = readl(syscfg_reg + SYS_CFGDATA_OFFSET);
+ /* Read the SCC CFGRW0 register */
+ reg_val = readl(scc_reg);
+
+ /*
+ * Select the appropriate feed:
+ * CFGRW0[0] - CLKOB
+ * CFGRW0[1] - CLKOC
+ * CFGRW0[2] - FACLK (CLKB FROM AXICLK PLL)
+ */
+ /* Calculate the FCLK */
+ if (IS_SINGLE_BIT_SET(reg_val, 0)) {
+ /* CFGRW0[0] - CLKOB */
+ /* CFGRW0[6:3] */
+ pa_divide = ((reg_val & (FEED_REG_BIT_MASK <<
+ FCLK_PA_DIVIDE_BIT_SHIFT)) >>
+ FCLK_PA_DIVIDE_BIT_SHIFT);
+ /* CFGRW0[10:7] */
+ pb_divide = ((reg_val & (FEED_REG_BIT_MASK <<
+ FCLK_PB_DIVIDE_BIT_SHIFT)) >>
+ FCLK_PB_DIVIDE_BIT_SHIFT);
+ *cpu_clock = osc2_value * (pa_divide + 1) / (pb_divide + 1);
+ } else if (IS_SINGLE_BIT_SET(reg_val, 1)) {
+ /* CFGRW0[1] - CLKOC */
+ /* CFGRW0[6:3] */
+ pa_divide = ((reg_val & (FEED_REG_BIT_MASK <<
+ FCLK_PA_DIVIDE_BIT_SHIFT)) >>
+ FCLK_PA_DIVIDE_BIT_SHIFT);
+ /* CFGRW0[14:11] */
+ pc_divide = ((reg_val & (FEED_REG_BIT_MASK <<
+ FCLK_PC_DIVIDE_BIT_SHIFT)) >>
+ FCLK_PC_DIVIDE_BIT_SHIFT);
+ *cpu_clock = osc2_value * (pa_divide + 1) / (pc_divide + 1);
+ } else if (IS_SINGLE_BIT_SET(reg_val, 2)) {
+ /* CFGRW0[2] - FACLK */
+ /* CFGRW0[18:15] */
+ pa_divide = ((reg_val & (FEED_REG_BIT_MASK <<
+ AXICLK_PA_DIVIDE_BIT_SHIFT)) >>
+ AXICLK_PA_DIVIDE_BIT_SHIFT);
+ /* CFGRW0[22:19] */
+ pb_divide = ((reg_val & (FEED_REG_BIT_MASK <<
+ AXICLK_PB_DIVIDE_BIT_SHIFT)) >>
+ AXICLK_PB_DIVIDE_BIT_SHIFT);
+ *cpu_clock = osc2_value * (pa_divide + 1) / (pb_divide + 1);
+ } else {
+ err = -EIO;
+ }
+
+set_reg_error:
+ongoing_request:
+ raw_spin_unlock(&syscfg_lock);
+ *cpu_clock /= HZ_IN_MHZ;
+
+ if (!err)
+ cpu_clock_speed = *cpu_clock;
+
+ iounmap(scc_reg);
+
+scc_reg_map_failed:
+ iounmap(syscfg_reg);
+
+syscfg_reg_map_failed:
+
+ return err;
+}
+
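As a rough worked example of the divider arithmetic above (the numbers are purely illustrative, not values read from any particular motherboard): if SYS_CFGDATA reports an OSC2 value of 50000000 Hz and CFGRW0 selects the CLKOB feed with pa_divide = 11 and pb_divide = 0, the function computes 50000000 * (11 + 1) / (0 + 1) = 600000000, and the final division by HZ_IN_MHZ reports a CPU clock of 600 MHz.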
+/**
+ * kbase_get_platform_logic_tile_type - determines which LogicTile type
+ * is used by Versatile Express
+ *
+ * When the platform_config build parameter is specified as vexpress, i.e.,
+ * platform_config=vexpress, the GPU frequency may vary depending on the
+ * particular platform. The GPU frequency depends on the LogicTile type.
+ *
+ * This function determines which LogicTile type is used by the platform by
+ * reading the HBI value of the daughterboard which holds the LogicTile:
+ *
+ * 0x217 HBI0217 Virtex-6
+ * 0x192 HBI0192 Virtex-5
+ * 0x247 HBI0247 Virtex-7
+ *
+ * Return: HBI value of the logic tile daughterboard, zero if not accessible
+ */
+static u32 kbase_get_platform_logic_tile_type(void)
+{
+ void __iomem *syscfg_reg = NULL;
+ u32 sys_procid1 = 0;
+
+ syscfg_reg = ioremap(VE_MOTHERBOARD_PERIPHERALS_SMB_CS7 + VE_SYS_PROC_ID1_OFFSET, 4);
+ if (syscfg_reg != NULL) {
+ sys_procid1 = readl(syscfg_reg);
+ iounmap(syscfg_reg);
+ }
+
+ return sys_procid1 & VE_LOGIC_TILE_HBI_MASK;
+}
+
+u32 kbase_get_platform_min_freq(void)
+{
+ u32 ve_logic_tile = kbase_get_platform_logic_tile_type();
+
+ switch (ve_logic_tile) {
+ case 0x217:
+ /* Virtex 6, HBI0217 */
+ return VE_VIRTEX6_GPU_FREQ_MIN;
+ case 0x247:
+ /* Virtex 7, HBI0247 */
+ return VE_VIRTEX7_GPU_FREQ_MIN;
+ default:
+ /* all other logic tiles, e.g. Virtex 5 HBI0192,
+ * or an unsuccessful read from the platform -
+ * fall back to a default value */
+ return VE_DEFAULT_GPU_FREQ_MIN;
+ }
+}
+
+u32 kbase_get_platform_max_freq(void)
+{
+ u32 ve_logic_tile = kbase_get_platform_logic_tile_type();
+
+ switch (ve_logic_tile) {
+ case 0x217:
+ /* Virtex 6, HBI0217 */
+ return VE_VIRTEX6_GPU_FREQ_MAX;
+ case 0x247:
+ /* Virtex 7, HBI0247 */
+ return VE_VIRTEX7_GPU_FREQ_MAX;
+ default:
+ /* all other logic tiles, e.g. Virtex 5 HBI0192,
+ * or an unsuccessful read from the platform -
+ * fall back to a default value */
+ return VE_DEFAULT_GPU_FREQ_MAX;
+ }
+}
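As a side note on the computation in kbase_get_vexpress_cpu_clock_speed() above: each pair of divider fields read from CFGRW0 feeds the formula osc2_value * (pa_divide + 1) / (pb_divide + 1), and the result is finally scaled by HZ_IN_MHZ. The snippet below walks through that arithmetic with purely hypothetical register values; it is an illustration only, not driver code.

/* Hypothetical worked example of the FCLK divider formula (not driver code) */
u32 osc2_value = 50000000;	/* assumed OSC2 reading, in Hz */
u32 pa_divide = 4;		/* assumed CFGRW0[6:3] divider field */
u32 pb_divide = 1;		/* assumed CFGRW0[10:7] divider field */
u32 cpu_hz = osc2_value * (pa_divide + 1) / (pb_divide + 1);	/* 125000000 Hz */
u32 cpu_mhz = cpu_hz / HZ_IN_MHZ;				/* 125 MHz, as reported via *cpu_clock */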
diff --git a/midgard-0.r2p0/platform/vexpress/mali_kbase_cpu_vexpress.h b/midgard-0.r2p0/platform/vexpress/mali_kbase_cpu_vexpress.h
new file mode 100755
index 0000000..da86569
--- /dev/null
+++ b/midgard-0.r2p0/platform/vexpress/mali_kbase_cpu_vexpress.h
@@ -0,0 +1,38 @@
+/*
+ *
+ * (C) COPYRIGHT 2012-2013, 2015-2016 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained
+ * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
+ * Boston, MA 02110-1301, USA.
+ *
+ */
+
+
+
+
+
+#ifndef _KBASE_CPU_VEXPRESS_H_
+#define _KBASE_CPU_VEXPRESS_H_
+
+/**
+ * Versatile Express implementation of @ref kbase_cpu_clk_speed_func.
+ */
+int kbase_get_vexpress_cpu_clock_speed(u32 *cpu_clock);
+
+/**
+ * Get the minimum GPU frequency for the attached logic tile
+ */
+u32 kbase_get_platform_min_freq(void);
+
+/**
+ * Get the maximum GPU frequency for the attached logic tile
+ */
+u32 kbase_get_platform_max_freq(void);
+
+#endif /* _KBASE_CPU_VEXPRESS_H_ */
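The two frequency helpers declared above report per-logic-tile bounds. Purely as an illustration of how a caller could consume them, the sketch below clamps a requested frequency to that range; the clamp_gpu_freq() wrapper is hypothetical and not part of this driver.

/* Illustrative only: clamp a requested frequency to the logic tile's range */
static u32 clamp_gpu_freq(u32 requested)
{
	u32 min_freq = kbase_get_platform_min_freq();
	u32 max_freq = kbase_get_platform_max_freq();

	if (requested < min_freq)
		return min_freq;
	if (requested > max_freq)
		return max_freq;
	return requested;
}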
diff --git a/midgard-0.r2p0/platform/vexpress_1xv7_a57/Kbuild b/midgard-0.r2p0/platform/vexpress_1xv7_a57/Kbuild
new file mode 100755
index 0000000..d9bfabc
--- /dev/null
+++ b/midgard-0.r2p0/platform/vexpress_1xv7_a57/Kbuild
@@ -0,0 +1,16 @@
+#
+# (C) COPYRIGHT 2013 ARM Limited. All rights reserved.
+#
+# This program is free software and is provided to you under the terms of the
+# GNU General Public License version 2 as published by the Free Software
+# Foundation, and any use by you of this program is subject to the terms
+# of such GNU licence.
+#
+# A copy of the licence is included with the program, and can also be obtained
+# from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
+# Boston, MA 02110-1301, USA.
+#
+#
+
+
+obj-y += mali_kbase_config_vexpress.o
diff --git a/midgard-0.r2p0/platform/vexpress_1xv7_a57/mali_kbase_config_platform.h b/midgard-0.r2p0/platform/vexpress_1xv7_a57/mali_kbase_config_platform.h
new file mode 100755
index 0000000..b0490ca
--- /dev/null
+++ b/midgard-0.r2p0/platform/vexpress_1xv7_a57/mali_kbase_config_platform.h
@@ -0,0 +1,80 @@
+/*
+ *
+ * (C) COPYRIGHT 2014-2016 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained
+ * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
+ * Boston, MA 02110-1301, USA.
+ *
+ */
+
+
+
+/**
+ * Maximum frequency the GPU will be clocked at. Given in kHz.
+ * This must be specified as there is no default value.
+ *
+ * Attached value: number in kHz
+ * Default value: NA
+ */
+#define GPU_FREQ_KHZ_MAX 5000
+/**
+ * Minimum frequency the GPU will be clocked at. Given in kHz.
+ * This must be specified as there is no default value.
+ *
+ * Attached value: number in kHz
+ * Default value: NA
+ */
+#define GPU_FREQ_KHZ_MIN 5000
+
+/**
+ * CPU_SPEED_FUNC - A pointer to a function that calculates the CPU clock
+ *
+ * CPU clock speed of the platform is in MHz - see kbase_cpu_clk_speed_func
+ * for the function prototype.
+ *
+ * Attached value: A kbase_cpu_clk_speed_func.
+ * Default Value: NA
+ */
+#define CPU_SPEED_FUNC (&kbase_cpuprops_get_default_clock_speed)
+
+/**
+ * GPU_SPEED_FUNC - A pointer to a function that calculates the GPU clock
+ *
+ * GPU clock speed of the platform in MHz - see kbase_gpu_clk_speed_func
+ * for the function prototype.
+ *
+ * Attached value: A kbase_gpu_clk_speed_func.
+ * Default Value: NA
+ */
+#define GPU_SPEED_FUNC (NULL)
+
+/**
+ * Power management configuration
+ *
+ * Attached value: pointer to @ref kbase_pm_callback_conf
+ * Default value: See @ref kbase_pm_callback_conf
+ */
+#define POWER_MANAGEMENT_CALLBACKS (&pm_callbacks)
+
+/**
+ * Platform specific configuration functions
+ *
+ * Attached value: pointer to @ref kbase_platform_funcs_conf
+ * Default value: See @ref kbase_platform_funcs_conf
+ */
+#define PLATFORM_FUNCS (NULL)
+
+/**
+ * Protected mode switch
+ *
+ * Attached value: pointer to @ref kbase_protected_ops
+ */
+#define PROTECTED_CALLBACKS (NULL)
+
+extern struct kbase_pm_callback_conf pm_callbacks;
diff --git a/midgard-0.r2p0/platform/vexpress_1xv7_a57/mali_kbase_config_vexpress.c b/midgard-0.r2p0/platform/vexpress_1xv7_a57/mali_kbase_config_vexpress.c
new file mode 100755
index 0000000..3ff0930
--- /dev/null
+++ b/midgard-0.r2p0/platform/vexpress_1xv7_a57/mali_kbase_config_vexpress.c
@@ -0,0 +1,79 @@
+/*
+ *
+ * (C) COPYRIGHT 2011-2014 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained
+ * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
+ * Boston, MA 02110-1301, USA.
+ *
+ */
+
+
+
+#include <linux/ioport.h>
+#include <mali_kbase.h>
+#include <mali_kbase_defs.h>
+#include <mali_kbase_config.h>
+
+#define HARD_RESET_AT_POWER_OFF 0
+
+#ifndef CONFIG_OF
+static struct kbase_io_resources io_resources = {
+ .job_irq_number = 68,
+ .mmu_irq_number = 69,
+ .gpu_irq_number = 70,
+ .io_memory_region = {
+ .start = 0x2f010000,
+ .end = 0x2f010000 + (4096 * 4) - 1}
+};
+#endif
+
+static int pm_callback_power_on(struct kbase_device *kbdev)
+{
+ /* Nothing is needed on VExpress, but we may have destroyed GPU state (if the below HARD_RESET code is active) */
+ return 1;
+}
+
+static void pm_callback_power_off(struct kbase_device *kbdev)
+{
+#if HARD_RESET_AT_POWER_OFF
+ /* Cause a GPU hard reset to test whether we have actually idled the GPU
+ * and whether we properly reconfigure the GPU on power up.
+ * Usually this would be dangerous, but if the GPU is working correctly it should
+ * be completely safe as the GPU should not be active at this point.
+ * However, this is normally disabled because it will most likely interfere with
+ * bus logging etc.
+ */
+ KBASE_TRACE_ADD(kbdev, CORE_GPU_HARD_RESET, NULL, NULL, 0u, 0);
+ kbase_os_reg_write(kbdev, GPU_CONTROL_REG(GPU_COMMAND), GPU_COMMAND_HARD_RESET);
+#endif
+}
+
+struct kbase_pm_callback_conf pm_callbacks = {
+ .power_on_callback = pm_callback_power_on,
+ .power_off_callback = pm_callback_power_off,
+ .power_suspend_callback = NULL,
+ .power_resume_callback = NULL
+};
+
+static struct kbase_platform_config versatile_platform_config = {
+#ifndef CONFIG_OF
+ .io_resources = &io_resources
+#endif
+};
+
+struct kbase_platform_config *kbase_get_platform_config(void)
+{
+ return &versatile_platform_config;
+}
+
+int kbase_platform_early_init(void)
+{
+ /* Nothing needed at this stage */
+ return 0;
+}
diff --git a/midgard-0.r2p0/platform/vexpress_6xvirtex7_10mhz/Kbuild b/midgard-0.r2p0/platform/vexpress_6xvirtex7_10mhz/Kbuild
new file mode 100755
index 0000000..0cb41ce
--- /dev/null
+++ b/midgard-0.r2p0/platform/vexpress_6xvirtex7_10mhz/Kbuild
@@ -0,0 +1,18 @@
+#
+# (C) COPYRIGHT 2012 ARM Limited. All rights reserved.
+#
+# This program is free software and is provided to you under the terms of the
+# GNU General Public License version 2 as published by the Free Software
+# Foundation, and any use by you of this program is subject to the terms
+# of such GNU licence.
+#
+# A copy of the licence is included with the program, and can also be obtained
+# from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
+# Boston, MA 02110-1301, USA.
+#
+#
+
+
+
+obj-y += mali_kbase_config_vexpress.o
+obj-y += mali_kbase_cpu_vexpress.o
diff --git a/midgard-0.r2p0/platform/vexpress_6xvirtex7_10mhz/mali_kbase_config_platform.h b/midgard-0.r2p0/platform/vexpress_6xvirtex7_10mhz/mali_kbase_config_platform.h
new file mode 100755
index 0000000..22ffccb
--- /dev/null
+++ b/midgard-0.r2p0/platform/vexpress_6xvirtex7_10mhz/mali_kbase_config_platform.h
@@ -0,0 +1,82 @@
+/*
+ *
+ * (C) COPYRIGHT 2014-2016 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained
+ * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
+ * Boston, MA 02110-1301, USA.
+ *
+ */
+
+
+
+#include "mali_kbase_cpu_vexpress.h"
+
+/**
+ * Maximum frequency the GPU will be clocked at. Given in kHz.
+ * This must be specified as there is no default value.
+ *
+ * Attached value: number in kHz
+ * Default value: NA
+ */
+#define GPU_FREQ_KHZ_MAX 10000
+/**
+ * Minimum frequency the GPU will be clocked at. Given in kHz.
+ * This must be specified as there is no default value.
+ *
+ * Attached value: number in kHz
+ * Default value: NA
+ */
+#define GPU_FREQ_KHZ_MIN 10000
+
+/**
+ * CPU_SPEED_FUNC - A pointer to a function that calculates the CPU clock
+ *
+ * CPU clock speed of the platform is in MHz - see kbase_cpu_clk_speed_func
+ * for the function prototype.
+ *
+ * Attached value: A kbase_cpu_clk_speed_func.
+ * Default Value: NA
+ */
+#define CPU_SPEED_FUNC (&kbase_get_vexpress_cpu_clock_speed)
+
+/**
+ * GPU_SPEED_FUNC - A pointer to a function that calculates the GPU clock
+ *
+ * GPU clock speed of the platform in MHz - see kbase_gpu_clk_speed_func
+ * for the function prototype.
+ *
+ * Attached value: A kbase_gpu_clk_speed_func.
+ * Default Value: NA
+ */
+#define GPU_SPEED_FUNC (NULL)
+
+/**
+ * Power management configuration
+ *
+ * Attached value: pointer to @ref kbase_pm_callback_conf
+ * Default value: See @ref kbase_pm_callback_conf
+ */
+#define POWER_MANAGEMENT_CALLBACKS (&pm_callbacks)
+
+/**
+ * Platform specific configuration functions
+ *
+ * Attached value: pointer to @ref kbase_platform_funcs_conf
+ * Default value: See @ref kbase_platform_funcs_conf
+ */
+#define PLATFORM_FUNCS (NULL)
+
+/**
+ * Protected mode switch
+ *
+ * Attached value: pointer to @ref kbase_protected_ops
+ */
+#define PROTECTED_CALLBACKS (NULL)
+
+extern struct kbase_pm_callback_conf pm_callbacks;
diff --git a/midgard-0.r2p0/platform/vexpress_6xvirtex7_10mhz/mali_kbase_config_vexpress.c b/midgard-0.r2p0/platform/vexpress_6xvirtex7_10mhz/mali_kbase_config_vexpress.c
new file mode 100755
index 0000000..76ffe4a
--- /dev/null
+++ b/midgard-0.r2p0/platform/vexpress_6xvirtex7_10mhz/mali_kbase_config_vexpress.c
@@ -0,0 +1,83 @@
+/*
+ *
+ * (C) COPYRIGHT 2011-2014 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained
+ * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
+ * Boston, MA 02110-1301, USA.
+ *
+ */
+
+
+
+
+
+#include <linux/ioport.h>
+#include <mali_kbase.h>
+#include <mali_kbase_defs.h>
+#include <mali_kbase_config.h>
+#include "mali_kbase_cpu_vexpress.h"
+
+#define HARD_RESET_AT_POWER_OFF 0
+
+#ifndef CONFIG_OF
+static struct kbase_io_resources io_resources = {
+ .job_irq_number = 75,
+ .mmu_irq_number = 76,
+ .gpu_irq_number = 77,
+ .io_memory_region = {
+ .start = 0x2F000000,
+ .end = 0x2F000000 + (4096 * 4) - 1}
+};
+#endif
+
+static int pm_callback_power_on(struct kbase_device *kbdev)
+{
+ /* Nothing is needed on VExpress, but we may have destroyed GPU state (if the below HARD_RESET code is active) */
+ return 1;
+}
+
+static void pm_callback_power_off(struct kbase_device *kbdev)
+{
+#if HARD_RESET_AT_POWER_OFF
+ /* Cause a GPU hard reset to test whether we have actually idled the GPU
+ * and whether we properly reconfigure the GPU on power up.
+ * Usually this would be dangerous, but if the GPU is working correctly it should
+ * be completely safe as the GPU should not be active at this point.
+ * However, this is normally disabled because it will most likely interfere with
+ * bus logging etc.
+ */
+ KBASE_TRACE_ADD(kbdev, CORE_GPU_HARD_RESET, NULL, NULL, 0u, 0);
+ kbase_os_reg_write(kbdev, GPU_CONTROL_REG(GPU_COMMAND), GPU_COMMAND_HARD_RESET);
+#endif
+}
+
+struct kbase_pm_callback_conf pm_callbacks = {
+ .power_on_callback = pm_callback_power_on,
+ .power_off_callback = pm_callback_power_off,
+ .power_suspend_callback = NULL,
+ .power_resume_callback = NULL
+};
+
+static struct kbase_platform_config versatile_platform_config = {
+#ifndef CONFIG_OF
+ .io_resources = &io_resources
+#endif
+};
+
+struct kbase_platform_config *kbase_get_platform_config(void)
+{
+ return &versatile_platform_config;
+}
+
+int kbase_platform_early_init(void)
+{
+ /* Nothing needed at this stage */
+ return 0;
+}
+
diff --git a/midgard-0.r2p0/platform/vexpress_6xvirtex7_10mhz/mali_kbase_cpu_vexpress.c b/midgard-0.r2p0/platform/vexpress_6xvirtex7_10mhz/mali_kbase_cpu_vexpress.c
new file mode 100755
index 0000000..816dff4
--- /dev/null
+++ b/midgard-0.r2p0/platform/vexpress_6xvirtex7_10mhz/mali_kbase_cpu_vexpress.c
@@ -0,0 +1,71 @@
+/*
+ *
+ * (C) COPYRIGHT 2011-2013 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained
+ * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
+ * Boston, MA 02110-1301, USA.
+ *
+ */
+
+
+
+
+
+#include <linux/io.h>
+#include <mali_kbase.h>
+#include "mali_kbase_cpu_vexpress.h"
+
+#define HZ_IN_MHZ (1000000)
+
+#define CORETILE_EXPRESS_A9X4_SCC_START (0x100E2000)
+#define MOTHERBOARD_SYS_CFG_START (0x10000000)
+#define SYS_CFGDATA_OFFSET (0x000000A0)
+#define SYS_CFGCTRL_OFFSET (0x000000A4)
+#define SYS_CFGSTAT_OFFSET (0x000000A8)
+
+#define SYS_CFGCTRL_START_BIT_VALUE (1 << 31)
+#define READ_REG_BIT_VALUE (0 << 30)
+#define DCC_DEFAULT_BIT_VALUE (0 << 26)
+#define SYS_CFG_OSC_FUNC_BIT_VALUE (1 << 20)
+#define SITE_DEFAULT_BIT_VALUE (1 << 16)
+#define BOARD_STACK_POS_DEFAULT_BIT_VALUE (0 << 12)
+#define DEVICE_DEFAULT_BIT_VALUE (2 << 0)
+#define SYS_CFG_COMPLETE_BIT_VALUE (1 << 0)
+#define SYS_CFG_ERROR_BIT_VALUE (1 << 1)
+
+#define FEED_REG_BIT_MASK (0x0F)
+#define FCLK_PA_DIVIDE_BIT_SHIFT (0x03)
+#define FCLK_PB_DIVIDE_BIT_SHIFT (0x07)
+#define FCLK_PC_DIVIDE_BIT_SHIFT (0x0B)
+#define AXICLK_PA_DIVIDE_BIT_SHIFT (0x0F)
+#define AXICLK_PB_DIVIDE_BIT_SHIFT (0x13)
+
+#define IS_SINGLE_BIT_SET(val, pos) ((val) & (1 << (pos)))
+
+#define CPU_CLOCK_SPEED_UNDEFINED 0
+
+#define CPU_CLOCK_SPEED_6XV7 50
+
+static u32 cpu_clock_speed = CPU_CLOCK_SPEED_UNDEFINED;
+
+static DEFINE_RAW_SPINLOCK(syscfg_lock);
+/**
+ * kbase_get_vexpress_cpu_clock_speed
+ * @brief Retrieves the CPU clock speed.
+ * The implementation is platform specific.
+ * @param[out] cpu_clock - the value of the CPU clock speed in MHz
+ * @return 0 on success, a non-zero error code otherwise
+ */
+int kbase_get_vexpress_cpu_clock_speed(u32 *cpu_clock)
+{
+ /* TODO: MIDBASE-2873 - Provide runtime detection of CPU clock freq for 6XV7 board */
+ *cpu_clock = CPU_CLOCK_SPEED_6XV7;
+
+ return 0;
+}
diff --git a/midgard-0.r2p0/platform/vexpress_6xvirtex7_10mhz/mali_kbase_cpu_vexpress.h b/midgard-0.r2p0/platform/vexpress_6xvirtex7_10mhz/mali_kbase_cpu_vexpress.h
new file mode 100755
index 0000000..23647cc
--- /dev/null
+++ b/midgard-0.r2p0/platform/vexpress_6xvirtex7_10mhz/mali_kbase_cpu_vexpress.h
@@ -0,0 +1,28 @@
+/*
+ *
+ * (C) COPYRIGHT 2012-2013, 2015 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained
+ * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
+ * Boston, MA 02110-1301, USA.
+ *
+ */
+
+
+
+
+
+#ifndef _KBASE_CPU_VEXPRESS_H_
+#define _KBASE_CPU_VEXPRESS_H_
+
+/**
+ * Versatile Express implementation of @ref kbase_cpu_clk_speed_func.
+ */
+int kbase_get_vexpress_cpu_clock_speed(u32 *cpu_clock);
+
+#endif /* _KBASE_CPU_VEXPRESS_H_ */
diff --git a/midgard-0.r2p0/platform_dummy/mali_ukk_os.h b/midgard-0.r2p0/platform_dummy/mali_ukk_os.h
new file mode 100755
index 0000000..5fa9b39
--- /dev/null
+++ b/midgard-0.r2p0/platform_dummy/mali_ukk_os.h
@@ -0,0 +1,53 @@
+/*
+ *
+ * (C) COPYRIGHT 2010, 2012-2014 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained
+ * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
+ * Boston, MA 02110-1301, USA.
+ *
+ */
+
+
+
+
+
+/**
+ * @file mali_ukk_os.h
+ * Types and definitions common to Linux OSs, for the kernel side of the
+ * User-Kernel interface.
+ */
+
+#ifndef _UKK_OS_H_ /* Linux version */
+#define _UKK_OS_H_
+
+#include <linux/fs.h>
+
+/**
+ * @addtogroup uk_api User-Kernel Interface API
+ * @{
+ */
+
+/**
+ * @addtogroup uk_api_kernel UKK (Kernel side)
+ * @{
+ */
+
+/**
+ * Internal OS specific data structure associated with each UKK session. Part
+ * of a ukk_session object.
+ */
+typedef struct ukkp_session {
+ int dummy; /**< No internal OS specific data at this time */
+} ukkp_session;
+
+/** @} end group uk_api_kernel */
+
+/** @} end group uk_api */
+
+#endif /* _UKK_OS_H_ */
diff --git a/midgard-0.r2p0/sconscript b/midgard-0.r2p0/sconscript
new file mode 100755
index 0000000..7b7ec77
--- /dev/null
+++ b/midgard-0.r2p0/sconscript
@@ -0,0 +1,85 @@
+#
+# (C) COPYRIGHT 2010-2016 ARM Limited. All rights reserved.
+#
+# This program is free software and is provided to you under the terms of the
+# GNU General Public License version 2 as published by the Free Software
+# Foundation, and any use by you of this program is subject to the terms
+# of such GNU licence.
+#
+# A copy of the licence is included with the program, and can also be obtained
+# from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
+# Boston, MA 02110-1301, USA.
+#
+#
+
+
+import sys
+Import('env')
+
+if Glob('tests/sconscript'):
+ SConscript( 'tests/sconscript' )
+
+mock_test = 0
+
+# The fake platform is a transient solution for GPL drivers running in a kernel that does not provide configuration via platform data.
+# For such kernels, fake_platform_device should be set to 1; for kernels providing platform data, it should be set to 0.
+if env['platform_config']=='devicetree':
+ fake_platform_device = 0
+else:
+ fake_platform_device = 1
+
+# Source files required for kbase.
+kbase_src = [
+ Glob('*.c'),
+ Glob('backend/*/*.c'),
+ Glob('internal/*/*.c'),
+ Glob('platform/%s/*.c' % env['platform_config']),
+]
+
+if Glob('#kernel/drivers/gpu/arm/midgard/tests/internal/src/mock') and env['unit'] == '1':
+ kbase_src += [Glob('#kernel/drivers/gpu/arm/midgard/tests/internal/src/mock/*.c')]
+ mock_test = 1
+
+# We need a platform config for the GPL version using the fake platform
+if fake_platform_device==1:
+ # Check if we are compiling for PBX
+ if env.KernelConfigEnabled("CONFIG_MACH_REALVIEW_PBX") and \
+ env["platform_config"] in {"vexpress", "vexpress_6xvirtex7_10mhz"}:
+ sys.stderr.write("WARNING: Building for a PBX kernel but with platform_config=vexpress*\n")
+ # If the platform config file is in the tpip directory then use that, otherwise use the default config directory
+ if Glob('#kernel/drivers/gpu/arm/midgard/config/tpip/*%s.c' % (env['platform_config'])):
+ kbase_src += Glob('#kernel/drivers/gpu/arm/midgard/config/tpip/*%s.c' % (env['platform_config']))
+ else:
+ kbase_src += Glob('#kernel/drivers/gpu/arm/midgard/config/*%s.c' % (env['platform_config']))
+
+make_args = env.kernel_get_config_defines(ret_list = True,
+ fake = fake_platform_device) + [
+ 'PLATFORM=%s' % env['platform'],
+ 'MALI_ERROR_INJECT_ON=%s' % env['error_inject'],
+ 'MALI_KERNEL_TEST_API=%s' % env['unit'],
+ 'MALI_UNIT_TEST=%s' % env['unit'],
+ 'MALI_RELEASE_NAME=%s' % env['mali_release_name'],
+ 'MALI_MOCK_TEST=%s' % mock_test,
+ 'MALI_CUSTOMER_RELEASE=%s' % env['release'],
+ 'MALI_INSTRUMENTATION_LEVEL=%s' % env['instr'],
+ 'MALI_COVERAGE=%s' % env['coverage'],
+ 'MALI_BUS_LOG=%s' % env['buslog'],
+]
+
+kbase = env.BuildKernelModule('$STATIC_LIB_PATH/mali_kbase.ko', kbase_src,
+ make_args = make_args)
+
+# Add a dependency on kds.ko.
+# Only necessary when KDS is not built into the kernel.
+#
+if env['os'] != 'android':
+ if not env.KernelConfigEnabled("CONFIG_KDS"):
+ env.Depends(kbase, '$STATIC_LIB_PATH/kds.ko')
+
+# Need Module.symvers from the ump.ko build
+if int(env['ump']) == 1:
+ env.Depends(kbase, '$STATIC_LIB_PATH/ump.ko')
+
+env.KernelObjTarget('kbase', kbase)
+
+env.AppendUnique(BASE=['cutils_linked_list'])