author	Show Liu <show.liu@linaro.org>	2014-06-20 09:45:28 +0800
committer	Show Liu <show.liu@linaro.org>	2014-06-20 09:45:28 +0800
commit	0bd72eeeb62ab9333989c0f90a99f3a7798776dc (patch)
tree	f72bbda9a313916300bfd1ac2d5db366a822662e /drivers/gpu/arm/t6xx/kbase/src
parent	804a655825369a50fc59bab624b55c7733d1529b (diff)
download	linaro-lsk-0bd72eeeb62ab9333989c0f90a99f3a7798776dc.tar.gz
Added Mali T6xx r3p0 GPU driver
Conflicts: drivers/video/Kconfig
Diffstat (limited to 'drivers/gpu/arm/t6xx/kbase/src')
-rwxr-xr-x  drivers/gpu/arm/t6xx/kbase/src/Kbuild  196
-rwxr-xr-x  drivers/gpu/arm/t6xx/kbase/src/Makefile  35
-rwxr-xr-x  drivers/gpu/arm/t6xx/kbase/src/Makefile.kbase  16
-rwxr-xr-x  drivers/gpu/arm/t6xx/kbase/src/common/mali_kbase.h  435
-rwxr-xr-x  drivers/gpu/arm/t6xx/kbase/src/common/mali_kbase_10969_workaround.c  182
-rwxr-xr-x  drivers/gpu/arm/t6xx/kbase/src/common/mali_kbase_10969_workaround.h  23
-rwxr-xr-x  drivers/gpu/arm/t6xx/kbase/src/common/mali_kbase_8401_workaround.c  422
-rwxr-xr-x  drivers/gpu/arm/t6xx/kbase/src/common/mali_kbase_8401_workaround.h  31
-rwxr-xr-x  drivers/gpu/arm/t6xx/kbase/src/common/mali_kbase_cache_policy.c  39
-rwxr-xr-x  drivers/gpu/arm/t6xx/kbase/src/common/mali_kbase_cache_policy.h  45
-rwxr-xr-x  drivers/gpu/arm/t6xx/kbase/src/common/mali_kbase_config.c  607
-rwxr-xr-x  drivers/gpu/arm/t6xx/kbase/src/common/mali_kbase_context.c  227
-rwxr-xr-x  drivers/gpu/arm/t6xx/kbase/src/common/mali_kbase_cpuprops.c  94
-rwxr-xr-x  drivers/gpu/arm/t6xx/kbase/src/common/mali_kbase_cpuprops.h  54
-rwxr-xr-x  drivers/gpu/arm/t6xx/kbase/src/common/mali_kbase_debug.c  63
-rwxr-xr-x  drivers/gpu/arm/t6xx/kbase/src/common/mali_kbase_defs.h  787
-rwxr-xr-x  drivers/gpu/arm/t6xx/kbase/src/common/mali_kbase_device.c  691
-rwxr-xr-x  drivers/gpu/arm/t6xx/kbase/src/common/mali_kbase_event.c  174
-rwxr-xr-x  drivers/gpu/arm/t6xx/kbase/src/common/mali_kbase_gator.h  31
-rwxr-xr-x  drivers/gpu/arm/t6xx/kbase/src/common/mali_kbase_gpuprops.c  314
-rwxr-xr-x  drivers/gpu/arm/t6xx/kbase/src/common/mali_kbase_gpuprops.h  52
-rwxr-xr-x  drivers/gpu/arm/t6xx/kbase/src/common/mali_kbase_gpuprops_types.h  101
-rwxr-xr-x  drivers/gpu/arm/t6xx/kbase/src/common/mali_kbase_hw.c  84
-rwxr-xr-x  drivers/gpu/arm/t6xx/kbase/src/common/mali_kbase_hw.h  39
-rwxr-xr-x  drivers/gpu/arm/t6xx/kbase/src/common/mali_kbase_instr.c  616
-rwxr-xr-x  drivers/gpu/arm/t6xx/kbase/src/common/mali_kbase_jd.c  1438
-rwxr-xr-x  drivers/gpu/arm/t6xx/kbase/src/common/mali_kbase_jm.c  1274
-rwxr-xr-x  drivers/gpu/arm/t6xx/kbase/src/common/mali_kbase_jm.h  202
-rwxr-xr-x  drivers/gpu/arm/t6xx/kbase/src/common/mali_kbase_js.c  2122
-rwxr-xr-x  drivers/gpu/arm/t6xx/kbase/src/common/mali_kbase_js.h  927
-rwxr-xr-x  drivers/gpu/arm/t6xx/kbase/src/common/mali_kbase_js_affinity.c  407
-rwxr-xr-x  drivers/gpu/arm/t6xx/kbase/src/common/mali_kbase_js_affinity.h  154
-rwxr-xr-x  drivers/gpu/arm/t6xx/kbase/src/common/mali_kbase_js_ctx_attr.c  307
-rwxr-xr-x  drivers/gpu/arm/t6xx/kbase/src/common/mali_kbase_js_ctx_attr.h  156
-rwxr-xr-x  drivers/gpu/arm/t6xx/kbase/src/common/mali_kbase_js_defs.h  474
-rwxr-xr-x  drivers/gpu/arm/t6xx/kbase/src/common/mali_kbase_js_policy.h  765
-rwxr-xr-x  drivers/gpu/arm/t6xx/kbase/src/common/mali_kbase_js_policy_cfs.c  1436
-rwxr-xr-x  drivers/gpu/arm/t6xx/kbase/src/common/mali_kbase_js_policy_cfs.h  165
-rwxr-xr-x  drivers/gpu/arm/t6xx/kbase/src/common/mali_kbase_mem.c  2034
-rwxr-xr-x  drivers/gpu/arm/t6xx/kbase/src/common/mali_kbase_mem.h  580
-rwxr-xr-x  drivers/gpu/arm/t6xx/kbase/src/common/mali_kbase_mem_alloc.c  234
-rwxr-xr-x  drivers/gpu/arm/t6xx/kbase/src/common/mali_kbase_mem_alloc.h  31
-rwxr-xr-x  drivers/gpu/arm/t6xx/kbase/src/common/mali_kbase_mem_lowlevel.c  53
-rwxr-xr-x  drivers/gpu/arm/t6xx/kbase/src/common/mali_kbase_mem_lowlevel.h  109
-rwxr-xr-x  drivers/gpu/arm/t6xx/kbase/src/common/mali_kbase_mmu.c  1572
-rwxr-xr-x  drivers/gpu/arm/t6xx/kbase/src/common/mali_kbase_pm.c  441
-rwxr-xr-x  drivers/gpu/arm/t6xx/kbase/src/common/mali_kbase_pm.h  830
-rwxr-xr-x  drivers/gpu/arm/t6xx/kbase/src/common/mali_kbase_pm_always_on.c  60
-rwxr-xr-x  drivers/gpu/arm/t6xx/kbase/src/common/mali_kbase_pm_always_on.h  66
-rwxr-xr-x  drivers/gpu/arm/t6xx/kbase/src/common/mali_kbase_pm_ca.c  173
-rwxr-xr-x  drivers/gpu/arm/t6xx/kbase/src/common/mali_kbase_pm_ca.h  170
-rwxr-xr-x  drivers/gpu/arm/t6xx/kbase/src/common/mali_kbase_pm_ca_fixed.c  62
-rwxr-xr-x  drivers/gpu/arm/t6xx/kbase/src/common/mali_kbase_pm_ca_fixed.h  37
-rwxr-xr-x  drivers/gpu/arm/t6xx/kbase/src/common/mali_kbase_pm_coarse_demand.c  66
-rwxr-xr-x  drivers/gpu/arm/t6xx/kbase/src/common/mali_kbase_pm_coarse_demand.h  58
-rwxr-xr-x  drivers/gpu/arm/t6xx/kbase/src/common/mali_kbase_pm_demand.c  68
-rwxr-xr-x  drivers/gpu/arm/t6xx/kbase/src/common/mali_kbase_pm_demand.h  55
-rwxr-xr-x  drivers/gpu/arm/t6xx/kbase/src/common/mali_kbase_pm_driver.c  911
-rwxr-xr-x  drivers/gpu/arm/t6xx/kbase/src/common/mali_kbase_pm_metrics.c  264
-rwxr-xr-x  drivers/gpu/arm/t6xx/kbase/src/common/mali_kbase_pm_metrics_dummy.c  37
-rwxr-xr-x  drivers/gpu/arm/t6xx/kbase/src/common/mali_kbase_pm_policy.c  792
-rwxr-xr-x  drivers/gpu/arm/t6xx/kbase/src/common/mali_kbase_pm_policy.h  270
-rwxr-xr-x  drivers/gpu/arm/t6xx/kbase/src/common/mali_kbase_security.c  76
-rwxr-xr-x  drivers/gpu/arm/t6xx/kbase/src/common/mali_kbase_security.h  50
-rwxr-xr-x  drivers/gpu/arm/t6xx/kbase/src/common/mali_kbase_softjobs.c  430
-rwxr-xr-x  drivers/gpu/arm/t6xx/kbase/src/common/mali_kbase_trace_defs.h  226
-rwxr-xr-x  drivers/gpu/arm/t6xx/kbase/src/common/mali_kbase_trace_timeline.c  227
-rwxr-xr-x  drivers/gpu/arm/t6xx/kbase/src/common/mali_kbase_trace_timeline.h  366
-rwxr-xr-x  drivers/gpu/arm/t6xx/kbase/src/common/mali_kbase_trace_timeline_defs.h  130
-rwxr-xr-x  drivers/gpu/arm/t6xx/kbase/src/common/mali_kbase_uku.h  339
-rwxr-xr-x  drivers/gpu/arm/t6xx/kbase/src/common/mali_kbase_utility.c  30
-rwxr-xr-x  drivers/gpu/arm/t6xx/kbase/src/common/mali_kbase_utility.h  35
-rwxr-xr-x  drivers/gpu/arm/t6xx/kbase/src/common/mali_midg_regmap.h  468
-rwxr-xr-x  drivers/gpu/arm/t6xx/kbase/src/common/mali_timeline.h  367
-rwxr-xr-x  drivers/gpu/arm/t6xx/kbase/src/linux/mali_kbase_config_linux.c  44
-rwxr-xr-x  drivers/gpu/arm/t6xx/kbase/src/linux/mali_kbase_config_linux.h  41
-rwxr-xr-x  drivers/gpu/arm/t6xx/kbase/src/linux/mali_kbase_core_linux.c  2921
-rwxr-xr-x  drivers/gpu/arm/t6xx/kbase/src/linux/mali_kbase_linux.h  79
-rwxr-xr-x  drivers/gpu/arm/t6xx/kbase/src/linux/mali_kbase_mem_linux.c  724
-rwxr-xr-x  drivers/gpu/arm/t6xx/kbase/src/linux/mali_kbase_mem_linux.h  60
-rwxr-xr-x  drivers/gpu/arm/t6xx/kbase/src/linux/mali_kbase_sync.c  190
-rwxr-xr-x  drivers/gpu/arm/t6xx/kbase/src/linux/mali_kbase_sync.h  81
-rwxr-xr-x  drivers/gpu/arm/t6xx/kbase/src/linux/mali_kbase_sync_user.c  153
-rwxr-xr-x  drivers/gpu/arm/t6xx/kbase/src/linux/mali_linux_trace.h  124
-rwxr-xr-x  drivers/gpu/arm/t6xx/kbase/src/mali_base_mem_priv.h  50
-rwxr-xr-x  drivers/gpu/arm/t6xx/kbase/src/mali_base_vendor_specific_func.h  24
-rwxr-xr-x  drivers/gpu/arm/t6xx/kbase/src/platform/Kbuild  20
-rwxr-xr-x  drivers/gpu/arm/t6xx/kbase/src/platform/Kconfig  23
-rwxr-xr-x  drivers/gpu/arm/t6xx/kbase/src/platform/vexpress/Kbuild  17
-rwxr-xr-x  drivers/gpu/arm/t6xx/kbase/src/platform/vexpress/mali_kbase_config_vexpress.c  384
-rwxr-xr-x  drivers/gpu/arm/t6xx/kbase/src/platform/vexpress/mali_kbase_cpu_vexpress.c  178
-rwxr-xr-x  drivers/gpu/arm/t6xx/kbase/src/platform/vexpress/mali_kbase_cpu_vexpress.h  26
92 files changed, 32041 insertions, 0 deletions
diff --git a/drivers/gpu/arm/t6xx/kbase/src/Kbuild b/drivers/gpu/arm/t6xx/kbase/src/Kbuild
new file mode 100755
index 00000000000..c2b81153c9b
--- /dev/null
+++ b/drivers/gpu/arm/t6xx/kbase/src/Kbuild
@@ -0,0 +1,196 @@
+#
+# (C) COPYRIGHT 2012 ARM Limited. All rights reserved.
+#
+# This program is free software and is provided to you under the terms of the
+# GNU General Public License version 2 as published by the Free Software
+# Foundation, and any use by you of this program is subject to the terms
+# of such GNU licence.
+#
+# A copy of the licence is included with the program, and can also be obtained
+# from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
+# Boston, MA 02110-1301, USA.
+#
+#
+
+
+# Driver version string which is returned to userspace via an ioctl
+MALI_RELEASE_NAME ?= "r3p0-02rel0"
+
+# Paths required for build
+KBASE_PATH = $(src)/../..
+KBASE_SRC_PATH = $(src)/..
+KBASE_PLATFORM_PATH = $(KBASE_SRC_PATH)/platform_dummy
+UMP_PATH = $(src)/../../../../../base
+
+ifeq ($(CONFIG_MALI_ERROR_INJECTION),y)
+MALI_ERROR_INJECT_ON = 1
+endif
+
+# Set up defaults if not defined by build system
+MALI_CUSTOMER_RELEASE ?= 1
+MALI_UNIT_TEST ?= 0
+MALI_KERNEL_TEST_API ?= 0
+MALI_ERROR_INJECT_ON ?= 0
+MALI_MOCK_TEST ?= 0
+MALI_COVERAGE ?= 0
+MALI_INSTRUMENTATION_LEVEL ?= 0
+# This workaround is for what seems to be a compiler bug we observed in
+# GCC 4.7 on AOSP 4.3. The bug caused an intermittent failure compiling
+# the "_Pragma" syntax, where an error message is returned:
+#
+# "internal compiler error: unspellable token PRAGMA"
+#
+# This regression has thus far only been seen on the GCC 4.7 compiler bundled
+# with AOSP 4.3.0. So this makefile, intended for in-tree kernel builds
+# which are not known to be used with AOSP, defaults to disabling the
+# workaround, i.e. the define is set to 0.
+MALI_GCC_WORKAROUND_MIDCOM_4598 ?= 0
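+# (Note that "?=" only sets a default: a build that does hit the compiler bug
+# can still enable the workaround by passing MALI_GCC_WORKAROUND_MIDCOM_4598=1
+# on the make command line or from the surrounding build system.)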
+
+# Set up our defines, which will be passed to gcc
+DEFINES = \
+ -DMALI_CUSTOMER_RELEASE=$(MALI_CUSTOMER_RELEASE) \
+ -DMALI_KERNEL_TEST_API=$(MALI_KERNEL_TEST_API) \
+ -DMALI_UNIT_TEST=$(MALI_UNIT_TEST) \
+ -DMALI_ERROR_INJECT_ON=$(MALI_ERROR_INJECT_ON) \
+ -DMALI_MOCK_TEST=$(MALI_MOCK_TEST) \
+ -DMALI_COVERAGE=$(MALI_COVERAGE) \
+ -DMALI_INSTRUMENTATION_LEVEL=$(MALI_INSTRUMENTATION_LEVEL) \
+ -DMALI_RELEASE_NAME=\"$(MALI_RELEASE_NAME)\" \
+ -DMALI_GCC_WORKAROUND_MIDCOM_4598=$(MALI_GCC_WORKAROUND_MIDCOM_4598)
+
+ifeq ($(KBUILD_EXTMOD),)
+# in-tree
+DEFINES +=-DMALI_KBASE_THIRDPARTY_PATH=../../$(src)/platform/$(CONFIG_MALI_PLATFORM_THIRDPARTY_NAME)
+else
+# out-of-tree
+DEFINES +=-DMALI_KBASE_THIRDPARTY_PATH=$(src)/platform/$(CONFIG_MALI_PLATFORM_THIRDPARTY_NAME)
+endif
+
+# Use our defines when compiling
+ccflags-y += $(DEFINES) -I$(KBASE_PATH) -I$(src)/linux -I$(KBASE_SRC_PATH) -I$(KBASE_PLATFORM_PATH) -I$(UMP_PATH)
+subdir-ccflags-y += $(DEFINES) -I$(KBASE_PATH) -I$(src)/linux -I$(KBASE_SRC_PATH) -I$(KBASE_PLATFORM_PATH) -I$(OSK_PATH) -I$(UMP_PATH)
+
+SRC := \
+ common/mali_kbase_device.c \
+ common/mali_kbase_cache_policy.c \
+ common/mali_kbase_mem.c \
+ common/mali_kbase_mmu.c \
+ common/mali_kbase_jd.c \
+ common/mali_kbase_jm.c \
+ common/mali_kbase_cpuprops.c \
+ common/mali_kbase_gpuprops.c \
+ common/mali_kbase_js.c \
+ common/mali_kbase_js_affinity.c \
+ common/mali_kbase_js_ctx_attr.c \
+ common/mali_kbase_pm.c \
+ common/mali_kbase_event.c \
+ common/mali_kbase_context.c \
+ common/mali_kbase_pm_driver.c \
+ common/mali_kbase_pm_metrics.c \
+ common/mali_kbase_pm_ca.c \
+ common/mali_kbase_pm_ca_fixed.c \
+ common/mali_kbase_pm_always_on.c \
+ common/mali_kbase_pm_coarse_demand.c \
+ common/mali_kbase_pm_demand.c \
+ common/mali_kbase_pm_policy.c \
+ common/mali_kbase_config.c \
+ common/mali_kbase_security.c \
+ common/mali_kbase_instr.c \
+ common/mali_kbase_softjobs.c \
+ common/mali_kbase_8401_workaround.c \
+ common/mali_kbase_10969_workaround.c \
+ common/mali_kbase_hw.c \
+ common/mali_kbase_utility.c \
+ common/mali_kbase_mem_lowlevel.c \
+ common/mali_kbase_debug.c \
+ common/mali_kbase_trace_timeline.c \
+ linux/mali_kbase_mem_linux.c \
+ linux/mali_kbase_core_linux.c \
+ linux/mali_kbase_config_linux.c \
+ linux/mali_kbase_sync.c \
+	linux/mali_kbase_sync_user.c
+
+ifeq ($(MALI_CUSTOMER_RELEASE),0)
+SRC += \
+ common/mali_kbase_pm_ca_random.c \
+ common/mali_kbase_pm_demand_always_powered.c \
+ common/mali_kbase_pm_fast_start.c
+endif
+
+# Job Scheduler Policy: Completely Fair Scheduler
+SRC += common/mali_kbase_js_policy_cfs.c
+
+ifeq ($(CONFIG_MACH_MANTA),y)
+ SRC += common/mali_kbase_mem_alloc_carveout.c
+else
+ SRC += common/mali_kbase_mem_alloc.c
+endif
+
+# ensure GPL version of malisw gets pulled in
+ccflags-y += -I$(KBASE_PATH)
+
+ifeq ($(CONFIG_MALI_NO_MALI),y)
+ # Dummy model
+ SRC += common/mali_kbase_model_dummy.c
+ SRC += linux/mali_kbase_model_linux.c
+ # HW error simulation
+ SRC += common/mali_kbase_model_error_generator.c
+endif
+
+ifeq ($(MALI_MOCK_TEST),1)
+ # Test functionality
+ SRC += ../tests/internal/src/mock/mali_kbase_pm_driver_mock.c
+endif
+
+# in-tree/out-of-tree logic needs to be slightly different to determine if a file is present
+ifeq ($(KBUILD_EXTMOD),)
+# in-tree
+MALI_METRICS_PATH = $(srctree)/drivers/gpu/arm/t6xx/kbase/src/linux
+else
+# out-of-tree
+MALI_METRICS_PATH = $(KBUILD_EXTMOD)/linux
+endif
+
+# Use vsync metrics example using PL111 driver, if available
+ifeq ($(wildcard $(MALI_METRICS_PATH)/mali_kbase_pm_metrics_linux.c),)
+ SRC += common/mali_kbase_pm_metrics_dummy.c
+else
+ SRC += linux/mali_kbase_pm_metrics_linux.c
+endif
+
+ifeq ($(CONFIG_MALI_PLATFORM_VEXPRESS),y)
+ SRC += platform/vexpress/mali_kbase_config_vexpress.c \
+ platform/vexpress/mali_kbase_cpu_vexpress.c
+endif
+
+ifeq ($(CONFIG_MALI_PLATFORM_GOLDFISH),y)
+ SRC += platform/goldfish/mali_kbase_config_goldfish.c
+endif
+
+ifeq ($(CONFIG_MALI_PLATFORM_PBX),y)
+ SRC += platform/pbx/mali_kbase_config_pbx.c
+endif
+
+ifeq ($(CONFIG_MALI_PLATFORM_PANDA),y)
+ SRC += platform/panda/mali_kbase_config_panda.c
+endif
+
+ifeq ($(CONFIG_MALI_PLATFORM_THIRDPARTY),y)
+ifeq ($(CONFIG_MALI_T6XX),m)
+# remove begin and end quotes from the Kconfig string type
+ platform_name := $(shell echo $(CONFIG_MALI_PLATFORM_THIRDPARTY_NAME))
+ MALI_PLATFORM_THIRDPARTY_DIR := platform/$(platform_name)
+ include $(src)/platform/$(platform_name)/Kbuild
+else ifeq ($(CONFIG_MALI_T6XX),y)
+ obj-$(CONFIG_MALI_T6XX) += platform/
+endif
+endif
+
+# Tell the Linux build system from which .o file to create the kernel module
+obj-$(CONFIG_MALI_T6XX) += mali_kbase.o
+
+# Tell the Linux build system to enable building of our .c files
+mali_kbase-y := $(SRC:.c=.o)
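+# ($(SRC:.c=.o) substitutes a .o for each .c, e.g. common/mali_kbase_device.c
+# becomes common/mali_kbase_device.o, and kbuild then links all of these
+# objects into the mali_kbase.o module object named above.)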
+
+
diff --git a/drivers/gpu/arm/t6xx/kbase/src/Makefile b/drivers/gpu/arm/t6xx/kbase/src/Makefile
new file mode 100755
index 00000000000..f214d82d168
--- /dev/null
+++ b/drivers/gpu/arm/t6xx/kbase/src/Makefile
@@ -0,0 +1,35 @@
+#
+# (C) COPYRIGHT 2010-2012 ARM Limited. All rights reserved.
+#
+# This program is free software and is provided to you under the terms of the
+# GNU General Public License version 2 as published by the Free Software
+# Foundation, and any use by you of this program is subject to the terms
+# of such GNU licence.
+#
+# A copy of the licence is included with the program, and can also be obtained
+# from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
+# Boston, MA 02110-1301, USA.
+#
+#
+
+
+KDIR ?= /lib/modules/$(shell uname -r)/build
+
+UMP_PATH_RELATIVE = $(CURDIR)/../../../../../base/ump
+KBASE_PATH_RELATIVE = $(CURDIR)/../../kbase
+KDS_PATH_RELATIVE = $(CURDIR)/../../../../../..
+EXTRA_SYMBOLS = $(UMP_PATH_RELATIVE)/src/Module.symvers
+
+ifeq ($(MALI_UNIT_TEST), 1)
+ EXTRA_SYMBOLS += $(KBASE_PATH_RELATIVE)/tests/internal/src/kernel_assert_module/linux/Module.symvers
+endif
+
+# GPL driver supports KDS
+EXTRA_SYMBOLS += $(KDS_PATH_RELATIVE)/drivers/base/kds/Module.symvers
+
+# we get the symbols from modules using KBUILD_EXTRA_SYMBOLS to prevent warnings about unknown functions
+all:
+ $(MAKE) -C $(KDIR) M=$(CURDIR) EXTRA_CFLAGS="-I$(CURDIR)/../../../../../../include $(SCONS_CFLAGS)" $(SCONS_CONFIGS) KBUILD_EXTRA_SYMBOLS="$(EXTRA_SYMBOLS)" modules
+
+clean:
+ $(MAKE) -C $(KDIR) M=$(CURDIR) clean
diff --git a/drivers/gpu/arm/t6xx/kbase/src/Makefile.kbase b/drivers/gpu/arm/t6xx/kbase/src/Makefile.kbase
new file mode 100755
index 00000000000..1a1ca17e1c7
--- /dev/null
+++ b/drivers/gpu/arm/t6xx/kbase/src/Makefile.kbase
@@ -0,0 +1,16 @@
+#
+# (C) COPYRIGHT 2010 ARM Limited. All rights reserved.
+#
+# This program is free software and is provided to you under the terms of the
+# GNU General Public License version 2 as published by the Free Software
+# Foundation, and any use by you of this program is subject to the terms
+# of such GNU licence.
+#
+# A copy of the licence is included with the program, and can also be obtained
+# from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
+# Boston, MA 02110-1301, USA.
+#
+#
+
+EXTRA_CFLAGS += -I$(ROOT) -I$(KBASE_PATH)/.. -I$(OSK_PATH)/src/linux/include -I$(KBASE_PATH)/platform_$(PLATFORM)
+
diff --git a/drivers/gpu/arm/t6xx/kbase/src/common/mali_kbase.h b/drivers/gpu/arm/t6xx/kbase/src/common/mali_kbase.h
new file mode 100755
index 00000000000..8044a93cda2
--- /dev/null
+++ b/drivers/gpu/arm/t6xx/kbase/src/common/mali_kbase.h
@@ -0,0 +1,435 @@
+/*
+ *
+ * (C) COPYRIGHT 2010-2013 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained
+ * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
+ * Boston, MA 02110-1301, USA.
+ *
+ */
+
+
+
+#ifndef _KBASE_H_
+#define _KBASE_H_
+
+#include <malisw/mali_malisw.h>
+
+#include <kbase/mali_kbase_debug.h>
+
+#include <asm/page.h>
+
+#include <linux/atomic.h>
+#include <linux/highmem.h>
+#include <linux/hrtimer.h>
+#include <linux/ktime.h>
+#include <linux/list.h>
+#include <linux/mm_types.h>
+#include <linux/mutex.h>
+#include <linux/rwsem.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/vmalloc.h>
+#include <linux/wait.h>
+#include <linux/workqueue.h>
+
+#include <kbase/mali_base_kernel.h>
+#include <kbase/src/common/mali_kbase_uku.h>
+#include <kbase/src/linux/mali_kbase_linux.h>
+
+#include "mali_kbase_pm.h"
+#include "mali_kbase_mem_lowlevel.h"
+#include "mali_kbase_defs.h"
+#include "mali_kbase_trace_timeline.h"
+#include "mali_kbase_js.h"
+#include "mali_kbase_mem.h"
+#include "mali_kbase_security.h"
+#include "mali_kbase_utility.h"
+
+#include "mali_kbase_cpuprops.h"
+#include "mali_kbase_gpuprops.h"
+#ifdef CONFIG_GPU_TRACEPOINTS
+#include <trace/events/gpu.h>
+#endif
+/**
+ * @page page_base_kernel_main Kernel-side Base (KBase) APIs
+ *
+ * The Kernel-side Base (KBase) APIs are divided up as follows:
+ * - @subpage page_kbase_js_policy
+ */
+
+/**
+ * @defgroup base_kbase_api Kernel-side Base (KBase) APIs
+ */
+
+kbase_device *kbase_device_alloc(void);
+/* note: the configuration attributes member of kbdev needs to have been set up before calling kbase_device_init */
+mali_error kbase_device_init(kbase_device * const kbdev);
+void kbase_device_term(kbase_device *kbdev);
+void kbase_device_free(kbase_device *kbdev);
+int kbase_device_has_feature(kbase_device *kbdev, u32 feature);
+kbase_midgard_type kbase_device_get_type(kbase_device *kbdev);
+struct kbase_device *kbase_find_device(int minor); /* Only needed for gator integration */
+
+/**
+ * Ensure that all IRQ handlers have completed execution
+ *
+ * @param kbdev The kbase device
+ */
+void kbase_synchronize_irqs(kbase_device *kbdev);
+
+kbase_context *kbase_create_context(kbase_device *kbdev);
+void kbase_destroy_context(kbase_context *kctx);
+mali_error kbase_context_set_create_flags(kbase_context *kctx, u32 flags);
+
+mali_error kbase_instr_hwcnt_setup(kbase_context *kctx, kbase_uk_hwcnt_setup *setup);
+mali_error kbase_instr_hwcnt_enable(kbase_context *kctx, kbase_uk_hwcnt_setup *setup);
+mali_error kbase_instr_hwcnt_disable(kbase_context *kctx);
+mali_error kbase_instr_hwcnt_clear(kbase_context *kctx);
+mali_error kbase_instr_hwcnt_dump(kbase_context *kctx);
+mali_error kbase_instr_hwcnt_dump_irq(kbase_context *kctx);
+mali_bool kbase_instr_hwcnt_dump_complete(kbase_context *kctx, mali_bool * const success);
+void kbase_instr_hwcnt_suspend(kbase_device *kbdev);
+void kbase_instr_hwcnt_resume(kbase_device *kbdev);
+
+void kbasep_cache_clean_worker(struct work_struct *data);
+void kbase_clean_caches_done(kbase_device *kbdev);
+
+/**
+ * The GPU has completed performance count sampling successfully.
+ */
+void kbase_instr_hwcnt_sample_done(kbase_device *kbdev);
+
+mali_error kbase_create_os_context(kbase_os_context * const osctx);
+void kbase_destroy_os_context(kbase_os_context *osctx);
+
+mali_error kbase_jd_init(kbase_context *kctx);
+void kbase_jd_exit(kbase_context *kctx);
+mali_error kbase_jd_submit(kbase_context *kctx, const kbase_uk_job_submit *user_bag);
+void kbase_jd_done(kbase_jd_atom *katom, int slot_nr, ktime_t *end_timestamp,
+ kbasep_js_atom_done_code done_code);
+void kbase_jd_cancel(kbase_device *kbdev, kbase_jd_atom *katom);
+void kbase_jd_zap_context(kbase_context *kctx);
+mali_bool jd_done_nolock(kbase_jd_atom *katom);
+void kbase_jd_free_external_resources(kbase_jd_atom *katom);
+
+mali_error kbase_job_slot_init(kbase_device *kbdev);
+void kbase_job_slot_halt(kbase_device *kbdev);
+void kbase_job_slot_term(kbase_device *kbdev);
+void kbase_job_done(kbase_device *kbdev, u32 done);
+void kbase_job_zap_context(kbase_context *kctx);
+
+void kbase_job_slot_softstop(kbase_device *kbdev, int js, kbase_jd_atom *target_katom);
+void kbase_job_slot_hardstop(kbase_context *kctx, int js, kbase_jd_atom *target_katom);
+
+void kbase_event_post(kbase_context *ctx, kbase_jd_atom *event);
+int kbase_event_dequeue(kbase_context *ctx, base_jd_event_v2 *uevent);
+int kbase_event_pending(kbase_context *ctx);
+mali_error kbase_event_init(kbase_context *kctx);
+void kbase_event_close(kbase_context *kctx);
+void kbase_event_cleanup(kbase_context *kctx);
+void kbase_event_wakeup(kbase_context *kctx);
+
+int kbase_process_soft_job(kbase_jd_atom *katom);
+mali_error kbase_prepare_soft_job(kbase_jd_atom *katom);
+void kbase_finish_soft_job(kbase_jd_atom *katom);
+void kbase_cancel_soft_job(kbase_jd_atom *katom);
+void kbase_resume_suspended_soft_jobs(kbase_device *kbdev);
+
+/* api used internally for register access. Contains validation and tracing */
+void kbase_reg_write(kbase_device *kbdev, u16 offset, u32 value, kbase_context *kctx);
+u32 kbase_reg_read(kbase_device *kbdev, u16 offset, kbase_context *kctx);
+void kbase_device_trace_register_access(kbase_context *kctx, kbase_reg_access_type type, u16 reg_offset, u32 reg_value);
+void kbase_device_trace_buffer_install(kbase_context *kctx, u32 *tb, size_t size);
+void kbase_device_trace_buffer_uninstall(kbase_context *kctx);
+
+/* api to be ported per OS, only need to do the raw register access */
+void kbase_os_reg_write(kbase_device *kbdev, u16 offset, u32 value);
+u32 kbase_os_reg_read(kbase_device *kbdev, u16 offset);
+
+/** Report a GPU fault.
+ *
+ * This function is called from the interrupt handler when a GPU fault occurs.
+ * It reports the details of the fault using KBASE_DEBUG_PRINT_WARN.
+ *
+ * @param kbdev The kbase device that the GPU fault occurred from.
+ * @param multiple Zero if only GPU_FAULT was raised, non-zero if MULTIPLE_GPU_FAULTS was also set
+ */
+void kbase_report_gpu_fault(kbase_device *kbdev, int multiple);
+
+/** Kill all jobs that are currently running from a context
+ *
+ * This is used in response to a page fault to remove all jobs from the faulting context from the hardware.
+ *
+ * @param kctx The context to kill jobs from
+ */
+void kbase_job_kill_jobs_from_context(kbase_context *kctx);
+
+/**
+ * GPU interrupt handler
+ *
+ * This function is called from the interrupt handler when a GPU irq is to be handled.
+ *
+ * @param kbdev The kbase device to handle an IRQ for
+ * @param val The value of the GPU IRQ status register which triggered the call
+ */
+void kbase_gpu_interrupt(kbase_device *kbdev, u32 val);
+
+/**
+ * Prepare for resetting the GPU.
+ * This function just soft-stops all the slots to ensure that as many jobs as possible are saved.
+ *
+ * The function returns a boolean which should be interpreted as follows:
+ * - MALI_TRUE - Prepared for reset, kbase_reset_gpu should be called.
+ * - MALI_FALSE - Another thread is performing a reset, kbase_reset_gpu should not be called.
+ *
+ * @return See description
+ */
+mali_bool kbase_prepare_to_reset_gpu(kbase_device *kbdev);
+
+/**
+ * Pre-locked version of @a kbase_prepare_to_reset_gpu.
+ *
+ * Identical to @a kbase_prepare_to_reset_gpu, except that the
+ * kbasep_js_device_data::runpool_irq::lock is externally locked.
+ *
+ * @see kbase_prepare_to_reset_gpu
+ */
+mali_bool kbase_prepare_to_reset_gpu_locked(kbase_device *kbdev);
+
+/** Reset the GPU
+ *
+ * This function should be called after kbase_prepare_to_reset_gpu iff it returns MALI_TRUE.
+ * It should never be called without a corresponding call to kbase_prepare_to_reset_gpu.
+ *
+ * After this function is called (or not called if kbase_prepare_to_reset_gpu returned MALI_FALSE),
+ * the caller should wait for kbdev->reset_waitq to be signalled to know when the reset has completed.
+ */
+void kbase_reset_gpu(kbase_device *kbdev);
+
+/**
+ * Pre-locked version of @a kbase_reset_gpu.
+ *
+ * Identical to @a kbase_reset_gpu, except that the
+ * kbasep_js_device_data::runpool_irq::lock is externally locked.
+ *
+ * @see kbase_reset_gpu
+ */
+void kbase_reset_gpu_locked(kbase_device *kbdev);
+
+/** Returns the name associated with a Mali exception code
+ *
+ * @param[in] exception_code exception code
+ * @return name associated with the exception code
+ */
+const char *kbase_exception_name(u32 exception_code);
+
+/**
+ * Check whether a system suspend is in progress, or whether the system has already been suspended
+ *
+ * The caller should ensure that either kbdev->pm.active_count_lock is held, or
+ * a dmb was executed recently (to ensure the value is most
+ * up-to-date). However, without a lock the value could change afterwards.
+ *
+ * @return MALI_FALSE if a suspend is not in progress
+ * @return !=MALI_FALSE otherwise
+ */
+static INLINE mali_bool kbase_pm_is_suspending(struct kbase_device *kbdev)
+{
+	return kbdev->pm.suspending;
+}
+
+/**
+ * Return the atom's ID, as was originally supplied by userspace in
+ * base_jd_atom_v2::atom_number
+ */
+static INLINE int kbase_jd_atom_id(kbase_context *kctx, kbase_jd_atom *katom)
+{
+ int result;
+ KBASE_DEBUG_ASSERT(kctx);
+ KBASE_DEBUG_ASSERT(katom);
+ KBASE_DEBUG_ASSERT(katom->kctx == kctx);
+
+ result = katom - &kctx->jctx.atoms[0];
+	KBASE_DEBUG_ASSERT(result >= 0 && result < BASE_JD_ATOM_COUNT);
+ return result;
+}
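+/* Illustrative round trip (a sketch, not part of the driver): atoms live in
+ * the kctx->jctx.atoms[] array, so for any valid index id:
+ *
+ *   kbase_jd_atom *katom = &kctx->jctx.atoms[id];
+ *   KBASE_DEBUG_ASSERT(kbase_jd_atom_id(kctx, katom) == id);
+ */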
+
+#if KBASE_TRACE_ENABLE != 0
+/** Add trace values about a job-slot
+ *
+ * @note Any functions called through this macro will still be evaluated in
+ * Release builds (CONFIG_MALI_DEBUG not defined). Therefore, when KBASE_TRACE_ENABLE == 0 any
+ * functions called to get the parameters supplied to this macro must:
+ * - be static or static inline, and
+ * - just return 0 and have no other statements present in the body.
+ */
+#define KBASE_TRACE_ADD_SLOT(kbdev, code, ctx, katom, gpu_addr, jobslot) \
+ kbasep_trace_add(kbdev, KBASE_TRACE_CODE(code), ctx, katom, gpu_addr, \
+ KBASE_TRACE_FLAG_JOBSLOT, 0, jobslot, 0)
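+/* Usage sketch (illustrative, not from the driver): any function used to
+ * compute an argument for these trace macros must be trivial, because builds
+ * with KBASE_TRACE_ENABLE == 0 discard the arguments, e.g.
+ *
+ *   static inline u32 kbasep_trace_info_stub(void) { return 0; }
+ *
+ * (kbasep_trace_info_stub is a hypothetical name, not a driver symbol.) */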
+
+/** Add trace values about a job-slot, with info
+ *
+ * @note Any functions called through this macro will still be evaluated in
+ * Release builds (CONFIG_MALI_DEBUG not defined). Therefore, when KBASE_TRACE_ENABLE == 0 any
+ * functions called to get the parameters supplied to this macro must:
+ * - be static or static inline, and
+ * - just return 0 and have no other statements present in the body.
+ */
+#define KBASE_TRACE_ADD_SLOT_INFO(kbdev, code, ctx, katom, gpu_addr, jobslot, info_val) \
+ kbasep_trace_add(kbdev, KBASE_TRACE_CODE(code), ctx, katom, gpu_addr, \
+ KBASE_TRACE_FLAG_JOBSLOT, 0, jobslot, info_val)
+
+/** Add trace values about a ctx refcount
+ *
+ * @note Any functions called through this macro will still be evaluated in
+ * Release builds (CONFIG_MALI_DEBUG not defined). Therefore, when KBASE_TRACE_ENABLE == 0 any
+ * functions called to get the parameters supplied to this macro must:
+ * - be static or static inline, and
+ * - just return 0 and have no other statements present in the body.
+ */
+#define KBASE_TRACE_ADD_REFCOUNT(kbdev, code, ctx, katom, gpu_addr, refcount) \
+ kbasep_trace_add(kbdev, KBASE_TRACE_CODE(code), ctx, katom, gpu_addr, \
+ KBASE_TRACE_FLAG_REFCOUNT, refcount, 0, 0)
+/** Add trace values about a ctx refcount, and info
+ *
+ * @note Any functions called through this macro will still be evaluated in
+ * Release builds (CONFIG_MALI_DEBUG not defined). Therefore, when KBASE_TRACE_ENABLE == 0 any
+ * functions called to get the parameters supplied to this macro must:
+ * - be static or static inline, and
+ * - just return 0 and have no other statements present in the body.
+ */
+#define KBASE_TRACE_ADD_REFCOUNT_INFO(kbdev, code, ctx, katom, gpu_addr, refcount, info_val) \
+ kbasep_trace_add(kbdev, KBASE_TRACE_CODE(code), ctx, katom, gpu_addr, \
+ KBASE_TRACE_FLAG_REFCOUNT, refcount, 0, info_val)
+
+/** Add trace values (no slot or refcount)
+ *
+ * @note Any functions called through this macro will still be evaluated in
+ * Release builds (CONFIG_MALI_DEBUG not defined). Therefore, when KBASE_TRACE_ENABLE == 0 any
+ * functions called to get the parameters supplied to this macro must:
+ * - be static or static inline, and
+ * - just return 0 and have no other statements present in the body.
+ */
+#define KBASE_TRACE_ADD(kbdev, code, ctx, katom, gpu_addr, info_val) \
+ kbasep_trace_add(kbdev, KBASE_TRACE_CODE(code), ctx, katom, gpu_addr, \
+ 0, 0, 0, info_val)
+
+/** Clear the trace */
+#define KBASE_TRACE_CLEAR(kbdev) \
+ kbasep_trace_clear(kbdev)
+
+/** Dump the slot trace */
+#define KBASE_TRACE_DUMP(kbdev) \
+ kbasep_trace_dump(kbdev)
+
+/** PRIVATE - do not use directly. Use KBASE_TRACE_ADD() instead */
+void kbasep_trace_add(kbase_device *kbdev, kbase_trace_code code, void *ctx, kbase_jd_atom *katom, u64 gpu_addr, u8 flags, int refcount, int jobslot, u32 info_val);
+/** PRIVATE - do not use directly. Use KBASE_TRACE_CLEAR() instead */
+void kbasep_trace_clear(kbase_device *kbdev);
+#else
+#ifdef CONFIG_MALI_SYSTEM_TRACE
+/* Dispatch kbase trace events as system trace events */
+#include <kbase/src/linux/mali_linux_kbase_trace.h>
+#define KBASE_TRACE_ADD_SLOT(kbdev, code, ctx, katom, gpu_addr, jobslot) \
+	trace_mali_##code(jobslot, 0)
+
+#define KBASE_TRACE_ADD_SLOT_INFO(kbdev, code, ctx, katom, gpu_addr, jobslot, info_val) \
+	trace_mali_##code(jobslot, info_val)
+
+#define KBASE_TRACE_ADD_REFCOUNT(kbdev, code, ctx, katom, gpu_addr, refcount) \
+	trace_mali_##code(refcount, 0)
+
+#define KBASE_TRACE_ADD_REFCOUNT_INFO(kbdev, code, ctx, katom, gpu_addr, refcount, info_val) \
+	trace_mali_##code(refcount, info_val)
+
+#define KBASE_TRACE_ADD(kbdev, code, ctx, katom, gpu_addr, info_val) \
+	trace_mali_##code(gpu_addr, info_val)
+
+#define KBASE_TRACE_CLEAR(kbdev) \
+	do {\
+		CSTD_UNUSED(kbdev);\
+		CSTD_NOP(0);\
+	} while (0)
+#define KBASE_TRACE_DUMP(kbdev) \
+	do {\
+		CSTD_UNUSED(kbdev);\
+		CSTD_NOP(0);\
+	} while (0)
+
+#else /* CONFIG_MALI_SYSTEM_TRACE */
+#define KBASE_TRACE_ADD_SLOT(kbdev, code, ctx, katom, gpu_addr, jobslot)\
+	do {\
+		CSTD_UNUSED(kbdev);\
+		CSTD_NOP(code);\
+		CSTD_UNUSED(ctx);\
+		CSTD_UNUSED(katom);\
+		CSTD_UNUSED(gpu_addr);\
+		CSTD_UNUSED(jobslot);\
+		CSTD_NOP(0);\
+	} while (0)
+
+#define KBASE_TRACE_ADD_SLOT_INFO(kbdev, code, ctx, katom, gpu_addr, jobslot, info_val)\
+ do {\
+ CSTD_UNUSED(kbdev);\
+ CSTD_NOP(code);\
+ CSTD_UNUSED(ctx);\
+ CSTD_UNUSED(katom);\
+ CSTD_UNUSED(gpu_addr);\
+ CSTD_UNUSED(jobslot);\
+ CSTD_UNUSED(info_val);\
+ CSTD_NOP(0);\
+ } while (0)
+
+#define KBASE_TRACE_ADD_REFCOUNT(kbdev, code, ctx, katom, gpu_addr, refcount)\
+ do {\
+ CSTD_UNUSED(kbdev);\
+ CSTD_NOP(code);\
+ CSTD_UNUSED(ctx);\
+ CSTD_UNUSED(katom);\
+ CSTD_UNUSED(gpu_addr);\
+ CSTD_UNUSED(refcount);\
+ CSTD_NOP(0);\
+ } while (0)
+
+#define KBASE_TRACE_ADD_REFCOUNT_INFO(kbdev, code, ctx, katom, gpu_addr, refcount, info_val)\
+	do {\
+		CSTD_UNUSED(kbdev);\
+		CSTD_NOP(code);\
+		CSTD_UNUSED(ctx);\
+		CSTD_UNUSED(katom);\
+		CSTD_UNUSED(gpu_addr);\
+		CSTD_UNUSED(refcount);\
+		CSTD_UNUSED(info_val);\
+		CSTD_NOP(0);\
+	} while (0)
+
+#define KBASE_TRACE_ADD(kbdev, code, ctx, katom, gpu_addr, info_val)\
+	do {\
+		CSTD_UNUSED(kbdev);\
+		CSTD_NOP(code);\
+		CSTD_UNUSED(ctx);\
+		CSTD_UNUSED(katom);\
+		CSTD_UNUSED(gpu_addr);\
+		CSTD_UNUSED(info_val);\
+		CSTD_NOP(0);\
+	} while (0)
+
+#define KBASE_TRACE_CLEAR(kbdev)\
+	do {\
+		CSTD_UNUSED(kbdev);\
+		CSTD_NOP(0);\
+	} while (0)
+#define KBASE_TRACE_DUMP(kbdev)\
+	do {\
+		CSTD_UNUSED(kbdev);\
+		CSTD_NOP(0);\
+	} while (0)
+#endif /* CONFIG_MALI_SYSTEM_TRACE */
+#endif
+/** PRIVATE - do not use directly. Use KBASE_TRACE_DUMP() instead */
+void kbasep_trace_dump(kbase_device *kbdev);
+#endif
diff --git a/drivers/gpu/arm/t6xx/kbase/src/common/mali_kbase_10969_workaround.c b/drivers/gpu/arm/t6xx/kbase/src/common/mali_kbase_10969_workaround.c
new file mode 100755
index 00000000000..f14f3be17e7
--- /dev/null
+++ b/drivers/gpu/arm/t6xx/kbase/src/common/mali_kbase_10969_workaround.c
@@ -0,0 +1,182 @@
+/*
+ *
+ * (C) COPYRIGHT 2013 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained
+ * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
+ * Boston, MA 02110-1301, USA.
+ *
+ */
+
+
+
+#include <kbase/src/common/mali_kbase.h>
+
+/* This function is used to work around a HW issue with single iterator GPUs.
+ * If a fragment job is soft-stopped on the edge of its bounding box, it can
+ * happen that the restart index is out of bounds and the rerun causes a tile
+ * range fault. If this happens we try to clamp the restart index to a correct
+ * value and rerun the job.
+ */
+/* Mask of X and Y coordinates for the coordinate words in the descriptors */
+#define X_COORDINATE_MASK 0x00000FFF
+#define Y_COORDINATE_MASK 0x0FFF0000
+/* Max number of words needed from the fragment shader job descriptor */
+#define JOB_HEADER_SIZE_IN_WORDS 10
+#define JOB_HEADER_SIZE (JOB_HEADER_SIZE_IN_WORDS*sizeof(u32))
+
+/* Word 0: Status Word */
+#define JOB_DESC_STATUS_WORD 0
+/* Word 1: Restart Index */
+#define JOB_DESC_RESTART_INDEX_WORD 1
+/* Word 2: Fault address low word */
+#define JOB_DESC_FAULT_ADDR_LOW_WORD 2
+/* Word 8: Minimum Tile Coordinates */
+#define FRAG_JOB_DESC_MIN_TILE_COORD_WORD 8
+/* Word 9: Maximum Tile Coordinates */
+#define FRAG_JOB_DESC_MAX_TILE_COORD_WORD 9
+
+int kbasep_10969_workaround_clamp_coordinates(kbase_jd_atom *katom)
+{
+ u32 clamped = 0;
+	KBASE_DEBUG_PRINT_WARN(KBASE_JD, "Called TILE_RANGE_FAULT workaround clamping function.\n");
+ if (katom->core_req & BASE_JD_REQ_FS){
+ kbase_va_region * region = kbase_region_tracker_find_region_enclosing_address(katom->kctx, katom->jc );
+
+ if (region){
+ phys_addr_t * page_array = kbase_get_phy_pages(region);
+
+ if (page_array){
+ u64 page_index = (katom->jc >> PAGE_SHIFT) - region->start_pfn;
+ u32 offset = katom->jc & (~PAGE_MASK);
+ u32 * page_1 = NULL;
+ u32 * page_2 = NULL;
+ u32 job_header[JOB_HEADER_SIZE_IN_WORDS];
+ void* dst = job_header;
+
+				/* We need the first 10 words of the fragment shader job descriptor.
+				 * We need to check that offset + 10 words is less than the page
+				 * size, otherwise we must also load the next page. copy_size will
+				 * equal JOB_HEADER_SIZE when the whole descriptor fits within the
+				 * page, and will be smaller otherwise.
+				 */
+ u32 copy_size = MIN(PAGE_SIZE - offset, JOB_HEADER_SIZE);
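+				/* (Worked example: with 4 KiB pages and offset == 4088, copy_size =
+				 * MIN(4096 - 4088, 40) = 8, so the remaining 32 bytes of the header
+				 * are read from the next page.) */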
+
+ page_1 = kmap_atomic(pfn_to_page(PFN_DOWN(page_array[page_index])));
+ if (!page_1){
+ KBASE_DEBUG_PRINT_WARN(KBASE_JD, "Restart Index clamping function not able to map page 1 \n");
+ goto exit;
+ }
+ /* page_1 is a u32 pointer, offset is expressed in bytes */
+ page_1 += offset>>2;
+ kbase_sync_to_cpu(page_array[page_index] + offset, page_1, copy_size);
+ memcpy(dst, page_1, copy_size);
+
+				/* The data needed overflows the page boundary; map the subsequent page */
+ if (copy_size < JOB_HEADER_SIZE){
+ page_2 = kmap_atomic(pfn_to_page(PFN_DOWN(page_array[page_index + 1])));
+ if (!page_2){
+ KBASE_DEBUG_PRINT_WARN(KBASE_JD, "Restart Index clamping function not able to map page 2 \n");
+ kunmap_atomic(page_1);
+ goto exit;
+ }
+ kbase_sync_to_cpu(page_array[page_index + 1], page_2, JOB_HEADER_SIZE - copy_size);
+ memcpy(dst + copy_size, page_2, JOB_HEADER_SIZE - copy_size);
+ }
+
+				/* We managed to correctly map one or two pages (in case of overflow) */
+ {
+ u32 minX,minY,maxX,maxY;
+ u32 restartX,restartY;
+
+					/* Get Bounding Box data and restart index from the fault address low word */
+ minX = job_header[FRAG_JOB_DESC_MIN_TILE_COORD_WORD] & X_COORDINATE_MASK;
+ minY = job_header[FRAG_JOB_DESC_MIN_TILE_COORD_WORD] & Y_COORDINATE_MASK;
+ maxX = job_header[FRAG_JOB_DESC_MAX_TILE_COORD_WORD] & X_COORDINATE_MASK;
+ maxY = job_header[FRAG_JOB_DESC_MAX_TILE_COORD_WORD] & Y_COORDINATE_MASK;
+ restartX = job_header[JOB_DESC_FAULT_ADDR_LOW_WORD] & X_COORDINATE_MASK;
+ restartY = job_header[JOB_DESC_FAULT_ADDR_LOW_WORD] & Y_COORDINATE_MASK;
+
+ KBASE_DEBUG_PRINT_WARN(KBASE_JD, "Before Clamping: \n" \
+ "Jobstatus: %08x \n" \
+ "restartIdx: %08x \n" \
+ "Fault_addr_low: %08x \n" \
+ "minCoordsX: %08x minCoordsY: %08x \n" \
+ "maxCoordsX: %08x maxCoordsY: %08x \n",
+ job_header[JOB_DESC_STATUS_WORD],
+ job_header[JOB_DESC_RESTART_INDEX_WORD],
+ job_header[JOB_DESC_FAULT_ADDR_LOW_WORD],
+ minX,minY,
+ maxX,maxY );
+
+					/* Set the restart index to the coordinates which generated the
+					 * fault, clamping each coordinate into the bounding box */
+					if (restartX < minX) {
+						KBASE_DEBUG_PRINT_WARN(KBASE_JD,
+						    "Clamping restart X index to minimum. %08x clamped to %08x\n",
+						    restartX, minX);
+						restartX = minX;
+						clamped = 1;
+					}
+					if (restartY < minY) {
+						KBASE_DEBUG_PRINT_WARN(KBASE_JD,
+						    "Clamping restart Y index to minimum. %08x clamped to %08x\n",
+						    restartY, minY);
+						restartY = minY;
+						clamped = 1;
+					}
+					if (restartX > maxX) {
+						KBASE_DEBUG_PRINT_WARN(KBASE_JD,
+						    "Clamping restart X index to maximum. %08x clamped to %08x\n",
+						    restartX, maxX);
+						restartX = maxX;
+						clamped = 1;
+					}
+					if (restartY > maxY) {
+						KBASE_DEBUG_PRINT_WARN(KBASE_JD,
+						    "Clamping restart Y index to maximum. %08x clamped to %08x\n",
+						    restartY, maxY);
+						restartY = maxY;
+						clamped = 1;
+					}
+					/* Recompose the restart index from the (possibly clamped) coordinates */
+					job_header[JOB_DESC_RESTART_INDEX_WORD] = restartX | restartY;
+
+ if (clamped){
+ /* Reset the fault address low word and set the job status to STOPPED */
+ job_header[JOB_DESC_FAULT_ADDR_LOW_WORD] = 0x0;
+ job_header[JOB_DESC_STATUS_WORD] = BASE_JD_EVENT_STOPPED;
+ KBASE_DEBUG_PRINT_WARN(KBASE_JD, "After Clamping: \n" \
+ "Jobstatus: %08x \n" \
+ "restartIdx: %08x \n" \
+ "Fault_addr_low: %08x \n" \
+ "minCoordsX: %08x minCoordsY: %08x \n" \
+ "maxCoordsX: %08x maxCoordsY: %08x \n",
+ job_header[JOB_DESC_STATUS_WORD],
+ job_header[JOB_DESC_RESTART_INDEX_WORD],
+ job_header[JOB_DESC_FAULT_ADDR_LOW_WORD],
+ minX,minY,
+ maxX,maxY );
+
+					/* Flush CPU cache to update memory for future GPU reads */
+ memcpy(page_1, dst, copy_size);
+ kbase_sync_to_memory(page_array[page_index] + offset, page_1, copy_size);
+
+ if (copy_size < JOB_HEADER_SIZE){
+ memcpy(page_2, dst + copy_size, JOB_HEADER_SIZE - copy_size);
+ kbase_sync_to_memory(page_array[page_index + 1], page_2, JOB_HEADER_SIZE - copy_size);
+ }
+
+ }
+ }
+ if (copy_size < JOB_HEADER_SIZE)
+ kunmap_atomic(page_2);
+
+ kunmap_atomic(page_1);
+ }
+ }
+ }
+exit:
+ return clamped;
+}
diff --git a/drivers/gpu/arm/t6xx/kbase/src/common/mali_kbase_10969_workaround.h b/drivers/gpu/arm/t6xx/kbase/src/common/mali_kbase_10969_workaround.h
new file mode 100755
index 00000000000..0fd32eacbb8
--- /dev/null
+++ b/drivers/gpu/arm/t6xx/kbase/src/common/mali_kbase_10969_workaround.h
@@ -0,0 +1,23 @@
+/*
+ *
+ * (C) COPYRIGHT 2013 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained
+ * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
+ * Boston, MA 02110-1301, USA.
+ *
+ */
+
+
+
+#ifndef _KBASE_10969_WORKAROUND_
+#define _KBASE_10969_WORKAROUND_
+
+int kbasep_10969_workaround_clamp_coordinates(kbase_jd_atom *katom);
+
+#endif /* _KBASE_10969_WORKAROUND_ */
diff --git a/drivers/gpu/arm/t6xx/kbase/src/common/mali_kbase_8401_workaround.c b/drivers/gpu/arm/t6xx/kbase/src/common/mali_kbase_8401_workaround.c
new file mode 100755
index 00000000000..bf42e41668b
--- /dev/null
+++ b/drivers/gpu/arm/t6xx/kbase/src/common/mali_kbase_8401_workaround.c
@@ -0,0 +1,422 @@
+/*
+ *
+ * (C) COPYRIGHT 2011-2013 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained
+ * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
+ * Boston, MA 02110-1301, USA.
+ *
+ */
+
+
+
+/**
+ * @file mali_kbase_8401_workaround.c
+ * Functions related to working around BASE_HW_ISSUE_8401
+ */
+
+#include <kbase/src/common/mali_kbase.h>
+#include <kbase/src/common/mali_kbase_defs.h>
+#include <kbase/src/common/mali_kbase_jm.h>
+#include <kbase/src/common/mali_kbase_8401_workaround.h>
+
+#define WORKAROUND_PAGE_OFFSET (2)
+#define URT_POINTER_INDEX (20)
+#define RMU_POINTER_INDEX (23)
+#define RSD_POINTER_INDEX (24)
+#define TSD_POINTER_INDEX (31)
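+/* The *_POINTER_INDEX values are u32 word offsets into the job: the job header
+ * below occupies words 0-15 and the draw call descriptor starts at word 16, so
+ * word 20 is the URT pointer, word 23 the RMU pointer, word 24 the RSD pointer
+ * and word 31 the TSD pointer. */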
+
+static const u32 compute_job_32bit_header[] = {
+ /* Job Descriptor Header */
+
+ /* Job Status */
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ /* Flags and Indices */
+ /* job_type = compute shader job */
+ 0x00000008, 0x00000000,
+ /* Pointer to next job */
+ 0x00000000,
+ /* Reserved */
+ 0x00000000,
+ /* Job Dimension Data */
+ 0x0000000f, 0x21040842,
+ /* Task Split */
+ 0x08000000,
+ /* Reserved */
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+
+	/* Draw Call Descriptor - 32 bit (Must be aligned to a 64-byte boundary) */
+
+ /* Flags */
+ 0x00000004,
+ /* Primary Attribute Offset */
+ 0x00000000,
+ /* Primitive Index Base Value */
+ 0x00000000,
+
+ /* Pointer To Vertex Position Array (64-byte alignment) */
+ 0x00000000,
+ /* Pointer To Uniform Remapping Table (8-byte alignment) */
+ 0,
+ /* Pointer To Image Descriptor Pointer Table */
+ 0x00000000,
+ /* Pointer To Sampler Array */
+ 0x00000000,
+ /* Pointer To Register-Mapped Uniform Data Area (16-byte alignment) */
+ 0,
+ /* Pointer To Renderer State Descriptor (64-byte alignment) */
+ 0,
+ /* Pointer To Primary Attribute Buffer Array */
+ 0x00000000,
+ /* Pointer To Primary Attribute Array */
+ 0x00000000,
+ /* Pointer To Secondary Attribute Buffer Array */
+ 0x00000000,
+ /* Pointer To Secondary Attribute Array */
+ 0x00000000,
+ /* Pointer To Viewport Descriptor */
+ 0x00000000,
+ /* Pointer To Occlusion Query Result */
+ 0x00000000,
+ /* Pointer To Thread Storage (64 byte alignment) */
+ 0,
+};
+
+static const u32 compute_job_32bit_urt[] = {
+ /* Uniform Remapping Table Entry */
+ 0, 0,
+};
+
+static const u32 compute_job_32bit_rmu[] = {
+ /* Register Mapped Uniform Data Area (16 byte aligned), an array of 128-bit
+ * register values.
+ *
+ * NOTE: this is also used as the URT pointer, so the first 16-byte entry
+ * must be all zeros.
+ *
+ * For BASE_HW_ISSUE_8987, we place 16 RMUs here, because this should only
+ * be run concurrently with other GLES jobs (i.e. FS jobs from slot 0).
+ */
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000
+};
+
+static const u32 compute_job_32bit_rsd[] = {
+ /* Renderer State Descriptor */
+
+	/* Shader program initial PC (low) */
+ 0x00000001,
+ /* Shader program initial PC (high) */
+ 0x00000000,
+ /* Image descriptor array sizes */
+ 0x00000000,
+ /* Attribute array sizes */
+ 0x00000000,
+ /* Uniform array size and Shader Flags */
+ /* Flags set: R, D, SE, Reg Uniforms==16, FPM==OpenCL */
+ 0x42003800,
+ /* Depth bias */
+ 0x00000000,
+ /* Depth slope bias */
+ 0x00000000,
+ /* Depth bias clamp */
+ 0x00000000,
+ /* Multisample Write Mask and Flags */
+ 0x00000000,
+ /* Stencil Write Masks and Alpha parameters */
+ 0x00000000,
+ /* Stencil tests - forward facing */
+ 0x00000000,
+	/* Stencil tests - back facing */
+ 0x00000000,
+ /* Alpha Test Reference Value */
+ 0x00000000,
+ /* Thread Balancing Information */
+ 0x00000000,
+ /* Blend Parameters or Pointer (low) */
+ 0x00000000,
+ /* Blend Parameters or Pointer (high) */
+ 0x00000000,
+};
+
+static const u32 compute_job_32bit_tsd[] = {
+ /* Thread Storage Descriptor */
+
+ /* Thread Local Storage Sizes */
+ 0x00000000,
+ /* Workgroup Local Memory Area Flags */
+ 0x0000001f,
+ /* Pointer to Local Storage Area */
+ 0x00021000, 0x00000001,
+ /* Pointer to Workgroup Local Storage Area */
+ 0x00000000, 0x00000000,
+ /* Pointer to Shader Exception Handler */
+ 0x00000000, 0x00000000
+};
+
+static kbase_jd_atom dummy_job_atom[KBASE_8401_WORKAROUND_COMPUTEJOB_COUNT];
+
+/**
+ * Initialize the compute job structure.
+ */
+
+static void kbasep_8401_workaround_update_job_pointers(u32 *dummy_compute_job, int page_nr)
+{
+ u32 base_address = (page_nr + WORKAROUND_PAGE_OFFSET) * PAGE_SIZE;
+ u8 *dummy_job = (u8 *) dummy_compute_job;
+ u8 *dummy_job_urt;
+ u8 *dummy_job_rmu;
+ u8 *dummy_job_rsd;
+ u8 *dummy_job_tsd;
+
+ KBASE_DEBUG_ASSERT(dummy_compute_job);
+
+	/* determine where each job section goes, taking alignment restrictions into consideration */
+ dummy_job_urt = (u8 *) ((((uintptr_t) dummy_job + sizeof(compute_job_32bit_header)) + 7) & ~7);
+ dummy_job_rmu = (u8 *) ((((uintptr_t) dummy_job_urt + sizeof(compute_job_32bit_urt)) + 15) & ~15);
+ dummy_job_rsd = (u8 *) ((((uintptr_t) dummy_job_rmu + sizeof(compute_job_32bit_rmu)) + 63) & ~63);
+ dummy_job_tsd = (u8 *) ((((uintptr_t) dummy_job_rsd + sizeof(compute_job_32bit_rsd)) + 63) & ~63);
+
+ /* Make sure the job fits within a single page */
+ KBASE_DEBUG_ASSERT(PAGE_SIZE > ((dummy_job_tsd + sizeof(compute_job_32bit_tsd)) - dummy_job));
+
+ /* Copy the job sections to the allocated memory */
+ memcpy(dummy_job, compute_job_32bit_header, sizeof(compute_job_32bit_header));
+ memcpy(dummy_job_urt, compute_job_32bit_urt, sizeof(compute_job_32bit_urt));
+ memcpy(dummy_job_rmu, compute_job_32bit_rmu, sizeof(compute_job_32bit_rmu));
+ memcpy(dummy_job_rsd, compute_job_32bit_rsd, sizeof(compute_job_32bit_rsd));
+ memcpy(dummy_job_tsd, compute_job_32bit_tsd, sizeof(compute_job_32bit_tsd));
+
+ /* Update header pointers */
+ *(dummy_compute_job + URT_POINTER_INDEX) = (dummy_job_urt - dummy_job) + base_address;
+ *(dummy_compute_job + RMU_POINTER_INDEX) = (dummy_job_rmu - dummy_job) + base_address;
+ *(dummy_compute_job + RSD_POINTER_INDEX) = (dummy_job_rsd - dummy_job) + base_address;
+ *(dummy_compute_job + TSD_POINTER_INDEX) = (dummy_job_tsd - dummy_job) + base_address;
+ /* Update URT pointer */
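+	/* (The URT entry packs the RMU address across two words: e.g. an RMU area
+	 * at GPU address 0x12345678 - an illustrative value - is stored as low
+	 * word 0x34567800 and high word 0x00000012, per the shifts below.) */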
+ *((u32 *) dummy_job_urt + 0) = (((dummy_job_rmu - dummy_job) + base_address) << 8) & 0xffffff00;
+ *((u32 *) dummy_job_urt + 1) = (((dummy_job_rmu - dummy_job) + base_address) >> 24) & 0xff;
+}
+
+/**
+ * Initialize the memory for 8401 workaround.
+ */
+
+mali_error kbasep_8401_workaround_init(kbase_device * const kbdev)
+{
+ kbasep_js_device_data *js_devdata;
+ kbase_context *workaround_kctx;
+ int i;
+ u16 as_present_mask;
+
+ KBASE_DEBUG_ASSERT(kbdev);
+ KBASE_DEBUG_ASSERT(kbdev->workaround_kctx == NULL);
+
+ js_devdata = &kbdev->js_data;
+
+ /* For this workaround we reserve one address space to allow us to
+ * submit a special job independent of other contexts */
+ --(kbdev->nr_hw_address_spaces);
+
+ /* Only update nr_user_address_spaces if it was unchanged - to ensure
+ * HW workarounds that have modified this will still work */
+ if (kbdev->nr_user_address_spaces == (kbdev->nr_hw_address_spaces + 1))
+ --(kbdev->nr_user_address_spaces);
+
+ KBASE_DEBUG_ASSERT(kbdev->nr_user_address_spaces <= kbdev->nr_hw_address_spaces);
+
+ /* Recalculate the free address spaces bit-pattern */
+ as_present_mask = (1U << kbdev->nr_hw_address_spaces) - 1;
+ js_devdata->as_free &= as_present_mask;
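+	/* (Worked example: with 4 hardware address spaces, nr_hw_address_spaces
+	 * becomes 3, the mask is 0b0111, and AS 3 is left out of as_free as the
+	 * reserved workaround address space.) */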
+
+ workaround_kctx = kbase_create_context(kbdev);
+ if (!workaround_kctx)
+ return MALI_ERROR_FUNCTION_FAILED;
+
+ /* Allocate the pages required to contain the job */
+ if (MALI_ERROR_NONE != kbase_mem_allocator_alloc(&workaround_kctx->osalloc, KBASE_8401_WORKAROUND_COMPUTEJOB_COUNT, kbdev->workaround_compute_job_pa, 0))
+ goto no_pages;
+
+ /* Get virtual address of mapped memory and write a compute job for each page */
+ for (i = 0; i < KBASE_8401_WORKAROUND_COMPUTEJOB_COUNT; i++) {
+ kbdev->workaround_compute_job_va[i] = kmap(pfn_to_page(PFN_DOWN(kbdev->workaround_compute_job_pa[i])));
+ if (NULL == kbdev->workaround_compute_job_va[i])
+ goto page_free;
+
+ /* Generate the compute job data */
+ kbasep_8401_workaround_update_job_pointers((u32 *) kbdev->workaround_compute_job_va[i], i);
+ }
+
+ /* Insert pages to the gpu mmu. */
+ kbase_gpu_vm_lock(workaround_kctx);
+
+ kbase_mmu_insert_pages(workaround_kctx,
+ /* vpfn = page number */
+ (u64) WORKAROUND_PAGE_OFFSET,
+ /* physical address */
+ kbdev->workaround_compute_job_pa,
+ /* number of pages */
+ KBASE_8401_WORKAROUND_COMPUTEJOB_COUNT,
+ /* flags */
+ KBASE_REG_GPU_RD | KBASE_REG_CPU_RD | KBASE_REG_CPU_WR | KBASE_REG_GPU_WR);
+
+ kbase_gpu_vm_unlock(workaround_kctx);
+
+ kbdev->workaround_kctx = workaround_kctx;
+ return MALI_ERROR_NONE;
+ page_free:
+ while (i--)
+ kunmap(pfn_to_page(PFN_DOWN(kbdev->workaround_compute_job_pa[i])));
+
+ kbase_mem_allocator_free(&workaround_kctx->osalloc, KBASE_8401_WORKAROUND_COMPUTEJOB_COUNT, kbdev->workaround_compute_job_pa, MALI_TRUE);
+ no_pages:
+ kbase_destroy_context(workaround_kctx);
+
+ return MALI_ERROR_FUNCTION_FAILED;
+}
+
+/**
+ * Free up the memory used by 8401 workaround.
+ **/
+
+void kbasep_8401_workaround_term(kbase_device *kbdev)
+{
+ kbasep_js_device_data *js_devdata;
+ int i;
+ u16 restored_as;
+
+ KBASE_DEBUG_ASSERT(kbdev);
+ KBASE_DEBUG_ASSERT(kbdev->workaround_kctx);
+
+ js_devdata = &kbdev->js_data;
+
+ for (i = 0; i < KBASE_8401_WORKAROUND_COMPUTEJOB_COUNT; i++)
+ kunmap(pfn_to_page(PFN_DOWN(kbdev->workaround_compute_job_pa[i])));
+
+ kbase_mem_allocator_free(&kbdev->workaround_kctx->osalloc, KBASE_8401_WORKAROUND_COMPUTEJOB_COUNT, kbdev->workaround_compute_job_pa, MALI_TRUE);
+
+ kbase_destroy_context(kbdev->workaround_kctx);
+ kbdev->workaround_kctx = NULL;
+
+ /* Free up the workaround address space */
+ kbdev->nr_hw_address_spaces++;
+
+ if (kbdev->nr_user_address_spaces == (kbdev->nr_hw_address_spaces - 1)) {
+ /* Only update nr_user_address_spaces if it was unchanged - to ensure
+ * HW workarounds that have modified this will still work */
+ ++(kbdev->nr_user_address_spaces);
+ }
+ KBASE_DEBUG_ASSERT(kbdev->nr_user_address_spaces <= kbdev->nr_hw_address_spaces);
+
+ /* Recalculate the free address spaces bit-pattern */
+	restored_as = (1U << (kbdev->nr_hw_address_spaces - 1));
+ js_devdata->as_free |= restored_as;
+}
+
+/**
+ * Submit the 8401 workaround job.
+ *
+ * Important for BASE_HW_ISSUE_8987: This job always uses 16 RMUs
+ * - Therefore, on slot[1] it will always use the same number of RMUs as another
+ * GLES job.
+ * - On slot[2], no other job (GLES or otherwise) will be running on the
+ * cores, by virtue of it being slot[2]. Therefore, any value of RMUs is
+ * acceptable.
+ */
+void kbasep_8401_submit_dummy_job(kbase_device *kbdev, int js)
+{
+ u32 cfg;
+ mali_addr64 jc;
+ u32 pgd_high;
+
+ /* While this workaround is active we reserve the last address space just for submitting the dummy jobs */
+ int as = kbdev->nr_hw_address_spaces;
+
+ /* Don't issue compute jobs on job slot 0 */
+ KBASE_DEBUG_ASSERT(js != 0);
+ KBASE_DEBUG_ASSERT(js < KBASE_8401_WORKAROUND_COMPUTEJOB_COUNT);
+
+ /* Job chain GPU address */
+	jc = (js + WORKAROUND_PAGE_OFFSET) * PAGE_SIZE;	/* GPU virtual address (see the kbase_mmu_insert_pages call in kbasep_8401_workaround_init) */
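+	/* e.g. with 4 KiB pages, js == 1 gives jc = (1 + 2) * 0x1000 = 0x3000 */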
+
+ /* Clear the job status words which may contain values from a previous job completion */
+ memset(kbdev->workaround_compute_job_va[js], 0, 4 * sizeof(u32));
+
+ /* Get the affinity of the previous job */
+ dummy_job_atom[js].affinity = ((u64) kbase_reg_read(kbdev, JOB_SLOT_REG(js, JSn_AFFINITY_LO), NULL)) | (((u64) kbase_reg_read(kbdev, JOB_SLOT_REG(js, JSn_AFFINITY_HI), NULL)) << 32);
+
+ /* Don't submit a compute job if the affinity was previously zero (i.e. no jobs have run yet on this slot) */
+ if (!dummy_job_atom[js].affinity)
+ return;
+
+ /* Ensure that our page tables are programmed into the MMU */
+ kbase_reg_write(kbdev, MMU_AS_REG(as, ASn_TRANSTAB_LO), (kbdev->workaround_kctx->pgd & ASn_TRANSTAB_ADDR_SPACE_MASK) | ASn_TRANSTAB_READ_INNER | ASn_TRANSTAB_ADRMODE_TABLE, NULL);
+
+	/* Need to use a conditional expression to avoid a "right shift count >= width of type"
+	 * error when using an if statement - although the sizeof condition is evaluated at
+	 * compile time, the unused branch is not removed until after it has been type-checked,
+	 * by which point the error has already been produced.
+	 */
+ pgd_high = sizeof(kbdev->workaround_kctx->pgd) > 4 ? (kbdev->workaround_kctx->pgd >> 32) : 0;
+ kbase_reg_write(kbdev, MMU_AS_REG(as, ASn_TRANSTAB_HI), pgd_high, NULL);
+
+ kbase_reg_write(kbdev, MMU_AS_REG(as, ASn_MEMATTR_LO), ASn_MEMATTR_IMPL_DEF_CACHE_POLICY, NULL);
+ kbase_reg_write(kbdev, MMU_AS_REG(as, ASn_MEMATTR_HI), ASn_MEMATTR_IMPL_DEF_CACHE_POLICY, NULL);
+ kbase_reg_write(kbdev, MMU_AS_REG(as, ASn_COMMAND), ASn_COMMAND_UPDATE, NULL);
+
+ kbase_reg_write(kbdev, JOB_SLOT_REG(js, JSn_HEAD_NEXT_LO), jc & 0xFFFFFFFF, NULL);
+ kbase_reg_write(kbdev, JOB_SLOT_REG(js, JSn_HEAD_NEXT_HI), jc >> 32, NULL);
+
+ kbase_reg_write(kbdev, JOB_SLOT_REG(js, JSn_AFFINITY_NEXT_LO), dummy_job_atom[js].affinity & 0xFFFFFFFF, NULL);
+ kbase_reg_write(kbdev, JOB_SLOT_REG(js, JSn_AFFINITY_NEXT_HI), dummy_job_atom[js].affinity >> 32, NULL);
+
+ /* start MMU, medium priority, cache clean/flush on end, clean/flush on start */
+ cfg = as | JSn_CONFIG_END_FLUSH_CLEAN_INVALIDATE | JSn_CONFIG_START_MMU | JSn_CONFIG_START_FLUSH_CLEAN_INVALIDATE | JSn_CONFIG_THREAD_PRI(8);
+ kbase_reg_write(kbdev, JOB_SLOT_REG(js, JSn_CONFIG_NEXT), cfg, NULL);
+
+ KBASE_TRACE_ADD_SLOT(kbdev, JM_SUBMIT, NULL, 0, jc, js);
+
+ kbase_reg_write(kbdev, JOB_SLOT_REG(js, JSn_COMMAND_NEXT), JSn_COMMAND_START, NULL);
+ /* Report that the job has been submitted */
+ kbasep_jm_enqueue_submit_slot(&kbdev->jm_slots[js], &dummy_job_atom[js]);
+}
+
+/**
+ * Check if the katom given is a dummy compute job.
+ */
+mali_bool kbasep_8401_is_workaround_job(kbase_jd_atom *katom)
+{
+ int i;
+
+ /* Note: we don't check the first dummy_job_atom as slot 0 is never used for the workaround */
+ for (i = 1; i < KBASE_8401_WORKAROUND_COMPUTEJOB_COUNT; i++) {
+ if (katom == &dummy_job_atom[i]) {
+ /* This is a dummy job */
+ return MALI_TRUE;
+ }
+ }
+
+ /* This is a real job */
+ return MALI_FALSE;
+}
diff --git a/drivers/gpu/arm/t6xx/kbase/src/common/mali_kbase_8401_workaround.h b/drivers/gpu/arm/t6xx/kbase/src/common/mali_kbase_8401_workaround.h
new file mode 100755
index 00000000000..49e673587f1
--- /dev/null
+++ b/drivers/gpu/arm/t6xx/kbase/src/common/mali_kbase_8401_workaround.h
@@ -0,0 +1,31 @@
+/*
+ *
+ * (C) COPYRIGHT 2011-2012 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained
+ * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
+ * Boston, MA 02110-1301, USA.
+ *
+ */
+
+
+
+/**
+ * @file mali_kbase_8401_workaround.h
+ * Functions related to working around BASE_HW_ISSUE_8401
+ */
+
+#ifndef _KBASE_8401_WORKAROUND_H_
+#define _KBASE_8401_WORKAROUND_H_
+
+mali_error kbasep_8401_workaround_init(kbase_device * const kbdev);
+void kbasep_8401_workaround_term(kbase_device *kbdev);
+void kbasep_8401_submit_dummy_job(kbase_device *kbdev, int js);
+mali_bool kbasep_8401_is_workaround_job(kbase_jd_atom *katom);
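+
+/* Illustrative call sequence (a sketch, not part of the original code; the
+ * call sites shown are hypothetical):
+ *
+ *   if (kbase_hw_has_issue(kbdev, BASE_HW_ISSUE_8401))
+ *       err = kbasep_8401_workaround_init(kbdev);   // during device init
+ *   ...
+ *   kbasep_8401_submit_dummy_job(kbdev, js);        // when the workaround requires it
+ *   ...
+ *   if (kbasep_8401_is_workaround_job(katom))
+ *       ...;                                        // don't report dummy jobs to userspace
+ *   ...
+ *   kbasep_8401_workaround_term(kbdev);             // during device termination
+ */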
+
+#endif /* _KBASE_8401_WORKAROUND_H_ */
diff --git a/drivers/gpu/arm/t6xx/kbase/src/common/mali_kbase_cache_policy.c b/drivers/gpu/arm/t6xx/kbase/src/common/mali_kbase_cache_policy.c
new file mode 100755
index 00000000000..202ca420e9a
--- /dev/null
+++ b/drivers/gpu/arm/t6xx/kbase/src/common/mali_kbase_cache_policy.c
@@ -0,0 +1,39 @@
+/*
+ *
+ * (C) COPYRIGHT 2012 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained
+ * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
+ * Boston, MA 02110-1301, USA.
+ *
+ */
+
+
+
+/**
+ * @file mali_kbase_cache_policy.c
+ * Cache Policy API.
+ */
+
+#include "mali_kbase_cache_policy.h"
+
+/*
+ * The output flags should be a combination of the following values:
+ * KBASE_REG_CPU_CACHED: CPU cache should be enabled
+ */
+u32 kbase_cache_enabled(u32 flags, u32 nr_pages)
+{
+ u32 cache_flags = 0;
+
+ CSTD_UNUSED(nr_pages);
+
+ if (flags & BASE_MEM_CACHED_CPU)
+ cache_flags |= KBASE_REG_CPU_CACHED;
+
+ return cache_flags;
+}
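+
+/* Illustrative usage (a sketch, not part of the original code): for a region
+ * requested with CPU caching, the helper above yields the corresponding
+ * region flag, e.g.
+ *
+ *   u32 reg_flags = kbase_cache_enabled(BASE_MEM_CACHED_CPU, nr_pages);
+ *   // reg_flags == KBASE_REG_CPU_CACHED; with flags of 0 it returns 0
+ */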
diff --git a/drivers/gpu/arm/t6xx/kbase/src/common/mali_kbase_cache_policy.h b/drivers/gpu/arm/t6xx/kbase/src/common/mali_kbase_cache_policy.h
new file mode 100755
index 00000000000..615105b8723
--- /dev/null
+++ b/drivers/gpu/arm/t6xx/kbase/src/common/mali_kbase_cache_policy.h
@@ -0,0 +1,45 @@
+/*
+ *
+ * (C) COPYRIGHT 2012 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained
+ * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
+ * Boston, MA 02110-1301, USA.
+ *
+ */
+
+
+
+/**
+ * @file mali_kbase_cache_policy.h
+ * Cache Policy API.
+ */
+
+#ifndef _KBASE_CACHE_POLICY_H_
+#define _KBASE_CACHE_POLICY_H_
+
+#include <malisw/mali_malisw.h>
+#include "mali_kbase.h"
+#include <kbase/mali_base_kernel.h>
+
+/**
+ * @brief Choose the cache policy for a specific region
+ *
+ * Tells whether the CPU and GPU caches should be enabled or not for a specific region.
+ * This function can be modified to customize the cache policy depending on the flags
+ * and size of the region.
+ *
+ * @param[in] flags flags describing attributes of the region
+ * @param[in] nr_pages total number of pages (backed or not) for the region
+ *
+ * @return a combination of KBASE_REG_CPU_CACHED and KBASE_REG_GPU_CACHED depending
+ * on the cache policy
+ */
+u32 kbase_cache_enabled(u32 flags, u32 nr_pages);
+
+#endif /* _KBASE_CACHE_POLICY_H_ */
diff --git a/drivers/gpu/arm/t6xx/kbase/src/common/mali_kbase_config.c b/drivers/gpu/arm/t6xx/kbase/src/common/mali_kbase_config.c
new file mode 100755
index 00000000000..5ae5ad908b7
--- /dev/null
+++ b/drivers/gpu/arm/t6xx/kbase/src/common/mali_kbase_config.c
@@ -0,0 +1,607 @@
+/*
+ *
+ * (C) COPYRIGHT 2011-2013 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained
+ * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
+ * Boston, MA 02110-1301, USA.
+ *
+ */
+
+
+
+#include <kbase/src/common/mali_kbase.h>
+#include <kbase/src/common/mali_kbase_defs.h>
+#include <kbase/src/common/mali_kbase_cpuprops.h>
+#ifdef CONFIG_UMP
+#include <linux/ump-common.h>
+#endif /* CONFIG_UMP */
+
+/* Specifies how many attributes are permitted in the config (excluding the terminating attribute).
+ * This is used by the validation function so we can detect whether the configuration is properly
+ * terminated. This value can be changed if we need to introduce more attributes or if many memory
+ * regions need to be defined. */
+#define ATTRIBUTE_COUNT_MAX 32
+
+/* Right now we allow only 2 memory attributes (excluding the terminating attribute) */
+#define MEMORY_ATTRIBUTE_COUNT_MAX 2
+
+/* Limits for gpu frequency configuration parameters. These are used for config validation. */
+#define MAX_GPU_ALLOWED_FREQ_KHZ 1000000
+#define MIN_GPU_ALLOWED_FREQ_KHZ 1
+
+/* Default irq throttle time. This is the default desired minimum time between two consecutive
+ * interrupts from the gpu. The irq throttle gpu register is programmed based on this value. */
+#define DEFAULT_IRQ_THROTTLE_TIME_US 20
+
+/*** Begin Scheduling defaults ***/
+
+/**
+ * Default scheduling tick granularity, in nanoseconds
+ */
+#define DEFAULT_JS_SCHEDULING_TICK_NS 100000000u /* 100ms */
+
+/**
+ * Default minimum number of scheduling ticks before jobs are soft-stopped.
+ *
+ * This defines the time-slice for a job (which may be different from that of a context)
+ */
+#define DEFAULT_JS_SOFT_STOP_TICKS 1 /* Between 0.1 and 0.2s before soft-stop */
+
+/**
+ * Default minimum number of scheduling ticks before jobs are hard-stopped
+ */
+#define DEFAULT_JS_HARD_STOP_TICKS_SS_HW_ISSUE_8408 12 /* 1.2s before hard-stop, for a certain GLES2 test at 128x128 (bound by combined vertex+tiler job) */
+#define DEFAULT_JS_HARD_STOP_TICKS_SS 2 /* Between 0.2 and 0.3s before hard-stop */
+
+/**
+ * Default minimum number of scheduling ticks before jobs are hard-stopped
+ * during dumping
+ */
+#define DEFAULT_JS_HARD_STOP_TICKS_NSS 600 /* 60s @ 100ms tick */
+
+/**
+ * Default minimum number of scheduling ticks before the GPU is reset
+ * to clear a "stuck" job
+ */
+#define DEFAULT_JS_RESET_TICKS_SS_HW_ISSUE_8408 18 /* 1.8s before resetting GPU, for a certain GLES2 test at 128x128 (bound by combined vertex+tiler job) */
+#define DEFAULT_JS_RESET_TICKS_SS 3 /* 0.3-0.4s before GPU is reset */
+
+/**
+ * Default minimum number of scheduling ticks before the GPU is reset
+ * to clear a "stuck" job during dumping.
+ */
+#define DEFAULT_JS_RESET_TICKS_NSS 601 /* 60.1s @ 100ms tick */
+
+/**
+ * Number of milliseconds given for other jobs on the GPU to be
+ * soft-stopped when the GPU needs to be reset.
+ */
+#define DEFAULT_JS_RESET_TIMEOUT_MS 3000
+
+/**
+ * Default timeslice that a context is scheduled in for, in nanoseconds.
+ *
+ * When a context has used up this amount of time across its jobs, it is
+ * scheduled out to let another run.
+ *
+ * @note the resolution is nanoseconds (ns) here, because that's the format
+ * often used by the OS.
+ */
+#define DEFAULT_JS_CTX_TIMESLICE_NS 50000000 /* 0.05s - at 20fps a ctx does at least 1 frame before being scheduled out. At 40fps, 2 frames, etc */
+
+/**
+ * Default initial runtime of a context for CFS, in ticks.
+ *
+ * This value is relative to that of the least-run context, and defines where
+ * in the CFS queue a new context is added.
+ */
+#define DEFAULT_JS_CFS_CTX_RUNTIME_INIT_SLICES 1
+
+/**
+ * Default minimum runtime value of a context for CFS, in ticks.
+ *
+ * This value is relative to that of the least-run context. This prevents
+ * "stored-up timeslices" DoS attacks.
+ */
+#define DEFAULT_JS_CFS_CTX_RUNTIME_MIN_SLICES 2
+
+/**
+ * Default setting for whether to prefer security or performance.
+ *
+ * Currently affects only r0p0-15dev0 HW and earlier.
+ */
+#define DEFAULT_SECURE_BUT_LOSS_OF_PERFORMANCE MALI_FALSE
+
+/**
+ * Default setting for read Address ID limiting on AXI.
+ */
+#define DEFAULT_ARID_LIMIT KBASE_AID_32
+
+/**
+ * Default setting for write Address ID limiting on AXI.
+ */
+#define DEFAULT_AWID_LIMIT KBASE_AID_32
+
+/**
+ * Default setting for using alternative hardware counters.
+ */
+#define DEFAULT_ALTERNATIVE_HWC MALI_FALSE
+
+/*** End Scheduling defaults ***/
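+
+/* Worked example (illustrative, derived from the defaults above): with the
+ * 100ms scheduling tick, the wall-clock limits for a typical job come out as
+ *
+ *   soft-stop:  DEFAULT_JS_SOFT_STOP_TICKS    * 100ms = 0.1s (observed 0.1-0.2s)
+ *   hard-stop:  DEFAULT_JS_HARD_STOP_TICKS_SS * 100ms = 0.2s (observed 0.2-0.3s)
+ *   GPU reset:  DEFAULT_JS_RESET_TICKS_SS     * 100ms = 0.3s (observed 0.3-0.4s)
+ *
+ * The ranges arise because a job can start anywhere within a tick period, so
+ * up to one extra tick may elapse before the threshold is reached.
+ */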
+
+/*** Begin Power Manager defaults */
+
+#define DEFAULT_PM_DVFS_FREQ 500 /* Milliseconds */
+
+/**
+ * Default poweroff tick granularity, in nanoseconds
+ */
+#define DEFAULT_PM_GPU_POWEROFF_TICK_NS 400000 /* 400us */
+
+/**
+ * Default number of poweroff ticks before shader cores are powered off
+ */
+#define DEFAULT_PM_POWEROFF_TICK_SHADER 2 /* 400-800us */
+
+/**
+ * Default number of poweroff ticks before GPU is powered off
+ */
+#define DEFAULT_PM_POWEROFF_TICK_GPU 2 /* 400-800us */
+
+/*** End Power Manager defaults ***/
+
+/**
+ * Default value for KBASE_CONFIG_ATTR_CPU_SPEED_FUNC.
+ * Points to @ref kbase_cpuprops_get_default_clock_speed.
+ */
+#define DEFAULT_CPU_SPEED_FUNC ((uintptr_t)kbase_cpuprops_get_default_clock_speed)
+
+int kbasep_get_config_attribute_count(const kbase_attribute *attributes)
+{
+ int count = 1;
+
+ KBASE_DEBUG_ASSERT(attributes != NULL);
+
+ while (attributes->id != KBASE_CONFIG_ATTR_END) {
+ attributes++;
+ count++;
+ }
+
+ return count;
+}
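+
+/* Example (illustrative): for the hypothetical list
+ *
+ *   { {KBASE_CONFIG_ATTR_GPU_FREQ_KHZ_MIN, 100000},
+ *     {KBASE_CONFIG_ATTR_GPU_FREQ_KHZ_MAX, 500000},
+ *     {KBASE_CONFIG_ATTR_END, 0} }
+ *
+ * this returns 3 - the count includes the terminating attribute.
+ */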
+
+const kbase_attribute *kbasep_get_next_attribute(const kbase_attribute *attributes, int attribute_id)
+{
+ KBASE_DEBUG_ASSERT(attributes != NULL);
+
+ while (attributes->id != KBASE_CONFIG_ATTR_END) {
+ if (attributes->id == attribute_id)
+ return attributes;
+
+ attributes++;
+ }
+ return NULL;
+}
+
+KBASE_EXPORT_TEST_API(kbasep_get_next_attribute)
+
+uintptr_t kbasep_get_config_value(struct kbase_device *kbdev, const kbase_attribute *attributes, int attribute_id)
+{
+ const kbase_attribute *attr;
+
+ KBASE_DEBUG_ASSERT(attributes != NULL);
+
+ attr = kbasep_get_next_attribute(attributes, attribute_id);
+ if (attr != NULL)
+ return attr->data;
+
+ /* default values */
+ switch (attribute_id) {
+ case KBASE_CONFIG_ATTR_MEMORY_PER_PROCESS_LIMIT:
+ return (uintptr_t) -1;
+#ifdef CONFIG_UMP
+ case KBASE_CONFIG_ATTR_UMP_DEVICE:
+ return UMP_DEVICE_W_SHIFT;
+#endif /* CONFIG_UMP */
+ case KBASE_CONFIG_ATTR_MEMORY_OS_SHARED_MAX:
+ return (uintptr_t) -1;
+ case KBASE_CONFIG_ATTR_MEMORY_OS_SHARED_PERF_GPU:
+ return KBASE_MEM_PERF_NORMAL;
+ case KBASE_CONFIG_ATTR_GPU_IRQ_THROTTLE_TIME_US:
+ return DEFAULT_IRQ_THROTTLE_TIME_US;
+ /* Begin scheduling defaults */
+ case KBASE_CONFIG_ATTR_JS_SCHEDULING_TICK_NS:
+ return DEFAULT_JS_SCHEDULING_TICK_NS;
+ case KBASE_CONFIG_ATTR_JS_SOFT_STOP_TICKS:
+ return DEFAULT_JS_SOFT_STOP_TICKS;
+ case KBASE_CONFIG_ATTR_JS_HARD_STOP_TICKS_SS:
+ if (kbase_hw_has_issue(kbdev, BASE_HW_ISSUE_8408))
+ return DEFAULT_JS_HARD_STOP_TICKS_SS_HW_ISSUE_8408;
+ else
+ return DEFAULT_JS_HARD_STOP_TICKS_SS;
+ case KBASE_CONFIG_ATTR_JS_HARD_STOP_TICKS_NSS:
+ return DEFAULT_JS_HARD_STOP_TICKS_NSS;
+ case KBASE_CONFIG_ATTR_JS_CTX_TIMESLICE_NS:
+ return DEFAULT_JS_CTX_TIMESLICE_NS;
+ case KBASE_CONFIG_ATTR_JS_CFS_CTX_RUNTIME_INIT_SLICES:
+ return DEFAULT_JS_CFS_CTX_RUNTIME_INIT_SLICES;
+ case KBASE_CONFIG_ATTR_JS_CFS_CTX_RUNTIME_MIN_SLICES:
+ return DEFAULT_JS_CFS_CTX_RUNTIME_MIN_SLICES;
+ case KBASE_CONFIG_ATTR_JS_RESET_TICKS_SS:
+ if (kbase_hw_has_issue(kbdev, BASE_HW_ISSUE_8408))
+ return DEFAULT_JS_RESET_TICKS_SS_HW_ISSUE_8408;
+ else
+ return DEFAULT_JS_RESET_TICKS_SS;
+ case KBASE_CONFIG_ATTR_JS_RESET_TICKS_NSS:
+ return DEFAULT_JS_RESET_TICKS_NSS;
+ case KBASE_CONFIG_ATTR_JS_RESET_TIMEOUT_MS:
+ return DEFAULT_JS_RESET_TIMEOUT_MS;
+ /* End scheduling defaults */
+ case KBASE_CONFIG_ATTR_POWER_MANAGEMENT_CALLBACKS:
+ return 0;
+ case KBASE_CONFIG_ATTR_PLATFORM_FUNCS:
+ return 0;
+ case KBASE_CONFIG_ATTR_SECURE_BUT_LOSS_OF_PERFORMANCE:
+ return DEFAULT_SECURE_BUT_LOSS_OF_PERFORMANCE;
+ case KBASE_CONFIG_ATTR_CPU_SPEED_FUNC:
+ return DEFAULT_CPU_SPEED_FUNC;
+ case KBASE_CONFIG_ATTR_GPU_SPEED_FUNC:
+ return 0;
+ case KBASE_CONFIG_ATTR_ARID_LIMIT:
+ return DEFAULT_ARID_LIMIT;
+ case KBASE_CONFIG_ATTR_AWID_LIMIT:
+ return DEFAULT_AWID_LIMIT;
+ case KBASE_CONFIG_ATTR_ALTERNATIVE_HWC:
+ return DEFAULT_ALTERNATIVE_HWC;
+ case KBASE_CONFIG_ATTR_POWER_MANAGEMENT_DVFS_FREQ:
+ return DEFAULT_PM_DVFS_FREQ;
+ case KBASE_CONFIG_ATTR_PM_GPU_POWEROFF_TICK_NS:
+ return DEFAULT_PM_GPU_POWEROFF_TICK_NS;
+ case KBASE_CONFIG_ATTR_PM_POWEROFF_TICK_SHADER:
+ return DEFAULT_PM_POWEROFF_TICK_SHADER;
+ case KBASE_CONFIG_ATTR_PM_POWEROFF_TICK_GPU:
+ return DEFAULT_PM_POWEROFF_TICK_GPU;
+
+ default:
+ KBASE_DEBUG_PRINT_ERROR(KBASE_CORE, "kbasep_get_config_value. Cannot get value of attribute with id=%d and no default value defined", attribute_id);
+ return 0;
+ }
+}
+
+KBASE_EXPORT_TEST_API(kbasep_get_config_value)
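+
+/* Illustrative usage (a sketch, not part of the original code): callers look
+ * up a single attribute and transparently receive the built-in default when
+ * the platform configuration omits it, e.g.
+ *
+ *   u32 dvfs_ms = (u32) kbasep_get_config_value(kbdev, kbdev->config_attributes,
+ *                                               KBASE_CONFIG_ATTR_POWER_MANAGEMENT_DVFS_FREQ);
+ *   // DEFAULT_PM_DVFS_FREQ (500ms) unless the platform overrides it
+ */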
+
+mali_bool kbasep_platform_device_init(kbase_device *kbdev)
+{
+ kbase_platform_funcs_conf *platform_funcs;
+
+ platform_funcs = (kbase_platform_funcs_conf *) kbasep_get_config_value(kbdev, kbdev->config_attributes, KBASE_CONFIG_ATTR_PLATFORM_FUNCS);
+ if (platform_funcs) {
+ if (platform_funcs->platform_init_func)
+ return platform_funcs->platform_init_func(kbdev);
+ }
+ return MALI_TRUE;
+}
+
+void kbasep_platform_device_term(kbase_device *kbdev)
+{
+ kbase_platform_funcs_conf *platform_funcs;
+
+ platform_funcs = (kbase_platform_funcs_conf *) kbasep_get_config_value(kbdev, kbdev->config_attributes, KBASE_CONFIG_ATTR_PLATFORM_FUNCS);
+ if (platform_funcs) {
+ if (platform_funcs->platform_term_func)
+ platform_funcs->platform_term_func(kbdev);
+ }
+}
+
+#ifdef CONFIG_UMP
+static mali_bool kbasep_validate_ump_device(int ump_device)
+{
+ mali_bool valid;
+
+ switch (ump_device) {
+ case UMP_DEVICE_W_SHIFT:
+ case UMP_DEVICE_X_SHIFT:
+ case UMP_DEVICE_Y_SHIFT:
+ case UMP_DEVICE_Z_SHIFT:
+ valid = MALI_TRUE;
+ break;
+ default:
+ valid = MALI_FALSE;
+ break;
+ }
+ return valid;
+}
+#endif /* CONFIG_UMP */
+
+static mali_bool kbasep_validate_memory_performance(kbase_memory_performance performance)
+{
+ return performance <= KBASE_MEM_PERF_MAX_VALUE;
+}
+
+static mali_bool kbasep_validate_memory_resource(const kbase_memory_resource *memory_resource)
+{
+ KBASE_DEBUG_ASSERT(memory_resource != NULL);
+
+ if (memory_resource->name == NULL) {
+ KBASE_DEBUG_PRINT_WARN(KBASE_CORE, "Unnamed memory region found");
+ return MALI_FALSE;
+ }
+
+ if (memory_resource->base & ((1 << PAGE_SHIFT) - 1)) {
+ KBASE_DEBUG_PRINT_WARN(KBASE_CORE, "Base address of \"%s\" memory region is not page aligned", memory_resource->name);
+ return MALI_FALSE;
+ }
+
+ if (memory_resource->size & ((1 << PAGE_SHIFT) - 1)) {
+ KBASE_DEBUG_PRINT_WARN(KBASE_CORE, "Size of \"%s\" memory region is not a multiple of page size", memory_resource->name);
+ return MALI_FALSE;
+ }
+
+ if (memory_resource->attributes != NULL) { /* we allow NULL attribute list */
+ int i;
+
+ for (i = 0; memory_resource->attributes[i].id != KBASE_MEM_ATTR_END; i++) {
+ if (i >= MEMORY_ATTRIBUTE_COUNT_MAX) {
+ KBASE_DEBUG_PRINT_WARN(KBASE_CORE, "More than MEMORY_ATTRIBUTE_COUNT_MAX=%d configuration attributes defined. Is memory attribute list properly terminated?", MEMORY_ATTRIBUTE_COUNT_MAX);
+ return MALI_FALSE;
+ }
+ switch (memory_resource->attributes[i].id) {
+ case KBASE_MEM_ATTR_PERF_CPU:
+ if (MALI_TRUE != kbasep_validate_memory_performance((kbase_memory_performance) memory_resource->attributes[i].data)) {
+ KBASE_DEBUG_PRINT_WARN(KBASE_CORE, "CPU performance of \"%s\" region is invalid: %d", memory_resource->name, (kbase_memory_performance) memory_resource->attributes[i].data);
+ return MALI_FALSE;
+ }
+ break;
+
+ case KBASE_MEM_ATTR_PERF_GPU:
+ if (MALI_TRUE != kbasep_validate_memory_performance((kbase_memory_performance) memory_resource->attributes[i].data)) {
+ KBASE_DEBUG_PRINT_WARN(KBASE_CORE, "GPU performance of \"%s\" region is invalid: %d", memory_resource->name, (kbase_memory_performance) memory_resource->attributes[i].data);
+ return MALI_FALSE;
+ }
+ break;
+ default:
+ KBASE_DEBUG_PRINT_WARN(KBASE_CORE, "Invalid memory attribute found in \"%s\" memory region: %d", memory_resource->name, memory_resource->attributes[i].id);
+ return MALI_FALSE;
+ }
+ }
+ }
+
+ return MALI_TRUE;
+}
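+
+/* Illustrative: with 4kB pages (PAGE_SHIFT == 12) the alignment checks above
+ * require base and size to be multiples of 0x1000, so e.g. base == 0x80001000
+ * passes while base == 0x80001800 is rejected.
+ */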
+
+static mali_bool kbasep_validate_gpu_clock_freq(kbase_device *kbdev, const kbase_attribute *attributes)
+{
+ uintptr_t freq_min = kbasep_get_config_value(kbdev, attributes, KBASE_CONFIG_ATTR_GPU_FREQ_KHZ_MIN);
+ uintptr_t freq_max = kbasep_get_config_value(kbdev, attributes, KBASE_CONFIG_ATTR_GPU_FREQ_KHZ_MAX);
+
+ if ((freq_min > MAX_GPU_ALLOWED_FREQ_KHZ) || (freq_min < MIN_GPU_ALLOWED_FREQ_KHZ) || (freq_max > MAX_GPU_ALLOWED_FREQ_KHZ) || (freq_max < MIN_GPU_ALLOWED_FREQ_KHZ) || (freq_min > freq_max)) {
+ KBASE_DEBUG_PRINT_WARN(KBASE_CORE, "Invalid GPU frequencies found in configuration: min=%ldkHz, max=%ldkHz.", freq_min, freq_max);
+ return MALI_FALSE;
+ }
+
+ return MALI_TRUE;
+}
+
+static mali_bool kbasep_validate_pm_callback(const kbase_pm_callback_conf *callbacks)
+{
+ if (callbacks == NULL) {
+ /* Having no callbacks is valid */
+ return MALI_TRUE;
+ }
+
+ if ((callbacks->power_off_callback != NULL && callbacks->power_on_callback == NULL) || (callbacks->power_off_callback == NULL && callbacks->power_on_callback != NULL)) {
+ KBASE_DEBUG_PRINT_WARN(KBASE_CORE, "Invalid power management callbacks: Only one of power_off_callback and power_on_callback was specified");
+ return MALI_FALSE;
+ }
+ return MALI_TRUE;
+}
+
+static mali_bool kbasep_validate_cpu_speed_func(kbase_cpuprops_clock_speed_function fcn)
+{
+ return fcn != NULL;
+}
+
+mali_bool kbasep_validate_configuration_attributes(kbase_device *kbdev, const kbase_attribute *attributes)
+{
+ int i;
+ mali_bool had_gpu_freq_min = MALI_FALSE, had_gpu_freq_max = MALI_FALSE;
+
+ KBASE_DEBUG_ASSERT(attributes);
+
+ for (i = 0; attributes[i].id != KBASE_CONFIG_ATTR_END; i++) {
+ if (i >= ATTRIBUTE_COUNT_MAX) {
+ KBASE_DEBUG_PRINT_WARN(KBASE_CORE, "More than ATTRIBUTE_COUNT_MAX=%d configuration attributes defined. Is attribute list properly terminated?", ATTRIBUTE_COUNT_MAX);
+ return MALI_FALSE;
+ }
+
+ switch (attributes[i].id) {
+ case KBASE_CONFIG_ATTR_MEMORY_RESOURCE:
+ if (MALI_FALSE == kbasep_validate_memory_resource((kbase_memory_resource *) attributes[i].data)) {
+ KBASE_DEBUG_PRINT_WARN(KBASE_CORE, "Invalid memory region found in configuration");
+ return MALI_FALSE;
+ }
+ break;
+
+ case KBASE_CONFIG_ATTR_MEMORY_OS_SHARED_MAX:
+ /* Some shared memory is required for GPU page tables, see MIDBASE-1534 */
+ if (0 == attributes[i].data) {
+ KBASE_DEBUG_PRINT_WARN(KBASE_CORE, "Maximum OS Shared Memory Maximum is set to 0 which is not supported");
+ return MALI_FALSE;
+ }
+ break;
+
+ case KBASE_CONFIG_ATTR_MEMORY_OS_SHARED_PERF_GPU:
+ if (MALI_FALSE == kbasep_validate_memory_performance((kbase_memory_performance) attributes[i].data)) {
+ KBASE_DEBUG_PRINT_WARN(KBASE_CORE, "Shared OS memory GPU performance attribute has invalid value: %d", (kbase_memory_performance) attributes[i].data);
+ return MALI_FALSE;
+ }
+ break;
+
+ case KBASE_CONFIG_ATTR_MEMORY_PER_PROCESS_LIMIT:
+ /* any value is allowed */
+ break;
+#ifdef CONFIG_UMP
+ case KBASE_CONFIG_ATTR_UMP_DEVICE:
+ if (MALI_FALSE == kbasep_validate_ump_device(attributes[i].data)) {
+ KBASE_DEBUG_PRINT_WARN(KBASE_CORE, "Unknown UMP device found in configuration: %d", (int)attributes[i].data);
+ return MALI_FALSE;
+ }
+ break;
+#endif /* CONFIG_UMP */
+
+ case KBASE_CONFIG_ATTR_GPU_FREQ_KHZ_MIN:
+ had_gpu_freq_min = MALI_TRUE;
+ if (MALI_FALSE == kbasep_validate_gpu_clock_freq(kbdev, attributes)) {
+ /* Warning message handled by kbasep_validate_gpu_clock_freq() */
+ return MALI_FALSE;
+ }
+ break;
+
+ case KBASE_CONFIG_ATTR_GPU_FREQ_KHZ_MAX:
+ had_gpu_freq_max = MALI_TRUE;
+ if (MALI_FALSE == kbasep_validate_gpu_clock_freq(kbdev, attributes)) {
+ /* Warning message handled by kbasep_validate_gpu_clock_freq() */
+ return MALI_FALSE;
+ }
+ break;
+
+ /* Only non-zero unsigned 32-bit values accepted */
+ case KBASE_CONFIG_ATTR_JS_SCHEDULING_TICK_NS:
+#if CSTD_CPU_64BIT
+ if (attributes[i].data == 0u || (u64) attributes[i].data > (u64) U32_MAX)
+#else
+ if (attributes[i].data == 0u)
+#endif
+ {
+ KBASE_DEBUG_PRINT_WARN(KBASE_CORE, "Invalid Job Scheduling Configuration attribute for " "KBASE_CONFIG_ATTR_JS_SCHEDULING_TICKS_NS: %d", (int)attributes[i].data);
+ return MALI_FALSE;
+ }
+ break;
+
+ /* All these Job Scheduling attributes are FALLTHROUGH: only unsigned 32-bit values accepted */
+ case KBASE_CONFIG_ATTR_JS_SOFT_STOP_TICKS:
+ case KBASE_CONFIG_ATTR_JS_HARD_STOP_TICKS_SS:
+ case KBASE_CONFIG_ATTR_JS_HARD_STOP_TICKS_NSS:
+ case KBASE_CONFIG_ATTR_JS_RESET_TICKS_SS:
+ case KBASE_CONFIG_ATTR_JS_RESET_TICKS_NSS:
+ case KBASE_CONFIG_ATTR_JS_RESET_TIMEOUT_MS:
+ case KBASE_CONFIG_ATTR_JS_CTX_TIMESLICE_NS:
+ case KBASE_CONFIG_ATTR_JS_CFS_CTX_RUNTIME_INIT_SLICES:
+ case KBASE_CONFIG_ATTR_JS_CFS_CTX_RUNTIME_MIN_SLICES:
+#if CSTD_CPU_64BIT
+ if ((u64) attributes[i].data > (u64) U32_MAX) {
+ KBASE_DEBUG_PRINT_WARN(KBASE_CORE, "Job Scheduling Configuration attribute exceeds 32-bits: " "id==%d val==%d", attributes[i].id, (int)attributes[i].data);
+ return MALI_FALSE;
+ }
+#endif
+ break;
+
+ case KBASE_CONFIG_ATTR_GPU_IRQ_THROTTLE_TIME_US:
+#if CSTD_CPU_64BIT
+ if ((u64) attributes[i].data > (u64) U32_MAX) {
+ KBASE_DEBUG_PRINT_WARN(KBASE_CORE, "IRQ throttle time attribute exceeds 32-bits: " "id==%d val==%d", attributes[i].id, (int)attributes[i].data);
+ return MALI_FALSE;
+ }
+#endif
+ break;
+
+ case KBASE_CONFIG_ATTR_POWER_MANAGEMENT_CALLBACKS:
+ if (MALI_FALSE == kbasep_validate_pm_callback((kbase_pm_callback_conf *) attributes[i].data)) {
+ /* Warning message handled by kbasep_validate_pm_callback() */
+ return MALI_FALSE;
+ }
+ break;
+
+ case KBASE_CONFIG_ATTR_SECURE_BUT_LOSS_OF_PERFORMANCE:
+ if (attributes[i].data != MALI_TRUE && attributes[i].data != MALI_FALSE) {
+ KBASE_DEBUG_PRINT_WARN(KBASE_CORE, "Value for KBASE_CONFIG_ATTR_SECURE_BUT_LOSS_OF_PERFORMANCE was not " "MALI_TRUE or MALI_FALSE: %u", (unsigned int)attributes[i].data);
+ return MALI_FALSE;
+ }
+ break;
+
+ case KBASE_CONFIG_ATTR_CPU_SPEED_FUNC:
+ if (MALI_FALSE == kbasep_validate_cpu_speed_func((kbase_cpuprops_clock_speed_function) attributes[i].data)) {
+ KBASE_DEBUG_PRINT_WARN(KBASE_CORE, "Invalid function pointer in KBASE_CONFIG_ATTR_CPU_SPEED_FUNC");
+ return MALI_FALSE;
+ }
+ break;
+
+ case KBASE_CONFIG_ATTR_GPU_SPEED_FUNC:
+ if (0 == attributes[i].data) {
+ KBASE_DEBUG_PRINT_WARN(KBASE_CORE, "Invalid function pointer in KBASE_CONFIG_ATTR_GPU_SPEED_FUNC");
+ return MALI_FALSE;
+ }
+ break;
+
+ case KBASE_CONFIG_ATTR_PLATFORM_FUNCS:
+ /* any value is allowed */
+ break;
+
+ case KBASE_CONFIG_ATTR_AWID_LIMIT:
+ case KBASE_CONFIG_ATTR_ARID_LIMIT:
+ if ((u32) attributes[i].data > 0x3) {
+ KBASE_DEBUG_PRINT_WARN(KBASE_CORE, "Invalid AWID or ARID limit");
+ return MALI_FALSE;
+ }
+ break;
+
+ case KBASE_CONFIG_ATTR_ALTERNATIVE_HWC:
+ if (attributes[i].data != MALI_TRUE && attributes[i].data != MALI_FALSE) {
+ KBASE_DEBUG_PRINT_WARN(KBASE_CORE, "Value for KBASE_CONFIG_ATTR_ALTERNATIVE_HWC was not " "MALI_TRUE or MALI_FALSE: %u", (unsigned int)attributes[i].data);
+ return MALI_FALSE;
+ }
+ break;
+
+ case KBASE_CONFIG_ATTR_POWER_MANAGEMENT_DVFS_FREQ:
+#if CSTD_CPU_64BIT
+ if ((u64) attributes[i].data > (u64) U32_MAX) {
+ KBASE_DEBUG_PRINT_WARN(KBASE_CORE, "PM DVFS interval exceeds 32-bits: " "id==%d val==%d", attributes[i].id, (int)attributes[i].data);
+ return MALI_FALSE;
+ }
+#endif
+ break;
+
+ case KBASE_CONFIG_ATTR_PM_GPU_POWEROFF_TICK_NS:
+#if CSTD_CPU_64BIT
+ if (attributes[i].data == 0u || (u64) attributes[i].data > (u64) U32_MAX) {
+#else
+ if (attributes[i].data == 0u) {
+#endif
+ KBASE_DEBUG_PRINT_WARN(KBASE_CORE, "Invalid Power Manager Configuration attribute for " "KBASE_CONFIG_ATTR_PM_GPU_POWEROFF_TICK_NS: %d", (int)attributes[i].data);
+ return MALI_FALSE;
+ }
+ break;
+
+ case KBASE_CONFIG_ATTR_PM_POWEROFF_TICK_SHADER:
+ case KBASE_CONFIG_ATTR_PM_POWEROFF_TICK_GPU:
+#if CSTD_CPU_64BIT
+ if ((u64) attributes[i].data > (u64) U32_MAX) {
+ KBASE_DEBUG_PRINT_WARN(KBASE_CORE, "Power Manager Configuration attribute exceeds 32-bits: " "id==%d val==%d", attributes[i].id, (int)attributes[i].data);
+ return MALI_FALSE;
+ }
+#endif
+ break;
+
+ default:
+ KBASE_DEBUG_PRINT_WARN(KBASE_CORE, "Invalid attribute found in configuration: %d", attributes[i].id);
+ return MALI_FALSE;
+ }
+ }
+
+ if (!had_gpu_freq_min) {
+ KBASE_DEBUG_PRINT_WARN(KBASE_CORE, "Configuration does not include mandatory attribute KBASE_CONFIG_ATTR_GPU_FREQ_KHZ_MIN");
+ return MALI_FALSE;
+ }
+
+ if (!had_gpu_freq_max) {
+ KBASE_DEBUG_PRINT_WARN(KBASE_CORE, "Configuration does not include mandatory attribute KBASE_CONFIG_ATTR_GPU_FREQ_KHZ_MAX");
+ return MALI_FALSE;
+ }
+
+ return MALI_TRUE;
+}
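+
+/* Minimal valid configuration (illustrative sketch; the field order assumes a
+ * kbase_attribute layout of {id, data}): per the checks above, a platform
+ * must supply at least the two mandatory frequency attributes plus the
+ * terminator, with min <= max and both frequencies within
+ * [MIN_GPU_ALLOWED_FREQ_KHZ, MAX_GPU_ALLOWED_FREQ_KHZ]:
+ *
+ *   static const kbase_attribute minimal_attributes[] = {
+ *       {KBASE_CONFIG_ATTR_GPU_FREQ_KHZ_MIN, 100000},
+ *       {KBASE_CONFIG_ATTR_GPU_FREQ_KHZ_MAX, 500000},
+ *       {KBASE_CONFIG_ATTR_END, 0}
+ *   };
+ */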
diff --git a/drivers/gpu/arm/t6xx/kbase/src/common/mali_kbase_context.c b/drivers/gpu/arm/t6xx/kbase/src/common/mali_kbase_context.c
new file mode 100755
index 00000000000..05867d83756
--- /dev/null
+++ b/drivers/gpu/arm/t6xx/kbase/src/common/mali_kbase_context.c
@@ -0,0 +1,227 @@
+/*
+ *
+ * (C) COPYRIGHT 2010-2013 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained
+ * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
+ * Boston, MA 02110-1301, USA.
+ *
+ */
+
+
+
+/**
+ * @file mali_kbase_context.c
+ * Base kernel context APIs
+ */
+
+#include <kbase/src/common/mali_kbase.h>
+#include <kbase/src/common/mali_midg_regmap.h>
+
+#define MEMPOOL_PAGES 16384
+
+/**
+ * @brief Create a kernel base context.
+ *
+ * Allocate and init a kernel base context. Calls
+ * kbase_create_os_context() to set up OS-specific structures.
+ */
+kbase_context *kbase_create_context(kbase_device *kbdev)
+{
+ kbase_context *kctx;
+ mali_error mali_err;
+
+ KBASE_DEBUG_ASSERT(kbdev != NULL);
+
+	/* zero-initialized, as a lot of code assumes it is zeroed out on creation */
+ kctx = vzalloc(sizeof(*kctx));
+
+ if (!kctx)
+ goto out;
+
+ kctx->kbdev = kbdev;
+ kctx->as_nr = KBASEP_AS_NR_INVALID;
+#ifdef CONFIG_MALI_TRACE_TIMELINE
+ kctx->timeline.owner_tgid = task_tgid_nr(current);
+#endif
+ atomic_set(&kctx->setup_complete, 0);
+ atomic_set(&kctx->setup_in_progress, 0);
+ kctx->keep_gpu_powered = MALI_FALSE;
+ spin_lock_init(&kctx->mm_update_lock);
+ kctx->process_mm = NULL;
+ atomic_set(&kctx->nonmapped_pages, 0);
+
+ if (MALI_ERROR_NONE != kbase_mem_allocator_init(&kctx->osalloc, MEMPOOL_PAGES))
+ goto free_kctx;
+
+ kctx->pgd_allocator = &kctx->osalloc;
+ if (kbase_mem_usage_init(&kctx->usage, kctx->kbdev->memdev.per_process_memory_limit >> PAGE_SHIFT))
+ goto free_allocator;
+
+
+ if (kbase_jd_init(kctx))
+ goto free_memctx;
+
+ mali_err = kbasep_js_kctx_init(kctx);
+ if (MALI_ERROR_NONE != mali_err)
+ goto free_jd; /* safe to call kbasep_js_kctx_term in this case */
+
+ mali_err = kbase_event_init(kctx);
+ if (MALI_ERROR_NONE != mali_err)
+ goto free_jd;
+
+ mutex_init(&kctx->reg_lock);
+
+ INIT_LIST_HEAD(&kctx->waiting_soft_jobs);
+#ifdef CONFIG_KDS
+ INIT_LIST_HEAD(&kctx->waiting_kds_resource);
+#endif
+
+ mali_err = kbase_mmu_init(kctx);
+ if (MALI_ERROR_NONE != mali_err)
+ goto free_event;
+
+ kctx->pgd = kbase_mmu_alloc_pgd(kctx);
+ if (!kctx->pgd)
+ goto free_mmu;
+
+ if (kbase_create_os_context(&kctx->osctx))
+ goto free_pgd;
+
+ /* Make sure page 0 is not used... */
+ if (kbase_region_tracker_init(kctx))
+ goto free_osctx;
+#ifdef CONFIG_GPU_TRACEPOINTS
+ atomic_set(&kctx->jctx.work_id, 0);
+#endif
+#ifdef CONFIG_MALI_TRACE_TIMELINE
+ atomic_set(&kctx->timeline.jd_atoms_in_flight, 0);
+#endif
+
+ return kctx;
+
+ free_osctx:
+ kbase_destroy_os_context(&kctx->osctx);
+ free_pgd:
+ kbase_mmu_free_pgd(kctx);
+ free_mmu:
+ kbase_mmu_term(kctx);
+ free_event:
+ kbase_event_cleanup(kctx);
+ free_jd:
+	/* Safe to call this one even when it wasn't initialized (assuming kctx was sufficiently zeroed) */
+ kbasep_js_kctx_term(kctx);
+ kbase_jd_exit(kctx);
+ free_memctx:
+ kbase_mem_usage_term(&kctx->usage);
+ free_allocator:
+ kbase_mem_allocator_term(&kctx->osalloc);
+ free_kctx:
+ vfree(kctx);
+ out:
+ return NULL;
+
+}
+KBASE_EXPORT_SYMBOL(kbase_create_context)
+
+/**
+ * @brief Destroy a kernel base context.
+ *
+ * Destroy a kernel base context. Calls kbase_destroy_os_context() to
+ * free OS-specific structures. Will release all outstanding regions.
+ */
+void kbase_destroy_context(kbase_context *kctx)
+{
+ kbase_device *kbdev;
+
+ KBASE_DEBUG_ASSERT(NULL != kctx);
+
+ kbdev = kctx->kbdev;
+ KBASE_DEBUG_ASSERT(NULL != kbdev);
+
+ KBASE_TRACE_ADD(kbdev, CORE_CTX_DESTROY, kctx, NULL, 0u, 0u);
+
+ /* Ensure the core is powered up for the destroy process */
+ /* A suspend won't happen here, because we're in a syscall from a userspace
+ * thread. */
+ kbase_pm_context_active(kbdev);
+
+ if (kbdev->hwcnt.kctx == kctx) {
+ /* disable the use of the hw counters if the app didn't use the API correctly or crashed */
+ KBASE_TRACE_ADD(kbdev, CORE_CTX_HWINSTR_TERM, kctx, NULL, 0u, 0u);
+ KBASE_DEBUG_PRINT_WARN(KBASE_CTX, "The privileged process asking for instrumentation forgot to disable it " "before exiting. Will end instrumentation for them");
+ kbase_instr_hwcnt_disable(kctx);
+ }
+
+ kbase_jd_zap_context(kctx);
+ kbase_event_cleanup(kctx);
+
+ kbase_gpu_vm_lock(kctx);
+
+ /* MMU is disabled as part of scheduling out the context */
+ kbase_mmu_free_pgd(kctx);
+ kbase_region_tracker_term(kctx);
+ kbase_destroy_os_context(&kctx->osctx);
+ kbase_gpu_vm_unlock(kctx);
+
+	/* Safe to call this one even when it wasn't initialized (assuming kctx was sufficiently zeroed) */
+ kbasep_js_kctx_term(kctx);
+
+ kbase_jd_exit(kctx);
+
+ kbase_pm_context_idle(kbdev);
+
+ kbase_mmu_term(kctx);
+
+ kbase_mem_usage_term(&kctx->usage);
+
+ if (kctx->keep_gpu_powered) {
+ atomic_dec(&kbdev->keep_gpu_powered_count);
+ kbase_pm_context_idle(kbdev);
+ }
+
+ kbase_mem_allocator_term(&kctx->osalloc);
+ WARN_ON(atomic_read(&kctx->nonmapped_pages) != 0);
+ vfree(kctx);
+}
+KBASE_EXPORT_SYMBOL(kbase_destroy_context)
+
+/**
+ * Set creation flags on a context
+ */
+mali_error kbase_context_set_create_flags(kbase_context *kctx, u32 flags)
+{
+ mali_error err = MALI_ERROR_NONE;
+ kbasep_js_kctx_info *js_kctx_info;
+ KBASE_DEBUG_ASSERT(NULL != kctx);
+
+ js_kctx_info = &kctx->jctx.sched_info;
+
+ /* Validate flags */
+ if (flags != (flags & BASE_CONTEXT_CREATE_KERNEL_FLAGS)) {
+ err = MALI_ERROR_FUNCTION_FAILED;
+ goto out;
+ }
+
+ mutex_lock(&js_kctx_info->ctx.jsctx_mutex);
+
+ /* Translate the flags */
+ if ((flags & BASE_CONTEXT_SYSTEM_MONITOR_SUBMIT_DISABLED) == 0)
+ js_kctx_info->ctx.flags &= ~((u32) KBASE_CTX_FLAG_SUBMIT_DISABLED);
+
+ if ((flags & BASE_CONTEXT_HINT_ONLY_COMPUTE) != 0)
+ js_kctx_info->ctx.flags |= (u32) KBASE_CTX_FLAG_HINT_ONLY_COMPUTE;
+
+ /* Latch the initial attributes into the Job Scheduler */
+ kbasep_js_ctx_attr_set_initial_attrs(kctx->kbdev, kctx);
+
+ mutex_unlock(&js_kctx_info->ctx.jsctx_mutex);
+ out:
+ return err;
+}
+KBASE_EXPORT_SYMBOL(kbase_context_set_create_flags)
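+
+/* Context lifecycle (illustrative sketch using the APIs in this file):
+ *
+ *   kbase_context *kctx = kbase_create_context(kbdev);
+ *   if (kctx) {
+ *       if (MALI_ERROR_NONE != kbase_context_set_create_flags(kctx, flags))
+ *           ...;                        // reject invalid flags from userspace
+ *       ...                             // context in use
+ *       kbase_destroy_context(kctx);
+ *   }
+ */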
diff --git a/drivers/gpu/arm/t6xx/kbase/src/common/mali_kbase_cpuprops.c b/drivers/gpu/arm/t6xx/kbase/src/common/mali_kbase_cpuprops.c
new file mode 100755
index 00000000000..81c1afa367f
--- /dev/null
+++ b/drivers/gpu/arm/t6xx/kbase/src/common/mali_kbase_cpuprops.c
@@ -0,0 +1,94 @@
+/*
+ *
+ * (C) COPYRIGHT 2011-2012 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained
+ * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
+ * Boston, MA 02110-1301, USA.
+ *
+ */
+
+
+
+/**
+ * @file mali_kbase_cpuprops.c
+ * Base kernel property query APIs
+ */
+
+#include "mali_kbase.h"
+#include "mali_kbase_cpuprops.h"
+#include "mali_kbase_uku.h"
+#include <kbase/mali_kbase_config.h>
+#include <linux/cache.h>
+#include <asm/cputype.h>
+
+#define L1_DCACHE_LINE_SIZE_LOG2 L1_CACHE_SHIFT
+
+/**
+ * @brief Macros used to extract cpu id info
+ * @see documentation for the Main ID register
+ */
+#define KBASE_CPUPROPS_ID_GET_REV(cpuid) ( (cpuid) & 0x0F ) /* [3:0] Revision */
+#define KBASE_CPUPROPS_ID_GET_PART_NR(cpuid)( ((cpuid) >> 4) & 0xFFF ) /* [15:4] Part number */
+#define KBASE_CPUPROPS_ID_GET_ARCH(cpuid) ( ((cpuid) >> 16) & 0x0F ) /* [19:16] Architecture */
+#define KBASE_CPUPROPS_ID_GET_VARIANT(cpuid)( ((cpuid) >> 20) & 0x0F ) /* [23:20] Variant */
+#define KBASE_CPUPROPS_ID_GET_CODE(cpuid)   ( ((cpuid) >> 24) & 0xFF ) /* [31:24] ASCII code of implementer trademark */
+
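+/* Worked example (illustrative): a Cortex-A15 r3p4 reports a Main ID register
+ * value of 0x413FC0F4, which the macros above decode as implementer 0x41
+ * ('A', ARM), variant 0x3, architecture 0xF, part 0xC0F and revision 0x4.
+ */
+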
+/* Value below sourced from OSK */
+#define L1_DCACHE_SIZE ((u32)0x00008000)
+
+
+/**
+ * @brief Retrieves detailed CPU info from the given cpu_val (ID reg)
+ *
+ * @param kbase_props CPU props to be filled-in with cpu id info
+ * @param cpu_val CPU ID info
+ *
+ */
+static void kbasep_cpuprops_uk_get_cpu_id_info(kbase_uk_cpuprops * const kbase_props, u32 cpu_val)
+{
+ kbase_props->props.cpu_id.id = cpu_val;
+
+ kbase_props->props.cpu_id.rev = KBASE_CPUPROPS_ID_GET_REV(cpu_val);
+ kbase_props->props.cpu_id.part = KBASE_CPUPROPS_ID_GET_PART_NR(cpu_val);
+ kbase_props->props.cpu_id.arch = KBASE_CPUPROPS_ID_GET_ARCH(cpu_val);
+ kbase_props->props.cpu_id.variant = KBASE_CPUPROPS_ID_GET_VARIANT(cpu_val);
+ kbase_props->props.cpu_id.implementer = KBASE_CPUPROPS_ID_GET_CODE(cpu_val);
+
+}
+
+int kbase_cpuprops_get_default_clock_speed(u32 * const clock_speed)
+{
+ KBASE_DEBUG_ASSERT(NULL != clock_speed);
+
+ *clock_speed = 100;
+ return 0;
+}
+
+mali_error kbase_cpuprops_uk_get_props(kbase_context *kctx, kbase_uk_cpuprops * const kbase_props)
+{
+ int result;
+ kbase_cpuprops_clock_speed_function kbase_cpuprops_uk_get_clock_speed;
+
+ kbase_props->props.cpu_l1_dcache_line_size_log2 = L1_DCACHE_LINE_SIZE_LOG2;
+ kbase_props->props.cpu_l1_dcache_size = L1_DCACHE_SIZE;
+ kbase_props->props.cpu_flags = BASE_CPU_PROPERTY_FLAG_LITTLE_ENDIAN;
+
+ kbase_props->props.nr_cores = NR_CPUS;
+ kbase_props->props.cpu_page_size_log2 = PAGE_SHIFT;
+ kbase_props->props.available_memory_size = totalram_pages << PAGE_SHIFT;
+
+ kbasep_cpuprops_uk_get_cpu_id_info(kbase_props, read_cpuid_id());
+
+ kbase_cpuprops_uk_get_clock_speed = (kbase_cpuprops_clock_speed_function) kbasep_get_config_value(kctx->kbdev, kctx->kbdev->config_attributes, KBASE_CONFIG_ATTR_CPU_SPEED_FUNC);
+ result = kbase_cpuprops_uk_get_clock_speed(&kbase_props->props.max_cpu_clock_speed_mhz);
+ if (result != 0)
+ return MALI_ERROR_FUNCTION_FAILED;
+
+ return MALI_ERROR_NONE;
+}
diff --git a/drivers/gpu/arm/t6xx/kbase/src/common/mali_kbase_cpuprops.h b/drivers/gpu/arm/t6xx/kbase/src/common/mali_kbase_cpuprops.h
new file mode 100755
index 00000000000..b0a36b3ea54
--- /dev/null
+++ b/drivers/gpu/arm/t6xx/kbase/src/common/mali_kbase_cpuprops.h
@@ -0,0 +1,54 @@
+/*
+ *
+ * (C) COPYRIGHT 2011-2012 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained
+ * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
+ * Boston, MA 02110-1301, USA.
+ *
+ */
+
+
+
+/**
+ * @file mali_kbase_cpuprops.h
+ * Base kernel property query APIs
+ */
+
+#ifndef _KBASE_CPUPROPS_H_
+#define _KBASE_CPUPROPS_H_
+
+#include <malisw/mali_malisw.h>
+
+/* Forward declarations */
+struct kbase_uk_cpuprops;
+
+/**
+ * @brief Default implementation of @ref KBASE_CONFIG_ATTR_CPU_SPEED_FUNC.
+ *
+ * This function sets clock_speed to 100, so it will be an underestimate for
+ * any real system.
+ *
+ * See @ref kbase_cpuprops_clock_speed_function for details on the parameters
+ * and return value.
+ */
+int kbase_cpuprops_get_default_clock_speed(u32 * const clock_speed);
+
+/**
+ * @brief Provides CPU properties data.
+ *
+ * Fill the kbase_uk_cpuprops with values from CPU configuration.
+ *
+ * @param kctx The kbase context
+ * @param kbase_props A copy of the kbase_uk_cpuprops structure from userspace
+ *
+ * @return MALI_ERROR_NONE on success. Any other value indicates failure.
+ */
+mali_error kbase_cpuprops_uk_get_props(kbase_context *kctx, struct kbase_uk_cpuprops * const kbase_props);
+
+#endif /*_KBASE_CPUPROPS_H_*/
diff --git a/drivers/gpu/arm/t6xx/kbase/src/common/mali_kbase_debug.c b/drivers/gpu/arm/t6xx/kbase/src/common/mali_kbase_debug.c
new file mode 100755
index 00000000000..aaa904c3672
--- /dev/null
+++ b/drivers/gpu/arm/t6xx/kbase/src/common/mali_kbase_debug.c
@@ -0,0 +1,63 @@
+/*
+ *
+ * (C) COPYRIGHT 2012 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained
+ * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
+ * Boston, MA 02110-1301, USA.
+ *
+ */
+
+
+
+#include <kbase/src/common/mali_kbase.h>
+
+kbasep_debug_assert_cb kbasep_debug_assert_registered_cb = {
+ NULL,
+ NULL
+};
+
+void kbase_debug_assert_register_hook(kbase_debug_assert_hook *func, void *param)
+{
+ kbasep_debug_assert_registered_cb.func = func;
+ kbasep_debug_assert_registered_cb.param = param;
+}
+
+void kbasep_debug_assert_call_hook(void)
+{
+ if (kbasep_debug_assert_registered_cb.func != NULL)
+ kbasep_debug_assert_registered_cb.func(kbasep_debug_assert_registered_cb.param);
+}
+KBASE_EXPORT_SYMBOL(kbasep_debug_assert_call_hook);
+
+/**
+ * @brief Contains the module names (in the same order as the kbase_module enumeration)
+ * @sa kbasep_module_to_str
+ */
+static const char *CONST kbasep_str_modules[] = {
+ "UNKNOWN", /**< Unknown module */
+ "BASE_MMU", /**< Base MMU */
+ "BASE_JD", /**< Base Job Dispatch */
+ "BASE_JM", /**< Base Job Manager */
+ "BASE_CORE", /**< Base Core */
+ "BASE_MEM", /**< Base Memory */
+ "BASE_EVENT", /**< Base Event */
+ "BASE_CTX", /**< Base Context */
+ "BASE_PM" /**< Base Power Management */
+};
+
+#define MODULE_STRING_ARRAY_SIZE (sizeof(kbasep_str_modules)/sizeof(kbasep_str_modules[0]))
+
+const char *kbasep_debug_module_to_str(const kbase_module module)
+{
+ if (MODULE_STRING_ARRAY_SIZE <= module)
+ return "";
+
+ return kbasep_str_modules[module];
+}
+KBASE_EXPORT_SYMBOL(kbasep_debug_module_to_str);
diff --git a/drivers/gpu/arm/t6xx/kbase/src/common/mali_kbase_defs.h b/drivers/gpu/arm/t6xx/kbase/src/common/mali_kbase_defs.h
new file mode 100755
index 00000000000..67ad3608bb4
--- /dev/null
+++ b/drivers/gpu/arm/t6xx/kbase/src/common/mali_kbase_defs.h
@@ -0,0 +1,787 @@
+/*
+ *
+ * (C) COPYRIGHT 2011-2013 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained
+ * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
+ * Boston, MA 02110-1301, USA.
+ *
+ */
+
+
+
+/**
+ * @file mali_kbase_defs.h
+ *
+ * Definitions (types, defines, etc.) common to Kbase. They are placed here to
+ * allow the hierarchy of header files to work.
+ */
+
+#ifndef _KBASE_DEFS_H_
+#define _KBASE_DEFS_H_
+
+#include <kbase/mali_kbase_config.h>
+#include <kbase/mali_base_hwconfig.h>
+#include <kbase/src/common/mali_kbase_mem_lowlevel.h>
+#include <kbase/src/common/mali_kbase_mem_alloc.h>
+
+
+#include <linux/atomic.h>
+#include <linux/mempool.h>
+#include <linux/slab.h>
+
+#ifdef CONFIG_KDS
+#include <linux/kds.h>
+#endif /* CONFIG_KDS */
+
+#ifdef CONFIG_SYNC
+#include <linux/sync.h>
+#endif /* CONFIG_SYNC */
+
+/** Enable SW tracing when set */
+#ifdef CONFIG_MALI_T6XX_ENABLE_TRACE
+#define KBASE_TRACE_ENABLE 1
+#endif
+
+#ifndef KBASE_TRACE_ENABLE
+#ifdef CONFIG_MALI_DEBUG
+#define KBASE_TRACE_ENABLE 1
+#else
+#define KBASE_TRACE_ENABLE 0
+#endif /* CONFIG_MALI_DEBUG */
+#endif /* KBASE_TRACE_ENABLE */
+
+/** Dump Job slot trace on error (only active if KBASE_TRACE_ENABLE != 0) */
+#define KBASE_TRACE_DUMP_ON_JOB_SLOT_ERROR 1
+
+/**
+ * Number of milliseconds before resetting the GPU when a job cannot be "zapped" from the hardware.
+ * Note that the time is actually ZAP_TIMEOUT+SOFT_STOP_RESET_TIMEOUT between the context zap starting and the GPU
+ * actually being reset to give other contexts time for their jobs to be soft-stopped and removed from the hardware
+ * before resetting.
+ */
+#define ZAP_TIMEOUT 1000
+
+/** Number of milliseconds before we time out on a GPU soft/hard reset */
+#define RESET_TIMEOUT 500
+
+/**
+ * Prevent soft-stops from occurring in scheduling situations
+ *
+ * This is not due to HW issues, but is used when scheduling is desired to be more predictable.
+ *
+ * Independently of this setting, soft-stop may still be disabled due to HW issues.
+ *
+ * @note Soft stop will still be used for non-scheduling purposes e.g. when terminating a context.
+ *
+ * @note if not in use, define this value to 0 instead of \#undef'ing it
+ */
+#define KBASE_DISABLE_SCHEDULING_SOFT_STOPS 0
+
+/**
+ * Prevent hard-stops from occurring in scheduling situations
+ *
+ * This is not due to HW issues, but is used when scheduling is desired to be more predictable.
+ *
+ * @note Hard stop will still be used for non-scheduling purposes e.g. when terminating a context.
+ *
+ * @note if not in use, define this value to 0 instead of \#undef'ing it
+ */
+#define KBASE_DISABLE_SCHEDULING_HARD_STOPS 0
+
+/* Forward declarations + definitions */
+typedef struct kbase_context kbase_context;
+typedef struct kbase_jd_atom kbasep_jd_atom;
+typedef struct kbase_device kbase_device;
+
+/**
+ * The maximum number of Job Slots to support in the Hardware.
+ *
+ * You can optimize this down if your target devices will only ever support a
+ * small number of job slots.
+ */
+#define BASE_JM_MAX_NR_SLOTS 16
+
+/**
+ * The maximum number of Address Spaces to support in the Hardware.
+ *
+ * You can optimize this down if your target devices will only ever support a
+ * small number of Address Spaces
+ */
+#define BASE_MAX_NR_AS 16
+
+/* mmu */
+#define ENTRY_IS_ATE 1ULL
+#define ENTRY_IS_INVAL 2ULL
+#define ENTRY_IS_PTE 3ULL
+
+#define MIDGARD_MMU_VA_BITS 48
+
+#define ENTRY_ATTR_BITS (7ULL << 2) /* bits 4:2 */
+#define ENTRY_RD_BIT (1ULL << 6)
+#define ENTRY_WR_BIT (1ULL << 7)
+#define ENTRY_SHARE_BITS (3ULL << 8) /* bits 9:8 */
+#define ENTRY_ACCESS_BIT (1ULL << 10)
+#define ENTRY_NX_BIT (1ULL << 54)
+
+#define ENTRY_FLAGS_MASK (ENTRY_ATTR_BITS | ENTRY_RD_BIT | ENTRY_WR_BIT | ENTRY_SHARE_BITS | ENTRY_ACCESS_BIT | ENTRY_NX_BIT)
+
+#if MIDGARD_MMU_VA_BITS > 39
+#define MIDGARD_MMU_TOPLEVEL 0
+#else
+#define MIDGARD_MMU_TOPLEVEL 1
+#endif
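+
+/* Illustrative arithmetic (assuming an LPAE-style layout of 4kB pages and
+ * 512-entry, i.e. 9-bit, page tables): each level resolves 9 VA bits on top
+ * of the 12-bit page offset, so a 48-bit VA needs four levels
+ * (9+9+9+9+12, walk starts at level 0) while a 39-bit VA needs only three
+ * (9+9+9+12, walk starts at level 1).
+ */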
+
+#define GROWABLE_FLAGS_REQUIRED (KBASE_REG_PF_GROW | KBASE_REG_ZONE_TMEM)
+#define GROWABLE_FLAGS_MASK (GROWABLE_FLAGS_REQUIRED | KBASE_REG_FREE)
+
+/** setting in kbase_context::as_nr that indicates it's invalid */
+#define KBASEP_AS_NR_INVALID (-1)
+
+#define KBASE_LOCK_REGION_MAX_SIZE (63)
+#define KBASE_LOCK_REGION_MIN_SIZE (11)
+
+#define KBASE_TRACE_SIZE_LOG2 8 /* 256 entries */
+#define KBASE_TRACE_SIZE (1 << KBASE_TRACE_SIZE_LOG2)
+#define KBASE_TRACE_MASK ((1 << KBASE_TRACE_SIZE_LOG2)-1)
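+
+/* Illustrative: because KBASE_TRACE_SIZE is a power of two, the trace ring
+ * buffer index can wrap with a mask instead of a modulo, e.g.
+ *
+ *   next_idx = (cur_idx + 1) & KBASE_TRACE_MASK;
+ */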
+
+#include "mali_kbase_js_defs.h"
+
+/**
+ * @brief States to model the state machine processed by kbasep_js_job_check_ref_cores(), which
+ * handles retaining cores for power management and affinity management.
+ *
+ * The state @ref KBASE_ATOM_COREREF_STATE_RECHECK_AFFINITY prevents an attack
+ * where lots of atoms could be submitted before powerup, and each has an
+ * affinity chosen that causes other atoms to have an affinity
+ * violation. Whilst the affinity was not causing violations at the time it
+ * was chosen, it could cause violations thereafter. For example, 1000 jobs
+ * could have had their affinity chosen during the powerup time, so any of
+ * those 1000 jobs could cause an affinity violation later on.
+ *
+ * The attack would otherwise occur because other atoms/contexts have to wait for:
+ * -# the currently running atoms (which are causing the violation) to
+ * finish
+ * -# and, the atoms that had their affinity chosen during powerup to
+ *    finish. These are run preferentially because they don't cause a
+ * violation, but instead continue to cause the violation in others.
+ * -# or, the attacker is scheduled out (which might not happen for just 2
+ * contexts)
+ *
+ * By re-choosing the affinity (which is designed to avoid violations at the
+ * time it's chosen), we break condition (2) of the wait, which minimizes the
+ * problem to just waiting for current jobs to finish (which can be bounded if
+ * the Job Scheduling Policy has a timer).
+ */
+typedef enum {
+ /** Starting state: No affinity chosen, and cores must be requested. kbase_jd_atom::affinity==0 */
+ KBASE_ATOM_COREREF_STATE_NO_CORES_REQUESTED,
+ /** Cores requested, but waiting for them to be powered. Requested cores given by kbase_jd_atom::affinity */
+ KBASE_ATOM_COREREF_STATE_WAITING_FOR_REQUESTED_CORES,
+ /** Cores given by kbase_jd_atom::affinity are powered, but affinity might be out-of-date, so must recheck */
+ KBASE_ATOM_COREREF_STATE_RECHECK_AFFINITY,
+ /** Cores given by kbase_jd_atom::affinity are powered, and affinity is up-to-date, but must check for violations */
+ KBASE_ATOM_COREREF_STATE_CHECK_AFFINITY_VIOLATIONS,
+ /** Cores are powered, kbase_jd_atom::affinity up-to-date, no affinity violations: atom can be submitted to HW */
+ KBASE_ATOM_COREREF_STATE_READY
+} kbase_atom_coreref_state;
+
+typedef enum {
+ /** Atom is not used */
+ KBASE_JD_ATOM_STATE_UNUSED,
+ /** Atom is queued in JD */
+ KBASE_JD_ATOM_STATE_QUEUED,
+ /** Atom has been given to JS (is runnable/running) */
+ KBASE_JD_ATOM_STATE_IN_JS,
+ /** Atom has been completed, but not yet handed back to userspace */
+ KBASE_JD_ATOM_STATE_COMPLETED
+} kbase_jd_atom_state;
+
+/** Atom has been previously soft-stopped */
+#define KBASE_KATOM_FLAG_BEEN_SOFT_STOPPPED (1<<1)
+/** Atom has been previously retried to execute */
+#define KBASE_KATOM_FLAGS_RERUN (1<<2)
+
+typedef struct kbase_jd_atom kbase_jd_atom;
+
+struct kbase_jd_atom {
+ struct work_struct work;
+ ktime_t start_timestamp;
+
+ base_jd_udata udata;
+ kbase_context *kctx;
+
+ struct list_head dep_head[2];
+ struct list_head dep_item[2];
+ struct kbase_jd_atom *dep_atom[2];
+
+ u16 nr_extres;
+ struct base_external_resource *extres;
+
+ u32 device_nr;
+ u64 affinity;
+ u64 jc;
+ kbase_atom_coreref_state coreref_state;
+#ifdef CONFIG_KDS
+ struct list_head node;
+ struct kds_resource_set *kds_rset;
+ mali_bool kds_dep_satisfied;
+#endif /* CONFIG_KDS */
+#ifdef CONFIG_SYNC
+ struct sync_fence *fence;
+ struct sync_fence_waiter sync_waiter;
+#endif /* CONFIG_SYNC */
+
+ /* Note: refer to kbasep_js_atom_retained_state, which will take a copy of some of the following members */
+ base_jd_event_code event_code;
+ base_jd_core_req core_req; /**< core requirements */
+ /** Job Slot to retry submitting to if submission from IRQ handler failed
+ *
+	 * NOTE: see if this can be unified into another member e.g. the event */
+ int retry_submit_on_slot;
+
+ kbasep_js_policy_job_info sched_info;
+ /* atom priority scaled to nice range with +20 offset 0..39 */
+ int nice_prio;
+
+ int poking; /* BASE_HW_ISSUE_8316 */
+
+ wait_queue_head_t completed;
+ kbase_jd_atom_state status;
+#ifdef CONFIG_GPU_TRACEPOINTS
+ int work_id;
+#endif
+ /* Assigned after atom is completed. Used to check whether PRLAM-10676 workaround should be applied */
+ int slot_nr;
+
+ u32 atom_flags;
+};
+
+/*
+ * Theory of operations:
+ *
+ * Atom objects are statically allocated within the context structure.
+ *
+ * Each atom is the head of two lists, one for the "left" set of dependencies, one for the "right" set.
+ */
+
+#define KBASE_JD_DEP_QUEUE_SIZE 256
+
+typedef struct kbase_jd_context {
+ struct mutex lock;
+ kbasep_js_kctx_info sched_info;
+ kbase_jd_atom atoms[BASE_JD_ATOM_COUNT];
+
+ /** Tracks all job-dispatch jobs. This includes those not tracked by
+ * the scheduler: 'not ready to run' and 'dependency-only' jobs. */
+ u32 job_nr;
+
+ /** Waitq that reflects whether there are no jobs (including SW-only
+ * dependency jobs). This is set when no jobs are present on the ctx,
+ * and clear when there are jobs.
+ *
+ * @note: Job Dispatcher knows about more jobs than the Job Scheduler:
+ * the Job Scheduler is unaware of jobs that are blocked on dependencies,
+ * and SW-only dependency jobs.
+ *
+ * This waitq can be waited upon to find out when the context jobs are all
+ * done/cancelled (including those that might've been blocked on
+ * dependencies) - and so, whether it can be terminated. However, it should
+ * only be terminated once it is neither present in the policy-queue (see
+ * kbasep_js_policy_try_evict_ctx() ) nor the run-pool (see
+ * kbasep_js_kctx_info::ctx::is_scheduled).
+ *
+ * Since the waitq is only set under kbase_jd_context::lock,
+ * the waiter should also briefly obtain and drop kbase_jd_context::lock to
+	 * guarantee that the setter has completed its work on the kbase_context
+ *
+ * This must be updated atomically with:
+ * - kbase_jd_context::job_nr */
+ wait_queue_head_t zero_jobs_wait;
+
+ /** Job Done workqueue. */
+ struct workqueue_struct *job_done_wq;
+
+ spinlock_t tb_lock;
+ u32 *tb;
+ size_t tb_wrap_offset;
+
+#ifdef CONFIG_KDS
+ struct kds_callback kds_cb;
+#endif /* CONFIG_KDS */
+#ifdef CONFIG_GPU_TRACEPOINTS
+ atomic_t work_id;
+#endif
+} kbase_jd_context;
+
+typedef struct kbase_jm_slot {
+ /* The number of slots must be a power of two */
+#define BASE_JM_SUBMIT_SLOTS 16
+#define BASE_JM_SUBMIT_SLOTS_MASK (BASE_JM_SUBMIT_SLOTS - 1)
+
+ struct kbase_jd_atom *submitted[BASE_JM_SUBMIT_SLOTS];
+
+ kbase_context *last_context;
+
+ u8 submitted_head;
+ u8 submitted_nr;
+
+} kbase_jm_slot;
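+
+/* Illustrative (a sketch, not part of the original code): because
+ * BASE_JM_SUBMIT_SLOTS is a power of two, the i-th in-flight atom on a slot
+ * can be fetched by masking the ring index, e.g.
+ *
+ *   katom = slot->submitted[(slot->submitted_head + i) & BASE_JM_SUBMIT_SLOTS_MASK];
+ */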
+
+typedef enum kbase_midgard_type {
+ KBASE_MALI_T601,
+ KBASE_MALI_T604,
+ KBASE_MALI_T608,
+ KBASE_MALI_COUNT
+} kbase_midgard_type;
+
+typedef struct kbase_device_info {
+ kbase_midgard_type dev_type;
+ u32 features;
+} kbase_device_info;
+
+/** Poking state for BASE_HW_ISSUE_8316 */
+enum {
+ KBASE_AS_POKE_STATE_IN_FLIGHT = 1<<0,
+ KBASE_AS_POKE_STATE_KILLING_POKE = 1<<1
+};
+
+/** Poking state for BASE_HW_ISSUE_8316 */
+typedef u32 kbase_as_poke_state;
+
+/**
+ * Important: Our code makes assumptions that a kbase_as structure is always at
+ * kbase_device->as[number]. This is used to recover the containing
+ * kbase_device from a kbase_as structure.
+ *
+ * Therefore, kbase_as structures must not be allocated anywhere else.
+ */
+typedef struct kbase_as {
+ int number;
+
+ struct workqueue_struct *pf_wq;
+ struct work_struct work_pagefault;
+ struct work_struct work_busfault;
+ mali_addr64 fault_addr;
+ struct mutex transaction_mutex;
+
+ /* BASE_HW_ISSUE_8316 */
+ struct workqueue_struct *poke_wq;
+ struct work_struct poke_work;
+ /** Protected by kbasep_js_device_data::runpool_irq::lock */
+ int poke_refcount;
+ /** Protected by kbasep_js_device_data::runpool_irq::lock */
+ kbase_as_poke_state poke_state;
+ struct hrtimer poke_timer;
+} kbase_as;
+
+/* tracking of memory usage */
+typedef struct kbasep_mem_usage {
+ u32 max_pages;
+ atomic_t cur_pages;
+} kbasep_mem_usage;
+
+
+/**
+ * Instrumentation State Machine States
+ */
+typedef enum {
+ /** State where instrumentation is not active */
+ KBASE_INSTR_STATE_DISABLED = 0,
+ /** State machine is active and ready for a command. */
+ KBASE_INSTR_STATE_IDLE,
+ /** Hardware is currently dumping a frame. */
+ KBASE_INSTR_STATE_DUMPING,
+ /** We've requested a clean to occur on a workqueue */
+ KBASE_INSTR_STATE_REQUEST_CLEAN,
+ /** Hardware is currently cleaning and invalidating caches. */
+ KBASE_INSTR_STATE_CLEANING,
+ /** Cache clean completed, and either a) a dump is complete, or
+ * b) instrumentation can now be setup. */
+ KBASE_INSTR_STATE_CLEANED,
+	/** kbasep_reset_timeout_worker() has started (but not completed) a
+ * reset. This generally indicates the current action should be aborted, and
+ * kbasep_reset_timeout_worker() will handle the cleanup */
+ KBASE_INSTR_STATE_RESETTING,
+	/** An error has occurred during DUMPING (page fault). */
+ KBASE_INSTR_STATE_FAULT
+} kbase_instr_state;
+
+typedef struct kbasep_mem_device {
+#ifdef CONFIG_UMP
+ u32 ump_device_id; /* Which UMP device this GPU should be mapped to.
+ Read-only, copied from platform configuration on startup. */
+#endif /* CONFIG_UMP */
+
+ u32 per_process_memory_limit; /* How much memory (in bytes) a single process can access.
+ Read-only, copied from platform configuration on startup. */
+ kbasep_mem_usage usage; /* Tracks usage of OS shared memory. Initialized with platform
+ configuration data, updated when OS memory is allocated/freed. */
+
+} kbasep_mem_device;
+
+
+
+#define KBASE_TRACE_CODE(X) KBASE_TRACE_CODE_ ## X
+
+typedef enum {
+ /* IMPORTANT: USE OF SPECIAL #INCLUDE OF NON-STANDARD HEADER FILE
+ * THIS MUST BE USED AT THE START OF THE ENUM */
+#define KBASE_TRACE_CODE_MAKE_CODE(X) KBASE_TRACE_CODE(X)
+#include "mali_kbase_trace_defs.h"
+#undef KBASE_TRACE_CODE_MAKE_CODE
+ /* Comma on its own, to extend the list */
+ ,
+ /* Must be the last in the enum */
+ KBASE_TRACE_CODE_COUNT
+} kbase_trace_code;
+
+#define KBASE_TRACE_FLAG_REFCOUNT (((u8)1) << 0)
+#define KBASE_TRACE_FLAG_JOBSLOT (((u8)1) << 1)
+
+typedef struct kbase_trace {
+ struct timespec timestamp;
+ u32 thread_id;
+ u32 cpu;
+ void *ctx;
+ mali_bool katom;
+ int atom_number;
+ u64 atom_udata[2];
+ u64 gpu_addr;
+ u32 info_val;
+ u8 code;
+ u8 jobslot;
+ u8 refcount;
+ u8 flags;
+} kbase_trace;
+
+/** Event IDs for the power management framework.
+ *
+ * Any of these events might be missed, so they should not be relied upon to
+ * find the precise state of the GPU at a particular time in the
+ * trace. Overall, we should get a high percentage of these events for
+ * statistical purposes, and so a few missing should not be a problem */
+typedef enum kbase_timeline_pm_event {
+ /* helper for tests */
+ KBASEP_TIMELINE_PM_EVENT_FIRST,
+
+ /** Event reserved for backwards compatibility with 'init' events */
+ KBASE_TIMELINE_PM_EVENT_RESERVED_0 = KBASEP_TIMELINE_PM_EVENT_FIRST,
+
+ /** The power state of the device has changed.
+ *
+ * Specifically, the device has reached a desired or available state.
+ */
+ KBASE_TIMELINE_PM_EVENT_GPU_STATE_CHANGED,
+
+ /** The GPU is becoming active.
+ *
+ * This event is sent when the first context is about to use the GPU.
+ */
+ KBASE_TIMELINE_PM_EVENT_GPU_ACTIVE,
+
+ /** The GPU is becoming idle.
+ *
+ * This event is sent when the last context has finished using the GPU.
+ */
+ KBASE_TIMELINE_PM_EVENT_GPU_IDLE,
+
+ /** Event reserved for backwards compatibility with 'policy_change'
+ * events */
+ KBASE_TIMELINE_PM_EVENT_RESERVED_4,
+
+ /** Event reserved for backwards compatibility with 'system_suspend'
+ * events */
+ KBASE_TIMELINE_PM_EVENT_RESERVED_5,
+
+ /** Event reserved for backwards compatibility with 'system_resume'
+ * events */
+ KBASE_TIMELINE_PM_EVENT_RESERVED_6,
+
+ /** The job scheduler is requesting to power up/down cores.
+ *
+ * This event is sent when:
+ * - powered down cores are needed to complete a job
+ * - powered up cores are not needed anymore
+ */
+ KBASE_TIMELINE_PM_EVENT_CHANGE_GPU_STATE,
+
+ KBASEP_TIMELINE_PM_EVENT_LAST = KBASE_TIMELINE_PM_EVENT_CHANGE_GPU_STATE,
+} kbase_timeline_pm_event;
+
+#ifdef CONFIG_MALI_TRACE_TIMELINE
+typedef struct kbase_trace_kctx_timeline {
+ atomic_t jd_atoms_in_flight;
+ u32 owner_tgid;
+} kbase_trace_kctx_timeline;
+
+typedef struct kbase_trace_kbdev_timeline {
+ /** DebugFS entry */
+ struct dentry *dentry;
+
+ /* Note: strictly speaking, not needed, because it's in sync with
+ * kbase_device::jm_slots[]::submitted_nr
+ *
+ * But it's kept as an example of how to add global timeline tracking
+ * information
+ *
+ * The caller must hold kbasep_js_device_data::runpool_irq::lock when
+ * accessing this */
+ u8 slot_atoms_submitted[BASE_JM_SUBMIT_SLOTS];
+
+ /* Last UID for each PM event */
+ atomic_t pm_event_uid[KBASEP_TIMELINE_PM_EVENT_LAST+1];
+ /* Counter for generating PM event UIDs */
+ atomic_t pm_event_uid_counter;
+ /*
+ * L2 transition state - MALI_TRUE indicates that the transition is ongoing.
+ * Expected to be protected by pm.power_change_lock. */
+ mali_bool l2_transitioning;
+} kbase_trace_kbdev_timeline;
+#endif /* CONFIG_MALI_TRACE_TIMELINE */
+
+
+typedef struct kbasep_kctx_list_element {
+ struct list_head link;
+ kbase_context *kctx;
+} kbasep_kctx_list_element;
+
+struct kbase_device {
+ /** jm_slots is protected by kbasep_js_device_data::runpool_irq::lock */
+ kbase_jm_slot jm_slots[BASE_JM_MAX_NR_SLOTS];
+ s8 slot_submit_count_irq[BASE_JM_MAX_NR_SLOTS];
+ kbase_os_device osdev;
+ kbase_pm_device_data pm;
+ kbasep_js_device_data js_data;
+ kbasep_mem_device memdev;
+
+ kbase_as as[BASE_MAX_NR_AS];
+
+ spinlock_t mmu_mask_change;
+
+ kbase_gpu_props gpu_props;
+
+ /** List of SW workarounds for HW issues */
+ unsigned long hw_issues_mask[(BASE_HW_ISSUE_END + BITS_PER_LONG - 1) / BITS_PER_LONG];
+
+ /* Cached present bitmaps - these are the same as the corresponding hardware registers */
+ u64 shader_present_bitmap;
+ u64 tiler_present_bitmap;
+ u64 l2_present_bitmap;
+ u64 l3_present_bitmap;
+
+ /* Bitmaps of cores that are currently in use (running jobs).
+ * These should be kept up to date by the job scheduler.
+ *
+ * pm.power_change_lock should be held when accessing these members.
+ *
+ * kbase_pm_check_transitions_nolock() should be called when bits are
+ * cleared to update the power management system and allow transitions to
+ * occur. */
+ u64 shader_inuse_bitmap;
+
+ /* Refcount for cores in use */
+ u32 shader_inuse_cnt[64];
+
+ /* Bitmaps of cores the JS needs for jobs ready to run */
+ u64 shader_needed_bitmap;
+
+ /* Refcount for cores needed */
+ u32 shader_needed_cnt[64];
+
+ u32 tiler_inuse_cnt;
+
+ u32 tiler_needed_cnt;
+
+ /* Refcount for tracking users of the l2 cache, e.g. when using hardware counter instrumentation. */
+ u32 l2_users_count;
+
+ /* Bitmaps of cores that are currently available (powered up, and the power policy is happy for jobs to be
+ * submitted to these cores). These are updated by the power management code. The job scheduler should avoid
+ * submitting new jobs to any cores that are not marked as available.
+ *
+ * pm.power_change_lock should be held when accessing these members.
+ */
+ u64 shader_available_bitmap;
+ u64 tiler_available_bitmap;
+ u64 l2_available_bitmap;
+
+ u64 shader_ready_bitmap;
+ u64 shader_transitioning_bitmap;
+
+ s8 nr_hw_address_spaces; /**< Number of address spaces in the GPU (constant after driver initialisation) */
+ s8 nr_user_address_spaces; /**< Number of address spaces available to user contexts */
+
+ /* Structure used for instrumentation and HW counters dumping */
+ struct {
+ /* The lock should be used when accessing any of the following members */
+ spinlock_t lock;
+
+ kbase_context *kctx;
+ u64 addr;
+ wait_queue_head_t wait;
+ int triggered;
+ kbase_instr_state state;
+ wait_queue_head_t cache_clean_wait;
+ struct workqueue_struct *cache_clean_wq;
+ struct work_struct cache_clean_work;
+
+ kbase_context *suspended_kctx;
+ kbase_uk_hwcnt_setup suspended_state;
+ } hwcnt;
+
+ /* Set when we're about to reset the GPU */
+ atomic_t reset_gpu;
+#define KBASE_RESET_GPU_NOT_PENDING 0 /* The GPU reset isn't pending */
+#define KBASE_RESET_GPU_PREPARED 1 /* kbase_prepare_to_reset_gpu has been called */
+#define KBASE_RESET_GPU_COMMITTED 2 /* kbase_reset_gpu has been called - the reset will now definitely happen
+ * within the timeout period */
+#define KBASE_RESET_GPU_HAPPENING 3 /* The GPU reset process is currently occurring (timeout has expired or
+ * kbasep_try_reset_gpu_early was called) */
+
+ /* Work queue and work item in which the GPU reset is performed */
+ struct workqueue_struct *reset_workq;
+ struct work_struct reset_work;
+ wait_queue_head_t reset_wait;
+ struct hrtimer reset_timer;
+
+ /* Value to be written to the irq_throttle register each time an IRQ is served */
+ atomic_t irq_throttle_cycles;
+
+ const kbase_attribute *config_attributes;
+
+ /* >> BASE_HW_ISSUE_8401 >> */
+#define KBASE_8401_WORKAROUND_COMPUTEJOB_COUNT 3
+ kbase_context *workaround_kctx;
+ void *workaround_compute_job_va[KBASE_8401_WORKAROUND_COMPUTEJOB_COUNT];
+ phys_addr_t workaround_compute_job_pa[KBASE_8401_WORKAROUND_COMPUTEJOB_COUNT];
+ /* << BASE_HW_ISSUE_8401 << */
+
+#if KBASE_TRACE_ENABLE != 0
+ spinlock_t trace_lock;
+ u16 trace_first_out;
+ u16 trace_next_in;
+ kbase_trace *trace_rbuf;
+#endif
+
+#if MALI_CUSTOMER_RELEASE == 0
+ /* This is used to override the current job scheduler values for
+ * KBASE_CONFIG_ATTR_JS_SOFT_STOP_TICKS
+ * KBASE_CONFIG_ATTR_JS_HARD_STOP_TICKS_SS
+ * KBASE_CONFIG_ATTR_JS_HARD_STOP_TICKS_NSS
+ * KBASE_CONFIG_ATTR_JS_RESET_TICKS_SS
+ * KBASE_CONFIG_ATTR_JS_RESET_TICKS_NSS.
+ *
+ * These values are set via the js_timeouts sysfs file.
+ */
+ u32 js_soft_stop_ticks;
+ u32 js_hard_stop_ticks_ss;
+ u32 js_hard_stop_ticks_nss;
+ u32 js_reset_ticks_ss;
+ u32 js_reset_ticks_nss;
+#endif
+
+ struct mutex cacheclean_lock;
+
+ /* Platform specific private data to be accessed by mali_kbase_config_xxx.c only */
+ void *platform_context;
+
+ /** Count of contexts keeping the GPU powered */
+ atomic_t keep_gpu_powered_count;
+
+ /* List of kbase_contexts created */
+ struct list_head kctx_list;
+ struct mutex kctx_list_lock;
+
+#ifdef CONFIG_MALI_T6XX_RT_PM
+ struct delayed_work runtime_pm_workqueue;
+#endif
+
+#ifdef CONFIG_MALI_TRACE_TIMELINE
+ kbase_trace_kbdev_timeline timeline;
+#endif
+};
+
+struct kbase_context {
+ kbase_device *kbdev;
+ phys_addr_t pgd;
+ struct list_head event_list;
+ struct mutex event_mutex;
+ mali_bool event_closed;
+ struct workqueue_struct *event_workq;
+
+ atomic_t setup_complete;
+ atomic_t setup_in_progress;
+
+ mali_bool keep_gpu_powered;
+
+ u64 *mmu_teardown_pages;
+
+ struct mutex reg_lock; /* To be converted to a rwlock? */
+ struct rb_root reg_rbtree; /* Red-Black tree of GPU regions (live regions) */
+
+ kbase_os_context osctx;
+ kbase_jd_context jctx;
+ kbasep_mem_usage usage;
+ atomic_t nonmapped_pages;
+
+ kbase_mem_allocator osalloc;
+ kbase_mem_allocator *pgd_allocator;
+
+ struct list_head waiting_soft_jobs;
+#ifdef CONFIG_KDS
+ struct list_head waiting_kds_resource;
+#endif
+ /** This is effectively part of the Run Pool, because it only has a valid
+ * setting (!=KBASEP_AS_NR_INVALID) whilst the context is scheduled in
+ *
+ * The kbasep_js_device_data::runpool_irq::lock must be held whilst accessing
+ * this.
+ *
+ * If the context relating to this as_nr is required, you must use
+ * kbasep_js_runpool_retain_ctx() to ensure that the context doesn't disappear
+ * whilst you're using it. Alternatively, just hold the kbasep_js_device_data::runpool_irq::lock
+ * to ensure the context doesn't disappear (but this has restrictions on what other locks
+ * you can take whilst doing this) */
+ int as_nr;
+
+ /* NOTE:
+ *
+ * Flags are in jctx.sched_info.ctx.flags
+ * Mutable flags *must* be accessed under jctx.sched_info.ctx.jsctx_mutex
+ *
+ * All other flags must be added there */
+ spinlock_t mm_update_lock;
+ struct mm_struct *process_mm;
+
+#ifdef CONFIG_MALI_TRACE_TIMELINE
+ kbase_trace_kctx_timeline timeline;
+#endif
+};
+
+typedef enum kbase_reg_access_type {
+ REG_READ,
+ REG_WRITE
+} kbase_reg_access_type;
+
+typedef enum kbase_share_attr_bits {
+ /* (1ULL << 8) bit is reserved */
+ SHARE_BOTH_BITS = (2ULL << 8), /* inner and outer shareable coherency */
+ SHARE_INNER_BITS = (3ULL << 8) /* inner shareable coherency */
+} kbase_share_attr_bits;
+
+/* Conversion helpers for setting up high resolution timers */
+#define HR_TIMER_DELAY_MSEC(x) (ns_to_ktime((x)*1000000U))
+#define HR_TIMER_DELAY_NSEC(x) (ns_to_ktime(x))
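+
+/* Usage sketch (hypothetical 100 ms delay on the reset timer in struct
+ * kbase_device above):
+ *
+ *   hrtimer_start(&kbdev->reset_timer, HR_TIMER_DELAY_MSEC(100),
+ *                 HRTIMER_MODE_REL);
+ *
+ * HR_TIMER_DELAY_MSEC(100) evaluates to ns_to_ktime(100000000U), i.e. a
+ * ktime_t of 100 ms relative to now. */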
+
+/* Maximum number of loops polling the GPU for a cache flush before we assume it must have completed */
+#define KBASE_CLEAN_CACHE_MAX_LOOPS 100000
+/* Maximum number of loops polling the GPU for an AS flush to complete before we assume the GPU has hung */
+#define KBASE_AS_FLUSH_MAX_LOOPS 100000
+
+#endif /* _KBASE_DEFS_H_ */
diff --git a/drivers/gpu/arm/t6xx/kbase/src/common/mali_kbase_device.c b/drivers/gpu/arm/t6xx/kbase/src/common/mali_kbase_device.c
new file mode 100755
index 00000000000..23df622a46b
--- /dev/null
+++ b/drivers/gpu/arm/t6xx/kbase/src/common/mali_kbase_device.c
@@ -0,0 +1,691 @@
+/*
+ *
+ * (C) COPYRIGHT 2010-2013 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained
+ * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
+ * Boston, MA 02110-1301, USA.
+ *
+ */
+
+
+
+/**
+ * @file mali_kbase_device.c
+ * Base kernel device APIs
+ */
+
+#include <linux/debugfs.h>
+#include <linux/seq_file.h>
+
+#include <kbase/src/common/mali_kbase.h>
+#include <kbase/src/common/mali_kbase_defs.h>
+#include <kbase/src/common/mali_kbase_hw.h>
+
+/* NOTE: Magic - 0x45435254 (TRCE in ASCII).
+ * Supports the tracing feature provided in the base module.
+ * Please keep it in sync with the value in the base module.
+ */
+#define TRACE_BUFFER_HEADER_SPECIAL 0x45435254
+
+#ifdef CONFIG_MALI_PLATFORM_VEXPRESS
+#ifdef CONFIG_MALI_PLATFORM_FAKE
+extern kbase_attribute config_attributes_hw_issue_8408[];
+#endif /* CONFIG_MALI_PLATFORM_FAKE */
+#endif /* CONFIG_MALI_PLATFORM_VEXPRESS */
+
+#if KBASE_TRACE_ENABLE != 0
+STATIC CONST char *kbasep_trace_code_string[] = {
+ /* IMPORTANT: USE OF SPECIAL #INCLUDE OF NON-STANDARD HEADER FILE
+ * THIS MUST BE USED AT THE START OF THE ARRAY */
+#define KBASE_TRACE_CODE_MAKE_CODE(X) # X
+#include "mali_kbase_trace_defs.h"
+#undef KBASE_TRACE_CODE_MAKE_CODE
+};
+#endif
+
+#define DEBUG_MESSAGE_SIZE 256
+
+STATIC mali_error kbasep_trace_init(kbase_device *kbdev);
+STATIC void kbasep_trace_term(kbase_device *kbdev);
+STATIC void kbasep_trace_hook_wrapper(void *param);
+#if KBASE_TRACE_ENABLE != 0
+STATIC void kbasep_trace_debugfs_init(kbase_device *kbdev);
+#endif
+
+void kbasep_as_do_poke(struct work_struct *work);
+enum hrtimer_restart kbasep_reset_timer_callback(struct hrtimer *data);
+void kbasep_reset_timeout_worker(struct work_struct *data);
+
+kbase_device *kbase_device_alloc(void)
+{
+ return kzalloc(sizeof(kbase_device), GFP_KERNEL);
+}
+
+mali_error kbase_device_init(kbase_device * const kbdev)
+{
+ int i; /* i is used after the for loop; don't reuse! */
+
+ spin_lock_init(&kbdev->mmu_mask_change);
+
+ /* Initialize platform specific context */
+ if (MALI_FALSE == kbasep_platform_device_init(kbdev))
+ goto fail;
+
+ /* Ensure we can access the GPU registers */
+ kbase_pm_register_access_enable(kbdev);
+
+ /* Find out GPU properties based on the GPU feature registers */
+ kbase_gpuprops_set(kbdev);
+
+ /* Get the list of workarounds for issues on the current HW (identified by the GPU_ID register) */
+ if (MALI_ERROR_NONE != kbase_hw_set_issues_mask(kbdev)) {
+ kbase_pm_register_access_disable(kbdev);
+ goto free_platform;
+ }
+
+ kbdev->nr_hw_address_spaces = kbdev->gpu_props.num_address_spaces;
+
+ /* We're done accessing the GPU registers for now. */
+ kbase_pm_register_access_disable(kbdev);
+
+ for (i = 0; i < kbdev->nr_hw_address_spaces; i++) {
+ const char format[] = "mali_mmu%d";
+ char name[sizeof(format)];
+ const char poke_format[] = "mali_mmu%d_poker"; /* BASE_HW_ISSUE_8316 */
+ char poke_name[sizeof(poke_format)]; /* BASE_HW_ISSUE_8316 */
+
+ if (kbase_hw_has_issue(kbdev, BASE_HW_ISSUE_8316)) {
+ if (0 > snprintf(poke_name, sizeof(poke_name), poke_format, i))
+ goto free_workqs;
+ }
+
+ if (0 > snprintf(name, sizeof(name), format, i))
+ goto free_workqs;
+
+ kbdev->as[i].number = i;
+ kbdev->as[i].fault_addr = 0ULL;
+
+ kbdev->as[i].pf_wq = alloc_workqueue(name, 0, 1);
+ if (NULL == kbdev->as[i].pf_wq)
+ goto free_workqs;
+
+ mutex_init(&kbdev->as[i].transaction_mutex);
+
+ if (kbase_hw_has_issue(kbdev, BASE_HW_ISSUE_8316)) {
+ struct hrtimer *poking_timer = &kbdev->as[i].poke_timer;
+
+ kbdev->as[i].poke_wq = alloc_workqueue(poke_name, 0, 1);
+ if (NULL == kbdev->as[i].poke_wq) {
+ destroy_workqueue(kbdev->as[i].pf_wq);
+ goto free_workqs;
+ }
+ KBASE_DEBUG_ASSERT(0 == object_is_on_stack(&kbdev->as[i].poke_work));
+ INIT_WORK(&kbdev->as[i].poke_work, kbasep_as_do_poke);
+
+ hrtimer_init(poking_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
+
+ poking_timer->function = kbasep_as_poke_timer_callback;
+
+ kbdev->as[i].poke_refcount = 0;
+ kbdev->as[i].poke_state = 0u;
+ }
+ }
+ /* don't change i after this point */
+
+ spin_lock_init(&kbdev->hwcnt.lock);
+
+ kbdev->hwcnt.state = KBASE_INSTR_STATE_DISABLED;
+ init_waitqueue_head(&kbdev->reset_wait);
+ init_waitqueue_head(&kbdev->hwcnt.wait);
+ init_waitqueue_head(&kbdev->hwcnt.cache_clean_wait);
+ INIT_WORK(&kbdev->hwcnt.cache_clean_work, kbasep_cache_clean_worker);
+ kbdev->hwcnt.triggered = 0;
+
+ kbdev->hwcnt.cache_clean_wq = alloc_workqueue("Mali cache cleaning workqueue",
+ 0, 1);
+ if (NULL == kbdev->hwcnt.cache_clean_wq)
+ goto free_workqs;
+
+ kbdev->reset_workq = alloc_workqueue("Mali reset workqueue", 0, 1);
+ if (NULL == kbdev->reset_workq)
+ goto free_cache_clean_workq;
+
+ KBASE_DEBUG_ASSERT(0 == object_is_on_stack(&kbdev->reset_work));
+ INIT_WORK(&kbdev->reset_work, kbasep_reset_timeout_worker);
+
+ hrtimer_init(&kbdev->reset_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
+ kbdev->reset_timer.function = kbasep_reset_timer_callback;
+
+ if (kbasep_trace_init(kbdev) != MALI_ERROR_NONE)
+ goto free_reset_workq;
+
+ mutex_init(&kbdev->cacheclean_lock);
+ atomic_set(&kbdev->keep_gpu_powered_count, 0);
+
+#ifdef CONFIG_MALI_TRACE_TIMELINE
+ for (i = 0; i < BASE_JM_SUBMIT_SLOTS; ++i)
+ kbdev->timeline.slot_atoms_submitted[i] = 0;
+
+ for (i = 0; i <= KBASEP_TIMELINE_PM_EVENT_LAST; ++i)
+ atomic_set(&kbdev->timeline.pm_event_uid[i], 0);
+#endif /* CONFIG_MALI_TRACE_TIMELINE */
+
+ kbase_debug_assert_register_hook(&kbasep_trace_hook_wrapper, kbdev);
+
+#ifdef CONFIG_MALI_PLATFORM_VEXPRESS
+#ifdef CONFIG_MALI_PLATFORM_FAKE
+ /* BASE_HW_ISSUE_8408 requires a configuration with different timeouts for
+ * the vexpress platform */
+ if (kbase_hw_has_issue(kbdev, BASE_HW_ISSUE_8408))
+ kbdev->config_attributes = config_attributes_hw_issue_8408;
+#endif /* CONFIG_MALI_PLATFORM_FAKE */
+#endif /* CONFIG_MALI_PLATFORM_VEXPRESS */
+
+ return MALI_ERROR_NONE;
+
+ free_reset_workq:
+ destroy_workqueue(kbdev->reset_workq);
+ free_cache_clean_workq:
+ destroy_workqueue(kbdev->hwcnt.cache_clean_wq);
+ free_workqs:
+ while (i > 0) {
+ i--;
+ destroy_workqueue(kbdev->as[i].pf_wq);
+ if (kbase_hw_has_issue(kbdev, BASE_HW_ISSUE_8316))
+ destroy_workqueue(kbdev->as[i].poke_wq);
+ }
+ free_platform:
+ kbasep_platform_device_term(kbdev);
+ fail:
+ return MALI_ERROR_FUNCTION_FAILED;
+}
+
+void kbase_device_term(kbase_device *kbdev)
+{
+ int i;
+
+ KBASE_DEBUG_ASSERT(kbdev);
+
+#if KBASE_TRACE_ENABLE != 0
+ kbase_debug_assert_register_hook(NULL, NULL);
+#endif
+
+ kbasep_trace_term(kbdev);
+
+ destroy_workqueue(kbdev->reset_workq);
+ destroy_workqueue(kbdev->hwcnt.cache_clean_wq);
+
+ for (i = 0; i < kbdev->nr_hw_address_spaces; i++) {
+ destroy_workqueue(kbdev->as[i].pf_wq);
+ if (kbase_hw_has_issue(kbdev, BASE_HW_ISSUE_8316))
+ destroy_workqueue(kbdev->as[i].poke_wq);
+ }
+
+ kbasep_platform_device_term(kbdev);
+}
+
+void kbase_device_free(kbase_device *kbdev)
+{
+ kfree(kbdev);
+}
+
+void kbase_device_trace_buffer_install(kbase_context *kctx, u32 *tb, size_t size)
+{
+ unsigned long flags;
+ KBASE_DEBUG_ASSERT(kctx);
+ KBASE_DEBUG_ASSERT(tb);
+
+ /* set up the header */
+ /* magic number in the first 4 bytes */
+ tb[0] = TRACE_BUFFER_HEADER_SPECIAL;
+ /* Store (write offset = 0, wrap counter = 0, transaction active = no).
+ * A write offset of 0 means the buffer has never been written.
+ * Offsets 1 to (wrap_offset - 1) are used to store values once tracing has started.
+ */
+ tb[1] = 0;
+
+ /* install trace buffer */
+ spin_lock_irqsave(&kctx->jctx.tb_lock, flags);
+ kctx->jctx.tb_wrap_offset = size / 8;
+ kctx->jctx.tb = tb;
+ spin_unlock_irqrestore(&kctx->jctx.tb_lock, flags);
+}
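+
+/* Layout sketch for the tb[1] header word, as read back by
+ * kbase_device_trace_register_access() below:
+ *
+ *   bit  0       transaction active (1 while the writer is mid-update)
+ *   bits 1..15   15-bit wrap counter
+ *   bits 16..31  write offset, counted in two-word (2 * u32) entries
+ *
+ * A reader that sees bit 0 set should retry, since the entry at the current
+ * write offset may only be partially written. */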
+
+void kbase_device_trace_buffer_uninstall(kbase_context *kctx)
+{
+ unsigned long flags;
+ KBASE_DEBUG_ASSERT(kctx);
+ spin_lock_irqsave(&kctx->jctx.tb_lock, flags);
+ kctx->jctx.tb = NULL;
+ kctx->jctx.tb_wrap_offset = 0;
+ spin_unlock_irqrestore(&kctx->jctx.tb_lock, flags);
+}
+
+void kbase_device_trace_register_access(kbase_context *kctx, kbase_reg_access_type type, u16 reg_offset, u32 reg_value)
+{
+ unsigned long flags;
+ spin_lock_irqsave(&kctx->jctx.tb_lock, flags);
+ if (kctx->jctx.tb) {
+ u16 wrap_count;
+ u16 write_offset;
+ u32 *tb = kctx->jctx.tb;
+ u32 header_word;
+
+ header_word = tb[1];
+ KBASE_DEBUG_ASSERT(0 == (header_word & 0x1));
+
+ wrap_count = (header_word >> 1) & 0x7FFF;
+ write_offset = (header_word >> 16) & 0xFFFF;
+
+ /* mark as transaction in progress */
+ tb[1] |= 0x1;
+ mb();
+
+ /* calculate new offset */
+ write_offset++;
+ if (write_offset == kctx->jctx.tb_wrap_offset) {
+ /* wrap */
+ write_offset = 1;
+ wrap_count++;
+ wrap_count &= 0x7FFF; /* 15bit wrap counter */
+ }
+
+ /* store the trace entry at the selected offset */
+ tb[write_offset * 2 + 0] = (reg_offset & ~0x3) | ((type == REG_WRITE) ? 0x1 : 0x0);
+ tb[write_offset * 2 + 1] = reg_value;
+ mb();
+
+ /* new header word */
+ header_word = (write_offset << 16) | (wrap_count << 1) | 0x0; /* transaction complete */
+ tb[1] = header_word;
+ }
+ spin_unlock_irqrestore(&kctx->jctx.tb_lock, flags);
+}
+
+void kbase_reg_write(kbase_device *kbdev, u16 offset, u32 value, kbase_context *kctx)
+{
+ KBASE_DEBUG_ASSERT(kbdev->pm.gpu_powered);
+ KBASE_DEBUG_ASSERT(kctx == NULL || kctx->as_nr != KBASEP_AS_NR_INVALID);
+ KBASE_DEBUG_PRINT_INFO(KBASE_CORE, "w: reg %04x val %08x", offset, value);
+ kbase_os_reg_write(kbdev, offset, value);
+ if (kctx && kctx->jctx.tb)
+ kbase_device_trace_register_access(kctx, REG_WRITE, offset, value);
+}
+
+KBASE_EXPORT_TEST_API(kbase_reg_write)
+
+u32 kbase_reg_read(kbase_device *kbdev, u16 offset, kbase_context *kctx)
+{
+ u32 val;
+ KBASE_DEBUG_ASSERT(kbdev->pm.gpu_powered);
+ KBASE_DEBUG_ASSERT(kctx == NULL || kctx->as_nr != KBASEP_AS_NR_INVALID);
+ val = kbase_os_reg_read(kbdev, offset);
+ KBASE_DEBUG_PRINT_INFO(KBASE_CORE, "r: reg %04x val %08x", offset, val);
+ if (kctx && kctx->jctx.tb)
+ kbase_device_trace_register_access(kctx, REG_READ, offset, val);
+ return val;
+}
+
+KBASE_EXPORT_TEST_API(kbase_reg_read)
+
+void kbase_report_gpu_fault(kbase_device *kbdev, int multiple)
+{
+ u32 status;
+ u64 address;
+
+ status = kbase_reg_read(kbdev, GPU_CONTROL_REG(GPU_FAULTSTATUS), NULL);
+ address = (u64) kbase_reg_read(kbdev, GPU_CONTROL_REG(GPU_FAULTADDRESS_HI), NULL) << 32;
+ address |= kbase_reg_read(kbdev, GPU_CONTROL_REG(GPU_FAULTADDRESS_LO), NULL);
+
+ KBASE_DEBUG_PRINT_WARN(KBASE_CORE, "GPU Fault 0x%08x (%s) at 0x%016llx", status, kbase_exception_name(status), address);
+ if (multiple)
+ KBASE_DEBUG_PRINT_WARN(KBASE_CORE, "There were multiple GPU faults - some have not been reported\n");
+}
+
+void kbase_gpu_interrupt(kbase_device *kbdev, u32 val)
+{
+ KBASE_TRACE_ADD(kbdev, CORE_GPU_IRQ, NULL, NULL, 0u, val);
+ if (val & GPU_FAULT)
+ kbase_report_gpu_fault(kbdev, val & MULTIPLE_GPU_FAULTS);
+
+ if (val & RESET_COMPLETED)
+ kbase_pm_reset_done(kbdev);
+
+ if (val & PRFCNT_SAMPLE_COMPLETED)
+ kbase_instr_hwcnt_sample_done(kbdev);
+
+ if (val & CLEAN_CACHES_COMPLETED)
+ kbase_clean_caches_done(kbdev);
+
+ KBASE_TRACE_ADD(kbdev, CORE_GPU_IRQ_CLEAR, NULL, NULL, 0u, val);
+ kbase_reg_write(kbdev, GPU_CONTROL_REG(GPU_IRQ_CLEAR), val, NULL);
+
+ /* kbase_pm_check_transitions must be called after the IRQ has been cleared. This is because it might trigger
+ * further power transitions and we don't want to miss the interrupt raised to notify us that these further
+ * transitions have finished.
+ */
+ if (val & POWER_CHANGED_ALL) {
+ mali_bool cores_are_available;
+ unsigned long flags;
+
+ KBASE_TIMELINE_PM_CHECKTRANS(kbdev, SW_FLOW_PM_CHECKTRANS_GPU_INTERRUPT_START);
+ spin_lock_irqsave(&kbdev->pm.power_change_lock, flags);
+ cores_are_available = kbase_pm_check_transitions_nolock(kbdev);
+ spin_unlock_irqrestore(&kbdev->pm.power_change_lock, flags);
+ KBASE_TIMELINE_PM_CHECKTRANS(kbdev, SW_FLOW_PM_CHECKTRANS_GPU_INTERRUPT_END);
+
+ if (cores_are_available) {
+ /* Fast-path Job Scheduling on PM IRQ */
+ int js;
+ /* Log timelining information that a change in state has completed */
+ kbase_timeline_pm_handle_event(kbdev, KBASE_TIMELINE_PM_EVENT_GPU_STATE_CHANGED);
+
+ spin_lock_irqsave(&kbdev->js_data.runpool_irq.lock, flags);
+ /* A simplified check to ensure the last context hasn't exited
+ * after dropping the PM lock whilst doing a PM IRQ: any bits set
+ * in 'submit_allowed' indicate that we have a context in the
+ * runpool (which can't leave whilst we hold this lock). It is
+ * sometimes zero even when we have a context in the runpool, but
+ * that's no problem because we'll be unable to submit jobs
+ * anyway */
+ if (kbdev->js_data.runpool_irq.submit_allowed)
+ for (js = 0; js < kbdev->gpu_props.num_job_slots; ++js) {
+ mali_bool needs_retry;
+ s8 submitted_count = 0;
+ needs_retry = kbasep_js_try_run_next_job_on_slot_irq_nolock(kbdev, js, &submitted_count);
+ /* Don't need to retry outside of IRQ context - this can
+ * only happen if we submitted too many in one IRQ, such
+ * that they were completing faster than we could
+ * submit. In this case, a job IRQ will fire to cause more
+ * work to be submitted in some way */
+ CSTD_UNUSED(needs_retry);
+ }
+ spin_unlock_irqrestore(&kbdev->js_data.runpool_irq.lock, flags);
+ }
+ }
+ KBASE_TRACE_ADD(kbdev, CORE_GPU_IRQ_DONE, NULL, NULL, 0u, val);
+}
+
+/*
+ * Device trace functions
+ */
+#if KBASE_TRACE_ENABLE != 0
+
+STATIC mali_error kbasep_trace_init(kbase_device *kbdev)
+{
+ void *rbuf;
+
+ rbuf = kmalloc(sizeof(kbase_trace) * KBASE_TRACE_SIZE, GFP_KERNEL);
+
+ if (!rbuf)
+ return MALI_ERROR_FUNCTION_FAILED;
+
+ kbdev->trace_rbuf = rbuf;
+ spin_lock_init(&kbdev->trace_lock);
+ kbasep_trace_debugfs_init(kbdev);
+ return MALI_ERROR_NONE;
+}
+
+STATIC void kbasep_trace_term(kbase_device *kbdev)
+{
+ kfree(kbdev->trace_rbuf);
+}
+
+void kbasep_trace_format_msg(kbase_trace *trace_msg, char *buffer, int len)
+{
+ s32 written = 0;
+
+ /* Initial part of message */
+ written += MAX(snprintf(buffer + written, MAX(len - written, 0), "%d.%.6d,%d,%d,%s,%p,", (int)trace_msg->timestamp.tv_sec, (int)(trace_msg->timestamp.tv_nsec / 1000), trace_msg->thread_id, trace_msg->cpu, kbasep_trace_code_string[trace_msg->code], trace_msg->ctx), 0);
+
+ if (trace_msg->katom != MALI_FALSE) {
+ written += MAX(snprintf(buffer + written, MAX(len - written, 0), "atom %d (ud: 0x%llx 0x%llx)", trace_msg->atom_number, trace_msg->atom_udata[0], trace_msg->atom_udata[1]), 0);
+ }
+
+ written += MAX(snprintf(buffer + written, MAX(len - written, 0), ",%.8llx,", trace_msg->gpu_addr), 0);
+
+ /* NOTE: Could add function callbacks to handle different message types */
+ /* Jobslot present */
+ if ((trace_msg->flags & KBASE_TRACE_FLAG_JOBSLOT) != MALI_FALSE)
+ written += MAX(snprintf(buffer + written, MAX(len - written, 0), "%d", trace_msg->jobslot), 0);
+
+ written += MAX(snprintf(buffer + written, MAX(len - written, 0), ","), 0);
+
+ /* Refcount present */
+ if ((trace_msg->flags & KBASE_TRACE_FLAG_REFCOUNT) != MALI_FALSE)
+ written += MAX(snprintf(buffer + written, MAX(len - written, 0), "%d", trace_msg->refcount), 0);
+
+ written += MAX(snprintf(buffer + written, MAX(len - written, 0), ","), 0);
+
+ /* Rest of message */
+ written += MAX(snprintf(buffer + written, MAX(len - written, 0), "0x%.8x", trace_msg->info_val), 0);
+}
+
+void kbasep_trace_dump_msg(kbase_trace *trace_msg)
+{
+ char buffer[DEBUG_MESSAGE_SIZE];
+
+ kbasep_trace_format_msg(trace_msg, buffer, DEBUG_MESSAGE_SIZE);
+ KBASE_DEBUG_PRINT(KBASE_CORE, "%s", buffer);
+}
+
+void kbasep_trace_add(kbase_device *kbdev, kbase_trace_code code, void *ctx, kbase_jd_atom *katom, u64 gpu_addr, u8 flags, int refcount, int jobslot, u32 info_val)
+{
+ unsigned long irqflags;
+ kbase_trace *trace_msg;
+
+ spin_lock_irqsave(&kbdev->trace_lock, irqflags);
+
+ trace_msg = &kbdev->trace_rbuf[kbdev->trace_next_in];
+
+ /* Fill the message */
+ trace_msg->thread_id = task_pid_nr(current);
+ trace_msg->cpu = task_cpu(current);
+
+ getnstimeofday(&trace_msg->timestamp);
+
+ trace_msg->code = code;
+ trace_msg->ctx = ctx;
+
+ if (NULL == katom) {
+ trace_msg->katom = MALI_FALSE;
+ } else {
+ trace_msg->katom = MALI_TRUE;
+ trace_msg->atom_number = kbase_jd_atom_id(katom->kctx, katom);
+ trace_msg->atom_udata[0] = katom->udata.blob[0];
+ trace_msg->atom_udata[1] = katom->udata.blob[1];
+ }
+
+ trace_msg->gpu_addr = gpu_addr;
+ trace_msg->jobslot = jobslot;
+ trace_msg->refcount = MIN((unsigned int)refcount, 0xFF);
+ trace_msg->info_val = info_val;
+ trace_msg->flags = flags;
+
+ /* Update the ringbuffer indices */
+ kbdev->trace_next_in = (kbdev->trace_next_in + 1) & KBASE_TRACE_MASK;
+ if (kbdev->trace_next_in == kbdev->trace_first_out)
+ kbdev->trace_first_out = (kbdev->trace_first_out + 1) & KBASE_TRACE_MASK;
+
+ /* Done */
+
+ spin_unlock_irqrestore(&kbdev->trace_lock, irqflags);
+}
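+
+/* Index arithmetic sketch, assuming KBASE_TRACE_MASK == KBASE_TRACE_SIZE - 1
+ * with KBASE_TRACE_SIZE a power of two: "(idx + 1) & KBASE_TRACE_MASK" wraps
+ * without a divide, e.g. for a size of 256, index 255 advances to
+ * (256 & 0xFF) == 0. When trace_next_in catches trace_first_out, the oldest
+ * entry is overwritten by bumping trace_first_out as done above. */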
+
+void kbasep_trace_clear(kbase_device *kbdev)
+{
+ unsigned long flags;
+ spin_lock_irqsave(&kbdev->trace_lock, flags);
+ kbdev->trace_first_out = kbdev->trace_next_in;
+ spin_unlock_irqrestore(&kbdev->trace_lock, flags);
+}
+
+void kbasep_trace_dump(kbase_device *kbdev)
+{
+ unsigned long flags;
+ u32 start;
+ u32 end;
+
+ KBASE_DEBUG_PRINT(KBASE_CORE, "Dumping trace:\nsecs,nthread,cpu,code,ctx,katom,gpu_addr,jobslot,refcount,info_val");
+ spin_lock_irqsave(&kbdev->trace_lock, flags);
+ start = kbdev->trace_first_out;
+ end = kbdev->trace_next_in;
+
+ while (start != end) {
+ kbase_trace *trace_msg = &kbdev->trace_rbuf[start];
+ kbasep_trace_dump_msg(trace_msg);
+
+ start = (start + 1) & KBASE_TRACE_MASK;
+ }
+ KBASE_DEBUG_PRINT(KBASE_CORE, "TRACE_END");
+
+ spin_unlock_irqrestore(&kbdev->trace_lock, flags);
+
+ KBASE_TRACE_CLEAR(kbdev);
+}
+
+STATIC void kbasep_trace_hook_wrapper(void *param)
+{
+ kbase_device *kbdev = (kbase_device *) param;
+ kbasep_trace_dump(kbdev);
+}
+
+#ifdef CONFIG_DEBUG_FS
+struct trace_seq_state {
+ kbase_trace trace_buf[KBASE_TRACE_SIZE];
+ u32 start;
+ u32 end;
+};
+
+void *kbasep_trace_seq_start(struct seq_file *s, loff_t *pos)
+{
+ struct trace_seq_state *state = s->private;
+ int i;
+
+ i = (state->start + *pos) & KBASE_TRACE_MASK;
+ if (i >= state->end)
+ return NULL;
+
+ return state;
+}
+
+void kbasep_trace_seq_stop(struct seq_file *s, void *data)
+{
+}
+
+void *kbasep_trace_seq_next(struct seq_file *s, void *data, loff_t *pos)
+{
+ struct trace_seq_state *state = s->private;
+ int i;
+
+ (*pos)++;
+
+ i = (state->start + *pos) & KBASE_TRACE_MASK;
+ if (i >= state->end)
+ return NULL;
+
+ return &state->trace_buf[i];
+}
+
+int kbasep_trace_seq_show(struct seq_file *s, void *data)
+{
+ kbase_trace *trace_msg = data;
+ char buffer[DEBUG_MESSAGE_SIZE];
+
+ kbasep_trace_format_msg(trace_msg, buffer, DEBUG_MESSAGE_SIZE);
+ seq_printf(s, "%s\n", buffer);
+ return 0;
+}
+
+static const struct seq_operations kbasep_trace_seq_ops = {
+ .start = kbasep_trace_seq_start,
+ .next = kbasep_trace_seq_next,
+ .stop = kbasep_trace_seq_stop,
+ .show = kbasep_trace_seq_show,
+};
+
+static int kbasep_trace_debugfs_open(struct inode *inode, struct file *file)
+{
+ kbase_device *kbdev = inode->i_private;
+ unsigned long flags;
+
+ struct trace_seq_state *state;
+
+ state = __seq_open_private(file, &kbasep_trace_seq_ops, sizeof(*state));
+ if (!state)
+ return -ENOMEM;
+
+ spin_lock_irqsave(&kbdev->trace_lock, flags);
+ state->start = kbdev->trace_first_out;
+ state->end = kbdev->trace_next_in;
+ memcpy(state->trace_buf, kbdev->trace_rbuf, sizeof(state->trace_buf));
+ spin_unlock_irqrestore(&kbdev->trace_lock, flags);
+
+ return 0;
+}
+
+static const struct file_operations kbasep_trace_debugfs_fops = {
+ .open = kbasep_trace_debugfs_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = seq_release_private,
+};
+
+STATIC void kbasep_trace_debugfs_init(kbase_device *kbdev)
+{
+ debugfs_create_file("mali_trace", S_IRUGO, NULL, kbdev, &kbasep_trace_debugfs_fops);
+}
+#else
+STATIC void kbasep_trace_debugfs_init(kbase_device *kbdev)
+{
+ CSTD_UNUSED(kbdev);
+}
+#endif /* CONFIG_DEBUG_FS */
+
+#else /* KBASE_TRACE_ENABLE != 0 */
+STATIC mali_error kbasep_trace_init(kbase_device *kbdev)
+{
+ CSTD_UNUSED(kbdev);
+ return MALI_ERROR_NONE;
+}
+
+STATIC void kbasep_trace_term(kbase_device *kbdev)
+{
+ CSTD_UNUSED(kbdev);
+}
+
+STATIC void kbasep_trace_hook_wrapper(void *param)
+{
+ CSTD_UNUSED(param);
+}
+
+void kbasep_trace_add(kbase_device *kbdev, kbase_trace_code code, void *ctx, kbase_jd_atom *katom, u64 gpu_addr, u8 flags, int refcount, int jobslot, u32 info_val)
+{
+ CSTD_UNUSED(kbdev);
+ CSTD_UNUSED(code);
+ CSTD_UNUSED(ctx);
+ CSTD_UNUSED(katom);
+ CSTD_UNUSED(gpu_addr);
+ CSTD_UNUSED(flags);
+ CSTD_UNUSED(refcount);
+ CSTD_UNUSED(jobslot);
+ CSTD_UNUSED(info_val);
+}
+
+void kbasep_trace_clear(kbase_device *kbdev)
+{
+ CSTD_UNUSED(kbdev);
+}
+
+void kbasep_trace_dump(kbase_device *kbdev)
+{
+ CSTD_UNUSED(kbdev);
+}
+#endif /* KBASE_TRACE_ENABLE != 0 */
diff --git a/drivers/gpu/arm/t6xx/kbase/src/common/mali_kbase_event.c b/drivers/gpu/arm/t6xx/kbase/src/common/mali_kbase_event.c
new file mode 100755
index 00000000000..7f6834cc138
--- /dev/null
+++ b/drivers/gpu/arm/t6xx/kbase/src/common/mali_kbase_event.c
@@ -0,0 +1,174 @@
+/*
+ *
+ * (C) COPYRIGHT 2010-2012 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained
+ * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
+ * Boston, MA 02110-1301, USA.
+ *
+ */
+
+
+
+#include <kbase/src/common/mali_kbase.h>
+
+#define beenthere(f, a...) pr_debug("%s:" f, __func__, ##a)
+
+STATIC base_jd_udata kbase_event_process(kbase_context *kctx, kbase_jd_atom *katom)
+{
+ base_jd_udata data;
+
+ KBASE_DEBUG_ASSERT(kctx != NULL);
+ KBASE_DEBUG_ASSERT(katom != NULL);
+ KBASE_DEBUG_ASSERT(katom->status == KBASE_JD_ATOM_STATE_COMPLETED);
+
+ data = katom->udata;
+
+ KBASE_TIMELINE_ATOMS_IN_FLIGHT(kctx, atomic_sub_return(1, &kctx->timeline.jd_atoms_in_flight));
+
+ katom->status = KBASE_JD_ATOM_STATE_UNUSED;
+
+ wake_up(&katom->completed);
+
+ return data;
+}
+
+int kbase_event_pending(kbase_context *ctx)
+{
+ int ret;
+
+ KBASE_DEBUG_ASSERT(ctx);
+
+ mutex_lock(&ctx->event_mutex);
+ ret = (!list_empty(&ctx->event_list)) || (MALI_TRUE == ctx->event_closed);
+ mutex_unlock(&ctx->event_mutex);
+
+ return ret;
+}
+
+KBASE_EXPORT_TEST_API(kbase_event_pending)
+
+int kbase_event_dequeue(kbase_context *ctx, base_jd_event_v2 *uevent)
+{
+ kbase_jd_atom *atom;
+
+ KBASE_DEBUG_ASSERT(ctx);
+
+ mutex_lock(&ctx->event_mutex);
+
+ if (list_empty(&ctx->event_list)) {
+ if (ctx->event_closed) {
+ /* generate the BASE_JD_EVENT_DRV_TERMINATED message on the fly */
+ mutex_unlock(&ctx->event_mutex);
+ uevent->event_code = BASE_JD_EVENT_DRV_TERMINATED;
+ memset(&uevent->udata, 0, sizeof(uevent->udata));
+ beenthere("event system closed, returning BASE_JD_EVENT_DRV_TERMINATED(0x%X)\n", BASE_JD_EVENT_DRV_TERMINATED);
+ return 0;
+ } else {
+ mutex_unlock(&ctx->event_mutex);
+ return -1;
+ }
+ }
+
+ /* normal event processing */
+ atom = list_entry(ctx->event_list.next, kbase_jd_atom, dep_item[0]);
+ list_del(ctx->event_list.next);
+
+ mutex_unlock(&ctx->event_mutex);
+
+ beenthere("event dequeuing %p\n", (void *)atom);
+ uevent->event_code = atom->event_code;
+ uevent->atom_number = (atom - ctx->jctx.atoms);
+ uevent->udata = kbase_event_process(ctx, atom);
+
+ return 0;
+}
+
+KBASE_EXPORT_TEST_API(kbase_event_dequeue)
+
+static void kbase_event_post_worker(struct work_struct *data)
+{
+ kbase_jd_atom *atom = CONTAINER_OF(data, kbase_jd_atom, work);
+ kbase_context *ctx = atom->kctx;
+
+ if (atom->core_req & BASE_JD_REQ_EXTERNAL_RESOURCES)
+ kbase_jd_free_external_resources(atom);
+
+ if (atom->core_req & BASE_JD_REQ_EVENT_ONLY_ON_FAILURE) {
+ if (atom->event_code == BASE_JD_EVENT_DONE) {
+ /* Don't report the event */
+ kbase_event_process(ctx, atom);
+ return;
+ }
+ }
+
+ mutex_lock(&ctx->event_mutex);
+ list_add_tail(&atom->dep_item[0], &ctx->event_list);
+ mutex_unlock(&ctx->event_mutex);
+
+ kbase_event_wakeup(ctx);
+}
+
+void kbase_event_post(kbase_context *ctx, kbase_jd_atom *atom)
+{
+ KBASE_DEBUG_ASSERT(ctx);
+ KBASE_DEBUG_ASSERT(ctx->event_workq);
+ KBASE_DEBUG_ASSERT(atom);
+
+ INIT_WORK(&atom->work, kbase_event_post_worker);
+ queue_work(ctx->event_workq, &atom->work);
+}
+
+KBASE_EXPORT_TEST_API(kbase_event_post)
+
+void kbase_event_close(kbase_context *kctx)
+{
+ mutex_lock(&kctx->event_mutex);
+ kctx->event_closed = MALI_TRUE;
+ mutex_unlock(&kctx->event_mutex);
+ kbase_event_wakeup(kctx);
+}
+
+mali_error kbase_event_init(kbase_context *kctx)
+{
+ KBASE_DEBUG_ASSERT(kctx);
+
+ INIT_LIST_HEAD(&kctx->event_list);
+ mutex_init(&kctx->event_mutex);
+ kctx->event_closed = MALI_FALSE;
+ kctx->event_workq = alloc_workqueue("kbase_event", WQ_MEM_RECLAIM, 1);
+
+ if (NULL == kctx->event_workq)
+ return MALI_ERROR_FUNCTION_FAILED;
+
+ return MALI_ERROR_NONE;
+}
+
+KBASE_EXPORT_TEST_API(kbase_event_init)
+
+void kbase_event_cleanup(kbase_context *kctx)
+{
+ KBASE_DEBUG_ASSERT(kctx);
+ KBASE_DEBUG_ASSERT(kctx->event_workq);
+
+ flush_workqueue(kctx->event_workq);
+ destroy_workqueue(kctx->event_workq);
+
+ /* We use kbase_event_dequeue to remove the remaining events as that
+ * deals with all the cleanup needed for the atoms.
+ *
+ * Note: use of kctx->event_list without a lock is safe because this must be the last
+ * thread using it (because we're about to terminate the context)
+ */
+ while (!list_empty(&kctx->event_list)) {
+ base_jd_event_v2 event;
+ kbase_event_dequeue(kctx, &event);
+ }
+}
+
+KBASE_EXPORT_TEST_API(kbase_event_cleanup)
diff --git a/drivers/gpu/arm/t6xx/kbase/src/common/mali_kbase_gator.h b/drivers/gpu/arm/t6xx/kbase/src/common/mali_kbase_gator.h
new file mode 100755
index 00000000000..3820bcf07d9
--- /dev/null
+++ b/drivers/gpu/arm/t6xx/kbase/src/common/mali_kbase_gator.h
@@ -0,0 +1,31 @@
+/*
+ *
+ * (C) COPYRIGHT 2011-2012 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained
+ * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
+ * Boston, MA 02110-1301, USA.
+ *
+ */
+
+
+
+#ifdef CONFIG_MALI_GATOR_SUPPORT
+#define GATOR_MAKE_EVENT(type, number) (((type) << 24) | ((number) << 16))
+#define GATOR_JOB_SLOT_START 1
+#define GATOR_JOB_SLOT_STOP 2
+#define GATOR_JOB_SLOT_SOFT_STOPPED 3
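+
+/* Encoding sketch: GATOR_MAKE_EVENT places the event type at bit 24 and the
+ * instance number at bit 16, so for example
+ *
+ *   GATOR_MAKE_EVENT(GATOR_JOB_SLOT_START, 2) == (1 << 24) | (2 << 16)
+ *                                             == 0x01020000
+ *
+ * identifies a "start" event on job slot 2. */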
+void kbase_trace_mali_job_slots_event(u32 event, const kbase_context *kctx);
+void kbase_trace_mali_pm_status(u32 event, u64 value);
+void kbase_trace_mali_pm_power_off(u32 event, u64 value);
+void kbase_trace_mali_pm_power_on(u32 event, u64 value);
+void kbase_trace_mali_page_fault_insert_pages(int event, u32 value);
+void kbase_trace_mali_mmu_as_in_use(int event);
+void kbase_trace_mali_mmu_as_released(int event);
+void kbase_trace_mali_total_alloc_pages_change(long long int event);
+#endif
diff --git a/drivers/gpu/arm/t6xx/kbase/src/common/mali_kbase_gpuprops.c b/drivers/gpu/arm/t6xx/kbase/src/common/mali_kbase_gpuprops.c
new file mode 100755
index 00000000000..93ceee8c850
--- /dev/null
+++ b/drivers/gpu/arm/t6xx/kbase/src/common/mali_kbase_gpuprops.c
@@ -0,0 +1,314 @@
+/*
+ *
+ * (C) COPYRIGHT 2011-2013 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained
+ * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
+ * Boston, MA 02110-1301, USA.
+ *
+ */
+
+
+
+/**
+ * @file mali_kbase_gpuprops.c
+ * Base kernel property query APIs
+ */
+
+#include <kbase/src/common/mali_kbase.h>
+#include <kbase/src/common/mali_midg_regmap.h>
+#include <kbase/src/common/mali_kbase_gpuprops.h>
+
+/**
+ * @brief Extracts bits from a 32-bit bitfield.
+ * @hideinitializer
+ *
+ * @param[in] value The value from which to extract bits.
+ * @param[in] offset The first bit to extract (0 being the LSB).
+ * @param[in] size The number of bits to extract.
+ * @return Bits [@a offset, @a offset + @a size) from @a value.
+ *
+ * @pre offset + size <= 32.
+ */
+/* from mali_cdsb.h */
+#define KBASE_UBFX32(value, offset, size) \
+ (((u32)(value) >> (u32)(offset)) & (u32)((1ULL << (u32)(size)) - 1))
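+
+/* Extraction sketch: KBASE_UBFX32(0xABCD1234, 16, 16) shifts right by 16 and
+ * masks with ((1ULL << 16) - 1) == 0xFFFF, yielding 0xABCD. The product ID
+ * is pulled out of bits [16, 32) of GPU_ID in exactly this way below. */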
+
+mali_error kbase_gpuprops_uk_get_props(kbase_context *kctx, kbase_uk_gpuprops * const kbase_props)
+{
+ kbase_gpuprops_clock_speed_function get_gpu_speed_mhz;
+ u32 gpu_speed_mhz;
+ int rc = 1;
+
+ KBASE_DEBUG_ASSERT(NULL != kctx);
+ KBASE_DEBUG_ASSERT(NULL != kbase_props);
+
+ /* Current GPU speed is requested from the system integrator via the KBASE_CONFIG_ATTR_GPU_SPEED_FUNC function.
+ * If that function fails, or the function is not provided by the system integrator, we report the maximum
+ * GPU speed as specified by KBASE_CONFIG_ATTR_GPU_FREQ_KHZ_MAX.
+ */
+ get_gpu_speed_mhz = (kbase_gpuprops_clock_speed_function) kbasep_get_config_value(kctx->kbdev, kctx->kbdev->config_attributes, KBASE_CONFIG_ATTR_GPU_SPEED_FUNC);
+ if (get_gpu_speed_mhz != NULL) {
+ rc = get_gpu_speed_mhz(&gpu_speed_mhz);
+#ifdef CONFIG_MALI_DEBUG
+ /* Issue a warning message when the reported GPU speed falls outside the min/max range */
+ if (rc == 0) {
+ u32 gpu_speed_khz = gpu_speed_mhz * 1000;
+ if (gpu_speed_khz < kctx->kbdev->gpu_props.props.core_props.gpu_freq_khz_min || gpu_speed_khz > kctx->kbdev->gpu_props.props.core_props.gpu_freq_khz_max)
+ KBASE_DEBUG_PRINT_WARN(KBASE_CORE, "GPU Speed is outside of min/max range (got %lu Khz, min %lu Khz, max %lu Khz)\n", (unsigned long)gpu_speed_khz, (unsigned long)kctx->kbdev->gpu_props.props.core_props.gpu_freq_khz_min, (unsigned long)kctx->kbdev->gpu_props.props.core_props.gpu_freq_khz_max);
+ }
+#endif /* CONFIG_MALI_DEBUG */
+ }
+ if (rc != 0)
+ gpu_speed_mhz = kctx->kbdev->gpu_props.props.core_props.gpu_freq_khz_max / 1000;
+
+ kctx->kbdev->gpu_props.props.core_props.gpu_speed_mhz = gpu_speed_mhz;
+
+ memcpy(&kbase_props->props, &kctx->kbdev->gpu_props.props, sizeof(kbase_props->props));
+
+ return MALI_ERROR_NONE;
+}
+
+STATIC void kbase_gpuprops_dump_registers(kbase_device *kbdev, kbase_gpuprops_regdump *regdump)
+{
+ int i;
+
+ KBASE_DEBUG_ASSERT(NULL != kbdev);
+ KBASE_DEBUG_ASSERT(NULL != regdump);
+
+ /* Fill regdump with the content of the relevant registers */
+ regdump->gpu_id = kbase_os_reg_read(kbdev, GPU_CONTROL_REG(GPU_ID));
+
+ regdump->l2_features = kbase_os_reg_read(kbdev, GPU_CONTROL_REG(L2_FEATURES));
+ regdump->l3_features = kbase_os_reg_read(kbdev, GPU_CONTROL_REG(L3_FEATURES));
+ regdump->tiler_features = kbase_os_reg_read(kbdev, GPU_CONTROL_REG(TILER_FEATURES));
+ regdump->mem_features = kbase_os_reg_read(kbdev, GPU_CONTROL_REG(MEM_FEATURES));
+ regdump->mmu_features = kbase_os_reg_read(kbdev, GPU_CONTROL_REG(MMU_FEATURES));
+ regdump->as_present = kbase_os_reg_read(kbdev, GPU_CONTROL_REG(AS_PRESENT));
+ regdump->js_present = kbase_os_reg_read(kbdev, GPU_CONTROL_REG(JS_PRESENT));
+
+ for (i = 0; i < MIDG_MAX_JOB_SLOTS; i++)
+ regdump->js_features[i] = kbase_os_reg_read(kbdev, GPU_CONTROL_REG(JS_FEATURES_REG(i)));
+
+ for (i = 0; i < BASE_GPU_NUM_TEXTURE_FEATURES_REGISTERS; i++)
+ regdump->texture_features[i] = kbase_os_reg_read(kbdev, GPU_CONTROL_REG(TEXTURE_FEATURES_REG(i)));
+
+ regdump->thread_max_threads = kbase_os_reg_read(kbdev, GPU_CONTROL_REG(THREAD_MAX_THREADS));
+ regdump->thread_max_workgroup_size = kbase_os_reg_read(kbdev, GPU_CONTROL_REG(THREAD_MAX_WORKGROUP_SIZE));
+ regdump->thread_max_barrier_size = kbase_os_reg_read(kbdev, GPU_CONTROL_REG(THREAD_MAX_BARRIER_SIZE));
+ regdump->thread_features = kbase_os_reg_read(kbdev, GPU_CONTROL_REG(THREAD_FEATURES));
+
+ regdump->shader_present_lo = kbase_os_reg_read(kbdev, GPU_CONTROL_REG(SHADER_PRESENT_LO));
+ regdump->shader_present_hi = kbase_os_reg_read(kbdev, GPU_CONTROL_REG(SHADER_PRESENT_HI));
+
+ regdump->tiler_present_lo = kbase_os_reg_read(kbdev, GPU_CONTROL_REG(TILER_PRESENT_LO));
+ regdump->tiler_present_hi = kbase_os_reg_read(kbdev, GPU_CONTROL_REG(TILER_PRESENT_HI));
+
+ regdump->l2_present_lo = kbase_os_reg_read(kbdev, GPU_CONTROL_REG(L2_PRESENT_LO));
+ regdump->l2_present_hi = kbase_os_reg_read(kbdev, GPU_CONTROL_REG(L2_PRESENT_HI));
+
+ regdump->l3_present_lo = kbase_os_reg_read(kbdev, GPU_CONTROL_REG(L3_PRESENT_LO));
+ regdump->l3_present_hi = kbase_os_reg_read(kbdev, GPU_CONTROL_REG(L3_PRESENT_HI));
+}
+
+STATIC void kbase_gpuprops_construct_coherent_groups(base_gpu_props * const props)
+{
+ struct mali_base_gpu_coherent_group *current_group;
+ u64 group_present;
+ u64 group_mask;
+ u64 first_set, first_set_prev;
+ u32 num_groups = 0;
+
+ KBASE_DEBUG_ASSERT(NULL != props);
+
+ props->coherency_info.coherency = props->raw_props.mem_features;
+ props->coherency_info.num_core_groups = hweight64(props->raw_props.l2_present);
+
+ if (props->coherency_info.coherency & GROUPS_L3_COHERENT) {
+ /* Group is l3 coherent */
+ group_present = props->raw_props.l3_present;
+ } else if (props->coherency_info.coherency & GROUPS_L2_COHERENT) {
+ /* Group is l2 coherent */
+ group_present = props->raw_props.l2_present;
+ } else {
+ /* Group is l1 coherent */
+ group_present = props->raw_props.shader_present;
+ }
+
+ /*
+ * The coherent group mask can be computed from the l2/l3 present
+ * register.
+ *
+ * For the coherent group n:
+ * group_mask[n] = (first_set[n] - 1) & ~(first_set[n-1] - 1)
+ * where first_set is group_present with only its nth set-bit kept
+ * (i.e. the position from where a new group starts).
+ *
+ * For instance if the groups are l2 coherent and l2_present=0x0..01111:
+ * The first mask is:
+ * group_mask[1] = (first_set[1] - 1) & ~(first_set[0] - 1)
+ * = (0x0..010 - 1) & ~(0x0..01 - 1)
+ * = 0x0..00f
+ * The second mask is:
+ * group_mask[2] = (first_set[2] - 1) & ~(first_set[1] - 1)
+ * = (0x0..100 - 1) & ~(0x0..010 - 1)
+ * = 0x0..0f0
+ * And so on until all the bits from group_present have been cleared
+ * (i.e. there is no group left).
+ */
+
+ current_group = props->coherency_info.group;
+ first_set = group_present & ~(group_present - 1);
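+ /* Bit-trick sketch: x & ~(x - 1) isolates the lowest set bit. E.g. for
+ * group_present == 0b1100, subtracting 1 gives 0b1011, its complement
+ * ends in 0b0100, and the AND leaves first_set == 0b0100. */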
+
+ while (group_present != 0 && num_groups < BASE_MAX_COHERENT_GROUPS) {
+ group_present -= first_set; /* Clear the current group bit */
+ first_set_prev = first_set;
+
+ first_set = group_present & ~(group_present - 1);
+ group_mask = (first_set - 1) & ~(first_set_prev - 1);
+
+ /* Populate the coherent_group structure for each group */
+ current_group->core_mask = group_mask & props->raw_props.shader_present;
+ current_group->num_cores = hweight64(current_group->core_mask);
+
+ num_groups++;
+ current_group++;
+ }
+
+ if (group_present != 0)
+ KBASE_DEBUG_PRINT_WARN(KBASE_CORE, "Too many coherent groups (keeping only %d groups).\n", BASE_MAX_COHERENT_GROUPS);
+
+ props->coherency_info.num_groups = num_groups;
+}
+
+/**
+ * @brief Get the GPU configuration
+ *
+ * Fill the base_gpu_props structure with values from the GPU configuration registers.
+ * Only the raw properties are filled in this function
+ *
+ * @param gpu_props The base_gpu_props structure
+ * @param kbdev The kbase_device structure for the device
+ */
+static void kbase_gpuprops_get_props(base_gpu_props * const gpu_props, kbase_device *kbdev)
+{
+ kbase_gpuprops_regdump regdump;
+ int i;
+
+ KBASE_DEBUG_ASSERT(NULL != kbdev);
+ KBASE_DEBUG_ASSERT(NULL != gpu_props);
+
+ /* Dump relevant registers */
+ kbase_gpuprops_dump_registers(kbdev, &regdump);
+
+ gpu_props->raw_props.gpu_id = regdump.gpu_id;
+ gpu_props->raw_props.tiler_features = regdump.tiler_features;
+ gpu_props->raw_props.mem_features = regdump.mem_features;
+ gpu_props->raw_props.mmu_features = regdump.mmu_features;
+ gpu_props->raw_props.l2_features = regdump.l2_features;
+ gpu_props->raw_props.l3_features = regdump.l3_features;
+
+ gpu_props->raw_props.as_present = regdump.as_present;
+ gpu_props->raw_props.js_present = regdump.js_present;
+ gpu_props->raw_props.shader_present = ((u64) regdump.shader_present_hi << 32) + regdump.shader_present_lo;
+ gpu_props->raw_props.tiler_present = ((u64) regdump.tiler_present_hi << 32) + regdump.tiler_present_lo;
+ gpu_props->raw_props.l2_present = ((u64) regdump.l2_present_hi << 32) + regdump.l2_present_lo;
+ gpu_props->raw_props.l3_present = ((u64) regdump.l3_present_hi << 32) + regdump.l3_present_lo;
+
+ for (i = 0; i < MIDG_MAX_JOB_SLOTS; i++)
+ gpu_props->raw_props.js_features[i] = regdump.js_features[i];
+
+ for (i = 0; i < BASE_GPU_NUM_TEXTURE_FEATURES_REGISTERS; i++)
+ gpu_props->raw_props.texture_features[i] = regdump.texture_features[i];
+
+ gpu_props->raw_props.thread_max_barrier_size = regdump.thread_max_barrier_size;
+ gpu_props->raw_props.thread_max_threads = regdump.thread_max_threads;
+ gpu_props->raw_props.thread_max_workgroup_size = regdump.thread_max_workgroup_size;
+ gpu_props->raw_props.thread_features = regdump.thread_features;
+}
+
+/**
+ * @brief Calculate the derived properties
+ *
+ * Fill the base_gpu_props structure with values derived from the GPU configuration registers
+ *
+ * @param gpu_props The base_gpu_props structure
+ * @param kbdev The kbase_device structure for the device
+ */
+static void kbase_gpuprops_calculate_props(base_gpu_props * const gpu_props, kbase_device *kbdev)
+{
+ int i;
+
+ /* Populate the base_gpu_props structure */
+ gpu_props->core_props.version_status = KBASE_UBFX32(gpu_props->raw_props.gpu_id, 0U, 4);
+ gpu_props->core_props.minor_revision = KBASE_UBFX32(gpu_props->raw_props.gpu_id, 4U, 8);
+ gpu_props->core_props.major_revision = KBASE_UBFX32(gpu_props->raw_props.gpu_id, 12U, 4);
+ gpu_props->core_props.product_id = KBASE_UBFX32(gpu_props->raw_props.gpu_id, 16U, 16);
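+ /* Decode sketch, assuming the publicly documented Mali-T604 product ID
+ * 0x6956: a raw gpu_id of 0x69560010 yields version_status 0,
+ * minor_revision 1, major_revision 0 and product_id 0x6956 - a T60X at
+ * r0p1, matching the GPU_ID_MAKE(GPU_ID_PI_T60X, 0, 1, 0) case in
+ * kbase_hw_set_issues_mask(). */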
+ gpu_props->core_props.log2_program_counter_size = KBASE_GPU_PC_SIZE_LOG2;
+ gpu_props->core_props.gpu_available_memory_size = totalram_pages << PAGE_SHIFT;
+
+ for (i = 0; i < BASE_GPU_NUM_TEXTURE_FEATURES_REGISTERS; i++)
+ gpu_props->core_props.texture_features[i] = gpu_props->raw_props.texture_features[i];
+
+ gpu_props->l2_props.log2_line_size = KBASE_UBFX32(gpu_props->raw_props.l2_features, 0U, 8);
+ gpu_props->l2_props.log2_cache_size = KBASE_UBFX32(gpu_props->raw_props.l2_features, 16U, 8);
+ if (gpu_props->core_props.product_id == GPU_ID_PI_T75X) {
+ gpu_props->l2_props.num_l2_slices = KBASE_UBFX32(gpu_props->raw_props.mem_features, 8U, 4) + 1;
+ } else {
+ gpu_props->l2_props.num_l2_slices = 1;
+ }
+
+ gpu_props->l3_props.log2_line_size = KBASE_UBFX32(gpu_props->raw_props.l3_features, 0U, 8);
+ gpu_props->l3_props.log2_cache_size = KBASE_UBFX32(gpu_props->raw_props.l3_features, 16U, 8);
+
+ gpu_props->tiler_props.bin_size_bytes = 1 << KBASE_UBFX32(gpu_props->raw_props.tiler_features, 0U, 6);
+ gpu_props->tiler_props.max_active_levels = KBASE_UBFX32(gpu_props->raw_props.tiler_features, 8U, 4);
+
+ gpu_props->thread_props.max_registers = KBASE_UBFX32(gpu_props->raw_props.thread_features, 0U, 16);
+ gpu_props->thread_props.max_task_queue = KBASE_UBFX32(gpu_props->raw_props.thread_features, 16U, 8);
+ gpu_props->thread_props.max_thread_group_split = KBASE_UBFX32(gpu_props->raw_props.thread_features, 24U, 6);
+ gpu_props->thread_props.impl_tech = KBASE_UBFX32(gpu_props->raw_props.thread_features, 30U, 2);
+
+ /* Initialize the coherent_group structure for each group */
+ kbase_gpuprops_construct_coherent_groups(gpu_props);
+}
+
+void kbase_gpuprops_set(kbase_device *kbdev)
+{
+ kbase_gpu_props *gpu_props;
+ struct midg_raw_gpu_props *raw;
+
+ KBASE_DEBUG_ASSERT(NULL != kbdev);
+ gpu_props = &kbdev->gpu_props;
+ raw = &gpu_props->props.raw_props;
+
+ /* Initialize the base_gpu_props structure from the hardware */
+ kbase_gpuprops_get_props(&gpu_props->props, kbdev);
+
+ /* Populate the derived properties */
+ kbase_gpuprops_calculate_props(&gpu_props->props, kbdev);
+
+ /* Populate kbase-only fields */
+ gpu_props->l2_props.associativity = KBASE_UBFX32(raw->l2_features, 8U, 8);
+ gpu_props->l2_props.external_bus_width = KBASE_UBFX32(raw->l2_features, 24U, 8);
+
+ gpu_props->l3_props.associativity = KBASE_UBFX32(raw->l3_features, 8U, 8);
+ gpu_props->l3_props.external_bus_width = KBASE_UBFX32(raw->l3_features, 24U, 8);
+
+ gpu_props->mem.core_group = KBASE_UBFX32(raw->mem_features, 0U, 1);
+ gpu_props->mem.supergroup = KBASE_UBFX32(raw->mem_features, 1U, 1);
+
+ gpu_props->mmu.va_bits = KBASE_UBFX32(raw->mmu_features, 0U, 8);
+ gpu_props->mmu.pa_bits = KBASE_UBFX32(raw->mmu_features, 8U, 8);
+
+ gpu_props->num_cores = hweight64(raw->shader_present);
+ gpu_props->num_core_groups = hweight64(raw->l2_present);
+ gpu_props->num_supergroups = hweight64(raw->l3_present);
+ gpu_props->num_address_spaces = hweight32(raw->as_present);
+ gpu_props->num_job_slots = hweight32(raw->js_present);
+}
diff --git a/drivers/gpu/arm/t6xx/kbase/src/common/mali_kbase_gpuprops.h b/drivers/gpu/arm/t6xx/kbase/src/common/mali_kbase_gpuprops.h
new file mode 100755
index 00000000000..b00168c73c8
--- /dev/null
+++ b/drivers/gpu/arm/t6xx/kbase/src/common/mali_kbase_gpuprops.h
@@ -0,0 +1,52 @@
+/*
+ *
+ * (C) COPYRIGHT 2011-2012 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained
+ * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
+ * Boston, MA 02110-1301, USA.
+ *
+ */
+
+
+
+/**
+ * @file mali_kbase_gpuprops.h
+ * Base kernel property query APIs
+ */
+
+#ifndef _KBASE_GPUPROPS_H_
+#define _KBASE_GPUPROPS_H_
+
+#include "mali_kbase_gpuprops_types.h"
+
+/* Forward definition - see mali_kbase.h */
+struct kbase_device;
+
+/**
+ * @brief Set up Kbase GPU properties.
+ *
+ * Set up Kbase GPU properties with information from the GPU registers
+ *
+ * @param kbdev The kbase_device structure for the device
+ */
+void kbase_gpuprops_set(struct kbase_device *kbdev);
+
+/**
+ * @brief Provide GPU properties to the user side through a UKU call.
+ *
+ * Fill the kbase_uk_gpuprops with values from GPU configuration registers.
+ *
+ * @param kctx The kbase_context structure
+ * @param kbase_props A copy of the kbase_uk_gpuprops structure from userspace
+ *
+ * @return MALI_ERROR_NONE on success. Any other value indicates failure.
+ */
+mali_error kbase_gpuprops_uk_get_props(kbase_context *kctx, kbase_uk_gpuprops * const kbase_props);
+
+#endif /* _KBASE_GPUPROPS_H_ */
diff --git a/drivers/gpu/arm/t6xx/kbase/src/common/mali_kbase_gpuprops_types.h b/drivers/gpu/arm/t6xx/kbase/src/common/mali_kbase_gpuprops_types.h
new file mode 100755
index 00000000000..3f36c63b820
--- /dev/null
+++ b/drivers/gpu/arm/t6xx/kbase/src/common/mali_kbase_gpuprops_types.h
@@ -0,0 +1,101 @@
+/*
+ *
+ * (C) COPYRIGHT 2011-2013 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained
+ * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
+ * Boston, MA 02110-1301, USA.
+ *
+ */
+
+
+
+/**
+ * @file mali_kbase_gpuprops_types.h
+ * Base kernel property query APIs
+ */
+
+#ifndef _KBASE_GPUPROPS_TYPES_H_
+#define _KBASE_GPUPROPS_TYPES_H_
+
+#include <kbase/mali_base_kernel.h>
+
+#define KBASE_GPU_SPEED_MHZ 123
+#define KBASE_GPU_PC_SIZE_LOG2 16U
+
+typedef struct kbase_gpuprops_regdump {
+ u32 gpu_id;
+ u32 l2_features;
+ u32 l3_features;
+ u32 tiler_features;
+ u32 mem_features;
+ u32 mmu_features;
+ u32 as_present;
+ u32 js_present;
+
+ u32 js_features[MIDG_MAX_JOB_SLOTS];
+
+ u32 texture_features[BASE_GPU_NUM_TEXTURE_FEATURES_REGISTERS];
+
+ u32 shader_present_lo;
+ u32 shader_present_hi;
+
+ u32 tiler_present_lo;
+ u32 tiler_present_hi;
+
+ u32 l2_present_lo;
+ u32 l2_present_hi;
+
+ u32 l3_present_lo;
+ u32 l3_present_hi;
+
+ u32 thread_max_threads;
+ u32 thread_max_workgroup_size;
+ u32 thread_max_barrier_size;
+ u32 thread_features;
+} kbase_gpuprops_regdump;
+
+typedef struct kbase_gpu_cache_props {
+ u8 associativity;
+ u8 external_bus_width;
+} kbase_gpu_cache_props;
+
+typedef struct kbase_gpu_mem_props {
+ u8 core_group;
+ u8 supergroup;
+} kbase_gpu_mem_props;
+
+typedef struct kbase_gpu_mmu_props {
+ u8 va_bits;
+ u8 pa_bits;
+} kbase_gpu_mmu_props;
+
+typedef struct mali_kbase_gpu_props {
+ /* kernel-only properties */
+ u8 num_cores;
+ u8 num_core_groups;
+ u8 num_supergroups;
+ u8 num_address_spaces;
+ u8 num_job_slots;
+
+ kbase_gpu_cache_props l2_props;
+ kbase_gpu_cache_props l3_props;
+
+ kbase_gpu_mem_props mem;
+ kbase_gpu_mmu_props mmu;
+
+ /**
+ * Implementation-specific IRQ throttle value (in us); should be adjusted during integration.
+ */
+ int irq_throttle_time_us;
+
+ /* Properties shared with userspace */
+ base_gpu_props props;
+} kbase_gpu_props;
+
+#endif /* _KBASE_GPUPROPS_TYPES_H_ */
diff --git a/drivers/gpu/arm/t6xx/kbase/src/common/mali_kbase_hw.c b/drivers/gpu/arm/t6xx/kbase/src/common/mali_kbase_hw.c
new file mode 100755
index 00000000000..b9e6a8a4594
--- /dev/null
+++ b/drivers/gpu/arm/t6xx/kbase/src/common/mali_kbase_hw.c
@@ -0,0 +1,84 @@
+/*
+ *
+ * (C) COPYRIGHT 2012-2013 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained
+ * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
+ * Boston, MA 02110-1301, USA.
+ *
+ */
+
+
+
+/**
+ * @file
+ * Run-time work-arounds helpers
+ */
+
+#include <kbase/mali_base_hwconfig.h>
+#include <kbase/src/common/mali_midg_regmap.h>
+#include "mali_kbase.h"
+#include "mali_kbase_hw.h"
+
+mali_error kbase_hw_set_issues_mask(kbase_device *kbdev)
+{
+ const base_hw_issue *issues;
+ u32 gpu_id;
+
+ gpu_id = kbdev->gpu_props.props.raw_props.gpu_id;
+
+ switch (gpu_id) {
+ case GPU_ID_MAKE(GPU_ID_PI_T60X, 0, 0, GPU_ID_S_15DEV0):
+ issues = base_hw_issues_t60x_r0p0_15dev0;
+ break;
+ case GPU_ID_MAKE(GPU_ID_PI_T60X, 0, 0, GPU_ID_S_EAC):
+ issues = base_hw_issues_t60x_r0p0_eac;
+ break;
+ case GPU_ID_MAKE(GPU_ID_PI_T60X, 0, 1, 0):
+ issues = base_hw_issues_t60x_r0p1;
+ break;
+ case GPU_ID_MAKE(GPU_ID_PI_T65X, 0, 1, 0):
+ issues = base_hw_issues_t65x_r0p1;
+ break;
+ case GPU_ID_MAKE(GPU_ID_PI_T62X, 0, 0, 0):
+ case GPU_ID_MAKE(GPU_ID_PI_T62X, 0, 0, 1):
+ issues = base_hw_issues_t62x_r0p0;
+ break;
+ case GPU_ID_MAKE(GPU_ID_PI_T62X, 0, 1, 0):
+ issues = base_hw_issues_t62x_r0p1;
+ break;
+ case GPU_ID_MAKE(GPU_ID_PI_T62X, 1, 0, 0):
+ case GPU_ID_MAKE(GPU_ID_PI_T62X, 1, 0, 1):
+ issues = base_hw_issues_t62x_r1p0;
+ break;
+ case GPU_ID_MAKE(GPU_ID_PI_T67X, 0, 0, 0):
+ case GPU_ID_MAKE(GPU_ID_PI_T67X, 0, 0, 1):
+ issues = base_hw_issues_t67x_r0p0;
+ break;
+ case GPU_ID_MAKE(GPU_ID_PI_T67X, 0, 1, 0):
+ issues = base_hw_issues_t67x_r0p1;
+ break;
+ case GPU_ID_MAKE(GPU_ID_PI_T67X, 1, 0, 0):
+ case GPU_ID_MAKE(GPU_ID_PI_T67X, 1, 0, 1):
+ issues = base_hw_issues_t67x_r1p0;
+ break;
+ case GPU_ID_MAKE(GPU_ID_PI_T75X, 0, 0, 0):
+ issues = base_hw_issues_t75x_r0p0;
+ break;
+ default:
+ KBASE_DEBUG_PRINT_ERROR(KBASE_CORE, "Unknown GPU ID 0x%x", gpu_id);
+ return MALI_ERROR_FUNCTION_FAILED;
+ }
+
+ KBASE_DEBUG_PRINT_INFO(KBASE_CORE, "GPU identified as 0x%04x r%dp%d status %d",
+                        (gpu_id & GPU_ID_VERSION_PRODUCT_ID) >> GPU_ID_VERSION_PRODUCT_ID_SHIFT,
+                        (gpu_id & GPU_ID_VERSION_MAJOR) >> GPU_ID_VERSION_MAJOR_SHIFT,
+                        (gpu_id & GPU_ID_VERSION_MINOR) >> GPU_ID_VERSION_MINOR_SHIFT,
+                        (gpu_id & GPU_ID_VERSION_STATUS) >> GPU_ID_VERSION_STATUS_SHIFT);
+
+ for (; *issues != BASE_HW_ISSUE_END; issues++)
+ set_bit(*issues, &kbdev->hw_issues_mask[0]);
+
+ return MALI_ERROR_NONE;
+}
diff --git a/drivers/gpu/arm/t6xx/kbase/src/common/mali_kbase_hw.h b/drivers/gpu/arm/t6xx/kbase/src/common/mali_kbase_hw.h
new file mode 100755
index 00000000000..6de1953c298
--- /dev/null
+++ b/drivers/gpu/arm/t6xx/kbase/src/common/mali_kbase_hw.h
@@ -0,0 +1,39 @@
+/*
+ *
+ * (C) COPYRIGHT 2012 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained
+ * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
+ * Boston, MA 02110-1301, USA.
+ *
+ */
+
+
+
+/**
+ * @file
+ * Run-time work-arounds helpers
+ */
+
+#ifndef _KBASE_HW_H_
+#define _KBASE_HW_H_
+
+#include "mali_kbase_defs.h"
+
+/**
+ * @brief Tell whether a work-around should be enabled
+ */
+#define kbase_hw_has_issue(kbdev, issue)\
+ test_bit(issue, &(kbdev)->hw_issues_mask[0])
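+
+/*
+ * Illustrative usage sketch (not part of this patch): workarounds are
+ * guarded at their point of use, as mali_kbase_instr.c does for
+ * PRLAM-8186:
+ *
+ *	if (kbase_hw_has_issue(kbdev, BASE_HW_ISSUE_8186))
+ *		kbase_reg_write(kbdev, GPU_CONTROL_REG(PRFCNT_TILER_EN), 0, kctx);
+ */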
+
+/**
+ * @brief Set the HW issues mask depending on the GPU ID
+ */
+mali_error kbase_hw_set_issues_mask(kbase_device *kbdev);
+
+#endif /* _KBASE_HW_H_ */
diff --git a/drivers/gpu/arm/t6xx/kbase/src/common/mali_kbase_instr.c b/drivers/gpu/arm/t6xx/kbase/src/common/mali_kbase_instr.c
new file mode 100755
index 00000000000..484482065ae
--- /dev/null
+++ b/drivers/gpu/arm/t6xx/kbase/src/common/mali_kbase_instr.c
@@ -0,0 +1,616 @@
+/*
+ *
+ * (C) COPYRIGHT 2011-2013 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained
+ * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
+ * Boston, MA 02110-1301, USA.
+ *
+ */
+
+
+
+/**
+ * @file mali_kbase_instr.c
+ * Base kernel instrumentation APIs.
+ */
+
+#include <kbase/src/common/mali_kbase.h>
+#include <kbase/src/common/mali_midg_regmap.h>
+
+/**
+ * @brief Issue Cache Clean & Invalidate command to hardware
+ */
+static void kbasep_instr_hwcnt_cacheclean(kbase_device *kbdev)
+{
+ unsigned long flags;
+ unsigned long pm_flags;
+ u32 irq_mask;
+
+ KBASE_DEBUG_ASSERT(NULL != kbdev);
+
+ spin_lock_irqsave(&kbdev->hwcnt.lock, flags);
+ /* Wait for any reset to complete */
+ while (kbdev->hwcnt.state == KBASE_INSTR_STATE_RESETTING) {
+ spin_unlock_irqrestore(&kbdev->hwcnt.lock, flags);
+ wait_event(kbdev->hwcnt.cache_clean_wait,
+ kbdev->hwcnt.state != KBASE_INSTR_STATE_RESETTING);
+ spin_lock_irqsave(&kbdev->hwcnt.lock, flags);
+ }
+ KBASE_DEBUG_ASSERT(kbdev->hwcnt.state == KBASE_INSTR_STATE_REQUEST_CLEAN);
+
+ /* Enable interrupt */
+ spin_lock_irqsave(&kbdev->pm.power_change_lock, pm_flags);
+ irq_mask = kbase_reg_read(kbdev, GPU_CONTROL_REG(GPU_IRQ_MASK), NULL);
+ kbase_reg_write(kbdev, GPU_CONTROL_REG(GPU_IRQ_MASK), irq_mask | CLEAN_CACHES_COMPLETED, NULL);
+ spin_unlock_irqrestore(&kbdev->pm.power_change_lock, pm_flags);
+
+ /* Clean & invalidate the caches so we're sure the MMU tables for the dump buffer are valid */
+ KBASE_TRACE_ADD(kbdev, CORE_GPU_CLEAN_INV_CACHES, NULL, NULL, 0u, 0);
+ kbase_reg_write(kbdev, GPU_CONTROL_REG(GPU_COMMAND), GPU_COMMAND_CLEAN_INV_CACHES, NULL);
+ kbdev->hwcnt.state = KBASE_INSTR_STATE_CLEANING;
+
+ spin_unlock_irqrestore(&kbdev->hwcnt.lock, flags);
+}
+
+STATIC mali_error kbase_instr_hwcnt_enable_internal(kbase_device *kbdev, kbase_context *kctx, kbase_uk_hwcnt_setup *setup)
+{
+ unsigned long flags, pm_flags;
+ mali_error err = MALI_ERROR_FUNCTION_FAILED;
+ kbasep_js_device_data *js_devdata;
+ u32 irq_mask;
+ int ret;
+ u64 shader_cores_needed;
+
+ KBASE_DEBUG_ASSERT(NULL != kctx);
+ KBASE_DEBUG_ASSERT(NULL != kbdev);
+ KBASE_DEBUG_ASSERT(NULL != setup);
+ KBASE_DEBUG_ASSERT(NULL == kbdev->hwcnt.suspended_kctx);
+
+ shader_cores_needed = kbase_pm_get_present_cores(kbdev, KBASE_PM_CORE_SHADER);
+
+ js_devdata = &kbdev->js_data;
+
+ /* Reject a NULL or non-2048-byte-aligned dump buffer */
+ if ((setup->dump_buffer == 0ULL) || (setup->dump_buffer & (2048 - 1)))
+ goto out_err;
+
+ /* Override core availability policy to ensure all cores are available */
+ kbase_pm_ca_instr_enable(kbdev);
+
+ /* Mark the context as active so the GPU is kept turned on */
+ /* A suspend won't happen here, because we're in a syscall from a userspace
+ * thread. */
+ kbase_pm_context_active(kbdev);
+
+ /* Request the cores early on synchronously - we'll release them on any errors
+ * (e.g. instrumentation already active) */
+ kbase_pm_request_cores_sync(kbdev, MALI_TRUE, shader_cores_needed);
+
+ spin_lock_irqsave(&kbdev->hwcnt.lock, flags);
+
+ if (kbdev->hwcnt.state == KBASE_INSTR_STATE_RESETTING) {
+ /* GPU is being reset */
+ spin_unlock_irqrestore(&kbdev->hwcnt.lock, flags);
+ wait_event(kbdev->hwcnt.wait, kbdev->hwcnt.triggered != 0);
+ spin_lock_irqsave(&kbdev->hwcnt.lock, flags);
+ }
+
+ if (kbdev->hwcnt.state != KBASE_INSTR_STATE_DISABLED) {
+ /* Instrumentation is already enabled */
+ spin_unlock_irqrestore(&kbdev->hwcnt.lock, flags);
+ goto out_unrequest_cores;
+ }
+
+ /* Enable interrupt */
+ spin_lock_irqsave(&kbdev->pm.power_change_lock, pm_flags);
+ irq_mask = kbase_reg_read(kbdev, GPU_CONTROL_REG(GPU_IRQ_MASK), NULL);
+ kbase_reg_write(kbdev, GPU_CONTROL_REG(GPU_IRQ_MASK), irq_mask | PRFCNT_SAMPLE_COMPLETED, NULL);
+ spin_unlock_irqrestore(&kbdev->pm.power_change_lock, pm_flags);
+
+ /* In use, this context is the owner */
+ kbdev->hwcnt.kctx = kctx;
+ /* Remember the dump address so we can reprogram it later */
+ kbdev->hwcnt.addr = setup->dump_buffer;
+ /* Remember all the settings for suspend/resume */
+ if (&kbdev->hwcnt.suspended_state != setup)
+ memcpy(&kbdev->hwcnt.suspended_state, setup, sizeof(kbdev->hwcnt.suspended_state));
+
+ /* Request the clean */
+ kbdev->hwcnt.state = KBASE_INSTR_STATE_REQUEST_CLEAN;
+ kbdev->hwcnt.triggered = 0;
+ /* Clean & invalidate the caches so we're sure the MMU tables for the dump buffer are valid */
+ ret = queue_work(kbdev->hwcnt.cache_clean_wq, &kbdev->hwcnt.cache_clean_work);
+ KBASE_DEBUG_ASSERT(ret);
+
+ spin_unlock_irqrestore(&kbdev->hwcnt.lock, flags);
+
+ /* Wait for cacheclean to complete */
+ wait_event(kbdev->hwcnt.wait, kbdev->hwcnt.triggered != 0);
+
+ KBASE_DEBUG_ASSERT(kbdev->hwcnt.state == KBASE_INSTR_STATE_IDLE);
+
+ /* Schedule the context in */
+ kbasep_js_schedule_privileged_ctx(kbdev, kctx);
+
+ /* Configure */
+ kbase_reg_write(kbdev, GPU_CONTROL_REG(PRFCNT_CONFIG), (kctx->as_nr << PRFCNT_CONFIG_AS_SHIFT) | PRFCNT_CONFIG_MODE_OFF, kctx);
+ kbase_reg_write(kbdev, GPU_CONTROL_REG(PRFCNT_BASE_LO), setup->dump_buffer & 0xFFFFFFFF, kctx);
+ kbase_reg_write(kbdev, GPU_CONTROL_REG(PRFCNT_BASE_HI), setup->dump_buffer >> 32, kctx);
+ kbase_reg_write(kbdev, GPU_CONTROL_REG(PRFCNT_JM_EN), setup->jm_bm, kctx);
+ kbase_reg_write(kbdev, GPU_CONTROL_REG(PRFCNT_SHADER_EN), setup->shader_bm, kctx);
+ kbase_reg_write(kbdev, GPU_CONTROL_REG(PRFCNT_L3_CACHE_EN), setup->l3_cache_bm, kctx);
+ kbase_reg_write(kbdev, GPU_CONTROL_REG(PRFCNT_MMU_L2_EN), setup->mmu_l2_bm, kctx);
+ /* Due to PRLAM-8186 we need to disable the Tiler before we enable the HW counter dump. */
+ if (kbase_hw_has_issue(kbdev, BASE_HW_ISSUE_8186))
+ kbase_reg_write(kbdev, GPU_CONTROL_REG(PRFCNT_TILER_EN), 0, kctx);
+ else
+ kbase_reg_write(kbdev, GPU_CONTROL_REG(PRFCNT_TILER_EN), setup->tiler_bm, kctx);
+
+ kbase_reg_write(kbdev, GPU_CONTROL_REG(PRFCNT_CONFIG), (kctx->as_nr << PRFCNT_CONFIG_AS_SHIFT) | PRFCNT_CONFIG_MODE_MANUAL, kctx);
+
+ /* If the HW has PRLAM-8186 we can now re-enable the tiler HW counter dump */
+ if (kbase_hw_has_issue(kbdev, BASE_HW_ISSUE_8186))
+ kbase_reg_write(kbdev, GPU_CONTROL_REG(PRFCNT_TILER_EN), setup->tiler_bm, kctx);
+
+ spin_lock_irqsave(&kbdev->hwcnt.lock, flags);
+
+ if (kbdev->hwcnt.state == KBASE_INSTR_STATE_RESETTING) {
+ /* GPU is being reset */
+ spin_unlock_irqrestore(&kbdev->hwcnt.lock, flags);
+ wait_event(kbdev->hwcnt.wait, kbdev->hwcnt.triggered != 0);
+ spin_lock_irqsave(&kbdev->hwcnt.lock, flags);
+ }
+
+ kbdev->hwcnt.state = KBASE_INSTR_STATE_IDLE;
+ kbdev->hwcnt.triggered = 1;
+ wake_up(&kbdev->hwcnt.wait);
+
+ spin_unlock_irqrestore(&kbdev->hwcnt.lock, flags);
+
+ err = MALI_ERROR_NONE;
+
+ KBASE_DEBUG_PRINT_INFO(KBASE_CORE, "HW counters dumping set-up for context %p", kctx);
+ return err;
+ out_unrequest_cores:
+ kbase_pm_unrequest_cores(kbdev, MALI_TRUE, shader_cores_needed);
+ kbase_pm_context_idle(kbdev);
+ out_err:
+ return err;
+}
+
+/**
+ * @brief Enable HW counters collection
+ *
+ * Note: will wait for a cache clean to complete
+ */
+mali_error kbase_instr_hwcnt_enable(kbase_context *kctx, kbase_uk_hwcnt_setup *setup)
+{
+ kbase_device *kbdev;
+ mali_bool access_allowed;
+
+ KBASE_DEBUG_ASSERT(NULL != kctx);
+ kbdev = kctx->kbdev;
+ /* Determine if the calling task has access to this capability */
+ access_allowed = kbase_security_has_capability(kctx, KBASE_SEC_INSTR_HW_COUNTERS_COLLECT, KBASE_SEC_FLAG_NOAUDIT);
+ if (MALI_FALSE == access_allowed)
+ return MALI_ERROR_FUNCTION_FAILED;
+
+ return kbase_instr_hwcnt_enable_internal(kbdev, kctx, setup);
+}
+KBASE_EXPORT_SYMBOL(kbase_instr_hwcnt_enable)
+
+/**
+ * @brief Disable HW counters collection
+ *
+ * Note: might sleep, waiting for an ongoing dump to complete
+ */
+mali_error kbase_instr_hwcnt_disable(kbase_context *kctx)
+{
+ unsigned long flags, pm_flags;
+ mali_error err = MALI_ERROR_FUNCTION_FAILED;
+ u32 irq_mask;
+ kbase_device *kbdev;
+
+ KBASE_DEBUG_ASSERT(NULL != kctx);
+ kbdev = kctx->kbdev;
+ KBASE_DEBUG_ASSERT(NULL != kbdev);
+
+ while (1) {
+ spin_lock_irqsave(&kbdev->hwcnt.lock, flags);
+
+ if (kbdev->hwcnt.state == KBASE_INSTR_STATE_DISABLED) {
+ /* Instrumentation is not enabled */
+ spin_unlock_irqrestore(&kbdev->hwcnt.lock, flags);
+ goto out;
+ }
+
+ if (kbdev->hwcnt.kctx != kctx) {
+ /* Instrumentation has been setup for another context */
+ spin_unlock_irqrestore(&kbdev->hwcnt.lock, flags);
+ goto out;
+ }
+
+ if (kbdev->hwcnt.state == KBASE_INSTR_STATE_IDLE)
+ break;
+
+ spin_unlock_irqrestore(&kbdev->hwcnt.lock, flags);
+
+ /* Ongoing dump/setup - wait for its completion */
+ wait_event(kbdev->hwcnt.wait, kbdev->hwcnt.triggered != 0);
+
+ }
+
+ kbdev->hwcnt.state = KBASE_INSTR_STATE_DISABLED;
+ kbdev->hwcnt.triggered = 0;
+
+ /* Disable interrupt */
+ spin_lock_irqsave(&kbdev->pm.power_change_lock, pm_flags);
+ irq_mask = kbase_reg_read(kbdev, GPU_CONTROL_REG(GPU_IRQ_MASK), NULL);
+ kbase_reg_write(kbdev, GPU_CONTROL_REG(GPU_IRQ_MASK), irq_mask & ~PRFCNT_SAMPLE_COMPLETED, NULL);
+ spin_unlock_irqrestore(&kbdev->pm.power_change_lock, pm_flags);
+
+ /* Disable the counters */
+ kbase_reg_write(kbdev, GPU_CONTROL_REG(PRFCNT_CONFIG), 0, kctx);
+
+ kbdev->hwcnt.kctx = NULL;
+ kbdev->hwcnt.addr = 0ULL;
+
+ kbase_pm_ca_instr_disable(kbdev);
+
+ kbase_pm_unrequest_cores(kbdev, MALI_TRUE, kbase_pm_get_present_cores(kbdev, KBASE_PM_CORE_SHADER));
+
+ spin_unlock_irqrestore(&kbdev->hwcnt.lock, flags);
+
+ /* Release the context. This had its own Power Manager Active reference */
+ kbasep_js_release_privileged_ctx(kbdev, kctx);
+
+ /* Also release our Power Manager Active reference */
+ kbase_pm_context_idle(kbdev);
+
+ KBASE_DEBUG_PRINT_INFO(KBASE_CORE, "HW counters dumping disabled for context %p", kctx);
+
+ err = MALI_ERROR_NONE;
+
+ out:
+ return err;
+}
+KBASE_EXPORT_SYMBOL(kbase_instr_hwcnt_disable)
+
+/**
+ * @brief Configure HW counters collection
+ */
+mali_error kbase_instr_hwcnt_setup(kbase_context *kctx, kbase_uk_hwcnt_setup *setup)
+{
+ mali_error err = MALI_ERROR_FUNCTION_FAILED;
+ kbase_device *kbdev;
+
+ KBASE_DEBUG_ASSERT(NULL != kctx);
+
+ kbdev = kctx->kbdev;
+ KBASE_DEBUG_ASSERT(NULL != kbdev);
+
+ if (NULL == setup) {
+ /* Bad parameter - abort */
+ goto out;
+ }
+
+ if (setup->dump_buffer != 0ULL) {
+ /* Enable HW counters */
+ err = kbase_instr_hwcnt_enable(kctx, setup);
+ } else {
+ /* Disable HW counters */
+ err = kbase_instr_hwcnt_disable(kctx);
+ }
+
+ out:
+ return err;
+}
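+
+/*
+ * Illustrative caller sketch (not part of this patch): a minimal counter
+ * collection cycle built from the setup API above, assuming dump_va is a
+ * 2048-byte-aligned GPU virtual address owned by kctx and the bitmaps
+ * enable every counter block:
+ *
+ *	kbase_uk_hwcnt_setup setup = { 0 };
+ *
+ *	setup.dump_buffer = dump_va;
+ *	setup.jm_bm = setup.shader_bm = setup.tiler_bm = ~0u;
+ *	setup.mmu_l2_bm = setup.l3_cache_bm = ~0u;
+ *	if (kbase_instr_hwcnt_setup(kctx, &setup) == MALI_ERROR_NONE) {
+ *		kbase_instr_hwcnt_dump(kctx);
+ *		setup.dump_buffer = 0;
+ *		kbase_instr_hwcnt_setup(kctx, &setup);
+ *	}
+ */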
+
+/**
+ * @brief Issue Dump command to hardware
+ *
+ * Notes:
+ * - does not sleep
+ */
+mali_error kbase_instr_hwcnt_dump_irq(kbase_context *kctx)
+{
+ unsigned long flags;
+ mali_error err = MALI_ERROR_FUNCTION_FAILED;
+ kbase_device *kbdev;
+
+ KBASE_DEBUG_ASSERT(NULL != kctx);
+ kbdev = kctx->kbdev;
+ KBASE_DEBUG_ASSERT(NULL != kbdev);
+
+ spin_lock_irqsave(&kbdev->hwcnt.lock, flags);
+
+ if (kbdev->hwcnt.kctx != kctx) {
+ /* The instrumentation has been setup for another context */
+ goto unlock;
+ }
+
+ if (kbdev->hwcnt.state != KBASE_INSTR_STATE_IDLE) {
+ /* HW counters are disabled or another dump is ongoing, or we're resetting */
+ goto unlock;
+ }
+
+ kbdev->hwcnt.triggered = 0;
+
+ /* Mark that we're dumping - the PF handler can signal that we faulted */
+ kbdev->hwcnt.state = KBASE_INSTR_STATE_DUMPING;
+
+ /* Reconfigure the dump address */
+ kbase_reg_write(kbdev, GPU_CONTROL_REG(PRFCNT_BASE_LO), kbdev->hwcnt.addr & 0xFFFFFFFF, NULL);
+ kbase_reg_write(kbdev, GPU_CONTROL_REG(PRFCNT_BASE_HI), kbdev->hwcnt.addr >> 32, NULL);
+
+ /* Start dumping */
+ KBASE_TRACE_ADD(kbdev, CORE_GPU_PRFCNT_SAMPLE, NULL, NULL, kbdev->hwcnt.addr, 0);
+ kbase_reg_write(kbdev, GPU_CONTROL_REG(GPU_COMMAND), GPU_COMMAND_PRFCNT_SAMPLE, kctx);
+
+ KBASE_DEBUG_PRINT_INFO(KBASE_CORE, "HW counters dump started for context %p", kctx);
+
+ err = MALI_ERROR_NONE;
+
+ unlock:
+ spin_unlock_irqrestore(&kbdev->hwcnt.lock, flags);
+ return err;
+}
+KBASE_EXPORT_SYMBOL(kbase_instr_hwcnt_dump_irq)
+
+/**
+ * @brief Tell whether the HW counters dump has completed
+ *
+ * Notes:
+ * - does not sleep
+ * - once the dump has completed, success is set to MALI_TRUE if it
+ * succeeded or MALI_FALSE if it faulted; it is left untouched while
+ * a dump is still in progress
+ */
+mali_bool kbase_instr_hwcnt_dump_complete(kbase_context *kctx, mali_bool * const success)
+{
+ unsigned long flags;
+ mali_bool complete = MALI_FALSE;
+ kbase_device *kbdev;
+
+ KBASE_DEBUG_ASSERT(NULL != kctx);
+ kbdev = kctx->kbdev;
+ KBASE_DEBUG_ASSERT(NULL != kbdev);
+ KBASE_DEBUG_ASSERT(NULL != success);
+
+ spin_lock_irqsave(&kbdev->hwcnt.lock, flags);
+
+ if (kbdev->hwcnt.state == KBASE_INSTR_STATE_IDLE) {
+ *success = MALI_TRUE;
+ complete = MALI_TRUE;
+ } else if (kbdev->hwcnt.state == KBASE_INSTR_STATE_FAULT) {
+ *success = MALI_FALSE;
+ complete = MALI_TRUE;
+ kbdev->hwcnt.state = KBASE_INSTR_STATE_IDLE;
+ }
+
+ spin_unlock_irqrestore(&kbdev->hwcnt.lock, flags);
+
+ return complete;
+}
+KBASE_EXPORT_SYMBOL(kbase_instr_hwcnt_dump_complete)
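+
+/*
+ * Illustrative polling sketch (not part of this patch): after a successful
+ * kbase_instr_hwcnt_dump_irq(), completion can be polled without sleeping;
+ * handle_dump_fault() is a hypothetical error handler:
+ *
+ *	mali_bool success;
+ *
+ *	if (kbase_instr_hwcnt_dump_irq(kctx) == MALI_ERROR_NONE) {
+ *		while (!kbase_instr_hwcnt_dump_complete(kctx, &success))
+ *			cpu_relax();
+ *		if (!success)
+ *			handle_dump_fault();
+ *	}
+ */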
+
+/**
+ * @brief Issue Dump command to hardware and wait for completion
+ */
+mali_error kbase_instr_hwcnt_dump(kbase_context *kctx)
+{
+ unsigned long flags;
+ mali_error err = MALI_ERROR_FUNCTION_FAILED;
+ kbase_device *kbdev;
+
+ KBASE_DEBUG_ASSERT(NULL != kctx);
+ kbdev = kctx->kbdev;
+ KBASE_DEBUG_ASSERT(NULL != kbdev);
+
+ err = kbase_instr_hwcnt_dump_irq(kctx);
+ if (MALI_ERROR_NONE != err) {
+ /* Can't dump HW counters */
+ goto out;
+ }
+
+ /* Wait for dump & cacheclean to complete */
+ wait_event(kbdev->hwcnt.wait, kbdev->hwcnt.triggered != 0);
+
+ spin_lock_irqsave(&kbdev->hwcnt.lock, flags);
+
+ if (kbdev->hwcnt.state == KBASE_INSTR_STATE_RESETTING) {
+ /* GPU is being reset */
+ spin_unlock_irqrestore(&kbdev->hwcnt.lock, flags);
+ wait_event(kbdev->hwcnt.wait, kbdev->hwcnt.triggered != 0);
+ spin_lock_irqsave(&kbdev->hwcnt.lock, flags);
+ }
+
+ if (kbdev->hwcnt.state == KBASE_INSTR_STATE_FAULT) {
+ err = MALI_ERROR_FUNCTION_FAILED;
+ kbdev->hwcnt.state = KBASE_INSTR_STATE_IDLE;
+ } else {
+ /* Dump done */
+ KBASE_DEBUG_ASSERT(kbdev->hwcnt.state == KBASE_INSTR_STATE_IDLE);
+ err = MALI_ERROR_NONE;
+ }
+
+ spin_unlock_irqrestore(&kbdev->hwcnt.lock, flags);
+ out:
+ return err;
+}
+KBASE_EXPORT_SYMBOL(kbase_instr_hwcnt_dump)
+
+/**
+ * @brief Clear the HW counters
+ */
+mali_error kbase_instr_hwcnt_clear(kbase_context *kctx)
+{
+ unsigned long flags;
+ mali_error err = MALI_ERROR_FUNCTION_FAILED;
+ kbase_device *kbdev;
+
+ KBASE_DEBUG_ASSERT(NULL != kctx);
+ kbdev = kctx->kbdev;
+ KBASE_DEBUG_ASSERT(NULL != kbdev);
+
+ spin_lock_irqsave(&kbdev->hwcnt.lock, flags);
+
+ if (kbdev->hwcnt.state == KBASE_INSTR_STATE_RESETTING) {
+ /* GPU is being reset */
+ spin_unlock_irqrestore(&kbdev->hwcnt.lock, flags);
+ wait_event(kbdev->hwcnt.wait, kbdev->hwcnt.triggered != 0);
+ spin_lock_irqsave(&kbdev->hwcnt.lock, flags);
+ }
+
+ /* Check it's the context previously set up and we're not already dumping */
+ if (kbdev->hwcnt.kctx != kctx || kbdev->hwcnt.state != KBASE_INSTR_STATE_IDLE)
+ goto out;
+
+ /* Clear the counters */
+ KBASE_TRACE_ADD(kbdev, CORE_GPU_PRFCNT_CLEAR, NULL, NULL, 0u, 0);
+ kbase_reg_write(kbdev, GPU_CONTROL_REG(GPU_COMMAND), GPU_COMMAND_PRFCNT_CLEAR, kctx);
+
+ err = MALI_ERROR_NONE;
+
+ out:
+ spin_unlock_irqrestore(&kbdev->hwcnt.lock, flags);
+ return err;
+}
+KBASE_EXPORT_SYMBOL(kbase_instr_hwcnt_clear)
+
+/**
+ * Workqueue for handling cache cleaning
+ */
+void kbasep_cache_clean_worker(struct work_struct *data)
+{
+ kbase_device *kbdev;
+ unsigned long flags;
+
+ kbdev = container_of(data, kbase_device, hwcnt.cache_clean_work);
+
+ mutex_lock(&kbdev->cacheclean_lock);
+ kbasep_instr_hwcnt_cacheclean(kbdev);
+
+ spin_lock_irqsave(&kbdev->hwcnt.lock, flags);
+ /* Wait for our condition, and any reset to complete */
+ while (kbdev->hwcnt.state == KBASE_INSTR_STATE_RESETTING
+ || kbdev->hwcnt.state == KBASE_INSTR_STATE_CLEANING) {
+ spin_unlock_irqrestore(&kbdev->hwcnt.lock, flags);
+ wait_event(kbdev->hwcnt.cache_clean_wait,
+ (kbdev->hwcnt.state != KBASE_INSTR_STATE_RESETTING
+ && kbdev->hwcnt.state != KBASE_INSTR_STATE_CLEANING));
+ spin_lock_irqsave(&kbdev->hwcnt.lock, flags);
+ }
+ KBASE_DEBUG_ASSERT(kbdev->hwcnt.state == KBASE_INSTR_STATE_CLEANED);
+
+ /* All finished and idle */
+ kbdev->hwcnt.state = KBASE_INSTR_STATE_IDLE;
+ kbdev->hwcnt.triggered = 1;
+ wake_up(&kbdev->hwcnt.wait);
+
+ spin_unlock_irqrestore(&kbdev->hwcnt.lock, flags);
+ mutex_unlock(&kbdev->cacheclean_lock);
+}
+
+/**
+ * @brief Dump complete interrupt received
+ */
+void kbase_instr_hwcnt_sample_done(kbase_device *kbdev)
+{
+ unsigned long flags;
+ spin_lock_irqsave(&kbdev->hwcnt.lock, flags);
+
+ if (kbdev->hwcnt.state == KBASE_INSTR_STATE_FAULT) {
+ kbdev->hwcnt.triggered = 1;
+ wake_up(&kbdev->hwcnt.wait);
+ } else if (kbdev->hwcnt.state == KBASE_INSTR_STATE_DUMPING) {
+ int ret;
+ /* Always clean and invalidate the cache after a successful dump */
+ kbdev->hwcnt.state = KBASE_INSTR_STATE_REQUEST_CLEAN;
+ ret = queue_work(kbdev->hwcnt.cache_clean_wq, &kbdev->hwcnt.cache_clean_work);
+ KBASE_DEBUG_ASSERT(ret);
+ }
+ /* NOTE: In the state KBASE_INSTR_STATE_RESETTING, we're in a reset,
+ * and the instrumentation state hasn't been restored yet -
+ * kbasep_reset_timeout_worker() will do the rest of the work */
+
+ spin_unlock_irqrestore(&kbdev->hwcnt.lock, flags);
+}
+
+/**
+ * @brief Cache clean interrupt received
+ */
+void kbase_clean_caches_done(kbase_device *kbdev)
+{
+ u32 irq_mask;
+
+ if (kbdev->hwcnt.state != KBASE_INSTR_STATE_DISABLED) {
+ unsigned long flags;
+ unsigned long pm_flags;
+ spin_lock_irqsave(&kbdev->hwcnt.lock, flags);
+ /* Disable interrupt */
+ spin_lock_irqsave(&kbdev->pm.power_change_lock, pm_flags);
+ irq_mask = kbase_reg_read(kbdev, GPU_CONTROL_REG(GPU_IRQ_MASK), NULL);
+ kbase_reg_write(kbdev, GPU_CONTROL_REG(GPU_IRQ_MASK), irq_mask & ~CLEAN_CACHES_COMPLETED, NULL);
+ spin_unlock_irqrestore(&kbdev->pm.power_change_lock, pm_flags);
+
+ /* Wakeup... */
+ if (kbdev->hwcnt.state == KBASE_INSTR_STATE_CLEANING) {
+ /* Only wake if we weren't resetting */
+ kbdev->hwcnt.state = KBASE_INSTR_STATE_CLEANED;
+ wake_up(&kbdev->hwcnt.cache_clean_wait);
+ }
+ /* NOTE: In the state KBASE_INSTR_STATE_RESETTING, we're in a reset,
+ * and the instrumentation state hasn't been restored yet -
+ * kbasep_reset_timeout_worker() will do the rest of the work */
+
+ spin_unlock_irqrestore(&kbdev->hwcnt.lock, flags);
+ }
+}
+
+
+/* Disable instrumentation and wait for any existing dump to complete.
+ * It's assumed that there's only one privileged context.
+ * This is safe to do without a lock during an OS suspend, because the state
+ * only changes in response to user-space IOCTLs */
+void kbase_instr_hwcnt_suspend(kbase_device *kbdev)
+{
+ kbase_context *kctx;
+ KBASE_DEBUG_ASSERT(kbdev);
+ KBASE_DEBUG_ASSERT(!kbdev->hwcnt.suspended_kctx);
+
+ kctx = kbdev->hwcnt.kctx;
+ kbdev->hwcnt.suspended_kctx = kctx;
+
+ /* Relevant state was saved into hwcnt.suspended_state when enabling the
+ * counters */
+
+ if (kctx) {
+ KBASE_DEBUG_ASSERT(kctx->jctx.sched_info.ctx.flags & KBASE_CTX_FLAG_PRIVILEGED);
+ kbase_instr_hwcnt_disable(kctx);
+ }
+}
+
+void kbase_instr_hwcnt_resume(kbase_device *kbdev)
+{
+ kbase_context *kctx;
+ KBASE_DEBUG_ASSERT(kbdev);
+
+ kctx = kbdev->hwcnt.suspended_kctx;
+ kbdev->hwcnt.suspended_kctx = NULL;
+
+ if (kctx) {
+ mali_error err;
+ err = kbase_instr_hwcnt_enable_internal(kbdev, kctx, &kbdev->hwcnt.suspended_state);
+ WARN(err != MALI_ERROR_NONE,
+ "Failed to restore instrumented hardware counters on resume\n");
+ }
+}
+
diff --git a/drivers/gpu/arm/t6xx/kbase/src/common/mali_kbase_jd.c b/drivers/gpu/arm/t6xx/kbase/src/common/mali_kbase_jd.c
new file mode 100755
index 00000000000..f1961d2c511
--- /dev/null
+++ b/drivers/gpu/arm/t6xx/kbase/src/common/mali_kbase_jd.c
@@ -0,0 +1,1438 @@
+/*
+ *
+ * (C) COPYRIGHT 2010-2013 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained
+ * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
+ * Boston, MA 02110-1301, USA.
+ *
+ */
+
+
+
+#if defined(CONFIG_DMA_SHARED_BUFFER)
+#include <linux/dma-buf.h>
+#endif /* defined(CONFIG_DMA_SHARED_BUFFER) */
+#ifdef CONFIG_COMPAT
+#include <linux/compat.h>
+#endif
+#include <kbase/src/common/mali_kbase.h>
+#include <kbase/src/common/mali_kbase_uku.h>
+#include <kbase/src/common/mali_kbase_js_affinity.h>
+#include <kbase/src/common/mali_kbase_10969_workaround.h>
+#ifdef CONFIG_UMP
+#include <linux/ump.h>
+#endif /* CONFIG_UMP */
+
+#define beenthere(f, a...) KBASE_DEBUG_PRINT_INFO(KBASE_JD, "%s:" f, __func__, ##a)
+
+/*
+ * This is the kernel side of the API. The only entry points are:
+ * - kbase_jd_submit(): Called from userspace to submit a single bag
+ * - kbase_jd_done(): Called from interrupt context to track the
+ * completion of a job.
+ * Callouts:
+ * - to the job manager (enqueue a job)
+ * - to the event subsystem (signals the completion/failure of bag/job-chains).
+ */
+
+static void *get_compat_pointer(const kbase_pointer *p)
+{
+#ifdef CONFIG_COMPAT
+ if (is_compat_task())
+ return (void *)p->compat_value;
+ else
+#endif
+ return p->value;
+}
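+
+/*
+ * Note: on a 64-bit kernel a 32-bit (compat) task passes pointers as
+ * 32-bit values, so get_compat_pointer() widens p->compat_value instead
+ * of reading the full 64-bit p->value.
+ */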
+
+/* Runs an atom, either by handing it to the JS or, for soft-jobs, by running it immediately
+ *
+ * Returns whether the JS needs a reschedule.
+ *
+ * Note that the caller must also check the atom status and,
+ * if it is KBASE_JD_ATOM_STATE_COMPLETED, must call jd_done_nolock
+ */
+static int jd_run_atom(kbase_jd_atom *katom)
+{
+ kbase_context *kctx = katom->kctx;
+ KBASE_DEBUG_ASSERT(katom->status != KBASE_JD_ATOM_STATE_UNUSED);
+
+ if ((katom->core_req & BASEP_JD_REQ_ATOM_TYPE) == BASE_JD_REQ_DEP) {
+ /* Dependency only atom */
+ katom->status = KBASE_JD_ATOM_STATE_COMPLETED;
+ return 0;
+ } else if (katom->core_req & BASE_JD_REQ_SOFT_JOB) {
+ /* Soft-job */
+ if (kbase_process_soft_job(katom) == 0) {
+ kbase_finish_soft_job(katom);
+ katom->status = KBASE_JD_ATOM_STATE_COMPLETED;
+ } else {
+ /* The job has not completed */
+ list_add_tail(&katom->dep_item[0], &kctx->waiting_soft_jobs);
+ }
+ return 0;
+ }
+ katom->status = KBASE_JD_ATOM_STATE_IN_JS;
+ /* Queue an action about whether we should try scheduling a context */
+ return kbasep_js_add_job(kctx, katom);
+}
+
+#ifdef CONFIG_KDS
+
+/* Add the katom to the kds waiting list.
+ * Atoms must be added to the waiting list after a successful call to kds_async_waitall.
+ * The caller must hold the kbase_jd_context.lock */
+
+static void kbase_jd_kds_waiters_add(kbase_jd_atom *katom)
+{
+ kbase_context *kctx;
+ KBASE_DEBUG_ASSERT(katom);
+
+ kctx = katom->kctx;
+
+ list_add_tail(&katom->node, &kctx->waiting_kds_resource);
+}
+
+/* Remove the katom from the kds waiting list.
+ * Atoms must be removed from the waiting list before a call to kds_resource_set_release_sync.
+ * The supplied katom must first have been added to the list with a call to kbase_jd_kds_waiters_add.
+ * The caller must hold the kbase_jd_context.lock */
+
+static void kbase_jd_kds_waiters_remove(kbase_jd_atom *katom)
+{
+ KBASE_DEBUG_ASSERT(katom);
+ list_del(&katom->node);
+}
+
+static void kds_dep_clear(void *callback_parameter, void *callback_extra_parameter)
+{
+ kbase_jd_atom *katom;
+ kbase_jd_context *ctx;
+ kbase_device *kbdev;
+
+ katom = (kbase_jd_atom *) callback_parameter;
+ KBASE_DEBUG_ASSERT(katom);
+ ctx = &katom->kctx->jctx;
+ kbdev = katom->kctx->kbdev;
+ KBASE_DEBUG_ASSERT(kbdev);
+
+ mutex_lock(&ctx->lock);
+
+ /* KDS resource has already been satisfied (e.g. due to zapping) */
+ if (katom->kds_dep_satisfied)
+ goto out;
+
+ /* This atom's KDS dependency has now been met */
+ katom->kds_dep_satisfied = MALI_TRUE;
+
+ /* Check whether the atom's other dependencies were already met */
+ if (!katom->dep_atom[0] && !katom->dep_atom[1]) {
+ /* katom dep complete, attempt to run it */
+ mali_bool resched = MALI_FALSE;
+ resched = jd_run_atom(katom);
+
+ if (katom->status == KBASE_JD_ATOM_STATE_COMPLETED) {
+ /* The atom has already finished */
+ resched |= jd_done_nolock(katom);
+ }
+
+ if (resched)
+ kbasep_js_try_schedule_head_ctx(kbdev);
+ }
+ out:
+ mutex_unlock(&ctx->lock);
+}
+
+void kbase_cancel_kds_wait_job(kbase_jd_atom *katom)
+{
+ KBASE_DEBUG_ASSERT(katom);
+
+ /* Prevent jd_done_nolock from being called twice on an atom when
+ * there is a race between job completion and cancellation */
+
+ if (katom->status == KBASE_JD_ATOM_STATE_QUEUED) {
+ /* Wait was cancelled - zap the atom */
+ katom->event_code = BASE_JD_EVENT_JOB_CANCELLED;
+ if (jd_done_nolock(katom)) {
+ kbasep_js_try_schedule_head_ctx(katom->kctx->kbdev);
+ }
+ }
+}
+#endif /* CONFIG_KDS */
+
+#ifdef CONFIG_DMA_SHARED_BUFFER
+static mali_error kbase_jd_umm_map(kbase_context *kctx, struct kbase_va_region *reg)
+{
+ struct sg_table *st;
+ struct scatterlist *s;
+ int i;
+ phys_addr_t *pa;
+ mali_error err;
+
+ KBASE_DEBUG_ASSERT(NULL == reg->imported_metadata.umm.st);
+ st = dma_buf_map_attachment(reg->imported_metadata.umm.dma_attachment, DMA_BIDIRECTIONAL);
+
+ if (IS_ERR_OR_NULL(st))
+ return MALI_ERROR_FUNCTION_FAILED;
+
+ /* save for later */
+ reg->imported_metadata.umm.st = st;
+
+ pa = kbase_get_phy_pages(reg);
+ KBASE_DEBUG_ASSERT(pa);
+
+ for_each_sg(st->sgl, s, st->nents, i) {
+ int j;
+ size_t pages = PFN_DOWN(sg_dma_len(s));
+
+ for (j = 0; j < pages; j++)
+ *pa++ = sg_dma_address(s) + (j << PAGE_SHIFT);
+ }
+
+ err = kbase_mmu_insert_pages(kctx, reg->start_pfn, kbase_get_phy_pages(reg), reg->nr_alloc_pages, reg->flags | KBASE_REG_GPU_WR | KBASE_REG_GPU_RD);
+
+ if (MALI_ERROR_NONE != err) {
+ dma_buf_unmap_attachment(reg->imported_metadata.umm.dma_attachment, reg->imported_metadata.umm.st, DMA_BIDIRECTIONAL);
+ reg->imported_metadata.umm.st = NULL;
+ }
+
+ return err;
+}
+
+static void kbase_jd_umm_unmap(kbase_context *kctx, struct kbase_va_region *reg)
+{
+ KBASE_DEBUG_ASSERT(kctx);
+ KBASE_DEBUG_ASSERT(reg);
+ KBASE_DEBUG_ASSERT(reg->imported_metadata.umm.dma_attachment);
+ KBASE_DEBUG_ASSERT(reg->imported_metadata.umm.st);
+ kbase_mmu_teardown_pages(kctx, reg->start_pfn, reg->nr_alloc_pages);
+ dma_buf_unmap_attachment(reg->imported_metadata.umm.dma_attachment, reg->imported_metadata.umm.st, DMA_BIDIRECTIONAL);
+ reg->imported_metadata.umm.st = NULL;
+}
+#endif /* CONFIG_DMA_SHARED_BUFFER */
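+
+/*
+ * Note on the dma-buf mapping lifecycle: kbase_jd_umm_map() and
+ * kbase_jd_umm_unmap() are paired around current_mapping_usage_count in
+ * kbase_jd_pre_external_resources()/kbase_jd_post_external_resources()
+ * below, so an imported buffer is mapped by the first atom that uses it
+ * and unmapped when the last such atom completes.
+ */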
+
+void kbase_jd_free_external_resources(kbase_jd_atom *katom)
+{
+#ifdef CONFIG_KDS
+ if (katom->kds_rset) {
+ kbase_jd_context * jctx = &katom->kctx->jctx;
+
+ /*
+ * As the atom is no longer waiting, remove it from
+ * the waiting list.
+ */
+
+ mutex_lock(&jctx->lock);
+ kbase_jd_kds_waiters_remove(katom);
+ mutex_unlock(&jctx->lock);
+
+ /* Release the kds resource or cancel if zapping */
+ kds_resource_set_release_sync(&katom->kds_rset);
+ }
+#endif /* CONFIG_KDS */
+}
+
+static void kbase_jd_post_external_resources(kbase_jd_atom *katom)
+{
+ KBASE_DEBUG_ASSERT(katom);
+ KBASE_DEBUG_ASSERT(katom->core_req & BASE_JD_REQ_EXTERNAL_RESOURCES);
+
+#ifdef CONFIG_KDS
+ /* Prevent the KDS resource from triggering the atom in case of zapping */
+ if (katom->kds_rset)
+ katom->kds_dep_satisfied = MALI_TRUE;
+#endif /* CONFIG_KDS */
+
+#if defined(CONFIG_DMA_SHARED_BUFFER) || defined(CONFIG_MALI_DEBUG)
+ /* Lock also used in debug mode just for lock order checking */
+ kbase_gpu_vm_lock(katom->kctx);
+#endif /* defined(CONFIG_DMA_SHARED_BUFFER) || defined(CONFIG_MALI_DEBUG) */
+ /* only roll back if extres is non-NULL */
+ if (katom->extres) {
+#ifdef CONFIG_DMA_SHARED_BUFFER
+ u32 res_no;
+ res_no = katom->nr_extres;
+ while (res_no-- > 0) {
+ base_external_resource *res;
+ kbase_va_region *reg;
+
+ res = &katom->extres[res_no];
+ reg = kbase_region_tracker_find_region_enclosing_address(katom->kctx, res->ext_resource & ~BASE_EXT_RES_ACCESS_EXCLUSIVE);
+ /* if reg wasn't found then it has been freed while the job ran */
+ if (reg && reg->imported_type == BASE_TMEM_IMPORT_TYPE_UMM) {
+ /* unmap if this was the last atom using the buffer */
+ if (1 == reg->imported_metadata.umm.current_mapping_usage_count--)
+ kbase_jd_umm_unmap(katom->kctx, reg);
+ }
+ }
+#endif /* CONFIG_DMA_SHARED_BUFFER */
+ kfree(katom->extres);
+ katom->extres = NULL;
+ }
+#if defined(CONFIG_DMA_SHARED_BUFFER) || defined(CONFIG_MALI_DEBUG)
+ /* Lock also used in debug mode just for lock order checking */
+ kbase_gpu_vm_unlock(katom->kctx);
+#endif /* defined(CONFIG_DMA_SHARED_BUFFER) || defined(CONFIG_MALI_DEBUG) */
+}
+
+#if (defined(CONFIG_KDS) && defined(CONFIG_UMP)) || defined(CONFIG_DMA_SHARED_BUFFER_USES_KDS)
+static void add_kds_resource(struct kds_resource *kds_res, struct kds_resource **kds_resources, u32 *kds_res_count, unsigned long *kds_access_bitmap, mali_bool exclusive)
+{
+ u32 i;
+
+ for (i = 0; i < *kds_res_count; i++) {
+ /* Duplicate resource, ignore */
+ if (kds_resources[i] == kds_res)
+ return;
+ }
+
+ kds_resources[*kds_res_count] = kds_res;
+ if (exclusive)
+ set_bit(*kds_res_count, kds_access_bitmap);
+ (*kds_res_count)++;
+}
+#endif
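+
+/*
+ * Illustrative note (not part of this patch): with three resources where
+ * only the second needs exclusive access, add_kds_resource() would build
+ * kds_resources = {r0, r1, r2} with kds_res_count = 3 and only bit 1 set
+ * in kds_access_bitmap, which kds_async_waitall() treats as an exclusive
+ * wait on r1 and shared waits on r0 and r2.
+ */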
+
+/*
+ * Set up external resources needed by this job.
+ *
+ * jctx.lock must be held when this is called.
+ */
+
+static mali_error kbase_jd_pre_external_resources(kbase_jd_atom *katom, const base_jd_atom_v2 *user_atom)
+{
+ mali_error err_ret_val = MALI_ERROR_FUNCTION_FAILED;
+ u32 res_no;
+#ifdef CONFIG_KDS
+ u32 kds_res_count = 0;
+ struct kds_resource **kds_resources = NULL;
+ unsigned long *kds_access_bitmap = NULL;
+#endif /* CONFIG_KDS */
+
+ KBASE_DEBUG_ASSERT(katom);
+ KBASE_DEBUG_ASSERT(katom->core_req & BASE_JD_REQ_EXTERNAL_RESOURCES);
+
+ /* no resources encoded, early out */
+ if (!katom->nr_extres)
+ return MALI_ERROR_FUNCTION_FAILED;
+
+ katom->extres = kmalloc(sizeof(base_external_resource) * katom->nr_extres, GFP_KERNEL);
+ if (NULL == katom->extres) {
+ err_ret_val = MALI_ERROR_OUT_OF_MEMORY;
+ goto early_err_out;
+ }
+
+ if (copy_from_user(katom->extres, get_compat_pointer(&user_atom->extres_list), sizeof(base_external_resource) * katom->nr_extres) != 0) {
+ err_ret_val = MALI_ERROR_FUNCTION_FAILED;
+ goto early_err_out;
+ }
+#ifdef CONFIG_KDS
+ /* assume we have to wait for all */
+ KBASE_DEBUG_ASSERT(0 != katom->nr_extres);
+ kds_resources = kmalloc(sizeof(struct kds_resource *) * katom->nr_extres, GFP_KERNEL);
+
+ if (NULL == kds_resources) {
+ err_ret_val = MALI_ERROR_OUT_OF_MEMORY;
+ goto early_err_out;
+ }
+
+ KBASE_DEBUG_ASSERT(0 != katom->nr_extres);
+ kds_access_bitmap = kzalloc(sizeof(unsigned long) * ((katom->nr_extres + BITS_PER_LONG - 1) / BITS_PER_LONG), GFP_KERNEL);
+
+ if (NULL == kds_access_bitmap) {
+ err_ret_val = MALI_ERROR_OUT_OF_MEMORY;
+ goto early_err_out;
+ }
+#endif /* CONFIG_KDS */
+
+#if defined(CONFIG_DMA_SHARED_BUFFER) || defined(CONFIG_MALI_DEBUG)
+ /* need to keep the GPU VM locked while we set up UMM buffers */
+ /* Lock also used in debug mode just for lock order checking */
+ kbase_gpu_vm_lock(katom->kctx);
+#endif /* defined(CONFIG_DMA_SHARED_BUFFER) || defined(CONFIG_MALI_DEBUG) */
+
+ for (res_no = 0; res_no < katom->nr_extres; res_no++) {
+ base_external_resource *res;
+ kbase_va_region *reg;
+
+ res = &katom->extres[res_no];
+ reg = kbase_region_tracker_find_region_enclosing_address(katom->kctx, res->ext_resource & ~BASE_EXT_RES_ACCESS_EXCLUSIVE);
+ /* did we find a matching region object? */
+ if (NULL == reg) {
+ /* roll back */
+ goto failed_loop;
+ }
+
+ /* decide what needs to happen for this resource */
+ switch (reg->imported_type) {
+ case BASE_TMEM_IMPORT_TYPE_UMP:
+ {
+#if defined(CONFIG_KDS) && defined(CONFIG_UMP)
+ struct kds_resource *kds_res;
+ kds_res = ump_dd_kds_resource_get(reg->imported_metadata.ump_handle);
+ if (kds_res)
+ add_kds_resource(kds_res, kds_resources, &kds_res_count, kds_access_bitmap, katom->extres[res_no].ext_resource & BASE_EXT_RES_ACCESS_EXCLUSIVE);
+#endif /*defined(CONFIG_KDS) && defined(CONFIG_UMP) */
+ break;
+ }
+#ifdef CONFIG_DMA_SHARED_BUFFER
+ case BASE_TMEM_IMPORT_TYPE_UMM:
+ {
+#ifdef CONFIG_DMA_SHARED_BUFFER_USES_KDS
+ struct kds_resource *kds_res;
+ kds_res = get_dma_buf_kds_resource(reg->imported_metadata.umm.dma_buf);
+ if (kds_res)
+ add_kds_resource(kds_res, kds_resources, &kds_res_count, kds_access_bitmap, katom->extres[res_no].ext_resource & BASE_EXT_RES_ACCESS_EXCLUSIVE);
+#endif
+ reg->imported_metadata.umm.current_mapping_usage_count++;
+ if (1 == reg->imported_metadata.umm.current_mapping_usage_count) {
+ /* use a local variable so as not to pollute err_ret_val
+ * with a potential success value, as some other gotos depend
+ * on the default error code stored in err_ret_val */
+ mali_error tmp;
+ tmp = kbase_jd_umm_map(katom->kctx, reg);
+ if (MALI_ERROR_NONE != tmp) {
+ /* failed to map this buffer, roll back */
+ err_ret_val = tmp;
+ reg->imported_metadata.umm.current_mapping_usage_count--;
+ goto failed_loop;
+ }
+ }
+ break;
+ }
+#endif
+ default:
+ goto failed_loop;
+ }
+ }
+ /* successfully parsed the extres array */
+#if defined(CONFIG_DMA_SHARED_BUFFER) || defined(CONFIG_MALI_DEBUG)
+ /* drop the vm lock before we call into kds */
+ /* Lock also used in debug mode just for lock order checking */
+ kbase_gpu_vm_unlock(katom->kctx);
+#endif /* defined(CONFIG_DMA_SHARED_BUFFER) || defined(CONFIG_MALI_DEBUG) */
+
+#ifdef CONFIG_KDS
+ if (kds_res_count) {
+ int wait_failed;
+ /* We have resources to wait for with kds */
+ katom->kds_dep_satisfied = MALI_FALSE;
+
+ wait_failed = kds_async_waitall(&katom->kds_rset,
+ &katom->kctx->jctx.kds_cb,
+ katom,
+ NULL,
+ kds_res_count,
+ kds_access_bitmap,
+ kds_resources);
+ if (wait_failed) {
+ goto failed_kds_setup;
+ } else {
+ kbase_jd_kds_waiters_add(katom);
+ }
+ } else {
+ /* Nothing to wait for, so kds dep met */
+ katom->kds_dep_satisfied = MALI_TRUE;
+ }
+ kfree(kds_resources);
+ kfree(kds_access_bitmap);
+#endif /* CONFIG_KDS */
+
+ /* all done OK */
+ return MALI_ERROR_NONE;
+
+/* error handling section */
+
+#ifdef CONFIG_KDS
+ failed_kds_setup:
+
+#if defined(CONFIG_DMA_SHARED_BUFFER) || defined(CONFIG_MALI_DEBUG)
+ /* lock before we unmap */
+ /* Lock also used in debug mode just for lock order checking */
+ kbase_gpu_vm_lock(katom->kctx);
+#endif /* defined(CONFIG_DMA_SHARED_BUFFER) || defined(CONFIG_MALI_DEBUG) */
+#endif /* CONFIG_KDS */
+
+ failed_loop:
+#ifdef CONFIG_DMA_SHARED_BUFFER
+ /* undo the loop work */
+ while (res_no-- > 0) {
+ base_external_resource *res;
+ kbase_va_region *reg;
+
+ res = &katom->extres[res_no];
+ reg = kbase_region_tracker_find_region_enclosing_address(katom->kctx, res->ext_resource & ~BASE_EXT_RES_ACCESS_EXCLUSIVE);
+ /* if reg wasn't found then it has been freed when we set up kds */
+ if (reg && reg->imported_type == BASE_TMEM_IMPORT_TYPE_UMM) {
+ reg->imported_metadata.umm.current_mapping_usage_count--;
+ if (0 == reg->imported_metadata.umm.current_mapping_usage_count)
+ kbase_jd_umm_unmap(katom->kctx, reg);
+ }
+ }
+#endif /* CONFIG_DMA_SHARED_BUFFER */
+#if defined(CONFIG_DMA_SHARED_BUFFER) || defined(CONFIG_MALI_DEBUG)
+ /* Lock also used in debug mode just for lock order checking */
+ kbase_gpu_vm_unlock(katom->kctx);
+#endif /* defined(CONFIG_DMA_SHARED_BUFFER) || defined(CONFIG_MALI_DEBUG) */
+
+ early_err_out:
+ kfree(katom->extres);
+ katom->extres = NULL;
+#ifdef CONFIG_KDS
+ kfree(kds_resources);
+ kfree(kds_access_bitmap);
+#endif /* CONFIG_KDS */
+ return err_ret_val;
+}
+
+STATIC INLINE void jd_resolve_dep(struct list_head *out_list, kbase_jd_atom *katom, u8 d)
+{
+ u8 other_d = !d;
+
+ while (!list_empty(&katom->dep_head[d])) {
+ kbase_jd_atom *dep_atom = list_entry(katom->dep_head[d].next, kbase_jd_atom, dep_item[d]);
+ list_del(katom->dep_head[d].next);
+
+ dep_atom->dep_atom[d] = NULL;
+
+ if (katom->event_code != BASE_JD_EVENT_DONE) {
+ /* Atom failed, so remove the other dependencies and immediately fail the atom */
+ if (dep_atom->dep_atom[other_d]) {
+ list_del(&dep_atom->dep_item[other_d]);
+ dep_atom->dep_atom[other_d] = NULL;
+ }
+#ifdef CONFIG_KDS
+ if (!dep_atom->kds_dep_satisfied) {
+ /* Just set kds_dep_satisfied to true. If the callback happens after this then it will early out and
+ * do nothing. If the callback doesn't happen then kbase_jd_post_external_resources will clean up
+ */
+ dep_atom->kds_dep_satisfied = MALI_TRUE;
+ }
+#endif
+
+ dep_atom->event_code = katom->event_code;
+ KBASE_DEBUG_ASSERT(dep_atom->status != KBASE_JD_ATOM_STATE_UNUSED);
+ dep_atom->status = KBASE_JD_ATOM_STATE_COMPLETED;
+
+ list_add(&dep_atom->dep_item[0], out_list);
+ } else if (!dep_atom->dep_atom[other_d]) {
+#ifdef CONFIG_KDS
+ if (dep_atom->kds_dep_satisfied)
+#endif
+ list_add(&dep_atom->dep_item[0], out_list);
+ }
+ }
+}
+
+KBASE_EXPORT_TEST_API(jd_resolve_dep)
+
+/*
+ * Perform the necessary handling of an atom that has finished running
+ * on the GPU.
+ *
+ * Note that if this is a soft-job that has had kbase_prepare_soft_job called on it then the caller
+ * is responsible for calling kbase_finish_soft_job *before* calling this function.
+ *
+ * The caller must hold the kbase_jd_context.lock.
+ */
+mali_bool jd_done_nolock(kbase_jd_atom *katom)
+{
+ struct kbase_context *kctx = katom->kctx;
+ kbase_device *kbdev = kctx->kbdev;
+ struct list_head completed_jobs;
+ struct list_head runnable_jobs;
+ mali_bool need_to_try_schedule_context = MALI_FALSE;
+ int i;
+
+ INIT_LIST_HEAD(&completed_jobs);
+ INIT_LIST_HEAD(&runnable_jobs);
+
+ KBASE_DEBUG_ASSERT(katom->status != KBASE_JD_ATOM_STATE_UNUSED);
+
+ /* This is needed in case an atom is failed due to being invalid; this
+ * can happen *before* the jobs that the atom depends on have completed */
+ for (i = 0; i < 2; i++) {
+ if (katom->dep_atom[i]) {
+ list_del(&katom->dep_item[i]);
+ katom->dep_atom[i] = NULL;
+ }
+ }
+
+ /* With PRLAM-10817 the last tile of a fragment job being soft-stopped can fail with
+ * BASE_JD_EVENT_TILE_RANGE_FAULT.
+ *
+ * So here if the fragment job failed with TILE_RANGE_FAULT and it has been soft-stopped, then we promote the
+ * error code to BASE_JD_EVENT_DONE
+ */
+
+ if (kbase_hw_has_issue(kbdev, BASE_HW_ISSUE_10817) && katom->event_code == BASE_JD_EVENT_TILE_RANGE_FAULT) {
+ if ((katom->core_req & BASE_JD_REQ_FS) && (katom->atom_flags & KBASE_KATOM_FLAG_BEEN_SOFT_STOPPPED)) {
+ /* Promote the failure to job done */
+ katom->event_code = BASE_JD_EVENT_DONE;
+ katom->atom_flags = katom->atom_flags & (~KBASE_KATOM_FLAG_BEEN_SOFT_STOPPPED);
+ }
+ }
+
+ katom->status = KBASE_JD_ATOM_STATE_COMPLETED;
+ list_add_tail(&katom->dep_item[0], &completed_jobs);
+
+ while (!list_empty(&completed_jobs)) {
+ katom = list_entry(completed_jobs.prev, kbase_jd_atom, dep_item[0]);
+ list_del(completed_jobs.prev);
+
+ KBASE_DEBUG_ASSERT(katom->status == KBASE_JD_ATOM_STATE_COMPLETED);
+
+ for (i = 0; i < 2; i++)
+ jd_resolve_dep(&runnable_jobs, katom, i);
+
+ while (!list_empty(&runnable_jobs)) {
+ kbase_jd_atom *node = list_entry(runnable_jobs.prev, kbase_jd_atom, dep_item[0]);
+ list_del(runnable_jobs.prev);
+
+ KBASE_DEBUG_ASSERT(node->status != KBASE_JD_ATOM_STATE_UNUSED);
+
+ if (katom->event_code == BASE_JD_EVENT_DONE) {
+ need_to_try_schedule_context |= jd_run_atom(node);
+ } else {
+ node->event_code = katom->event_code;
+ node->status = KBASE_JD_ATOM_STATE_COMPLETED;
+
+ if (node->core_req & BASE_JD_REQ_SOFT_JOB) {
+ /* If this is a fence wait then remove it from the list of sync waiters. */
+ if (BASE_JD_REQ_SOFT_FENCE_WAIT == node->core_req)
+ list_del(&node->dep_item[0]);
+
+ kbase_finish_soft_job(node);
+ }
+ }
+
+ if (node->status == KBASE_JD_ATOM_STATE_COMPLETED)
+ list_add_tail(&node->dep_item[0], &completed_jobs);
+ }
+
+ if (katom->core_req & BASE_JD_REQ_EXTERNAL_RESOURCES)
+ kbase_jd_post_external_resources(katom);
+
+ kbase_event_post(kctx, katom);
+
+ /* Decrement and check the TOTAL number of jobs. This includes
+ * those not tracked by the scheduler: 'not ready to run' and
+ * 'dependency-only' jobs. */
+ if (--kctx->jctx.job_nr == 0)
+ wake_up(&kctx->jctx.zero_jobs_wait); /* All events are safely queued now, and we can signal any waiter
+ * that we've got no more jobs (so we can be safely terminated) */
+ }
+
+ return need_to_try_schedule_context;
+}
+
+KBASE_EXPORT_TEST_API(jd_done_nolock)
+
+#ifdef CONFIG_GPU_TRACEPOINTS
+enum {
+ CORE_REQ_DEP_ONLY,
+ CORE_REQ_SOFT,
+ CORE_REQ_COMPUTE,
+ CORE_REQ_FRAGMENT,
+ CORE_REQ_VERTEX,
+ CORE_REQ_TILER,
+ CORE_REQ_FRAGMENT_VERTEX,
+ CORE_REQ_FRAGMENT_VERTEX_TILER,
+ CORE_REQ_FRAGMENT_TILER,
+ CORE_REQ_VERTEX_TILER,
+ CORE_REQ_UNKNOWN
+};
+static const char * const core_req_strings[] = {
+ "Dependency Only Job",
+ "Soft Job",
+ "Compute Shader Job",
+ "Fragment Shader Job",
+ "Vertex/Geometry Shader Job",
+ "Tiler Job",
+ "Fragment Shader + Vertex/Geometry Shader Job",
+ "Fragment Shader + Vertex/Geometry Shader Job + Tiler Job",
+ "Fragment Shader + Tiler Job",
+ "Vertex/Geometry Shader Job + Tiler Job",
+ "Unknown Job"
+};
+static const char *kbasep_map_core_reqs_to_string(base_jd_core_req core_req)
+{
+ if (core_req & BASE_JD_REQ_SOFT_JOB)
+ return core_req_strings[CORE_REQ_SOFT];
+ if (core_req & BASE_JD_REQ_ONLY_COMPUTE)
+ return core_req_strings[CORE_REQ_COMPUTE];
+ switch (core_req & (BASE_JD_REQ_FS | BASE_JD_REQ_CS | BASE_JD_REQ_T)) {
+ case BASE_JD_REQ_DEP:
+ return core_req_strings[CORE_REQ_DEP_ONLY];
+ case BASE_JD_REQ_FS:
+ return core_req_strings[CORE_REQ_FRAGMENT];
+ case BASE_JD_REQ_CS:
+ return core_req_strings[CORE_REQ_VERTEX];
+ case BASE_JD_REQ_T:
+ return core_req_strings[CORE_REQ_TILER];
+ case (BASE_JD_REQ_FS | BASE_JD_REQ_CS):
+ return core_req_strings[CORE_REQ_FRAGMENT_VERTEX];
+ case (BASE_JD_REQ_FS | BASE_JD_REQ_T):
+ return core_req_strings[CORE_REQ_FRAGMENT_TILER];
+ case (BASE_JD_REQ_CS | BASE_JD_REQ_T):
+ return core_req_strings[CORE_REQ_VERTEX_TILER];
+ case (BASE_JD_REQ_FS | BASE_JD_REQ_CS | BASE_JD_REQ_T):
+ return core_req_strings[CORE_REQ_FRAGMENT_VERTEX_TILER];
+ }
+ return core_req_strings[CORE_REQ_UNKNOWN];
+}
+#endif
+
+static mali_bool jd_submit_atom(kbase_context *kctx, const base_jd_atom_v2 *user_atom)
+{
+ kbase_jd_context *jctx = &kctx->jctx;
+ kbase_jd_atom *katom;
+ base_jd_core_req core_req;
+ base_atom_id atom_number = user_atom->atom_number;
+ int queued = 0;
+ int i;
+ mali_bool ret;
+
+ katom = &jctx->atoms[atom_number];
+
+ mutex_lock(&jctx->lock);
+ while (katom->status != KBASE_JD_ATOM_STATE_UNUSED) {
+ /* Atom number is already in use, wait for the atom to complete */
+ mutex_unlock(&jctx->lock);
+
+ /* This thread will wait for the atom to complete. Due to thread scheduling we are not sure that
+ * the other thread that owns the atom will also schedule the context, so we force the scheduler
+ * to be active and hence eventually schedule this context at some point later.
+ */
+ kbasep_js_try_schedule_head_ctx(kctx->kbdev);
+ if (wait_event_killable(katom->completed, katom->status == KBASE_JD_ATOM_STATE_UNUSED)) {
+ /* We're being killed so the result code doesn't really matter */
+ return MALI_FALSE;
+ }
+ mutex_lock(&jctx->lock);
+ }
+
+ /* Update the TOTAL number of jobs. This includes those not tracked by
+ * the scheduler: 'not ready to run' and 'dependency-only' jobs. */
+ jctx->job_nr++;
+
+ core_req = user_atom->core_req;
+
+ katom->udata = user_atom->udata;
+ katom->kctx = kctx;
+ katom->nr_extres = user_atom->nr_extres;
+ katom->extres = NULL;
+ katom->device_nr = user_atom->device_nr;
+ katom->affinity = 0;
+ katom->jc = user_atom->jc;
+ katom->coreref_state = KBASE_ATOM_COREREF_STATE_NO_CORES_REQUESTED;
+ katom->core_req = core_req;
+ katom->nice_prio = user_atom->prio;
+ katom->atom_flags = 0;
+
+#ifdef CONFIG_KDS
+ /* Start by assuming that the KDS dependencies are satisfied,
+ * kbase_jd_pre_external_resources will correct this if there are dependencies */
+ katom->kds_dep_satisfied = MALI_TRUE;
+ katom->kds_rset = NULL;
+#endif /* CONFIG_KDS */
+
+ /* Add dependencies */
+ for (i = 0; i < 2; i++) {
+ int dep_atom_number = user_atom->pre_dep[i];
+ katom->dep_atom[i] = NULL;
+ if (dep_atom_number) {
+ kbase_jd_atom *dep_atom = &jctx->atoms[dep_atom_number];
+
+ if (dep_atom->status == KBASE_JD_ATOM_STATE_UNUSED || dep_atom->status == KBASE_JD_ATOM_STATE_COMPLETED) {
+ if (dep_atom->event_code != BASE_JD_EVENT_DONE) {
+ if (i == 1 && katom->dep_atom[0]) {
+ /* Remove the previous dependency */
+ list_del(&katom->dep_item[0]);
+ katom->dep_atom[0] = NULL;
+ }
+ /* Atom has completed, propagate the error code if any */
+ katom->event_code = dep_atom->event_code;
+ katom->status = KBASE_JD_ATOM_STATE_QUEUED;
+ ret = jd_done_nolock(katom);
+ goto out;
+ }
+ } else {
+ /* Atom is in progress, add this atom to the list */
+ list_add_tail(&katom->dep_item[i], &dep_atom->dep_head[i]);
+ katom->dep_atom[i] = dep_atom;
+ queued = 1;
+ }
+ }
+ }
+
+ /* These must occur after the above loop to ensure that an atom that
+ * depends on a previous atom with the same number behaves as expected */
+ katom->event_code = BASE_JD_EVENT_DONE;
+ katom->status = KBASE_JD_ATOM_STATE_QUEUED;
+
+ /* Reject atoms with job chain = NULL, as these cause issues with soft-stop */
+ if (0 == katom->jc && (katom->core_req & BASEP_JD_REQ_ATOM_TYPE) != BASE_JD_REQ_DEP) {
+ KBASE_DEBUG_PRINT_WARN(KBASE_JD, "Rejecting atom with jc = NULL");
+ katom->event_code = BASE_JD_EVENT_JOB_INVALID;
+ ret = jd_done_nolock(katom);
+ goto out;
+ }
+
+ /*
+ * If the priority is increased we need to check that the caller has the
+ * security caps to do so; if the priority is decreased this is fine, as it
+ * cannot negatively impact other running processes.
+ */
+ if (0 > katom->nice_prio) {
+ mali_bool access_allowed;
+ access_allowed = kbase_security_has_capability(kctx, KBASE_SEC_MODIFY_PRIORITY, KBASE_SEC_FLAG_NOAUDIT);
+ if (!access_allowed) {
+ /* For unprivileged processes - a negative priority is interpreted as zero */
+ katom->nice_prio = 0;
+ }
+ }
+
+ /* Scale priority range to use NICE range */
+ if (katom->nice_prio) {
+ /* Remove sign for calculation */
+ int nice_priority = katom->nice_prio + 128;
+ /* Fixed point maths to scale from 0..255 to 0..39 (NICE range with +20 offset) */
+ katom->nice_prio = (((20 << 16) / 128) * nice_priority) >> 16;
+ }
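+ /*
+ * Worked example: nice_prio = -128 gives nice_priority = 0, which
+ * scales to 0; nice_prio = 0 gives 128, which scales to
+ * (((20 << 16) / 128) * 128) >> 16 = 20; nice_prio = 127 gives 255,
+ * which scales to 39 (the bottom of the NICE range).
+ */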
+
+ if (katom->core_req & BASE_JD_REQ_EXTERNAL_RESOURCES) {
+ /* handle what we need to do to access the external resources */
+ if (MALI_ERROR_NONE != kbase_jd_pre_external_resources(katom, user_atom)) {
+ /* setup failed (no access, bad resource, unknown resource types, etc.) */
+ katom->event_code = BASE_JD_EVENT_JOB_INVALID;
+ ret = jd_done_nolock(katom);
+ goto out;
+ }
+ }
+
+ /* Initialize the jobscheduler policy for this atom. The function returns
+ * an error if the atom is malformed.
+ *
+ * Soft-jobs never enter the job scheduler but have their own initialize method.
+ *
+ * If either fail then we immediately complete the atom with an error.
+ */
+ if ((katom->core_req & BASE_JD_REQ_SOFT_JOB) == 0) {
+ kbasep_js_policy *js_policy = &(kctx->kbdev->js_data.policy);
+ if (MALI_ERROR_NONE != kbasep_js_policy_init_job(js_policy, kctx, katom)) {
+ katom->event_code = BASE_JD_EVENT_JOB_INVALID;
+ ret = jd_done_nolock(katom);
+ goto out;
+ }
+ } else {
+ /* Soft-job */
+ if (MALI_ERROR_NONE != kbase_prepare_soft_job(katom)) {
+ katom->event_code = BASE_JD_EVENT_JOB_INVALID;
+ ret = jd_done_nolock(katom);
+ goto out;
+ }
+ }
+
+#ifdef CONFIG_GPU_TRACEPOINTS
+ katom->work_id = atomic_inc_return(&jctx->work_id);
+ trace_gpu_job_enqueue((u32)kctx, katom->work_id, kbasep_map_core_reqs_to_string(katom->core_req));
+#endif
+
+ if (queued) {
+ ret = MALI_FALSE;
+ goto out;
+ }
+#ifdef CONFIG_KDS
+ if (!katom->kds_dep_satisfied) {
+ /* Queue atom due to KDS dependency */
+ ret = MALI_FALSE;
+ goto out;
+ }
+#endif /* CONFIG_KDS */
+
+ if (katom->core_req & BASE_JD_REQ_SOFT_JOB) {
+ if (kbase_process_soft_job(katom) == 0) {
+ kbase_finish_soft_job(katom);
+ ret = jd_done_nolock(katom);
+ goto out;
+ }
+ /* The job has not yet completed */
+ list_add_tail(&katom->dep_item[0], &kctx->waiting_soft_jobs);
+ ret = MALI_FALSE;
+ } else if ((katom->core_req & BASEP_JD_REQ_ATOM_TYPE) != BASE_JD_REQ_DEP) {
+ katom->status = KBASE_JD_ATOM_STATE_IN_JS;
+ ret = kbasep_js_add_job(kctx, katom);
+ } else {
+ /* This is a pure dependency. Resolve it immediately */
+ ret = jd_done_nolock(katom);
+ }
+
+ out:
+ mutex_unlock(&jctx->lock);
+ return ret;
+}
+
+mali_error kbase_jd_submit(kbase_context *kctx, const kbase_uk_job_submit *submit_data)
+{
+ mali_error err = MALI_ERROR_NONE;
+ int i;
+ mali_bool need_to_try_schedule_context = MALI_FALSE;
+ kbase_device *kbdev;
+ void *user_addr;
+
+ /*
+ * kbase_jd_submit isn't expected to fail, so all errors with the jobs
+ * are reported by immediately failing them (through the event system)
+ */
+ kbdev = kctx->kbdev;
+
+ beenthere("%s", "Enter");
+
+ if ((kctx->jctx.sched_info.ctx.flags & KBASE_CTX_FLAG_SUBMIT_DISABLED) != 0) {
+ KBASE_DEBUG_PRINT_ERROR(KBASE_JD, "Attempt to submit to a context that has SUBMIT_DISABLED set on it");
+ return MALI_ERROR_FUNCTION_FAILED;
+ }
+
+ if (submit_data->stride != sizeof(base_jd_atom_v2)) {
+ KBASE_DEBUG_PRINT_ERROR(KBASE_JD, "Stride passed to job_submit doesn't match kernel");
+ return MALI_ERROR_FUNCTION_FAILED;
+ }
+
+ user_addr = get_compat_pointer(&submit_data->addr);
+
+ KBASE_TIMELINE_ATOMS_IN_FLIGHT(kctx, atomic_add_return(submit_data->nr_atoms, &kctx->timeline.jd_atoms_in_flight));
+
+ for (i = 0; i < submit_data->nr_atoms; i++) {
+ base_jd_atom_v2 user_atom;
+
+ if (copy_from_user(&user_atom, user_addr, sizeof(user_atom)) != 0) {
+ err = MALI_ERROR_FUNCTION_FAILED;
+ KBASE_TIMELINE_ATOMS_IN_FLIGHT(kctx, atomic_sub_return(submit_data->nr_atoms - i, &kctx->timeline.jd_atoms_in_flight));
+ break;
+ }
+
+ user_addr = (void *)((uintptr_t) user_addr + submit_data->stride);
+
+ need_to_try_schedule_context |= jd_submit_atom(kctx, &user_atom);
+ }
+
+ if (need_to_try_schedule_context)
+ kbasep_js_try_schedule_head_ctx(kbdev);
+
+ return err;
+}
+
+KBASE_EXPORT_TEST_API(kbase_jd_submit)
+
+static void kbasep_jd_cacheclean(kbase_device *kbdev)
+{
+ /* Limit the number of loops to avoid a hang if the interrupt is missed */
+ u32 max_loops = KBASE_CLEAN_CACHE_MAX_LOOPS;
+
+ mutex_lock(&kbdev->cacheclean_lock);
+
+ /* use GPU_COMMAND completion solution */
+ /* clean & invalidate the caches */
+ KBASE_TRACE_ADD(kbdev, CORE_GPU_CLEAN_INV_CACHES, NULL, NULL, 0u, 0);
+ kbase_reg_write(kbdev, GPU_CONTROL_REG(GPU_COMMAND), GPU_COMMAND_CLEAN_INV_CACHES, NULL);
+
+ /* wait for cache flush to complete before continuing */
+ while (--max_loops && (kbase_reg_read(kbdev, GPU_CONTROL_REG(GPU_IRQ_RAWSTAT), NULL) & CLEAN_CACHES_COMPLETED) == 0)
+ ;
+
+ /* clear the CLEAN_CACHES_COMPLETED irq */
+ KBASE_TRACE_ADD(kbdev, CORE_GPU_IRQ_CLEAR, NULL, NULL, 0u, CLEAN_CACHES_COMPLETED);
+ kbase_reg_write(kbdev, GPU_CONTROL_REG(GPU_IRQ_CLEAR), CLEAN_CACHES_COMPLETED, NULL);
+ KBASE_DEBUG_ASSERT_MSG(kbdev->hwcnt.state != KBASE_INSTR_STATE_CLEANING,
+ "Instrumentation code was cleaning caches, but Job Management code cleared their IRQ - Instrumentation code will now hang.");
+
+ mutex_unlock(&kbdev->cacheclean_lock);
+}
+
+/**
+ * This function:
+ * - requeues the job from the runpool (if it was soft-stopped/removed from NEXT registers)
+ * - removes it from the system if it finished/failed/was cancelled
+ * - resolves dependencies to add dependent jobs to the context, potentially starting them if necessary (which may add more references to the context)
+ * - releases the reference to the context from the no-longer-running job
+ * - handles retrying submission outside of IRQ context if it failed from within IRQ context
+ */
+static void jd_done_worker(struct work_struct *data)
+{
+ kbase_jd_atom *katom = container_of(data, kbase_jd_atom, work);
+ kbase_jd_context *jctx;
+ kbase_context *kctx;
+ kbasep_js_kctx_info *js_kctx_info;
+ kbasep_js_policy *js_policy;
+ kbase_device *kbdev;
+ kbasep_js_device_data *js_devdata;
+ u64 cache_jc = katom->jc;
+ kbasep_js_atom_retained_state katom_retained_state;
+
+ /* Soft jobs should never reach this function */
+ KBASE_DEBUG_ASSERT((katom->core_req & BASE_JD_REQ_SOFT_JOB) == 0);
+
+ kctx = katom->kctx;
+ jctx = &kctx->jctx;
+ kbdev = kctx->kbdev;
+ js_kctx_info = &kctx->jctx.sched_info;
+
+ js_devdata = &kbdev->js_data;
+ js_policy = &kbdev->js_data.policy;
+
+ KBASE_TRACE_ADD(kbdev, JD_DONE_WORKER, kctx, katom, katom->jc, 0);
+ /*
+ * Begin transaction on JD context and JS context
+ */
+ mutex_lock(&jctx->lock);
+ mutex_lock(&js_kctx_info->ctx.jsctx_mutex);
+
+ /* This worker only gets called on contexts that are scheduled *in*. This is
+ * because it only happens in response to an IRQ from a job that was
+ * running.
+ */
+ KBASE_DEBUG_ASSERT(js_kctx_info->ctx.is_scheduled != MALI_FALSE);
+
+ if (kbase_hw_has_issue(kbdev, BASE_HW_ISSUE_6787) && katom->event_code != BASE_JD_EVENT_DONE && !(katom->event_code & BASE_JD_SW_EVENT))
+ kbasep_jd_cacheclean(kbdev); /* cache flush when jobs complete with non-done codes */
+ else if (kbase_hw_has_issue(kbdev, BASE_HW_ISSUE_10676)) {
+ if (kbdev->gpu_props.num_core_groups > 1 &&
+ !(katom->affinity & kbdev->gpu_props.props.coherency_info.group[0].core_mask) &&
+ (katom->affinity & kbdev->gpu_props.props.coherency_info.group[1].core_mask)) {
+ KBASE_DEBUG_PRINT_INFO(KBASE_JD, "JD: Flushing cache due to PRLAM-10676\n");
+ kbasep_jd_cacheclean(kbdev);
+ }
+ }
+
+ if (kbase_hw_has_issue(kbdev, BASE_HW_ISSUE_10969) &&
+ (katom->core_req & BASE_JD_REQ_FS) &&
+ katom->event_code == BASE_JD_EVENT_TILE_RANGE_FAULT &&
+ (katom->atom_flags & KBASE_KATOM_FLAG_BEEN_SOFT_STOPPPED) &&
+ !(katom->atom_flags & KBASE_KATOM_FLAGS_RERUN)){
+ KBASE_DEBUG_PRINT_INFO(KBASE_JD,
+ " Soft-stopped fragment shader job got a TILE_RANGE_FAULT." \
+ "Possible HW issue, trying SW workaround \n " );
+ if (kbasep_10969_workaround_clamp_coordinates(katom)){
+ /* The job had a TILE_RANGE_FAULT after was soft-stopped.
+ * Due to an HW issue we try to execute the job
+ * again.
+ */
+ KBASE_DEBUG_PRINT_INFO(KBASE_JD, " Clamping has been executed, try to rerun the job \n" );
+ katom->event_code = BASE_JD_EVENT_STOPPED;
+ katom->atom_flags |= KBASE_KATOM_FLAGS_RERUN;
+
+ /* The atom will be requeued, but requeuing does not submit more
+ * jobs. If this was the last job, we must also ensure that more
+ * jobs will be run on slot 0 - this is a Fragment job. */
+ kbasep_js_set_job_retry_submit_slot(katom, 0);
+ }
+ }
+
+ /* If job was rejected due to BASE_JD_EVENT_PM_EVENT but was not
+ * specifically targeting core group 1, then re-submit targeting core
+ * group 0 */
+ if (katom->event_code == BASE_JD_EVENT_PM_EVENT && !(katom->core_req & BASE_JD_REQ_SPECIFIC_COHERENT_GROUP)) {
+ katom->event_code = BASE_JD_EVENT_STOPPED;
+ /* Don't need to worry about any previously set retry-slot - it's
+ * impossible for it to have been set previously, because we guarantee
+ * kbase_jd_done() was called with done_code==0 on this atom */
+ kbasep_js_set_job_retry_submit_slot(katom, 1);
+ }
+
+ if (kbase_hw_has_issue(kbdev, BASE_HW_ISSUE_8316))
+ kbase_as_poking_timer_release_atom(kbdev, kctx, katom);
+
+ /* Release cores this job was using (this might power down unused cores, and
+	 * cause extra latency if a job submitted here - such as dependent jobs -
+ * would use those cores) */
+ kbasep_js_job_check_deref_cores(kbdev, katom);
+
+ /* Retain state before the katom disappears */
+ kbasep_js_atom_retained_state_copy(&katom_retained_state, katom);
+
+ if (!kbasep_js_has_atom_finished(&katom_retained_state)) {
+ unsigned long flags;
+ /* Requeue the atom on soft-stop / removed from NEXT registers */
+ KBASE_DEBUG_PRINT_INFO(KBASE_JM, "JS: Soft Stopped/Removed from next on Ctx %p; Requeuing", kctx);
+
+ mutex_lock(&js_devdata->runpool_mutex);
+ kbasep_js_clear_job_retry_submit(katom);
+
+ KBASE_TIMELINE_ATOM_READY(kctx, kbase_jd_atom_id(kctx, katom));
+ spin_lock_irqsave(&js_devdata->runpool_irq.lock, flags);
+ kbasep_js_policy_enqueue_job(js_policy, katom);
+ spin_unlock_irqrestore(&js_devdata->runpool_irq.lock, flags);
+
+ /* A STOPPED/REMOVED job must cause a re-submit to happen, in case it
+ * was the last job left. Crucially, work items on work queues can run
+ * out of order e.g. on different CPUs, so being able to submit from
+ * the IRQ handler is not a good indication that we don't need to run
+ * jobs; the submitted job could be processed on the work-queue
+ * *before* the stopped job, even though it was submitted after. */
+ {
+ int tmp;
+ KBASE_DEBUG_ASSERT(kbasep_js_get_atom_retry_submit_slot(&katom_retained_state, &tmp) != MALI_FALSE);
+ CSTD_UNUSED(tmp);
+ }
+
+ mutex_unlock(&js_devdata->runpool_mutex);
+ mutex_unlock(&js_kctx_info->ctx.jsctx_mutex);
+ } else {
+ /* Remove the job from the system for all other reasons */
+ mali_bool need_to_try_schedule_context;
+
+ kbasep_js_remove_job(kbdev, kctx, katom);
+ mutex_unlock(&js_kctx_info->ctx.jsctx_mutex);
+ /* jd_done_nolock() requires the jsctx_mutex lock to be dropped */
+
+ need_to_try_schedule_context = jd_done_nolock(katom);
+
+		/* This ctx is already scheduled in, so the return value is guaranteed to be MALI_FALSE */
+ KBASE_DEBUG_ASSERT(need_to_try_schedule_context == MALI_FALSE);
+ }
+ /* katom may have been freed now, do not use! */
+
+ /*
+ * Transaction complete
+ */
+ mutex_unlock(&jctx->lock);
+
+ /* Job is now no longer running, so can now safely release the context
+ * reference, and handle any actions that were logged against the atom's retained state */
+ kbasep_js_runpool_release_ctx_and_katom_retained_state(kbdev, kctx, &katom_retained_state);
+
+ KBASE_TRACE_ADD(kbdev, JD_DONE_WORKER_END, kctx, NULL, cache_jc, 0);
+}
+
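+/*
+ * Lock ordering used by jd_done_worker() above (summary of the code, for
+ * reference only):
+ *
+ *   jctx->lock
+ *     -> js_kctx_info->ctx.jsctx_mutex
+ *          -> js_devdata->runpool_mutex
+ *               -> js_devdata->runpool_irq.lock (IRQ-safe spinlock)
+ *
+ * Inner locks are always dropped before their outer locks, and
+ * jd_done_nolock() is only called once jsctx_mutex has been released.
+ */
+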
+/**
+ * Work queue job cancel function
+ * Only called as part of 'Zapping' a context (which occurs on termination)
+ * Operates serially with the jd_done_worker() on the work queue.
+ *
+ * This can only be called on contexts that aren't scheduled.
+ *
+ * @note We don't need to release most of the resources that kbase_jd_done()
+ * or jd_done_worker() would, because the atoms here must not be
+ * running (by virtue of only being called on contexts that aren't
+ * scheduled). The only resources that are an exception to this are:
+ * - those held by kbasep_js_job_check_ref_cores(), because these resources are
+ * held for non-running atoms as well as running atoms.
+ */
+static void jd_cancel_worker(struct work_struct *data)
+{
+ kbase_jd_atom *katom = container_of(data, kbase_jd_atom, work);
+ kbase_jd_context *jctx;
+ kbase_context *kctx;
+ kbasep_js_kctx_info *js_kctx_info;
+ mali_bool need_to_try_schedule_context;
+ kbase_device *kbdev;
+
+ /* Soft jobs should never reach this function */
+ KBASE_DEBUG_ASSERT((katom->core_req & BASE_JD_REQ_SOFT_JOB) == 0);
+
+ kctx = katom->kctx;
+ kbdev = kctx->kbdev;
+ jctx = &kctx->jctx;
+ js_kctx_info = &kctx->jctx.sched_info;
+
+ KBASE_TRACE_ADD(kbdev, JD_CANCEL_WORKER, kctx, katom, katom->jc, 0);
+
+ /* This only gets called on contexts that are scheduled out. Hence, we must
+ * make sure we don't de-ref the number of running jobs (there aren't
+ * any), nor must we try to schedule out the context (it's already
+ * scheduled out).
+ */
+ KBASE_DEBUG_ASSERT(js_kctx_info->ctx.is_scheduled == MALI_FALSE);
+
+ /* Release cores this job was using (this might power down unused cores) */
+ kbasep_js_job_check_deref_cores(kctx->kbdev, katom);
+
+ /* Scheduler: Remove the job from the system */
+ mutex_lock(&js_kctx_info->ctx.jsctx_mutex);
+ kbasep_js_remove_cancelled_job(kbdev, kctx, katom);
+ mutex_unlock(&js_kctx_info->ctx.jsctx_mutex);
+
+ mutex_lock(&jctx->lock);
+
+ need_to_try_schedule_context = jd_done_nolock(katom);
+ /* Because we're zapping, we're not adding any more jobs to this ctx, so no need to
+ * schedule the context. There's also no need for the jsctx_mutex to have been taken
+ * around this too. */
+ KBASE_DEBUG_ASSERT(need_to_try_schedule_context == MALI_FALSE);
+
+ /* katom may have been freed now, do not use! */
+ mutex_unlock(&jctx->lock);
+
+}
+
+/**
+ * @brief Complete a job that has been removed from the Hardware
+ *
+ * This must be used whenever a job has been removed from the Hardware, e.g.:
+ * - An IRQ indicates that the job finished (for both error and 'done' codes)
+ * - The job was evicted from the JSn_HEAD_NEXT registers during a Soft/Hard stop.
+ *
+ * Some work is carried out immediately, and the rest is deferred onto a workqueue
+ *
+ * This can be called safely from atomic context.
+ *
+ * The caller must hold kbasep_js_device_data::runpool_irq::lock
+ *
+ */
+void kbase_jd_done(kbase_jd_atom *katom, int slot_nr, ktime_t *end_timestamp,
+ kbasep_js_atom_done_code done_code)
+{
+ kbase_context *kctx;
+ kbase_device *kbdev;
+ KBASE_DEBUG_ASSERT(katom);
+ kctx = katom->kctx;
+ KBASE_DEBUG_ASSERT(kctx);
+ kbdev = kctx->kbdev;
+ KBASE_DEBUG_ASSERT(kbdev);
+
+ if (done_code & KBASE_JS_ATOM_DONE_EVICTED_FROM_NEXT)
+ katom->event_code = BASE_JD_EVENT_REMOVED_FROM_NEXT;
+
+ kbase_timeline_job_slot_done(kbdev, kctx, katom, slot_nr, done_code);
+
+ KBASE_TRACE_ADD(kbdev, JD_DONE, kctx, katom, katom->jc, 0);
+
+ kbasep_js_job_done_slot_irq(katom, slot_nr, end_timestamp, done_code);
+
+ katom->slot_nr = slot_nr;
+
+ KBASE_DEBUG_ASSERT(0 == object_is_on_stack(&katom->work));
+ INIT_WORK(&katom->work, jd_done_worker);
+ queue_work(kctx->jctx.job_done_wq, &katom->work);
+}
+
+KBASE_EXPORT_TEST_API(kbase_jd_done)
+
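+/*
+ * Illustrative call site (this mirrors kbase_job_done_slot() in
+ * mali_kbase_jm.c, further below in this patch):
+ *
+ *   spin_lock_irqsave(&js_devdata->runpool_irq.lock, flags);
+ *   ...
+ *   kbase_jd_done(katom, s, &end_timestamp,
+ *                 KBASE_JS_ATOM_DONE_START_NEW_ATOMS);
+ *   spin_unlock_irqrestore(&js_devdata->runpool_irq.lock, flags);
+ *
+ * Only IRQ-safe bookkeeping runs here; the heavy lifting is deferred to
+ * jd_done_worker() on the context's job_done_wq.
+ */
+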
+void kbase_jd_cancel(kbase_device *kbdev, kbase_jd_atom *katom)
+{
+ kbase_context *kctx;
+ kbasep_js_kctx_info *js_kctx_info;
+ KBASE_DEBUG_ASSERT(NULL != kbdev);
+ KBASE_DEBUG_ASSERT(NULL != katom);
+ kctx = katom->kctx;
+ KBASE_DEBUG_ASSERT(NULL != kctx);
+
+ js_kctx_info = &kctx->jctx.sched_info;
+
+ KBASE_TRACE_ADD(kbdev, JD_CANCEL, kctx, katom, katom->jc, 0);
+
+ /* This should only be done from a context that is not scheduled */
+ KBASE_DEBUG_ASSERT(js_kctx_info->ctx.is_scheduled == MALI_FALSE);
+
+ katom->event_code = BASE_JD_EVENT_JOB_CANCELLED;
+
+ KBASE_DEBUG_ASSERT(0 == object_is_on_stack(&katom->work));
+ INIT_WORK(&katom->work, jd_cancel_worker);
+ queue_work(kctx->jctx.job_done_wq, &katom->work);
+}
+
+typedef struct zap_reset_data {
+ /* The stages are:
+ * 1. The timer has never been called
+ * 2. The zap has timed out, all slots are soft-stopped - the GPU reset will happen.
+ * The GPU has been reset when kbdev->reset_waitq is signalled
+ *
+ * (-1 - The timer has been cancelled)
+ */
+ int stage;
+ kbase_device *kbdev;
+ struct hrtimer timer;
+ spinlock_t lock;
+} zap_reset_data;
+
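+/*
+ * Stage transitions (sketch of the code below):
+ *
+ *   1 --zap timed out, GPU reset issued--------------------> 2
+ *   1 --all jobs finished first, kbase_jd_zap_context()
+ *       cancels the timer----------------------------------> -1
+ *
+ * Stage 2 tells kbase_jd_zap_context() that it must also wait on
+ * kbdev->reset_wait before destroying the timer.
+ */
+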
+static enum hrtimer_restart zap_timeout_callback(struct hrtimer *timer)
+{
+ zap_reset_data *reset_data = container_of(timer, zap_reset_data, timer);
+ kbase_device *kbdev = reset_data->kbdev;
+ unsigned long flags;
+
+ spin_lock_irqsave(&reset_data->lock, flags);
+
+ if (reset_data->stage == -1)
+ goto out;
+
+ if (kbase_prepare_to_reset_gpu(kbdev)) {
+		KBASE_DEBUG_PRINT_ERROR(KBASE_JD, "Issuing GPU soft-reset because jobs failed to be killed (within %d ms) as part of context termination (e.g. process exit)\n", ZAP_TIMEOUT);
+ kbase_reset_gpu(kbdev);
+ }
+
+ reset_data->stage = 2;
+
+ out:
+ spin_unlock_irqrestore(&reset_data->lock, flags);
+
+ return HRTIMER_NORESTART;
+}
+
+void kbase_jd_zap_context(kbase_context *kctx)
+{
+ kbase_jd_atom *katom;
+ struct list_head *entry;
+ kbase_device *kbdev;
+ zap_reset_data reset_data;
+ unsigned long flags;
+
+ KBASE_DEBUG_ASSERT(kctx);
+
+ kbdev = kctx->kbdev;
+
+ KBASE_TRACE_ADD(kbdev, JD_ZAP_CONTEXT, kctx, NULL, 0u, 0u);
+ kbase_job_zap_context(kctx);
+
+ mutex_lock(&kctx->jctx.lock);
+
+ /*
+ * While holding the kbase_jd_context lock clean up jobs which are known to kbase but are
+ * queued outside the job scheduler.
+ */
+
+ list_for_each(entry, &kctx->waiting_soft_jobs) {
+ katom = list_entry(entry, kbase_jd_atom, dep_item[0]);
+
+ kbase_cancel_soft_job(katom);
+ }
+
+#ifdef CONFIG_KDS
+
+ /* For each job waiting on a kds resource, cancel the wait and force the job to
+ * complete early, this is done so that we don't leave jobs outstanding waiting
+ * on kds resources which may never be released when contexts are zapped, resulting
+ * in a hang.
+ *
+	 * Note that we can safely iterate over the list as the kbase_jd_context lock is held;
+	 * this prevents items from being removed while jd_done_nolock is called from kbase_cancel_kds_wait_job.
+ */
+
+	list_for_each(entry, &kctx->waiting_kds_resource) {
+ katom = list_entry(entry, kbase_jd_atom, node);
+
+ kbase_cancel_kds_wait_job(katom);
+ }
+#endif
+
+ mutex_unlock(&kctx->jctx.lock);
+
+ hrtimer_init_on_stack(&reset_data.timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
+ reset_data.timer.function = zap_timeout_callback;
+
+ spin_lock_init(&reset_data.lock);
+
+ reset_data.kbdev = kbdev;
+ reset_data.stage = 1;
+
+ hrtimer_start(&reset_data.timer, HR_TIMER_DELAY_MSEC(ZAP_TIMEOUT), HRTIMER_MODE_REL);
+
+	/* Wait for all jobs to finish, and for the context to be not-scheduled
+	 * (due to kbase_job_zap_context(), we also guarantee it's not in the JS
+	 * policy queue either) */
+ wait_event(kctx->jctx.zero_jobs_wait, kctx->jctx.job_nr == 0);
+ wait_event(kctx->jctx.sched_info.ctx.is_scheduled_wait, kctx->jctx.sched_info.ctx.is_scheduled == MALI_FALSE);
+
+ spin_lock_irqsave(&reset_data.lock, flags);
+ if (reset_data.stage == 1) {
+ /* The timer hasn't run yet - so cancel it */
+ reset_data.stage = -1;
+ }
+ spin_unlock_irqrestore(&reset_data.lock, flags);
+
+ hrtimer_cancel(&reset_data.timer);
+
+ if (reset_data.stage == 2) {
+ /* The reset has already started.
+ * Wait for the reset to complete
+ */
+ wait_event(kbdev->reset_wait, atomic_read(&kbdev->reset_gpu) == KBASE_RESET_GPU_NOT_PENDING);
+ }
+ destroy_hrtimer_on_stack(&reset_data.timer);
+
+ KBASE_DEBUG_PRINT_INFO(KBASE_JM, "Zap: Finished Context %p", kctx);
+
+ /* Ensure that the signallers of the waitqs have finished */
+ mutex_lock(&kctx->jctx.lock);
+ mutex_lock(&kctx->jctx.sched_info.ctx.jsctx_mutex);
+ mutex_unlock(&kctx->jctx.sched_info.ctx.jsctx_mutex);
+ mutex_unlock(&kctx->jctx.lock);
+}
+
+KBASE_EXPORT_TEST_API(kbase_jd_zap_context)
+
+mali_error kbase_jd_init(kbase_context *kctx)
+{
+ int i;
+ mali_error mali_err = MALI_ERROR_NONE;
+#ifdef CONFIG_KDS
+ int err;
+#endif /* CONFIG_KDS */
+
+ KBASE_DEBUG_ASSERT(kctx);
+
+ kctx->jctx.job_done_wq = alloc_workqueue("mali_jd", 0, 1);
+ if (NULL == kctx->jctx.job_done_wq) {
+ mali_err = MALI_ERROR_OUT_OF_MEMORY;
+ goto out1;
+ }
+
+ for (i = 0; i < BASE_JD_ATOM_COUNT; i++) {
+ init_waitqueue_head(&kctx->jctx.atoms[i].completed);
+
+ INIT_LIST_HEAD(&kctx->jctx.atoms[i].dep_head[0]);
+ INIT_LIST_HEAD(&kctx->jctx.atoms[i].dep_head[1]);
+
+ /* Catch userspace attempting to use an atom which doesn't exist as a pre-dependency */
+ kctx->jctx.atoms[i].event_code = BASE_JD_EVENT_JOB_INVALID;
+ kctx->jctx.atoms[i].status = KBASE_JD_ATOM_STATE_UNUSED;
+ }
+
+ mutex_init(&kctx->jctx.lock);
+
+ init_waitqueue_head(&kctx->jctx.zero_jobs_wait);
+
+ spin_lock_init(&kctx->jctx.tb_lock);
+
+#ifdef CONFIG_KDS
+ err = kds_callback_init(&kctx->jctx.kds_cb, 0, kds_dep_clear);
+ if (0 != err) {
+ mali_err = MALI_ERROR_FUNCTION_FAILED;
+ goto out2;
+ }
+#endif /* CONFIG_KDS */
+
+ kctx->jctx.job_nr = 0;
+
+ return MALI_ERROR_NONE;
+
+#ifdef CONFIG_KDS
+ out2:
+ destroy_workqueue(kctx->jctx.job_done_wq);
+#endif /* CONFIG_KDS */
+ out1:
+ return mali_err;
+}
+
+KBASE_EXPORT_TEST_API(kbase_jd_init)
+
+void kbase_jd_exit(kbase_context *kctx)
+{
+ KBASE_DEBUG_ASSERT(kctx);
+
+#ifdef CONFIG_KDS
+ kds_callback_term(&kctx->jctx.kds_cb);
+#endif /* CONFIG_KDS */
+ /* Work queue is emptied by this */
+ destroy_workqueue(kctx->jctx.job_done_wq);
+}
+
+KBASE_EXPORT_TEST_API(kbase_jd_exit)
diff --git a/drivers/gpu/arm/t6xx/kbase/src/common/mali_kbase_jm.c b/drivers/gpu/arm/t6xx/kbase/src/common/mali_kbase_jm.c
new file mode 100755
index 00000000000..f162e4142a3
--- /dev/null
+++ b/drivers/gpu/arm/t6xx/kbase/src/common/mali_kbase_jm.c
@@ -0,0 +1,1274 @@
+/*
+ *
+ * (C) COPYRIGHT 2010-2013 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained
+ * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
+ * Boston, MA 02110-1301, USA.
+ *
+ */
+
+
+
+/**
+ * @file mali_kbase_jm.c
+ * Base kernel job manager APIs
+ */
+
+#include <kbase/src/common/mali_kbase.h>
+#include <kbase/src/common/mali_midg_regmap.h>
+#include <kbase/src/common/mali_kbase_gator.h>
+#include <kbase/src/common/mali_kbase_js_affinity.h>
+#include <kbase/src/common/mali_kbase_8401_workaround.h>
+#include <kbase/src/common/mali_kbase_hw.h>
+
+#include "mali_kbase_jm.h"
+
+#define beenthere(f, a...) KBASE_DEBUG_PRINT_INFO(KBASE_JM, "%s:" f, __func__, ##a)
+
+#ifdef CONFIG_MALI_DEBUG_SHADER_SPLIT_FS
+u64 mali_js0_affinity_mask = 0xFFFFFFFFFFFFFFFFULL;
+u64 mali_js1_affinity_mask = 0xFFFFFFFFFFFFFFFFULL;
+u64 mali_js2_affinity_mask = 0xFFFFFFFFFFFFFFFFULL;
+#endif
+
+
+static void kbasep_try_reset_gpu_early(kbase_device *kbdev);
+
+#ifdef CONFIG_GPU_TRACEPOINTS
+static char *kbasep_make_job_slot_string(int js, char *js_string)
+{
+ sprintf(js_string, "job_slot_%i", js);
+ return js_string;
+}
+#endif
+
+static void kbase_job_hw_submit(kbase_device *kbdev, kbase_jd_atom *katom, int js)
+{
+ kbase_context *kctx;
+ u32 cfg;
+ u64 jc_head = katom->jc;
+
+ KBASE_DEBUG_ASSERT(kbdev);
+ KBASE_DEBUG_ASSERT(katom);
+
+ kctx = katom->kctx;
+
+ /* Command register must be available */
+ KBASE_DEBUG_ASSERT(kbasep_jm_is_js_free(kbdev, js, kctx));
+	/* Affinity must not be violated */
+ kbase_js_debug_log_current_affinities(kbdev);
+ KBASE_DEBUG_ASSERT(!kbase_js_affinity_would_violate(kbdev, js, katom->affinity));
+
+ kbase_reg_write(kbdev, JOB_SLOT_REG(js, JSn_HEAD_NEXT_LO), jc_head & 0xFFFFFFFF, kctx);
+ kbase_reg_write(kbdev, JOB_SLOT_REG(js, JSn_HEAD_NEXT_HI), jc_head >> 32, kctx);
+
+#ifdef CONFIG_MALI_DEBUG_SHADER_SPLIT_FS
+ {
+ u64 mask;
+ u32 value;
+
+		if (js == 0)
+			mask = mali_js0_affinity_mask;
+		else if (js == 1)
+			mask = mali_js1_affinity_mask;
+		else
+			mask = mali_js2_affinity_mask;
+
+ value = katom->affinity & (mask & 0xFFFFFFFF);
+
+ kbase_reg_write(kbdev, JOB_SLOT_REG(js, JSn_AFFINITY_NEXT_LO), value, kctx);
+
+ value = (katom->affinity >> 32) & ((mask>>32) & 0xFFFFFFFF);
+ kbase_reg_write(kbdev, JOB_SLOT_REG(js, JSn_AFFINITY_NEXT_HI), value, kctx);
+ }
+#else
+ kbase_reg_write(kbdev, JOB_SLOT_REG(js, JSn_AFFINITY_NEXT_LO), katom->affinity & 0xFFFFFFFF, kctx);
+ kbase_reg_write(kbdev, JOB_SLOT_REG(js, JSn_AFFINITY_NEXT_HI), katom->affinity >> 32, kctx);
+#endif
+
+ /* start MMU, medium priority, cache clean/flush on end, clean/flush on start */
+ cfg = kctx->as_nr | JSn_CONFIG_END_FLUSH_CLEAN_INVALIDATE | JSn_CONFIG_START_MMU | JSn_CONFIG_START_FLUSH_CLEAN_INVALIDATE | JSn_CONFIG_THREAD_PRI(8);
+
+ kbase_reg_write(kbdev, JOB_SLOT_REG(js, JSn_CONFIG_NEXT), cfg, kctx);
+
+ /* Write an approximate start timestamp.
+ * It's approximate because there might be a job in the HEAD register. In
+ * such cases, we'll try to make a better approximation in the IRQ handler
+ * (up to the KBASE_JS_IRQ_THROTTLE_TIME_US). */
+ katom->start_timestamp = ktime_get();
+
+ /* GO ! */
+ KBASE_DEBUG_PRINT_INFO(KBASE_JM, "JS: Submitting atom %p from ctx %p to js[%d] with head=0x%llx, affinity=0x%llx", katom, kctx, js, jc_head, katom->affinity);
+
+ KBASE_TRACE_ADD_SLOT_INFO(kbdev, JM_SUBMIT, kctx, katom, jc_head, js, (u32) katom->affinity);
+
+#ifdef CONFIG_MALI_GATOR_SUPPORT
+ kbase_trace_mali_job_slots_event(GATOR_MAKE_EVENT(GATOR_JOB_SLOT_START, js), kctx);
+#endif /* CONFIG_MALI_GATOR_SUPPORT */
+#ifdef CONFIG_GPU_TRACEPOINTS
+	if (kbasep_jm_nr_jobs_submitted(&kbdev->jm_slots[js]) == 1) {
+ /* If this is the only job on the slot, trace it as starting */
+ char js_string[16];
+ trace_gpu_sched_switch(kbasep_make_job_slot_string(js, js_string), ktime_to_ns(katom->start_timestamp), (u32)katom->kctx, 0, katom->work_id);
+ kbdev->jm_slots[js].last_context = katom->kctx;
+ }
+#endif
+ kbase_timeline_job_slot_submit(kbdev, kctx, katom, js);
+
+ kbase_reg_write(kbdev, JOB_SLOT_REG(js, JSn_COMMAND_NEXT), JSn_COMMAND_START, katom->kctx);
+}
+
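+/*
+ * Submission sequence used above (summary):
+ *   1. JSn_HEAD_NEXT_LO/HI     <- job chain address
+ *   2. JSn_AFFINITY_NEXT_LO/HI <- shader core affinity mask
+ *   3. JSn_CONFIG_NEXT         <- address space, flush and priority flags
+ *   4. JSn_COMMAND_NEXT        <- JSn_COMMAND_START
+ * The START command is written last, so the job only becomes eligible to
+ * run once the other _NEXT registers are fully populated.
+ */
+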
+void kbase_job_submit_nolock(kbase_device *kbdev, kbase_jd_atom *katom, int js)
+{
+ kbase_jm_slot *jm_slots;
+
+ KBASE_DEBUG_ASSERT(kbdev);
+
+ jm_slots = kbdev->jm_slots;
+
+ /*
+ * We can have:
+ * - one job already done (pending interrupt),
+ * - one running,
+ * - one ready to be run.
+ * Hence a maximum of 3 inflight jobs. We have a 4 job
+ * queue, which I hope will be enough...
+ */
+ kbasep_jm_enqueue_submit_slot(&jm_slots[js], katom);
+ kbase_job_hw_submit(kbdev, katom, js);
+}
+
+void kbase_job_done_slot(kbase_device *kbdev, int s, u32 completion_code, u64 job_tail, ktime_t *end_timestamp)
+{
+ kbase_jm_slot *slot;
+ kbase_jd_atom *katom;
+ mali_addr64 jc_head;
+ kbase_context *kctx;
+
+ KBASE_DEBUG_ASSERT(kbdev);
+
+ if (completion_code != BASE_JD_EVENT_DONE && completion_code != BASE_JD_EVENT_STOPPED)
+ KBASE_DEBUG_PRINT_ERROR(KBASE_JM, "t6xx: GPU fault 0x%02lx from job slot %d\n", (unsigned long)completion_code, s);
+
+ /* IMPORTANT: this function must only contain work necessary to complete a
+ * job from a Real IRQ (and not 'fake' completion, e.g. from
+ * Soft-stop). For general work that must happen no matter how the job was
+ * removed from the hardware, place it in kbase_jd_done() */
+
+ slot = &kbdev->jm_slots[s];
+ katom = kbasep_jm_dequeue_submit_slot(slot);
+
+ /* If the katom completed is because it's a dummy job for HW workarounds, then take no further action */
+ if (kbasep_jm_is_dummy_workaround_job(kbdev, katom)) {
+ KBASE_TRACE_ADD_SLOT_INFO(kbdev, JM_JOB_DONE, NULL, NULL, 0, s, completion_code);
+ return;
+ }
+
+ jc_head = katom->jc;
+ kctx = katom->kctx;
+
+ KBASE_TRACE_ADD_SLOT_INFO(kbdev, JM_JOB_DONE, kctx, katom, jc_head, s, completion_code);
+
+ if (completion_code != BASE_JD_EVENT_DONE && completion_code != BASE_JD_EVENT_STOPPED) {
+
+#if KBASE_TRACE_DUMP_ON_JOB_SLOT_ERROR != 0
+ KBASE_TRACE_DUMP(kbdev);
+#endif
+ }
+ if (job_tail != 0) {
+ mali_bool was_updated = (job_tail != jc_head);
+ /* Some of the job has been executed, so we update the job chain address to where we should resume from */
+ katom->jc = job_tail;
+ if (was_updated)
+ KBASE_TRACE_ADD_SLOT(kbdev, JM_UPDATE_HEAD, kctx, katom, job_tail, s);
+ }
+
+ /* Only update the event code for jobs that weren't cancelled */
+ if (katom->event_code != BASE_JD_EVENT_JOB_CANCELLED)
+ katom->event_code = (base_jd_event_code) completion_code;
+
+ kbase_device_trace_register_access(kctx, REG_WRITE, JOB_CONTROL_REG(JOB_IRQ_CLEAR), 1 << s);
+
+ /* Complete the job, and start new ones
+ *
+ * Also defer remaining work onto the workqueue:
+ * - Re-queue Soft-stopped jobs
+ * - For any other jobs, queue the job back into the dependency system
+ * - Schedule out the parent context if necessary, and schedule a new one in.
+ */
+#ifdef CONFIG_GPU_TRACEPOINTS
+ if (kbasep_jm_nr_jobs_submitted(slot) != 0) {
+ kbase_jd_atom *katom;
+ char js_string[16];
+ katom = kbasep_jm_peek_idx_submit_slot(slot, 0); /* The atom in the HEAD */
+ trace_gpu_sched_switch(kbasep_make_job_slot_string(s, js_string), ktime_to_ns(*end_timestamp), (u32)katom->kctx, 0, katom->work_id);
+ slot->last_context = katom->kctx;
+ } else {
+ char js_string[16];
+ trace_gpu_sched_switch(kbasep_make_job_slot_string(s, js_string), ktime_to_ns(ktime_get()), 0, 0, 0);
+ slot->last_context = 0;
+ }
+#endif
+ kbase_jd_done(katom, s, end_timestamp, KBASE_JS_ATOM_DONE_START_NEW_ATOMS);
+}
+
+/**
+ * Update the start_timestamp of the job currently in the HEAD, based on the
+ * fact that we got an IRQ for the previous set of completed jobs.
+ *
+ * The estimate also takes into account the KBASE_JS_IRQ_THROTTLE_TIME_US and
+ * the time the job was submitted, to work out the best estimate (which might
+ * still result in an over-estimate to the calculated time spent)
+ */
+STATIC void kbasep_job_slot_update_head_start_timestamp(kbase_device *kbdev, kbase_jm_slot *slot, ktime_t end_timestamp)
+{
+ KBASE_DEBUG_ASSERT(slot);
+
+ if (kbasep_jm_nr_jobs_submitted(slot) > 0) {
+ kbase_jd_atom *katom;
+ ktime_t new_timestamp;
+ ktime_t timestamp_diff;
+ katom = kbasep_jm_peek_idx_submit_slot(slot, 0); /* The atom in the HEAD */
+
+ KBASE_DEBUG_ASSERT(katom != NULL);
+
+ if (kbasep_jm_is_dummy_workaround_job(kbdev, katom) != MALI_FALSE) {
+ /* Don't access the members of HW workaround 'dummy' jobs */
+ return;
+ }
+
+ /* Account for any IRQ Throttle time - makes an overestimate of the time spent by the job */
+ new_timestamp = ktime_sub_ns(end_timestamp, KBASE_JS_IRQ_THROTTLE_TIME_US * 1000);
+ timestamp_diff = ktime_sub(new_timestamp, katom->start_timestamp);
+ if (ktime_to_ns(timestamp_diff) >= 0) {
+ /* Only update the timestamp if it's a better estimate than what's currently stored.
+ * This is because our estimate that accounts for the throttle time may be too much
+ * of an overestimate */
+ katom->start_timestamp = new_timestamp;
+ }
+ }
+}
+
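+/*
+ * Worked example (values assumed for illustration; throttle = 20us):
+ * the HEAD job was written to the NEXT registers at t = 1000us, so
+ * start_timestamp == 1000us. The IRQ for the previous job arrives at
+ * end_timestamp == 1500us, so the previous job completed somewhere in
+ * [1480us, 1500us]. new_timestamp == 1480us is therefore a pessimistic
+ * but far better start estimate than 1000us, and since the difference
+ * is non-negative it replaces start_timestamp.
+ */
+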
+void kbase_job_done(kbase_device *kbdev, u32 done)
+{
+ unsigned long flags;
+ int i;
+ u32 count = 0;
+ ktime_t end_timestamp = ktime_get();
+ kbasep_js_device_data *js_devdata;
+
+ KBASE_DEBUG_ASSERT(kbdev);
+ js_devdata = &kbdev->js_data;
+
+ KBASE_TRACE_ADD(kbdev, JM_IRQ, NULL, NULL, 0, done);
+
+ memset(&kbdev->slot_submit_count_irq[0], 0, sizeof(kbdev->slot_submit_count_irq));
+
+	/* Write the IRQ throttle register; this prevents IRQs from occurring
+	 * until the given number of GPU clock cycles have passed */
+ {
+ int irq_throttle_cycles = atomic_read(&kbdev->irq_throttle_cycles);
+ kbase_reg_write(kbdev, JOB_CONTROL_REG(JOB_IRQ_THROTTLE), irq_throttle_cycles, NULL);
+ }
+
+ spin_lock_irqsave(&js_devdata->runpool_irq.lock, flags);
+
+ while (done) {
+ kbase_jm_slot *slot;
+ u32 failed = done >> 16;
+
+ /* treat failed slots as finished slots */
+ u32 finished = (done & 0xFFFF) | failed;
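+
+		/* Worked example (values assumed): done == 0x00010002 means
+		 * slot 1 finished and slot 0 failed, giving failed == 0x0001
+		 * and finished == 0x0003 - the failed slot is handled as a
+		 * finished slot too. */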
+
+		/* Note: this is inherently unfair, as we always check for
+		 * lower-numbered interrupts before the higher-numbered ones. */
+ i = ffs(finished) - 1;
+ KBASE_DEBUG_ASSERT(i >= 0);
+
+ slot = &kbdev->jm_slots[i];
+
+ do {
+ int nr_done;
+ u32 active;
+ u32 completion_code = BASE_JD_EVENT_DONE; /* assume OK */
+ u64 job_tail = 0;
+
+ if (failed & (1u << i)) {
+ /* read out the job slot status code if the job slot reported failure */
+ completion_code = kbase_reg_read(kbdev, JOB_SLOT_REG(i, JSn_STATUS), NULL);
+
+ switch (completion_code) {
+ case BASE_JD_EVENT_STOPPED:
+#ifdef CONFIG_MALI_GATOR_SUPPORT
+ kbase_trace_mali_job_slots_event(GATOR_MAKE_EVENT(GATOR_JOB_SLOT_SOFT_STOPPED, i), NULL);
+#endif /* CONFIG_MALI_GATOR_SUPPORT */
+ /* Soft-stopped job - read the value of JS<n>_TAIL so that the job chain can be resumed */
+ job_tail = (u64) kbase_reg_read(kbdev, JOB_SLOT_REG(i, JSn_TAIL_LO), NULL) | ((u64) kbase_reg_read(kbdev, JOB_SLOT_REG(i, JSn_TAIL_HI), NULL) << 32);
+ break;
+ case BASE_JD_EVENT_NOT_STARTED:
+ /* PRLAM-10673 can cause a TERMINATED job to come back as NOT_STARTED, but the error interrupt helps us detect it */
+ completion_code = BASE_JD_EVENT_TERMINATED;
+					/* fall through */
+ default:
+ KBASE_DEBUG_PRINT_WARN(KBASE_JD, "error detected from slot %d, job status 0x%08x (%s)", i, completion_code, kbase_exception_name(completion_code));
+ }
+ }
+
+ kbase_reg_write(kbdev, JOB_CONTROL_REG(JOB_IRQ_CLEAR), done & ((1 << i) | (1 << (i + 16))), NULL);
+ active = kbase_reg_read(kbdev, JOB_CONTROL_REG(JOB_IRQ_JS_STATE), NULL);
+
+ if (((active >> i) & 1) == 0 && (((done >> (i + 16)) & 1) == 0)) {
+ /* There is a potential race we must work around:
+ *
+ * 1. A job slot has a job in both current and next registers
+ * 2. The job in current completes successfully, the IRQ handler reads RAWSTAT
+ * and calls this function with the relevant bit set in "done"
+ * 3. The job in the next registers becomes the current job on the GPU
+ * 4. Sometime before the JOB_IRQ_CLEAR line above the job on the GPU _fails_
+ * 5. The IRQ_CLEAR clears the done bit but not the failed bit. This atomically sets
+ * JOB_IRQ_JS_STATE. However since both jobs have now completed the relevant bits
+ * for the slot are set to 0.
+ *
+ * If we now did nothing then we'd incorrectly assume that _both_ jobs had completed
+ * successfully (since we haven't yet observed the fail bit being set in RAWSTAT).
+ *
+ * So at this point if there are no active jobs left we check to see if RAWSTAT has a failure
+ * bit set for the job slot. If it does we know that there has been a new failure that we
+ * didn't previously know about, so we make sure that we record this in active (but we wait
+ * for the next loop to deal with it).
+ *
+ * If we were handling a job failure (i.e. done has the relevant high bit set) then we know that
+ * the value read back from JOB_IRQ_JS_STATE is the correct number of remaining jobs because
+				 * the failed job will have prevented any further jobs from starting execution.
+ */
+ u32 rawstat = kbase_reg_read(kbdev, JOB_CONTROL_REG(JOB_IRQ_RAWSTAT), NULL);
+
+ if ((rawstat >> (i + 16)) & 1) {
+ /* There is a failed job that we've missed - add it back to active */
+ active |= (1u << i);
+ }
+ }
+
+ KBASE_DEBUG_PRINT_INFO(KBASE_JM, "Job ended with status 0x%08X\n", completion_code);
+
+ nr_done = kbasep_jm_nr_jobs_submitted(slot);
+ nr_done -= (active >> i) & 1;
+ nr_done -= (active >> (i + 16)) & 1;
+
+ if (nr_done <= 0) {
+ KBASE_DEBUG_PRINT_WARN(KBASE_JM, "Spurious interrupt on slot %d", i);
+ goto spurious;
+ }
+
+ count += nr_done;
+
+ while (nr_done) {
+ if (nr_done == 1) {
+ kbase_job_done_slot(kbdev, i, completion_code, job_tail, &end_timestamp);
+ } else {
+					/* More than one job has completed. Since this is not the last job being reported
+					 * this time, it must have passed. This is because the hardware will not allow further
+					 * jobs in a job slot to complete until the failed job is cleared from the IRQ status.
+					 */
+ kbase_job_done_slot(kbdev, i, BASE_JD_EVENT_DONE, 0, &end_timestamp);
+ }
+ nr_done--;
+ }
+
+ spurious:
+ done = kbase_reg_read(kbdev, JOB_CONTROL_REG(JOB_IRQ_RAWSTAT), NULL);
+
+ if (kbase_hw_has_issue(kbdev, BASE_HW_ISSUE_10883)) {
+ /* Workaround for missing interrupt caused by PRLAM-10883 */
+ if (((active >> i) & 1) && (0 == kbase_reg_read(kbdev, JOB_SLOT_REG(i, JSn_STATUS), NULL))) {
+ /* Force job slot to be processed again */
+ done |= (1u << i);
+ }
+ }
+
+ failed = done >> 16;
+ finished = (done & 0xFFFF) | failed;
+ } while (finished & (1 << i));
+
+ kbasep_job_slot_update_head_start_timestamp(kbdev, slot, end_timestamp);
+ }
+ spin_unlock_irqrestore(&js_devdata->runpool_irq.lock, flags);
+
+ if (atomic_read(&kbdev->reset_gpu) == KBASE_RESET_GPU_COMMITTED) {
+ /* If we're trying to reset the GPU then we might be able to do it early
+ * (without waiting for a timeout) because some jobs have completed
+ */
+ kbasep_try_reset_gpu_early(kbdev);
+ }
+
+ KBASE_TRACE_ADD(kbdev, JM_IRQ_END, NULL, NULL, 0, count);
+}
+KBASE_EXPORT_TEST_API(kbase_job_done)
+
+static mali_bool kbasep_soft_stop_allowed(kbase_device *kbdev, u16 core_reqs)
+{
+ mali_bool soft_stops_allowed = MALI_TRUE;
+
+ if (kbase_hw_has_issue(kbdev, BASE_HW_ISSUE_8408)) {
+ if ((core_reqs & BASE_JD_REQ_T) != 0)
+ soft_stops_allowed = MALI_FALSE;
+ }
+ return soft_stops_allowed;
+}
+
+static mali_bool kbasep_hard_stop_allowed(kbase_device *kbdev, u16 core_reqs)
+{
+ mali_bool hard_stops_allowed = MALI_TRUE;
+
+ if (kbase_hw_has_issue(kbdev, BASE_HW_ISSUE_8394)) {
+ if ((core_reqs & BASE_JD_REQ_T) != 0)
+ hard_stops_allowed = MALI_FALSE;
+ }
+ return hard_stops_allowed;
+}
+
+static void kbasep_job_slot_soft_or_hard_stop_do_action(kbase_device *kbdev, int js, u32 action, u16 core_reqs, kbase_jd_atom *target_katom)
+{
+ kbase_context *kctx = target_katom->kctx;
+#if KBASE_TRACE_ENABLE
+ u32 status_reg_before;
+ u64 job_in_head_before;
+ u32 status_reg_after;
+
+ /* Check the head pointer */
+ job_in_head_before = ((u64) kbase_reg_read(kbdev, JOB_SLOT_REG(js, JSn_HEAD_LO), NULL))
+ | (((u64) kbase_reg_read(kbdev, JOB_SLOT_REG(js, JSn_HEAD_HI), NULL)) << 32);
+ status_reg_before = kbase_reg_read(kbdev, JOB_SLOT_REG(js, JSn_STATUS), NULL);
+#endif
+
+ if (action == JSn_COMMAND_SOFT_STOP) {
+ mali_bool soft_stop_allowed = kbasep_soft_stop_allowed(kbdev, core_reqs);
+ if (!soft_stop_allowed) {
+#ifdef CONFIG_MALI_DEBUG
+ KBASE_DEBUG_PRINT(KBASE_JM, "Attempt made to soft-stop a job that cannot be soft-stopped. core_reqs = 0x%X", (unsigned int)core_reqs);
+#endif /* CONFIG_MALI_DEBUG */
+ return;
+ }
+
+ /* We are about to issue a soft stop, so mark the atom as having been soft stopped */
+ target_katom->atom_flags |= KBASE_KATOM_FLAG_BEEN_SOFT_STOPPPED;
+ }
+
+ if (action == JSn_COMMAND_HARD_STOP) {
+ mali_bool hard_stop_allowed = kbasep_hard_stop_allowed(kbdev, core_reqs);
+ if (!hard_stop_allowed) {
+ /* Jobs can be hard-stopped for the following reasons:
+ * * CFS decides the job has been running too long (and soft-stop has not occurred).
+ * In this case the GPU will be reset by CFS if the job remains on the GPU.
+ *
+ * * The context is destroyed, kbase_jd_zap_context will attempt to hard-stop the job. However
+ * it also has a watchdog which will cause the GPU to be reset if the job remains on the GPU.
+ *
+ * * An (unhandled) MMU fault occurred. As long as BASE_HW_ISSUE_8245 is defined then
+ * the GPU will be reset.
+ *
+ * All three cases result in the GPU being reset if the hard-stop fails,
+ * so it is safe to just return and ignore the hard-stop request.
+ */
+ KBASE_DEBUG_PRINT_WARN(KBASE_JM, "Attempt made to hard-stop a job that cannot be hard-stopped. core_reqs = 0x%X", (unsigned int)core_reqs);
+ return;
+ }
+ }
+
+ if (kbase_hw_has_issue(kbdev, BASE_HW_ISSUE_8316) && action == JSn_COMMAND_SOFT_STOP) {
+ int i;
+ kbase_jm_slot *slot;
+ slot = &kbdev->jm_slots[js];
+
+ for (i = 0; i < kbasep_jm_nr_jobs_submitted(slot); i++) {
+ kbase_jd_atom *katom;
+
+ katom = kbasep_jm_peek_idx_submit_slot(slot, i);
+
+ KBASE_DEBUG_ASSERT(katom);
+
+ if (kbasep_jm_is_dummy_workaround_job(kbdev, katom) != MALI_FALSE) {
+ /* Don't access the members of HW workaround 'dummy' jobs
+ *
+ * This assumes that such jobs can't cause HW_ISSUE_8316, and could only be blocked
+ * by other jobs causing HW_ISSUE_8316 (which will get poked/or eventually get killed) */
+ continue;
+ }
+
+ /* For HW_ISSUE_8316, only 'bad' jobs attacking the system can
+ * cause this issue: normally, all memory should be allocated in
+ * multiples of 4 pages, and growable memory should be changed size
+ * in multiples of 4 pages.
+ *
+ * Whilst such 'bad' jobs can be cleared by a GPU reset, the
+ * locking up of a uTLB entry caused by the bad job could also
+ * stall other ASs, meaning that other ASs' jobs don't complete in
+ * the 'grace' period before the reset. We don't want to lose other
+ * ASs' jobs when they would normally complete fine, so we must
+ * 'poke' the MMU regularly to help other ASs complete */
+ kbase_as_poking_timer_retain_atom(kbdev, katom->kctx, katom);
+ }
+ }
+
+ kbase_reg_write(kbdev, JOB_SLOT_REG(js, JSn_COMMAND), action, kctx);
+
+#if KBASE_TRACE_ENABLE
+ status_reg_after = kbase_reg_read(kbdev, JOB_SLOT_REG(js, JSn_STATUS), NULL);
+ if (status_reg_after == BASE_JD_EVENT_ACTIVE) {
+ kbase_jm_slot *slot;
+ kbase_jd_atom *head;
+ kbase_context *head_kctx;
+
+ slot = &kbdev->jm_slots[js];
+ head = kbasep_jm_peek_idx_submit_slot(slot, slot->submitted_nr - 1);
+ head_kctx = head->kctx;
+
+ /* We don't need to check kbasep_jm_is_dummy_workaround_job( head ) here:
+ * - Members are not indirected through
+ * - The members will all be zero anyway
+ */
+ if (status_reg_before == BASE_JD_EVENT_ACTIVE)
+ KBASE_TRACE_ADD_SLOT(kbdev, JM_CHECK_HEAD, head_kctx, head, job_in_head_before, js);
+ else
+ KBASE_TRACE_ADD_SLOT(kbdev, JM_CHECK_HEAD, NULL, NULL, 0, js);
+
+ if (action == JSn_COMMAND_SOFT_STOP)
+ KBASE_TRACE_ADD_SLOT(kbdev, JM_SOFTSTOP, head_kctx, head, head->jc, js);
+ else
+ KBASE_TRACE_ADD_SLOT(kbdev, JM_HARDSTOP, head_kctx, head, head->jc, js);
+ } else {
+ if (status_reg_before == BASE_JD_EVENT_ACTIVE)
+ KBASE_TRACE_ADD_SLOT(kbdev, JM_CHECK_HEAD, NULL, NULL, job_in_head_before, js);
+ else
+ KBASE_TRACE_ADD_SLOT(kbdev, JM_CHECK_HEAD, NULL, NULL, 0, js);
+
+ if (action == JSn_COMMAND_SOFT_STOP)
+ KBASE_TRACE_ADD_SLOT(kbdev, JM_SOFTSTOP, NULL, NULL, 0, js);
+ else
+ KBASE_TRACE_ADD_SLOT(kbdev, JM_HARDSTOP, NULL, NULL, 0, js);
+ }
+#endif
+}
+
+/* Helper macros used by kbasep_job_slot_soft_or_hard_stop */
+#define JM_SLOT_MAX_JOB_SUBMIT_REGS 2
+#define JM_JOB_IS_CURRENT_JOB_INDEX(n) (1 == n) /* Index of the last job to process */
+#define JM_JOB_IS_NEXT_JOB_INDEX(n) (2 == n) /* Index of the prior to last job to process */
+
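+/*
+ * Example with jobs_submitted == 3 (illustrative): the loop in
+ * kbasep_job_slot_soft_or_hard_stop() starts at i == 1. For i == 1,
+ * (jobs_submitted - i) == 2, so there is a younger job behind this one
+ * (possibly in the NEXT registers); for i == 2, (jobs_submitted - i) == 1,
+ * so this is the most recently submitted job and may itself still be in
+ * NEXT. Index 0 is a job that has already completed and is merely
+ * awaiting its IRQ, so it is never stopped.
+ */
+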
+/** Soft or hard-stop a slot
+ *
+ * This function safely ensures that the correct job is either hard or soft-stopped.
+ * It deals with evicting jobs from the next registers where appropriate.
+ *
+ * This does not attempt to stop or evict jobs that are 'dummy' jobs for HW workarounds.
+ *
+ * @param kbdev The kbase device
+ * @param kctx The context to soft/hard-stop job(s) from (or NULL if all jobs should be targeted)
+ * @param js The slot that the job(s) are on
+ * @param target_katom The atom that should be targeted (or NULL if all jobs from the context should be targeted)
+ * @param action The action to perform, either JSn_COMMAND_HARD_STOP or JSn_COMMAND_SOFT_STOP
+ */
+static void kbasep_job_slot_soft_or_hard_stop(kbase_device *kbdev, kbase_context *kctx, int js, kbase_jd_atom *target_katom, u32 action)
+{
+ kbase_jd_atom *katom;
+ u8 i;
+ u8 jobs_submitted;
+ kbase_jm_slot *slot;
+ u16 core_reqs;
+ kbasep_js_device_data *js_devdata;
+
+ KBASE_DEBUG_ASSERT(action == JSn_COMMAND_HARD_STOP || action == JSn_COMMAND_SOFT_STOP);
+ KBASE_DEBUG_ASSERT(kbdev);
+ js_devdata = &kbdev->js_data;
+
+ slot = &kbdev->jm_slots[js];
+ KBASE_DEBUG_ASSERT(slot);
+ lockdep_assert_held(&js_devdata->runpool_irq.lock);
+
+ jobs_submitted = kbasep_jm_nr_jobs_submitted(slot);
+
+ KBASE_TIMELINE_TRY_SOFT_STOP(kctx, js, 1);
+ KBASE_TRACE_ADD_SLOT_INFO(kbdev, JM_SLOT_SOFT_OR_HARD_STOP, kctx, NULL, 0u, js, jobs_submitted);
+
+ if (jobs_submitted > JM_SLOT_MAX_JOB_SUBMIT_REGS)
+ i = jobs_submitted - JM_SLOT_MAX_JOB_SUBMIT_REGS;
+ else
+ i = 0;
+
+ /* Loop through all jobs that have been submitted to the slot and haven't completed */
+ for (; i < jobs_submitted; i++) {
+ katom = kbasep_jm_peek_idx_submit_slot(slot, i);
+
+ if (kctx && katom->kctx != kctx)
+ continue;
+
+ if (target_katom && katom != target_katom)
+ continue;
+
+ if (kbasep_jm_is_dummy_workaround_job(kbdev, katom))
+ continue;
+
+ core_reqs = katom->core_req;
+
+ if (JM_JOB_IS_CURRENT_JOB_INDEX(jobs_submitted - i)) {
+ /* The last job in the slot, check if there is a job in the next register */
+ if (kbase_reg_read(kbdev, JOB_SLOT_REG(js, JSn_COMMAND_NEXT), NULL) == 0) {
+ kbasep_job_slot_soft_or_hard_stop_do_action(kbdev, js, action, core_reqs, katom);
+ } else {
+ /* The job is in the next registers */
+ beenthere("clearing job from next registers on slot %d", js);
+ kbase_reg_write(kbdev, JOB_SLOT_REG(js, JSn_COMMAND_NEXT), JSn_COMMAND_NOP, NULL);
+
+ /* Check to see if we did remove a job from the next registers */
+ if (kbase_reg_read(kbdev, JOB_SLOT_REG(js, JSn_HEAD_NEXT_LO), NULL) != 0 || kbase_reg_read(kbdev, JOB_SLOT_REG(js, JSn_HEAD_NEXT_HI), NULL) != 0) {
+ /* The job was successfully cleared from the next registers, requeue it */
+ kbase_jd_atom *dequeued_katom = kbasep_jm_dequeue_tail_submit_slot(slot);
+ KBASE_DEBUG_ASSERT(dequeued_katom == katom);
+ jobs_submitted--;
+
+ /* Set the next registers to NULL */
+ kbase_reg_write(kbdev, JOB_SLOT_REG(js, JSn_HEAD_NEXT_LO), 0, NULL);
+ kbase_reg_write(kbdev, JOB_SLOT_REG(js, JSn_HEAD_NEXT_HI), 0, NULL);
+
+ KBASE_TRACE_ADD_SLOT(kbdev, JM_SLOT_EVICT, dequeued_katom->kctx, dequeued_katom, dequeued_katom->jc, js);
+
+ /* Complete the job, indicate it took no time, but don't submit any more at this point */
+ kbase_jd_done(dequeued_katom, js, NULL, KBASE_JS_ATOM_DONE_EVICTED_FROM_NEXT);
+ } else {
+ /* The job transitioned into the current registers before we managed to evict it,
+ * in this case we fall back to soft/hard-stopping the job */
+ beenthere("missed job in next register, soft/hard-stopping slot %d", js);
+ kbasep_job_slot_soft_or_hard_stop_do_action(kbdev, js, action, core_reqs, katom);
+ }
+ }
+ } else if (JM_JOB_IS_NEXT_JOB_INDEX(jobs_submitted - i)) {
+ /* There's a job after this one, check to see if that job is in the next registers */
+ if (kbase_reg_read(kbdev, JOB_SLOT_REG(js, JSn_COMMAND_NEXT), NULL) != 0) {
+ kbase_jd_atom *check_next_atom;
+ /* It is - we should remove that job and soft/hard-stop the slot */
+
+				/* Only proceed when the next job isn't a HW workaround 'dummy' job
+ *
+ * This can't be an ASSERT due to MMU fault code:
+ * - This first hard-stops the job that caused the fault
+ * - Under HW Issue 8401, this inserts a dummy workaround job into NEXT
+ * - Under HW Issue 8245, it will then reset the GPU
+ * - This causes a Soft-stop to occur on all slots
+ * - By the time of the soft-stop, we may (depending on timing) still have:
+ * - The original job in HEAD, if it's not finished the hard-stop
+ * - The dummy workaround job in NEXT
+ *
+ * Other cases could be coded in future that cause back-to-back Soft/Hard
+ * stops with dummy workaround jobs in place, e.g. MMU handler code and Job
+ * Scheduler watchdog timer running in parallel.
+ *
+ * Note, the index i+1 is valid to peek from: i == jobs_submitted-2, therefore
+ * i+1 == jobs_submitted-1 */
+ check_next_atom = kbasep_jm_peek_idx_submit_slot(slot, i + 1);
+ if (kbasep_jm_is_dummy_workaround_job(kbdev, check_next_atom) != MALI_FALSE)
+ continue;
+
+ beenthere("clearing job from next registers on slot %d", js);
+ kbase_reg_write(kbdev, JOB_SLOT_REG(js, JSn_COMMAND_NEXT), JSn_COMMAND_NOP, NULL);
+
+ /* Check to see if we did remove a job from the next registers */
+ if (kbase_reg_read(kbdev, JOB_SLOT_REG(js, JSn_HEAD_NEXT_LO), NULL) != 0 || kbase_reg_read(kbdev, JOB_SLOT_REG(js, JSn_HEAD_NEXT_HI), NULL) != 0) {
+ /* We did remove a job from the next registers, requeue it */
+ kbase_jd_atom *dequeued_katom = kbasep_jm_dequeue_tail_submit_slot(slot);
+ KBASE_DEBUG_ASSERT(dequeued_katom != NULL);
+ jobs_submitted--;
+
+ /* Set the next registers to NULL */
+ kbase_reg_write(kbdev, JOB_SLOT_REG(js, JSn_HEAD_NEXT_LO), 0, NULL);
+ kbase_reg_write(kbdev, JOB_SLOT_REG(js, JSn_HEAD_NEXT_HI), 0, NULL);
+
+ KBASE_TRACE_ADD_SLOT(kbdev, JM_SLOT_EVICT, dequeued_katom->kctx, dequeued_katom, dequeued_katom->jc, js);
+
+ /* Complete the job, indicate it took no time, but don't submit any more at this point */
+ kbase_jd_done(dequeued_katom, js, NULL, KBASE_JS_ATOM_DONE_EVICTED_FROM_NEXT);
+ } else {
+ /* We missed the job, that means the job we're interested in left the hardware before
+ * we managed to do anything, so we can proceed to the next job */
+ continue;
+ }
+
+ /* Next is now free, so we can soft/hard-stop the slot */
+ beenthere("soft/hard-stopped slot %d (there was a job in next which was successfully cleared)\n", js);
+ kbasep_job_slot_soft_or_hard_stop_do_action(kbdev, js, action, core_reqs, katom);
+ }
+ /* If there was no job in the next registers, then the job we were
+ * interested in has finished, so we need not take any action
+ */
+ }
+ }
+
+ KBASE_TIMELINE_TRY_SOFT_STOP(kctx, js, 0);
+}
+
+void kbase_job_kill_jobs_from_context(kbase_context *kctx)
+{
+ unsigned long flags;
+ kbase_device *kbdev;
+ kbasep_js_device_data *js_devdata;
+ int i;
+
+ KBASE_DEBUG_ASSERT(kctx != NULL);
+ kbdev = kctx->kbdev;
+ KBASE_DEBUG_ASSERT(kbdev != NULL);
+ js_devdata = &kbdev->js_data;
+
+ /* Cancel any remaining running jobs for this kctx */
+ spin_lock_irqsave(&js_devdata->runpool_irq.lock, flags);
+ for (i = 0; i < kbdev->gpu_props.num_job_slots; i++)
+ kbase_job_slot_hardstop(kctx, i, NULL);
+
+ spin_unlock_irqrestore(&js_devdata->runpool_irq.lock, flags);
+}
+
+void kbase_job_zap_context(kbase_context *kctx)
+{
+ kbase_device *kbdev;
+ kbasep_js_device_data *js_devdata;
+ kbasep_js_kctx_info *js_kctx_info;
+ int i;
+ mali_bool evict_success;
+
+ KBASE_DEBUG_ASSERT(kctx != NULL);
+ kbdev = kctx->kbdev;
+ KBASE_DEBUG_ASSERT(kbdev != NULL);
+ js_devdata = &kbdev->js_data;
+ js_kctx_info = &kctx->jctx.sched_info;
+
+ /*
+ * Critical assumption: No more submission is possible outside of the
+ * workqueue. This is because the OS *must* prevent U/K calls (IOCTLs)
+ * whilst the kbase_context is terminating.
+ */
+
+ /* First, atomically do the following:
+ * - mark the context as dying
+ * - try to evict it from the policy queue */
+
+ mutex_lock(&js_kctx_info->ctx.jsctx_mutex);
+ js_kctx_info->ctx.is_dying = MALI_TRUE;
+
+ KBASE_DEBUG_PRINT_INFO(KBASE_JM, "Zap: Try Evict Ctx %p", kctx);
+ mutex_lock(&js_devdata->queue_mutex);
+ evict_success = kbasep_js_policy_try_evict_ctx(&js_devdata->policy, kctx);
+ mutex_unlock(&js_devdata->queue_mutex);
+
+ /*
+ * At this point we know:
+ * - If eviction succeeded, it was in the policy queue, but now no longer is
+ * - We must cancel the jobs here. No Power Manager active reference to
+ * release.
+ * - This happens asynchronously - kbase_jd_zap_context() will wait for
+ * those jobs to be killed.
+ * - If eviction failed, then it wasn't in the policy queue. It is one of
+ * the following:
+ * - a. it didn't have any jobs, and so is not in the Policy Queue or the
+ * Run Pool (not scheduled)
+ * - Hence, no more work required to cancel jobs. No Power Manager active
+ * reference to release.
+ * - b. it was in the middle of a scheduling transaction (and thus must
+ * have at least 1 job). This can happen from a syscall or a kernel thread.
+ * We still hold the jsctx_mutex, and so the thread must be waiting inside
+ * kbasep_js_try_schedule_head_ctx(), before checking whether the runpool
+ * is full. That thread will continue after we drop the mutex, and will
+ * notice the context is dying. It will rollback the transaction, killing
+ * all jobs at the same time. kbase_jd_zap_context() will wait for those
+ * jobs to be killed.
+ * - Hence, no more work required to cancel jobs, or to release the Power
+ * Manager active reference.
+ * - c. it is scheduled, and may or may not be running jobs
+ * - We must cause it to leave the runpool by stopping it from submitting
+ * any more jobs. When it finally does leave,
+ * kbasep_js_runpool_requeue_or_kill_ctx() will kill all remaining jobs
+ * (because it is dying), release the Power Manager active reference, and
+ * will not requeue the context in the policy queue. kbase_jd_zap_context()
+ * will wait for those jobs to be killed.
+ * - Hence, work required just to make it leave the runpool. Cancelling
+ * jobs and releasing the Power manager active reference will be handled
+ * when it leaves the runpool.
+ */
+
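+	/* Decision summary (sketch of the branches below):
+	 *   evicted from policy queue       -> kill remaining jobs via
+	 *                                      kbasep_js_runpool_requeue_or_kill_ctx()
+	 *   not evicted, not scheduled      -> nothing more to do (cases a/b)
+	 *   not evicted, scheduled (case c) -> disallow submission, hard-stop
+	 *                                      all slots, release the runpool
+	 *                                      reference
+	 */
+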
+ if (evict_success != MALI_FALSE || js_kctx_info->ctx.is_scheduled == MALI_FALSE) {
+ /* The following events require us to kill off remaining jobs and
+ * update PM book-keeping:
+ * - we evicted it correctly (it must have jobs to be in the Policy Queue)
+ *
+ * These events need no action, but take this path anyway:
+ * - Case a: it didn't have any jobs, and was never in the Queue
+ * - Case b: scheduling transaction will be partially rolled-back (this
+ * already cancels the jobs)
+ */
+
+ KBASE_TRACE_ADD(kbdev, JM_ZAP_NON_SCHEDULED, kctx, NULL, 0u, js_kctx_info->ctx.is_scheduled);
+
+ KBASE_DEBUG_PRINT_INFO(KBASE_JM, "Zap: Ctx %p evict_success=%d, scheduled=%d", kctx, evict_success, js_kctx_info->ctx.is_scheduled);
+
+ if (evict_success != MALI_FALSE) {
+ /* Only cancel jobs when we evicted from the policy queue. No Power
+ * Manager active reference was held.
+ *
+ * Having is_dying set ensures that this kills, and doesn't requeue */
+ kbasep_js_runpool_requeue_or_kill_ctx(kbdev, kctx, MALI_FALSE);
+ }
+ mutex_unlock(&js_kctx_info->ctx.jsctx_mutex);
+ } else {
+ unsigned long flags;
+ mali_bool was_retained;
+ /* Case c: didn't evict, but it is scheduled - it's in the Run Pool */
+ KBASE_TRACE_ADD(kbdev, JM_ZAP_SCHEDULED, kctx, NULL, 0u, js_kctx_info->ctx.is_scheduled);
+ KBASE_DEBUG_PRINT_INFO(KBASE_JM, "Zap: Ctx %p is in RunPool", kctx);
+
+ /* Disable the ctx from submitting any more jobs */
+ spin_lock_irqsave(&js_devdata->runpool_irq.lock, flags);
+ kbasep_js_clear_submit_allowed(js_devdata, kctx);
+
+		/* Retain and (later) release the context whilst it is now disallowed from submitting
+ * jobs - ensures that someone somewhere will be removing the context later on */
+ was_retained = kbasep_js_runpool_retain_ctx_nolock(kbdev, kctx);
+
+ /* Since it's scheduled and we have the jsctx_mutex, it must be retained successfully */
+ KBASE_DEBUG_ASSERT(was_retained != MALI_FALSE);
+
+ KBASE_DEBUG_PRINT_INFO(KBASE_JM, "Zap: Ctx %p Kill Any Running jobs", kctx);
+ /* Cancel any remaining running jobs for this kctx - if any. Submit is disallowed
+ * which takes effect immediately, so no more new jobs will appear after we do this. */
+ for (i = 0; i < kbdev->gpu_props.num_job_slots; i++)
+ kbase_job_slot_hardstop(kctx, i, NULL);
+
+ spin_unlock_irqrestore(&js_devdata->runpool_irq.lock, flags);
+ mutex_unlock(&js_kctx_info->ctx.jsctx_mutex);
+
+ KBASE_DEBUG_PRINT_INFO(KBASE_JM, "Zap: Ctx %p Release (may or may not schedule out immediately)", kctx);
+ kbasep_js_runpool_release_ctx(kbdev, kctx);
+ }
+ KBASE_TRACE_ADD(kbdev, JM_ZAP_DONE, kctx, NULL, 0u, 0u);
+
+ /* After this, you must wait on both the kbase_jd_context::zero_jobs_wait
+ * and the kbasep_js_kctx_info::ctx::is_scheduled_waitq - to wait for the
+ * jobs to be destroyed, and the context to be de-scheduled (if it was on
+ * the runpool).
+ *
+ * kbase_jd_zap_context() will do this. */
+}
+KBASE_EXPORT_TEST_API(kbase_job_zap_context)
+
+mali_error kbase_job_slot_init(kbase_device *kbdev)
+{
+ int i;
+ KBASE_DEBUG_ASSERT(kbdev);
+
+ for (i = 0; i < kbdev->gpu_props.num_job_slots; i++)
+ kbasep_jm_init_submit_slot(&kbdev->jm_slots[i]);
+
+ return MALI_ERROR_NONE;
+}
+KBASE_EXPORT_TEST_API(kbase_job_slot_init)
+
+void kbase_job_slot_halt(kbase_device *kbdev)
+{
+ CSTD_UNUSED(kbdev);
+}
+
+void kbase_job_slot_term(kbase_device *kbdev)
+{
+ KBASE_DEBUG_ASSERT(kbdev);
+}
+KBASE_EXPORT_TEST_API(kbase_job_slot_term)
+
+/**
+ * Soft-stop the specified job slot
+ *
+ * The job slot lock must be held when calling this function.
+ * The job slot must not already be in the process of being soft-stopped.
+ *
+ * Where possible any job in the next register is evicted before the soft-stop.
+ *
+ * @param kbdev The kbase device
+ * @param js The job slot to soft-stop
+ * @param target_katom The job that should be soft-stopped (or NULL for any job)
+ */
+void kbase_job_slot_softstop(kbase_device *kbdev, int js, kbase_jd_atom *target_katom)
+{
+ kbasep_job_slot_soft_or_hard_stop(kbdev, NULL, js, target_katom, JSn_COMMAND_SOFT_STOP);
+}
+
+/**
+ * Hard-stop the specified job slot
+ *
+ * The job slot lock must be held when calling this function.
+ *
+ * @param kctx The kbase context that contains the job(s) that should be hard-stopped
+ * @param js The job slot to hard-stop
+ * @param target_katom The job that should be hard-stopped (or NULL for all jobs from the context)
+ */
+void kbase_job_slot_hardstop(kbase_context *kctx, int js, kbase_jd_atom *target_katom)
+{
+ kbase_device *kbdev = kctx->kbdev;
+ kbasep_job_slot_soft_or_hard_stop(kbdev, kctx, js, target_katom, JSn_COMMAND_HARD_STOP);
+
+ if (kbase_hw_has_issue(kctx->kbdev, BASE_HW_ISSUE_8401) || kbase_hw_has_issue(kctx->kbdev, BASE_HW_ISSUE_9510)) {
+ /* The workaround for HW issue 8401 has an issue, so instead of hard-stopping
+ * just reset the GPU. This will ensure that the jobs leave the GPU.
+ */
+ if (kbase_prepare_to_reset_gpu_locked(kbdev)) {
+			KBASE_DEBUG_PRINT_ERROR(KBASE_JD, "Issuing GPU soft-reset instead of hard-stopping job due to a hardware issue");
+ kbase_reset_gpu_locked(kbdev);
+ }
+ }
+}
+
+void kbasep_reset_timeout_worker(struct work_struct *data)
+{
+ unsigned long flags;
+ kbase_device *kbdev;
+ int i;
+ ktime_t end_timestamp = ktime_get();
+ kbasep_js_device_data *js_devdata;
+ kbase_uk_hwcnt_setup hwcnt_setup = { {0} };
+ kbase_instr_state bckp_state;
+
+ KBASE_DEBUG_ASSERT(data);
+
+ kbdev = container_of(data, kbase_device, reset_work);
+
+ KBASE_DEBUG_ASSERT(kbdev);
+ js_devdata = &kbdev->js_data;
+
+ KBASE_TRACE_ADD(kbdev, JM_BEGIN_RESET_WORKER, NULL, NULL, 0u, 0);
+
+ /* Make sure the timer has completed - this cannot be done from interrupt context,
+ * so this cannot be done within kbasep_try_reset_gpu_early. */
+ hrtimer_cancel(&kbdev->reset_timer);
+
+ if (kbase_pm_context_active_handle_suspend(kbdev, KBASE_PM_SUSPEND_HANDLER_DONT_REACTIVATE)) {
+ /* This would re-activate the GPU. Since it's already idle, there's no
+ * need to reset it */
+ atomic_set(&kbdev->reset_gpu, KBASE_RESET_GPU_NOT_PENDING);
+ wake_up(&kbdev->reset_wait);
+ return;
+ }
+
+ mutex_lock(&kbdev->pm.lock);
+ /* We hold the pm lock, so there ought to be a current policy */
+ KBASE_DEBUG_ASSERT(kbdev->pm.pm_current_policy);
+
+	/* All slots have been soft-stopped and we've waited SOFT_STOP_RESET_TIMEOUT for the slots to clear.
+	 * At this point we assume that anything still left on the GPU is stuck there and we'll kill it when we reset the GPU */
+
+ KBASE_DEBUG_PRINT_ERROR(KBASE_JD, "Resetting GPU (allowing up to %d ms)", RESET_TIMEOUT);
+
+ spin_lock_irqsave(&kbdev->hwcnt.lock, flags);
+
+	if (kbdev->hwcnt.state == KBASE_INSTR_STATE_RESETTING) {	/* the same interrupt handler preempted itself */
+ /* GPU is being reset */
+ spin_unlock_irqrestore(&kbdev->hwcnt.lock, flags);
+ wait_event(kbdev->hwcnt.wait, kbdev->hwcnt.triggered != 0);
+ spin_lock_irqsave(&kbdev->hwcnt.lock, flags);
+ }
+ /* Save the HW counters setup */
+ if (kbdev->hwcnt.kctx != NULL) {
+ kbase_context *kctx = kbdev->hwcnt.kctx;
+ hwcnt_setup.dump_buffer = kbase_reg_read(kbdev, GPU_CONTROL_REG(PRFCNT_BASE_LO), kctx) & 0xffffffff;
+ hwcnt_setup.dump_buffer |= (mali_addr64) kbase_reg_read(kbdev, GPU_CONTROL_REG(PRFCNT_BASE_HI), kctx) << 32;
+ hwcnt_setup.jm_bm = kbase_reg_read(kbdev, GPU_CONTROL_REG(PRFCNT_JM_EN), kctx);
+ hwcnt_setup.shader_bm = kbase_reg_read(kbdev, GPU_CONTROL_REG(PRFCNT_SHADER_EN), kctx);
+ hwcnt_setup.tiler_bm = kbase_reg_read(kbdev, GPU_CONTROL_REG(PRFCNT_TILER_EN), kctx);
+ hwcnt_setup.l3_cache_bm = kbase_reg_read(kbdev, GPU_CONTROL_REG(PRFCNT_L3_CACHE_EN), kctx);
+ hwcnt_setup.mmu_l2_bm = kbase_reg_read(kbdev, GPU_CONTROL_REG(PRFCNT_MMU_L2_EN), kctx);
+ }
+ bckp_state = kbdev->hwcnt.state;
+ kbdev->hwcnt.state = KBASE_INSTR_STATE_RESETTING;
+ kbdev->hwcnt.triggered = 0;
+	/* Disable IRQs to prevent IRQ handlers from kicking in after releasing
+	 * the spinlock; this also clears any outstanding interrupts */
+ kbase_pm_disable_interrupts(kbdev);
+ /* Ensure that any IRQ handlers have finished */
+ kbase_synchronize_irqs(kbdev);
+ spin_unlock_irqrestore(&kbdev->hwcnt.lock, flags);
+
+ /* Reset the GPU */
+ kbase_pm_init_hw(kbdev, MALI_TRUE);
+ /* IRQs were re-enabled by kbase_pm_init_hw, and GPU is still powered */
+
+ spin_lock_irqsave(&kbdev->hwcnt.lock, flags);
+ /* Restore the HW counters setup */
+ if (kbdev->hwcnt.kctx != NULL) {
+ kbase_context *kctx = kbdev->hwcnt.kctx;
+ kbase_reg_write(kbdev, GPU_CONTROL_REG(PRFCNT_CONFIG), (kctx->as_nr << PRFCNT_CONFIG_AS_SHIFT) | PRFCNT_CONFIG_MODE_OFF, kctx);
+ kbase_reg_write(kbdev, GPU_CONTROL_REG(PRFCNT_BASE_LO), hwcnt_setup.dump_buffer & 0xFFFFFFFF, kctx);
+ kbase_reg_write(kbdev, GPU_CONTROL_REG(PRFCNT_BASE_HI), hwcnt_setup.dump_buffer >> 32, kctx);
+ kbase_reg_write(kbdev, GPU_CONTROL_REG(PRFCNT_JM_EN), hwcnt_setup.jm_bm, kctx);
+ kbase_reg_write(kbdev, GPU_CONTROL_REG(PRFCNT_SHADER_EN), hwcnt_setup.shader_bm, kctx);
+ kbase_reg_write(kbdev, GPU_CONTROL_REG(PRFCNT_L3_CACHE_EN), hwcnt_setup.l3_cache_bm, kctx);
+ kbase_reg_write(kbdev, GPU_CONTROL_REG(PRFCNT_MMU_L2_EN), hwcnt_setup.mmu_l2_bm, kctx);
+
+ /* Due to PRLAM-8186 we need to disable the Tiler before we enable the HW counter dump. */
+ if (kbase_hw_has_issue(kbdev, BASE_HW_ISSUE_8186))
+ kbase_reg_write(kbdev, GPU_CONTROL_REG(PRFCNT_TILER_EN), 0, kctx);
+ else
+ kbase_reg_write(kbdev, GPU_CONTROL_REG(PRFCNT_TILER_EN), hwcnt_setup.tiler_bm, kctx);
+
+ kbase_reg_write(kbdev, GPU_CONTROL_REG(PRFCNT_CONFIG), (kctx->as_nr << PRFCNT_CONFIG_AS_SHIFT) | PRFCNT_CONFIG_MODE_MANUAL, kctx);
+
+ /* If HW has PRLAM-8186 we can now re-enable the tiler HW counters dump */
+ if (kbase_hw_has_issue(kbdev, BASE_HW_ISSUE_8186))
+ kbase_reg_write(kbdev, GPU_CONTROL_REG(PRFCNT_TILER_EN), hwcnt_setup.tiler_bm, kctx);
+ }
+ kbdev->hwcnt.state = bckp_state;
+	switch (kbdev->hwcnt.state) {
+	/* Cases for waking kbasep_cache_clean_worker */
+	case KBASE_INSTR_STATE_CLEANED:
+		/* Cache-clean IRQ occurred, but we reset:
+		 * Wake up in case the waiter saw RESETTING */
+	case KBASE_INSTR_STATE_REQUEST_CLEAN:
+		/* After a clean was requested, but before the regs were written:
+		 * Wake up in case the waiter saw RESETTING */
+ wake_up(&kbdev->hwcnt.cache_clean_wait);
+ break;
+ case KBASE_INSTR_STATE_CLEANING:
+ /* Either:
+ * 1) We've not got the Cache-clean IRQ yet: it was lost, or:
+ * 2) We got it whilst resetting: it was voluntarily lost
+ *
+ * So, move to the next state and wakeup: */
+ kbdev->hwcnt.state = KBASE_INSTR_STATE_CLEANED;
+ wake_up(&kbdev->hwcnt.cache_clean_wait);
+ break;
+
+ /* Cases for waking anyone else */
+ case KBASE_INSTR_STATE_DUMPING:
+		/* If dumping, abort the dump, because we may have lost the IRQ */
+ kbdev->hwcnt.state = KBASE_INSTR_STATE_IDLE;
+ kbdev->hwcnt.triggered = 1;
+ wake_up(&kbdev->hwcnt.wait);
+ break;
+ case KBASE_INSTR_STATE_DISABLED:
+ case KBASE_INSTR_STATE_IDLE:
+ case KBASE_INSTR_STATE_FAULT:
+ /* Every other reason: wakeup in that state */
+ kbdev->hwcnt.triggered = 1;
+ wake_up(&kbdev->hwcnt.wait);
+ break;
+
+ /* Unhandled cases */
+ case KBASE_INSTR_STATE_RESETTING:
+ default:
+ BUG();
+ break;
+ }
+ spin_unlock_irqrestore(&kbdev->hwcnt.lock, flags);
+
+ /* Complete any jobs that were still on the GPU */
+ spin_lock_irqsave(&js_devdata->runpool_irq.lock, flags);
+ for (i = 0; i < kbdev->gpu_props.num_job_slots; i++) {
+ int nr_done;
+ kbase_jm_slot *slot = &kbdev->jm_slots[i];
+
+ nr_done = kbasep_jm_nr_jobs_submitted(slot);
+ while (nr_done) {
+ KBASE_DEBUG_PRINT_ERROR(KBASE_JD, "Job stuck in slot %d on the GPU was cancelled", i);
+ kbase_job_done_slot(kbdev, i, BASE_JD_EVENT_JOB_CANCELLED, 0, &end_timestamp);
+ nr_done--;
+ }
+ }
+ spin_unlock_irqrestore(&js_devdata->runpool_irq.lock, flags);
+
+ mutex_lock(&js_devdata->runpool_mutex);
+
+ /* Reprogram the GPU's MMU */
+ for (i = 0; i < BASE_MAX_NR_AS; i++) {
+ if (js_devdata->runpool_irq.per_as_data[i].kctx) {
+ kbase_as *as = &kbdev->as[i];
+ mutex_lock(&as->transaction_mutex);
+ kbase_mmu_update(js_devdata->runpool_irq.per_as_data[i].kctx);
+ mutex_unlock(&as->transaction_mutex);
+ }
+ }
+
+ atomic_set(&kbdev->reset_gpu, KBASE_RESET_GPU_NOT_PENDING);
+ wake_up(&kbdev->reset_wait);
+ KBASE_DEBUG_PRINT_ERROR(KBASE_JD, "Reset complete");
+
+ /* Find out what cores are required now */
+ kbase_pm_update_cores_state(kbdev);
+
+ /* Synchronously request and wait for those cores, because if
+ * instrumentation is enabled it would need them immediately. */
+ kbase_pm_check_transitions_sync(kbdev);
+
+ /* Try submitting some jobs to restart processing */
+ if (js_devdata->nr_user_contexts_running > 0) {
+ KBASE_TRACE_ADD(kbdev, JM_SUBMIT_AFTER_RESET, NULL, NULL, 0u, 0);
+
+ spin_lock_irqsave(&js_devdata->runpool_irq.lock, flags);
+ kbasep_js_try_run_next_job_nolock(kbdev);
+ spin_unlock_irqrestore(&js_devdata->runpool_irq.lock, flags);
+ }
+ mutex_unlock(&js_devdata->runpool_mutex);
+ mutex_unlock(&kbdev->pm.lock);
+
+ kbase_pm_context_idle(kbdev);
+ KBASE_TRACE_ADD(kbdev, JM_END_RESET_WORKER, NULL, NULL, 0u, 0);
+}
+
+enum hrtimer_restart kbasep_reset_timer_callback(struct hrtimer *timer)
+{
+ kbase_device *kbdev = container_of(timer, kbase_device, reset_timer);
+
+ KBASE_DEBUG_ASSERT(kbdev);
+
+ /* Reset still pending? */
+ if (atomic_cmpxchg(&kbdev->reset_gpu, KBASE_RESET_GPU_COMMITTED, KBASE_RESET_GPU_HAPPENING) == KBASE_RESET_GPU_COMMITTED)
+ queue_work(kbdev->reset_workq, &kbdev->reset_work);
+
+ return HRTIMER_NORESTART;
+}
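+
+/* Summary of the reset state machine (for reference; the states and
+ * transitions below are the ones used in this file):
+ *
+ *   NOT_PENDING --kbase_prepare_to_reset_gpu()--> PREPARED
+ *   PREPARED ----kbase_reset_gpu()-------------> COMMITTED
+ *   COMMITTED ---timer cb / early reset--------> HAPPENING
+ *   HAPPENING ---reset worker completes--------> NOT_PENDING
+ *
+ * The transitions into HAPPENING use atomic_cmpxchg() so that exactly one
+ * thread queues the reset worker. */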
+
+/*
+ * If all jobs are evicted from the GPU then we can reset the GPU
+ * immediately instead of waiting for the timeout to elapse
+ */
+
+static void kbasep_try_reset_gpu_early_locked(kbase_device *kbdev)
+{
+ int i;
+ int pending_jobs = 0;
+
+ KBASE_DEBUG_ASSERT(kbdev);
+
+ /* Count the number of jobs */
+ for (i = 0; i < kbdev->gpu_props.num_job_slots; i++) {
+ kbase_jm_slot *slot = &kbdev->jm_slots[i];
+ pending_jobs += kbasep_jm_nr_jobs_submitted(slot);
+ }
+
+ if (pending_jobs > 0) {
+ /* There are still jobs on the GPU - wait */
+ return;
+ }
+
+ /* Check that the reset has been committed to (i.e. kbase_reset_gpu has been called), and that no other
+ * thread beat this thread to starting the reset */
+ if (atomic_cmpxchg(&kbdev->reset_gpu, KBASE_RESET_GPU_COMMITTED, KBASE_RESET_GPU_HAPPENING) != KBASE_RESET_GPU_COMMITTED) {
+ /* Reset has already occurred */
+ return;
+ }
+ queue_work(kbdev->reset_workq, &kbdev->reset_work);
+}
+
+static void kbasep_try_reset_gpu_early(kbase_device *kbdev)
+{
+ unsigned long flags;
+ kbasep_js_device_data *js_devdata;
+
+ js_devdata = &kbdev->js_data;
+ spin_lock_irqsave(&js_devdata->runpool_irq.lock, flags);
+ kbasep_try_reset_gpu_early_locked(kbdev);
+ spin_unlock_irqrestore(&js_devdata->runpool_irq.lock, flags);
+}
+
+/*
+ * Prepare for resetting the GPU.
+ * This function just soft-stops all the slots to ensure that as many jobs as possible are saved.
+ *
+ * The function returns a boolean which should be interpreted as follows:
+ * - MALI_TRUE - Prepared for reset, kbase_reset_gpu should be called.
+ * - MALI_FALSE - Another thread is performing a reset, kbase_reset_gpu should not be called.
+ *
+ * @return See description
+ */
+mali_bool kbase_prepare_to_reset_gpu_locked(kbase_device *kbdev)
+{
+ int i;
+
+ KBASE_DEBUG_ASSERT(kbdev);
+
+ if (atomic_cmpxchg(&kbdev->reset_gpu, KBASE_RESET_GPU_NOT_PENDING, KBASE_RESET_GPU_PREPARED) != KBASE_RESET_GPU_NOT_PENDING) {
+ /* Some other thread is already resetting the GPU */
+ return MALI_FALSE;
+ }
+
+ for (i = 0; i < kbdev->gpu_props.num_job_slots; i++)
+ kbase_job_slot_softstop(kbdev, i, NULL);
+
+ return MALI_TRUE;
+}
+
+mali_bool kbase_prepare_to_reset_gpu(kbase_device *kbdev)
+{
+ unsigned long flags;
+ mali_bool ret;
+ kbasep_js_device_data *js_devdata;
+
+ js_devdata = &kbdev->js_data;
+ spin_lock_irqsave(&js_devdata->runpool_irq.lock, flags);
+ ret = kbase_prepare_to_reset_gpu_locked(kbdev);
+ spin_unlock_irqrestore(&js_devdata->runpool_irq.lock, flags);
+
+ return ret;
+}
+KBASE_EXPORT_TEST_API(kbase_prepare_to_reset_gpu)
+
+/*
+ * This function should be called after kbase_prepare_to_reset_gpu if and only if it returns MALI_TRUE.
+ * It should never be called without a corresponding call to kbase_prepare_to_reset_gpu.
+ *
+ * After this function is called (or not called, if kbase_prepare_to_reset_gpu returned MALI_FALSE),
+ * the caller should wait for kbdev->reset_wait to be signalled to know when the reset has completed.
+ */
+void kbase_reset_gpu(kbase_device *kbdev)
+{
+ u32 timeout_ms;
+
+ KBASE_DEBUG_ASSERT(kbdev);
+
+ /* Note this is an assert/atomic_set because it is a software issue for a race to be occurring here */
+ KBASE_DEBUG_ASSERT(atomic_read(&kbdev->reset_gpu) == KBASE_RESET_GPU_PREPARED);
+ atomic_set(&kbdev->reset_gpu, KBASE_RESET_GPU_COMMITTED);
+
+ timeout_ms = kbasep_get_config_value(kbdev, kbdev->config_attributes, KBASE_CONFIG_ATTR_JS_RESET_TIMEOUT_MS);
+ KBASE_DEBUG_PRINT_ERROR(KBASE_JD, "Preparing to soft-reset GPU: Waiting (up to %d ms) for all jobs to complete soft-stop\n", timeout_ms);
+ hrtimer_start(&kbdev->reset_timer, HR_TIMER_DELAY_MSEC(timeout_ms), HRTIMER_MODE_REL);
+
+ /* Try resetting early */
+ kbasep_try_reset_gpu_early(kbdev);
+}
+KBASE_EXPORT_TEST_API(kbase_reset_gpu)
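+
+/* Minimal usage sketch (illustrative): a caller that detects a hung GPU
+ * pairs the two calls and then waits for the reset worker to finish:
+ *
+ *   if (kbase_prepare_to_reset_gpu(kbdev))
+ *           kbase_reset_gpu(kbdev);
+ *   wait_event(kbdev->reset_wait,
+ *              atomic_read(&kbdev->reset_gpu) == KBASE_RESET_GPU_NOT_PENDING);
+ */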
+
+void kbase_reset_gpu_locked(kbase_device *kbdev)
+{
+ u32 timeout_ms;
+
+ KBASE_DEBUG_ASSERT(kbdev);
+
+ /* Note this is an assert/atomic_set because it is a software issue for a race to be occurring here */
+ KBASE_DEBUG_ASSERT(atomic_read(&kbdev->reset_gpu) == KBASE_RESET_GPU_PREPARED);
+ atomic_set(&kbdev->reset_gpu, KBASE_RESET_GPU_COMMITTED);
+
+ timeout_ms = kbasep_get_config_value(kbdev, kbdev->config_attributes, KBASE_CONFIG_ATTR_JS_RESET_TIMEOUT_MS);
+ KBASE_DEBUG_PRINT_ERROR(KBASE_JD, "Preparing to soft-reset GPU: Waiting (up to %d ms) for all jobs to complete soft-stop\n", timeout_ms);
+ hrtimer_start(&kbdev->reset_timer, HR_TIMER_DELAY_MSEC(timeout_ms), HRTIMER_MODE_REL);
+
+ /* Try resetting early */
+ kbasep_try_reset_gpu_early_locked(kbdev);
+}
diff --git a/drivers/gpu/arm/t6xx/kbase/src/common/mali_kbase_jm.h b/drivers/gpu/arm/t6xx/kbase/src/common/mali_kbase_jm.h
new file mode 100755
index 00000000000..6bd70865984
--- /dev/null
+++ b/drivers/gpu/arm/t6xx/kbase/src/common/mali_kbase_jm.h
@@ -0,0 +1,202 @@
+/*
+ *
+ * (C) COPYRIGHT 2011-2012 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained
+ * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
+ * Boston, MA 02110-1301, USA.
+ *
+ */
+
+
+
+/**
+ * @file mali_kbase_jm.h
+ * Job Manager Low-level APIs.
+ */
+
+#ifndef _KBASE_JM_H_
+#define _KBASE_JM_H_
+
+#include <kbase/src/common/mali_kbase_8401_workaround.h>
+#include <kbase/src/common/mali_kbase_hw.h>
+#include <linux/atomic.h>
+
+/**
+ * @addtogroup base_api
+ * @{
+ */
+
+/**
+ * @addtogroup base_kbase_api
+ * @{
+ */
+
+/**
+ * @addtogroup kbase_jm Job Manager Low-level APIs
+ * @{
+ *
+ */
+
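+/**
+ * Check whether job slot \a js is free to accept a new job, i.e. nothing is
+ * queued in its NEXT registers.
+ *
+ * @return non-zero if the slot's JSn_COMMAND_NEXT register reads as zero
+ */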
+static INLINE int kbasep_jm_is_js_free(kbase_device *kbdev, int js, kbase_context *kctx)
+{
+ KBASE_DEBUG_ASSERT(kbdev != NULL);
+ KBASE_DEBUG_ASSERT(0 <= js && js < kbdev->gpu_props.num_job_slots);
+
+ return !kbase_reg_read(kbdev, JOB_SLOT_REG(js, JSn_COMMAND_NEXT), kctx);
+}
+
+/**
+ * This checks that:
+ * - there is enough space in the GPU's buffers (JSn_NEXT and JSn_HEAD registers) to accommodate the job, and
+ * - there is enough space to track the job in our Submit Slots. Note that we have to maintain space to
+ * requeue one job in case the next registers on the hardware need to be cleared.
+ */
+static INLINE mali_bool kbasep_jm_is_submit_slots_free(kbase_device *kbdev, int js, kbase_context *kctx)
+{
+ KBASE_DEBUG_ASSERT(kbdev != NULL);
+ KBASE_DEBUG_ASSERT(0 <= js && js < kbdev->gpu_props.num_job_slots);
+
+ if (atomic_read(&kbdev->reset_gpu) != KBASE_RESET_GPU_NOT_PENDING) {
+ /* The GPU is being reset - so prevent submission */
+ return MALI_FALSE;
+ }
+
+ return (mali_bool) (kbasep_jm_is_js_free(kbdev, js, kctx)
+ && kbdev->jm_slots[js].submitted_nr < (BASE_JM_SUBMIT_SLOTS - 2));
+}
+
+/**
+ * Initialize a submit slot
+ */
+static INLINE void kbasep_jm_init_submit_slot(kbase_jm_slot *slot)
+{
+ slot->submitted_nr = 0;
+ slot->submitted_head = 0;
+}
+
+/**
+ * Find the atom at the idx'th element in the queue without removing it, starting at the head with idx==0.
+ */
+static INLINE kbase_jd_atom *kbasep_jm_peek_idx_submit_slot(kbase_jm_slot *slot, u8 idx)
+{
+ u8 pos;
+ kbase_jd_atom *katom;
+
+ KBASE_DEBUG_ASSERT(idx < BASE_JM_SUBMIT_SLOTS);
+
+ pos = (slot->submitted_head + idx) & BASE_JM_SUBMIT_SLOTS_MASK;
+ katom = slot->submitted[pos];
+
+ return katom;
+}
+
+/**
+ * Pop the front of the submit slot queue.
+ */
+static INLINE kbase_jd_atom *kbasep_jm_dequeue_submit_slot(kbase_jm_slot *slot)
+{
+ u8 pos;
+ kbase_jd_atom *katom;
+
+ pos = slot->submitted_head & BASE_JM_SUBMIT_SLOTS_MASK;
+ katom = slot->submitted[pos];
+ slot->submitted[pos] = NULL; /* Just to catch bugs... */
+ KBASE_DEBUG_ASSERT(katom);
+
+ /* rotate the buffers */
+ slot->submitted_head = (slot->submitted_head + 1) & BASE_JM_SUBMIT_SLOTS_MASK;
+ slot->submitted_nr--;
+
+ KBASE_DEBUG_PRINT_INFO(KBASE_JM, "katom %p new head %u", (void *)katom, (unsigned int)slot->submitted_head);
+
+ return katom;
+}
+
+/**
+ * Pop the back of the submit slot queue (unsubmit a job).
+ */
+static INLINE kbase_jd_atom *kbasep_jm_dequeue_tail_submit_slot(kbase_jm_slot *slot)
+{
+ u8 pos;
+
+ slot->submitted_nr--;
+
+ pos = (slot->submitted_head + slot->submitted_nr) & BASE_JM_SUBMIT_SLOTS_MASK;
+
+ return slot->submitted[pos];
+}
+
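+/**
+ * Query the number of jobs currently tracked on this submit slot.
+ */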
+static INLINE u8 kbasep_jm_nr_jobs_submitted(kbase_jm_slot *slot)
+{
+ return slot->submitted_nr;
+}
+
+/**
+ * Push a job onto the back of the submit slot queue.
+ */
+static INLINE void kbasep_jm_enqueue_submit_slot(kbase_jm_slot *slot, kbase_jd_atom *katom)
+{
+ u8 nr;
+ u8 pos;
+ nr = slot->submitted_nr++;
+ KBASE_DEBUG_ASSERT(nr < BASE_JM_SUBMIT_SLOTS);
+
+ pos = (slot->submitted_head + nr) & BASE_JM_SUBMIT_SLOTS_MASK;
+ slot->submitted[pos] = katom;
+}
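+
+/* Illustrative sketch: the submit slot is a ring buffer of
+ * BASE_JM_SUBMIT_SLOTS entries whose positions wrap via
+ * BASE_JM_SUBMIT_SLOTS_MASK. Assuming BASE_JM_SUBMIT_SLOTS == 16:
+ *
+ *   kbasep_jm_init_submit_slot(slot);         // head == 0, nr == 0
+ *   kbasep_jm_enqueue_submit_slot(slot, a);   // a stored at pos 0
+ *   kbasep_jm_enqueue_submit_slot(slot, b);   // b stored at pos 1
+ *   kbasep_jm_dequeue_submit_slot(slot);      // returns a; head == 1
+ *   kbasep_jm_dequeue_tail_submit_slot(slot); // returns b; nr == 0
+ */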
+
+/**
+ * @brief Query whether a job peeked/dequeued from the submit slots is a
+ * 'dummy' job that is used for hardware workaround purposes.
+ *
+ * Any time a job is peeked/dequeued from the submit slots, this should be
+ * queried on that job.
+ *
+ * If \a atom is indicated as being a dummy job, then you <b>must not attempt
+ * to use \a atom</b>. This is because its members will not necessarily be
+ * initialized, and so could lead to a fault if they were used.
+ *
+ * @param[in] kbdev kbase device pointer
+ * @param[in] atom The atom to query
+ *
+ * @return MALI_TRUE if \a atom is for a dummy job, in which case you must not
+ * attempt to use it.
+ * @return MALI_FALSE otherwise, and \a atom is safe to use.
+ */
+static INLINE mali_bool kbasep_jm_is_dummy_workaround_job(kbase_device *kbdev, kbase_jd_atom *atom)
+{
+ /* Query the set of workaround jobs here */
+ if (kbase_hw_has_issue(kbdev, BASE_HW_ISSUE_8401)) {
+ if (kbasep_8401_is_workaround_job(atom) != MALI_FALSE)
+ return MALI_TRUE;
+ }
+
+ /* This job is not a workaround job, so it will be processed as normal */
+ return MALI_FALSE;
+}
+
+/**
+ * @brief Submit a job to a certain job-slot
+ *
+ * The caller must check kbasep_jm_is_submit_slots_free() != MALI_FALSE before calling this.
+ *
+ * The following locking conditions are made on the caller:
+ * - it must hold the kbasep_js_device_data::runpool_irq::lock
+ */
+void kbase_job_submit_nolock(kbase_device *kbdev, kbase_jd_atom *katom, int js);
+
+/**
+ * @brief Complete the head job on a particular job-slot
+ */
+void kbase_job_done_slot(kbase_device *kbdev, int s, u32 completion_code, u64 job_tail, ktime_t *end_timestamp);
+
+ /** @} *//* end group kbase_jm */
+ /** @} *//* end group base_kbase_api */
+ /** @} *//* end group base_api */
+
+#endif /* _KBASE_JM_H_ */
diff --git a/drivers/gpu/arm/t6xx/kbase/src/common/mali_kbase_js.c b/drivers/gpu/arm/t6xx/kbase/src/common/mali_kbase_js.c
new file mode 100755
index 00000000000..66bf4c170be
--- /dev/null
+++ b/drivers/gpu/arm/t6xx/kbase/src/common/mali_kbase_js.c
@@ -0,0 +1,2122 @@
+/*
+ *
+ * (C) COPYRIGHT 2011-2013 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained
+ * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
+ * Boston, MA 02110-1301, USA.
+ *
+ */
+
+
+
+/*
+ * Job Scheduler Implementation
+ */
+#include <kbase/src/common/mali_kbase.h>
+#include <kbase/src/common/mali_kbase_js.h>
+#include <kbase/src/common/mali_kbase_js_affinity.h>
+#include <kbase/src/common/mali_kbase_gator.h>
+#include <kbase/src/common/mali_kbase_hw.h>
+
+#include "mali_kbase_jm.h"
+#include <kbase/src/common/mali_kbase_defs.h>
+
+/*
+ * Private types
+ */
+
+/** Bitpattern indicating the result of releasing a context */
+enum {
+ /** The context was descheduled - caller should try scheduling in a new one
+ * to keep the runpool full */
+ KBASEP_JS_RELEASE_RESULT_WAS_DESCHEDULED = (1u << 0),
+};
+
+typedef u32 kbasep_js_release_result;
+
+/*
+ * Private function prototypes
+ */
+STATIC INLINE void kbasep_js_deref_permon_check_and_disable_cycle_counter(kbase_device *kbdev, kbase_jd_atom *katom);
+
+STATIC INLINE void kbasep_js_ref_permon_check_and_enable_cycle_counter(kbase_device *kbdev, kbase_jd_atom *katom);
+
+STATIC kbasep_js_release_result kbasep_js_runpool_release_ctx_internal(kbase_device *kbdev, kbase_context *kctx, kbasep_js_atom_retained_state *katom_retained_state);
+
+/** Helper for trace subcodes */
+#if KBASE_TRACE_ENABLE != 0
+STATIC int kbasep_js_trace_get_refcnt(kbase_device *kbdev, kbase_context *kctx)
+{
+ unsigned long flags;
+ kbasep_js_device_data *js_devdata;
+ int as_nr;
+ int refcnt = 0;
+
+ js_devdata = &kbdev->js_data;
+
+ spin_lock_irqsave(&js_devdata->runpool_irq.lock, flags);
+ as_nr = kctx->as_nr;
+ if (as_nr != KBASEP_AS_NR_INVALID) {
+ kbasep_js_per_as_data *js_per_as_data;
+ js_per_as_data = &js_devdata->runpool_irq.per_as_data[as_nr];
+
+ refcnt = js_per_as_data->as_busy_refcount;
+ }
+ spin_unlock_irqrestore(&js_devdata->runpool_irq.lock, flags);
+
+ return refcnt;
+}
+#else /* KBASE_TRACE_ENABLE != 0 */
+STATIC int kbasep_js_trace_get_refcnt(kbase_device *kbdev, kbase_context *kctx)
+{
+ CSTD_UNUSED(kbdev);
+ CSTD_UNUSED(kctx);
+ return 0;
+}
+#endif /* KBASE_TRACE_ENABLE != 0 */
+
+/*
+ * Private types
+ */
+enum {
+ JS_DEVDATA_INIT_NONE = 0,
+ JS_DEVDATA_INIT_CONSTANTS = (1 << 0),
+ JS_DEVDATA_INIT_POLICY = (1 << 1),
+ JS_DEVDATA_INIT_ALL = ((1 << 2) - 1)
+};
+
+enum {
+ JS_KCTX_INIT_NONE = 0,
+ JS_KCTX_INIT_CONSTANTS = (1 << 0),
+ JS_KCTX_INIT_POLICY = (1 << 1),
+ JS_KCTX_INIT_ALL = ((1 << 2) - 1)
+};
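+
+/* init_status in each of the above accumulates one bit per successfully
+ * initialized sub-resource, so *_INIT_ALL is reached only when every stage
+ * succeeded, e.g.:
+ *
+ *   JS_DEVDATA_INIT_CONSTANTS | JS_DEVDATA_INIT_POLICY == JS_DEVDATA_INIT_ALL
+ *
+ * The corresponding _term() functions use it to tear down exactly the stages
+ * that were set up when init fails part-way. */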
+
+/*
+ * Private functions
+ */
+
+/**
+ * Check if the job had performance monitoring enabled and decrement the count. If no jobs require
+ * performance monitoring, then the cycle counters will be disabled in the GPU.
+ *
+ * No locks need to be held - locking is handled further down
+ *
+ * This function does not sleep.
+ */
+
+STATIC INLINE void kbasep_js_deref_permon_check_and_disable_cycle_counter(kbase_device *kbdev, kbase_jd_atom *katom)
+{
+ KBASE_DEBUG_ASSERT(kbdev != NULL);
+ KBASE_DEBUG_ASSERT(katom != NULL);
+
+ if (katom->core_req & BASE_JD_REQ_PERMON)
+ kbase_pm_release_gpu_cycle_counter(kbdev);
+}
+
+/**
+ * Check if the job has performance monitoring enabled and keep a count of it. If at least one
+ * job requires performance monitoring, then the cycle counters will be enabled in the GPU.
+ *
+ * No locks need to be held - locking is handled further down
+ *
+ * This function does not sleep.
+ */
+
+STATIC INLINE void kbasep_js_ref_permon_check_and_enable_cycle_counter(kbase_device *kbdev, kbase_jd_atom *katom)
+{
+ KBASE_DEBUG_ASSERT(kbdev != NULL);
+ KBASE_DEBUG_ASSERT(katom != NULL);
+
+ if (katom->core_req & BASE_JD_REQ_PERMON)
+ kbase_pm_request_gpu_cycle_counter(kbdev);
+}
+
+/*
+ * The following locking conditions are made on the caller:
+ * - The caller must hold the kbasep_js_kctx_info::ctx::jsctx_mutex.
+ * - The caller must hold the kbasep_js_device_data::runpool_mutex
+ */
+STATIC INLINE void runpool_inc_context_count(kbase_device *kbdev, kbase_context *kctx)
+{
+ kbasep_js_device_data *js_devdata;
+ kbasep_js_kctx_info *js_kctx_info;
+ KBASE_DEBUG_ASSERT(kbdev != NULL);
+ KBASE_DEBUG_ASSERT(kctx != NULL);
+
+ js_devdata = &kbdev->js_data;
+ js_kctx_info = &kctx->jctx.sched_info;
+
+ BUG_ON(!mutex_is_locked(&js_kctx_info->ctx.jsctx_mutex));
+ BUG_ON(!mutex_is_locked(&js_devdata->runpool_mutex));
+
+ /* Track total contexts */
+ ++(js_devdata->nr_all_contexts_running);
+
+ if ((js_kctx_info->ctx.flags & KBASE_CTX_FLAG_SUBMIT_DISABLED) == 0) {
+ /* Track contexts that can submit jobs */
+ ++(js_devdata->nr_user_contexts_running);
+ }
+}
+
+/*
+ * The following locking conditions are made on the caller:
+ * - The caller must hold the kbasep_js_kctx_info::ctx::jsctx_mutex.
+ * - The caller must hold the kbasep_js_device_data::runpool_mutex
+ */
+STATIC INLINE void runpool_dec_context_count(kbase_device *kbdev, kbase_context *kctx)
+{
+ kbasep_js_device_data *js_devdata;
+ kbasep_js_kctx_info *js_kctx_info;
+ KBASE_DEBUG_ASSERT(kbdev != NULL);
+ KBASE_DEBUG_ASSERT(kctx != NULL);
+
+ js_devdata = &kbdev->js_data;
+ js_kctx_info = &kctx->jctx.sched_info;
+
+ BUG_ON(!mutex_is_locked(&js_kctx_info->ctx.jsctx_mutex));
+ BUG_ON(!mutex_is_locked(&js_devdata->runpool_mutex));
+
+ /* Track total contexts */
+ --(js_devdata->nr_all_contexts_running);
+
+ if ((js_kctx_info->ctx.flags & KBASE_CTX_FLAG_SUBMIT_DISABLED) == 0) {
+ /* Track contexts that can submit jobs */
+ --(js_devdata->nr_user_contexts_running);
+ }
+}
+
+/**
+ * @brief check whether the runpool is full for a specified context
+ *
+ * If kctx == NULL, then this makes the least restrictive check on the
+ * runpool. A specific context that is supplied immediately after could fail
+ * the check, even under the same conditions.
+ *
+ * Therefore, once a context is obtained you \b must re-check it with this
+ * function, since the return value could change to MALI_FALSE.
+ *
+ * The following locking conditions are made on the caller:
+ * - In all cases, the caller must hold kbasep_js_device_data::runpool_mutex
+ * - When kctx != NULL the caller must hold the kbasep_js_kctx_info::ctx::jsctx_mutex.
+ * - When kctx == NULL, then the caller need not hold any jsctx_mutex locks (but it doesn't do any harm to do so).
+ */
+STATIC mali_bool check_is_runpool_full(kbase_device *kbdev, kbase_context *kctx)
+{
+ kbasep_js_device_data *js_devdata;
+ mali_bool is_runpool_full;
+ KBASE_DEBUG_ASSERT(kbdev != NULL);
+
+ js_devdata = &kbdev->js_data;
+ BUG_ON(!mutex_is_locked(&js_devdata->runpool_mutex));
+
+ /* Regardless of whether a context is submitting or not, we can't have more
+ * contexts running than there are HW address spaces */
+ is_runpool_full = (mali_bool) (js_devdata->nr_all_contexts_running >= kbdev->nr_hw_address_spaces);
+
+ if (kctx != NULL && (kctx->jctx.sched_info.ctx.flags & KBASE_CTX_FLAG_SUBMIT_DISABLED) == 0) {
+ BUG_ON(!mutex_is_locked(&kctx->jctx.sched_info.ctx.jsctx_mutex));
+ /* Contexts that submit might use fewer of the available address spaces due to HW
+ * workarounds, in which case the runpool is also full when the number of
+ * submitting contexts exceeds the number of submittable address spaces.
+ *
+ * Both checks must be made: we can have nr_user_address_spaces == nr_hw_address_spaces,
+ * and at the same time nr_user_contexts_running < nr_all_contexts_running. */
+ is_runpool_full |= (mali_bool) (js_devdata->nr_user_contexts_running >= kbdev->nr_user_address_spaces);
+ }
+
+ return is_runpool_full;
+}
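+
+/* Worked example (numbers assumed for illustration): with 4 HW address
+ * spaces but nr_user_address_spaces forced to 1 by the BASE_HW_ISSUE_8987
+ * workaround, the runpool is already full for a second submitting context
+ * even though three HW address spaces remain unused. */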
+
+STATIC base_jd_core_req core_reqs_from_jsn_features(u16 features) /* JS<n>_FEATURE register value */
+{
+ base_jd_core_req core_req = 0u;
+
+ if ((features & JSn_FEATURE_SET_VALUE_JOB) != 0)
+ core_req |= BASE_JD_REQ_V;
+
+ if ((features & JSn_FEATURE_CACHE_FLUSH_JOB) != 0)
+ core_req |= BASE_JD_REQ_CF;
+
+ if ((features & JSn_FEATURE_COMPUTE_JOB) != 0)
+ core_req |= BASE_JD_REQ_CS;
+
+ if ((features & JSn_FEATURE_TILER_JOB) != 0)
+ core_req |= BASE_JD_REQ_T;
+
+ if ((features & JSn_FEATURE_FRAGMENT_JOB) != 0)
+ core_req |= BASE_JD_REQ_FS;
+
+ return core_req;
+}
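+
+/* Example (illustrative): a JS<n>_FEATURE value with only the COMPUTE and
+ * TILER feature bits set yields (BASE_JD_REQ_CS | BASE_JD_REQ_T). */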
+
+/**
+ * Picks and reserves an address space.
+ *
+ * When this function returns, the address space returned is reserved and
+ * cannot be picked for another context until it is released.
+ *
+ * The caller must ensure there \b is a free address space before calling this.
+ *
+ * The following locking conditions are made on the caller:
+ * - it must hold kbasep_js_device_data::runpool_mutex
+ *
+ * @return a non-NULL pointer to a kbase_as that is not in use by any other context
+ */
+STATIC kbase_as *pick_free_addr_space(kbase_device *kbdev)
+{
+ kbasep_js_device_data *js_devdata;
+ kbase_as *current_as;
+ long ffs_result;
+ js_devdata = &kbdev->js_data;
+
+ lockdep_assert_held(&js_devdata->runpool_mutex);
+
+ /* Find the free address space */
+ ffs_result = ffs(js_devdata->as_free) - 1;
+
+ /* ASSERT that we should've found a free one */
+ KBASE_DEBUG_ASSERT(0 <= ffs_result && ffs_result < kbdev->nr_hw_address_spaces);
+ /* Ensure no-one else picks this one */
+ js_devdata->as_free &= ~((u16) (1u << ffs_result));
+
+ current_as = &kbdev->as[ffs_result];
+
+ return current_as;
+}
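+
+/* Illustrative trace of the allocation above: if as_free == 0b0110 (address
+ * spaces 1 and 2 free), ffs() returns 2, so ffs_result == 1; address space 1
+ * is reserved and as_free becomes 0b0100. */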
+
+/**
+ * Release an address space, making it available for being picked again.
+ *
+ * The following locking conditions are made on the caller:
+ * - it must hold kbasep_js_device_data::runpool_mutex
+ */
+STATIC INLINE void release_addr_space(kbase_device *kbdev, int kctx_as_nr)
+{
+ kbasep_js_device_data *js_devdata;
+ u16 as_bit = (1u << kctx_as_nr);
+
+ js_devdata = &kbdev->js_data;
+ lockdep_assert_held(&js_devdata->runpool_mutex);
+
+ /* The address space must not already be free */
+ KBASE_DEBUG_ASSERT(!(js_devdata->as_free & as_bit));
+
+ js_devdata->as_free |= as_bit;
+}
+
+/**
+ * Assign an Address Space (AS) to a context, and add the context to the Policy.
+ *
+ * This includes:
+ * - setting up the global runpool_irq structure and the context on the AS
+ * - Activating the MMU on the AS
+ * - Allowing jobs to be submitted on the AS
+ *
+ * Locking conditions:
+ * - Caller must hold the kbasep_js_kctx_info::jsctx_mutex
+ * - Caller must hold the kbasep_js_device_data::runpool_mutex
+ * - Caller must hold AS transaction mutex
+ * - Caller must hold Runpool IRQ lock
+ */
+STATIC void assign_and_activate_kctx_addr_space(kbase_device *kbdev, kbase_context *kctx, kbase_as *current_as)
+{
+ kbasep_js_device_data *js_devdata;
+ kbasep_js_per_as_data *js_per_as_data;
+ int as_nr;
+
+ KBASE_DEBUG_ASSERT(kbdev != NULL);
+ KBASE_DEBUG_ASSERT(kctx != NULL);
+ KBASE_DEBUG_ASSERT(current_as != NULL);
+
+ js_devdata = &kbdev->js_data;
+ as_nr = current_as->number;
+
+ lockdep_assert_held(&kctx->jctx.sched_info.ctx.jsctx_mutex);
+ lockdep_assert_held(&js_devdata->runpool_mutex);
+ lockdep_assert_held(&current_as->transaction_mutex);
+ lockdep_assert_held(&js_devdata->runpool_irq.lock);
+
+ js_per_as_data = &js_devdata->runpool_irq.per_as_data[as_nr];
+
+ /* Attribute handling */
+ kbasep_js_ctx_attr_runpool_retain_ctx(kbdev, kctx);
+
+ /* Assign addr space */
+ kctx->as_nr = as_nr;
+#ifdef CONFIG_MALI_GATOR_SUPPORT
+ kbase_trace_mali_mmu_as_in_use(kctx->as_nr);
+#endif /* CONFIG_MALI_GATOR_SUPPORT */
+ /* Activate this address space on the MMU */
+ kbase_mmu_update(kctx);
+
+ /* Allow it to run jobs */
+ kbasep_js_set_submit_allowed(js_devdata, kctx);
+
+ /* Book-keeping */
+ js_per_as_data->kctx = kctx;
+ js_per_as_data->as_busy_refcount = 0;
+
+ /* Lastly, add the context to the policy's runpool - this really allows it to run jobs */
+ kbasep_js_policy_runpool_add_ctx(&js_devdata->policy, kctx);
+}
+
+void kbasep_js_try_run_next_job_nolock(kbase_device *kbdev)
+{
+ kbasep_js_device_data *js_devdata;
+ int js;
+
+ KBASE_DEBUG_ASSERT(kbdev != NULL);
+ js_devdata = &kbdev->js_data;
+ lockdep_assert_held(&js_devdata->runpool_mutex);
+ lockdep_assert_held(&js_devdata->runpool_irq.lock);
+
+ /* It's cheap and simple to retest this here - otherwise we burden the
+ * caller with it. In some cases, we do this higher up to optimize out the
+ * spinlock. */
+ if (js_devdata->nr_user_contexts_running == 0)
+ return; /* No contexts present - the GPU might be powered off, so just return */
+
+ for (js = 0; js < kbdev->gpu_props.num_job_slots; ++js)
+ kbasep_js_try_run_next_job_on_slot_nolock(kbdev, js);
+}
+
+/** Hold the kbasep_js_device_data::runpool_irq::lock for this */
+mali_bool kbasep_js_runpool_retain_ctx_nolock(kbase_device *kbdev, kbase_context *kctx)
+{
+ kbasep_js_device_data *js_devdata;
+ kbasep_js_per_as_data *js_per_as_data;
+ mali_bool result = MALI_FALSE;
+ int as_nr;
+ KBASE_DEBUG_ASSERT(kbdev != NULL);
+ KBASE_DEBUG_ASSERT(kctx != NULL);
+ js_devdata = &kbdev->js_data;
+
+ as_nr = kctx->as_nr;
+ if (as_nr != KBASEP_AS_NR_INVALID) {
+ int new_refcnt;
+
+ KBASE_DEBUG_ASSERT(as_nr >= 0);
+ js_per_as_data = &js_devdata->runpool_irq.per_as_data[as_nr];
+
+ KBASE_DEBUG_ASSERT(js_per_as_data->kctx != NULL);
+
+ new_refcnt = ++(js_per_as_data->as_busy_refcount);
+ KBASE_TRACE_ADD_REFCOUNT(kbdev, JS_RETAIN_CTX_NOLOCK, kctx, NULL, 0u, new_refcnt);
+ result = MALI_TRUE;
+ }
+
+ return result;
+}
+
+/*
+ * Functions private to KBase ('Protected' functions)
+ */
+void kbase_js_try_run_jobs(kbase_device *kbdev)
+{
+ kbasep_js_device_data *js_devdata;
+ unsigned long flags;
+
+ KBASE_DEBUG_ASSERT(kbdev != NULL);
+ js_devdata = &kbdev->js_data;
+
+ mutex_lock(&js_devdata->runpool_mutex);
+ if (js_devdata->nr_user_contexts_running != 0) {
+ /* Only try running jobs when we have contexts present, otherwise the GPU might be powered off. */
+ spin_lock_irqsave(&js_devdata->runpool_irq.lock, flags);
+
+ kbasep_js_try_run_next_job_nolock(kbdev);
+
+ spin_unlock_irqrestore(&js_devdata->runpool_irq.lock, flags);
+ }
+ mutex_unlock(&js_devdata->runpool_mutex);
+}
+
+void kbase_js_try_run_jobs_on_slot(kbase_device *kbdev, int js)
+{
+ unsigned long flags;
+ kbasep_js_device_data *js_devdata;
+
+ KBASE_DEBUG_ASSERT(kbdev != NULL);
+ js_devdata = &kbdev->js_data;
+
+ mutex_lock(&js_devdata->runpool_mutex);
+ if (js_devdata->nr_user_contexts_running != 0) {
+ /* Only try running jobs when we have contexts present, otherwise the GPU might be powered off. */
+ spin_lock_irqsave(&js_devdata->runpool_irq.lock, flags);
+
+ kbasep_js_try_run_next_job_on_slot_nolock(kbdev, js);
+
+ spin_unlock_irqrestore(&js_devdata->runpool_irq.lock, flags);
+ }
+ mutex_unlock(&js_devdata->runpool_mutex);
+}
+
+mali_error kbasep_js_devdata_init(kbase_device * const kbdev)
+{
+ kbasep_js_device_data *js_devdata;
+ mali_error err;
+ int i;
+ u16 as_present;
+
+ KBASE_DEBUG_ASSERT(kbdev != NULL);
+
+ js_devdata = &kbdev->js_data;
+
+ KBASE_DEBUG_ASSERT(js_devdata->init_status == JS_DEVDATA_INIT_NONE);
+
+ /* These two must be recalculated if nr_hw_address_spaces changes (e.g. for HW workarounds) */
+ as_present = (1U << kbdev->nr_hw_address_spaces) - 1;
+ kbdev->nr_user_address_spaces = kbdev->nr_hw_address_spaces;
+ if (kbase_hw_has_issue(kbdev, BASE_HW_ISSUE_8987)) {
+ mali_bool use_workaround_for_security;
+ use_workaround_for_security = (mali_bool) kbasep_get_config_value(kbdev, kbdev->config_attributes, KBASE_CONFIG_ATTR_SECURE_BUT_LOSS_OF_PERFORMANCE);
+ if (use_workaround_for_security != MALI_FALSE) {
+ KBASE_DEBUG_PRINT(KBASE_JM, "GPU has HW ISSUE 8987, and driver configured for security workaround: 1 address space only");
+ kbdev->nr_user_address_spaces = 1;
+ }
+ }
+#ifdef CONFIG_MALI_DEBUG
+ /* Soft-stop will be disabled on a single context by default unless softstop_always is set */
+ js_devdata->softstop_always = MALI_FALSE;
+#endif /* CONFIG_MALI_DEBUG */
+ js_devdata->nr_all_contexts_running = 0;
+ js_devdata->nr_user_contexts_running = 0;
+ js_devdata->as_free = as_present; /* All ASs initially free */
+ js_devdata->runpool_irq.submit_allowed = 0u; /* No ctx allowed to submit */
+ memset(js_devdata->runpool_irq.ctx_attr_ref_count, 0, sizeof(js_devdata->runpool_irq.ctx_attr_ref_count));
+ memset(js_devdata->runpool_irq.slot_affinities, 0, sizeof(js_devdata->runpool_irq.slot_affinities));
+ js_devdata->runpool_irq.slots_blocked_on_affinity = 0u;
+ memset(js_devdata->runpool_irq.slot_affinity_refcount, 0, sizeof(js_devdata->runpool_irq.slot_affinity_refcount));
+ INIT_LIST_HEAD(&js_devdata->suspended_soft_jobs_list);
+
+ /* Config attributes */
+ js_devdata->scheduling_tick_ns = (u32) kbasep_get_config_value(kbdev, kbdev->config_attributes, KBASE_CONFIG_ATTR_JS_SCHEDULING_TICK_NS);
+ js_devdata->soft_stop_ticks = (u32) kbasep_get_config_value(kbdev, kbdev->config_attributes, KBASE_CONFIG_ATTR_JS_SOFT_STOP_TICKS);
+ js_devdata->hard_stop_ticks_ss = (u32) kbasep_get_config_value(kbdev, kbdev->config_attributes, KBASE_CONFIG_ATTR_JS_HARD_STOP_TICKS_SS);
+ js_devdata->hard_stop_ticks_nss = (u32) kbasep_get_config_value(kbdev, kbdev->config_attributes, KBASE_CONFIG_ATTR_JS_HARD_STOP_TICKS_NSS);
+ js_devdata->gpu_reset_ticks_ss = (u32) kbasep_get_config_value(kbdev, kbdev->config_attributes, KBASE_CONFIG_ATTR_JS_RESET_TICKS_SS);
+ js_devdata->gpu_reset_ticks_nss = (u32) kbasep_get_config_value(kbdev, kbdev->config_attributes, KBASE_CONFIG_ATTR_JS_RESET_TICKS_NSS);
+ js_devdata->ctx_timeslice_ns = (u32) kbasep_get_config_value(kbdev, kbdev->config_attributes, KBASE_CONFIG_ATTR_JS_CTX_TIMESLICE_NS);
+ js_devdata->cfs_ctx_runtime_init_slices = (u32) kbasep_get_config_value(kbdev, kbdev->config_attributes, KBASE_CONFIG_ATTR_JS_CFS_CTX_RUNTIME_INIT_SLICES);
+ js_devdata->cfs_ctx_runtime_min_slices = (u32) kbasep_get_config_value(kbdev, kbdev->config_attributes, KBASE_CONFIG_ATTR_JS_CFS_CTX_RUNTIME_MIN_SLICES);
+
+ KBASE_DEBUG_PRINT_INFO(KBASE_JM, "JS Config Attribs: ");
+ KBASE_DEBUG_PRINT_INFO(KBASE_JM, "\tjs_devdata->scheduling_tick_ns:%u", js_devdata->scheduling_tick_ns);
+ KBASE_DEBUG_PRINT_INFO(KBASE_JM, "\tjs_devdata->soft_stop_ticks:%u", js_devdata->soft_stop_ticks);
+ KBASE_DEBUG_PRINT_INFO(KBASE_JM, "\tjs_devdata->hard_stop_ticks_ss:%u", js_devdata->hard_stop_ticks_ss);
+ KBASE_DEBUG_PRINT_INFO(KBASE_JM, "\tjs_devdata->hard_stop_ticks_nss:%u", js_devdata->hard_stop_ticks_nss);
+ KBASE_DEBUG_PRINT_INFO(KBASE_JM, "\tjs_devdata->gpu_reset_ticks_ss:%u", js_devdata->gpu_reset_ticks_ss);
+ KBASE_DEBUG_PRINT_INFO(KBASE_JM, "\tjs_devdata->gpu_reset_ticks_nss:%u", js_devdata->gpu_reset_ticks_nss);
+ KBASE_DEBUG_PRINT_INFO(KBASE_JM, "\tjs_devdata->ctx_timeslice_ns:%u", js_devdata->ctx_timeslice_ns);
+ KBASE_DEBUG_PRINT_INFO(KBASE_JM, "\tjs_devdata->cfs_ctx_runtime_init_slices:%u", js_devdata->cfs_ctx_runtime_init_slices);
+ KBASE_DEBUG_PRINT_INFO(KBASE_JM, "\tjs_devdata->cfs_ctx_runtime_min_slices:%u", js_devdata->cfs_ctx_runtime_min_slices);
+
+#if KBASE_DISABLE_SCHEDULING_SOFT_STOPS != 0
+ KBASE_DEBUG_PRINT(KBASE_JM, "Job Scheduling Policy Soft-stops disabled, ignoring value for soft_stop_ticks==%u at %uns per tick. Other soft-stops may still occur.", js_devdata->soft_stop_ticks, js_devdata->scheduling_tick_ns);
+#endif
+#if KBASE_DISABLE_SCHEDULING_HARD_STOPS != 0
+ KBASE_DEBUG_PRINT(KBASE_JM, "Job Scheduling Policy Hard-stops disabled, ignoring values for hard_stop_ticks_ss==%d and hard_stop_ticks_nss==%u at %uns per tick. Other hard-stops may still occur.", js_devdata->hard_stop_ticks_ss, js_devdata->hard_stop_ticks_nss, js_devdata->scheduling_tick_ns);
+#endif
+#if KBASE_DISABLE_SCHEDULING_SOFT_STOPS != 0 && KBASE_DISABLE_SCHEDULING_HARD_STOPS != 0
+ KBASE_DEBUG_PRINT(KBASE_JM, "Note: The JS policy's tick timer (if coded) will still be run, but do nothing.");
+#endif
+
+ /* Set up the number of IRQ throttle cycles based on the given time */
+ {
+ int irq_throttle_time_us = kbdev->gpu_props.irq_throttle_time_us;
+ int irq_throttle_cycles = kbasep_js_convert_us_to_gpu_ticks_max_freq(kbdev, irq_throttle_time_us);
+ atomic_set(&kbdev->irq_throttle_cycles, irq_throttle_cycles);
+ }
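+
+ /* Example (clock assumed for illustration): a 20 us throttle time on a GPU
+ * whose maximum clock is 500 MHz corresponds to 20 * 500 = 10000 GPU cycles
+ * between throttled IRQs. */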
+
+ /* Clear the AS data, including setting NULL pointers */
+ memset(&js_devdata->runpool_irq.per_as_data[0], 0, sizeof(js_devdata->runpool_irq.per_as_data));
+
+ for (i = 0; i < kbdev->gpu_props.num_job_slots; ++i)
+ js_devdata->js_reqs[i] = core_reqs_from_jsn_features(kbdev->gpu_props.props.raw_props.js_features[i]);
+
+ js_devdata->init_status |= JS_DEVDATA_INIT_CONSTANTS;
+
+ /* On error, we could continue: provided none of the resources below
+ * rely on the ones above */
+
+ mutex_init(&js_devdata->runpool_mutex);
+ mutex_init(&js_devdata->queue_mutex);
+ spin_lock_init(&js_devdata->runpool_irq.lock);
+
+ err = kbasep_js_policy_init(kbdev);
+ if (err == MALI_ERROR_NONE)
+ js_devdata->init_status |= JS_DEVDATA_INIT_POLICY;
+
+ /* On error, do no cleanup; this will be handled by the caller(s), since
+ * we've designed this resource to be safe to terminate on init-fail */
+ if (js_devdata->init_status != JS_DEVDATA_INIT_ALL)
+ return MALI_ERROR_FUNCTION_FAILED;
+
+ return MALI_ERROR_NONE;
+}
+
+void kbasep_js_devdata_halt(kbase_device *kbdev)
+{
+ CSTD_UNUSED(kbdev);
+}
+
+void kbasep_js_devdata_term(kbase_device *kbdev)
+{
+ kbasep_js_device_data *js_devdata;
+
+ KBASE_DEBUG_ASSERT(kbdev != NULL);
+
+ js_devdata = &kbdev->js_data;
+
+ if ((js_devdata->init_status & JS_DEVDATA_INIT_CONSTANTS)) {
+ s8 zero_ctx_attr_ref_count[KBASEP_JS_CTX_ATTR_COUNT] = { 0, };
+ /* The caller must de-register all contexts before calling this */
+ KBASE_DEBUG_ASSERT(js_devdata->nr_all_contexts_running == 0);
+ KBASE_DEBUG_ASSERT(memcmp(js_devdata->runpool_irq.ctx_attr_ref_count, zero_ctx_attr_ref_count, sizeof(js_devdata->runpool_irq.ctx_attr_ref_count)) == 0);
+ CSTD_UNUSED(zero_ctx_attr_ref_count);
+ }
+ if ((js_devdata->init_status & JS_DEVDATA_INIT_POLICY))
+ kbasep_js_policy_term(&js_devdata->policy);
+
+ js_devdata->init_status = JS_DEVDATA_INIT_NONE;
+}
+
+mali_error kbasep_js_kctx_init(kbase_context * const kctx)
+{
+ kbase_device *kbdev;
+ kbasep_js_kctx_info *js_kctx_info;
+ mali_error err;
+
+ KBASE_DEBUG_ASSERT(kctx != NULL);
+
+ kbdev = kctx->kbdev;
+ KBASE_DEBUG_ASSERT(kbdev != NULL);
+
+ js_kctx_info = &kctx->jctx.sched_info;
+ KBASE_DEBUG_ASSERT(js_kctx_info->init_status == JS_KCTX_INIT_NONE);
+
+ js_kctx_info->ctx.nr_jobs = 0;
+ js_kctx_info->ctx.is_scheduled = MALI_FALSE;
+ js_kctx_info->ctx.is_dying = MALI_FALSE;
+ memset(js_kctx_info->ctx.ctx_attr_ref_count, 0, sizeof(js_kctx_info->ctx.ctx_attr_ref_count));
+
+ /* Initially, the context is disabled from submission until the create flags are set */
+ js_kctx_info->ctx.flags = KBASE_CTX_FLAG_SUBMIT_DISABLED;
+
+ js_kctx_info->init_status |= JS_KCTX_INIT_CONSTANTS;
+
+ /* On error, we could continue: provided none of the resources below
+ * rely on the ones above */
+ mutex_init(&js_kctx_info->ctx.jsctx_mutex);
+
+ init_waitqueue_head(&js_kctx_info->ctx.is_scheduled_wait);
+
+ err = kbasep_js_policy_init_ctx(kbdev, kctx);
+ if (err == MALI_ERROR_NONE)
+ js_kctx_info->init_status |= JS_KCTX_INIT_POLICY;
+
+ /* On error, do no cleanup; this will be handled by the caller(s), since
+ * we've designed this resource to be safe to terminate on init-fail */
+ if (js_kctx_info->init_status != JS_KCTX_INIT_ALL)
+ return MALI_ERROR_FUNCTION_FAILED;
+
+ return MALI_ERROR_NONE;
+}
+
+void kbasep_js_kctx_term(kbase_context *kctx)
+{
+ kbase_device *kbdev;
+ kbasep_js_kctx_info *js_kctx_info;
+ kbasep_js_policy *js_policy;
+
+ KBASE_DEBUG_ASSERT(kctx != NULL);
+
+ kbdev = kctx->kbdev;
+ KBASE_DEBUG_ASSERT(kbdev != NULL);
+
+ js_policy = &kbdev->js_data.policy;
+ js_kctx_info = &kctx->jctx.sched_info;
+
+ if ((js_kctx_info->init_status & JS_KCTX_INIT_CONSTANTS)) {
+ /* The caller must de-register all jobs before calling this */
+ KBASE_DEBUG_ASSERT(js_kctx_info->ctx.is_scheduled == MALI_FALSE);
+ KBASE_DEBUG_ASSERT(js_kctx_info->ctx.nr_jobs == 0);
+ }
+
+ if ((js_kctx_info->init_status & JS_KCTX_INIT_POLICY))
+ kbasep_js_policy_term_ctx(js_policy, kctx);
+
+ js_kctx_info->init_status = JS_KCTX_INIT_NONE;
+}
+
+/* Evict jobs from the NEXT registers
+ *
+ * The caller must hold:
+ * - kbasep_js_kctx_info::ctx::jsctx_mutex
+ * - kbasep_js_device_data::runpool_mutex
+ */
+STATIC void kbasep_js_runpool_evict_next_jobs(kbase_device *kbdev, kbase_context *kctx)
+{
+ unsigned long flags;
+ int js;
+ kbasep_js_device_data *js_devdata;
+
+ js_devdata = &kbdev->js_data;
+
+ BUG_ON(!mutex_is_locked(&kctx->jctx.sched_info.ctx.jsctx_mutex));
+ BUG_ON(!mutex_is_locked(&js_devdata->runpool_mutex));
+
+ /* Take the runpool IRQ lock to serialize against submission */
+ spin_lock_irqsave(&js_devdata->runpool_irq.lock, flags);
+
+ /* There's no need to explicitly prevent contexts in the runpool from
+ * submitting jobs, because we complete this operation before we release
+ * the runpool_irq.lock */
+
+ /* Evict jobs from the NEXT registers */
+ for (js = 0; js < kbdev->gpu_props.num_job_slots; js++) {
+ kbase_jm_slot *slot;
+ kbase_jd_atom *tail;
+
+ if (!kbase_reg_read(kbdev, JOB_SLOT_REG(js, JSn_COMMAND_NEXT), NULL)) {
+ /* No job in the NEXT register */
+ continue;
+ }
+
+ slot = &kbdev->jm_slots[js];
+ tail = kbasep_jm_peek_idx_submit_slot(slot, slot->submitted_nr - 1);
+
+ KBASE_TIMELINE_TRY_SOFT_STOP(kctx, js, 1);
+ /* Clearing job from next registers */
+ kbase_reg_write(kbdev, JOB_SLOT_REG(js, JSn_COMMAND_NEXT), JSn_COMMAND_NOP, NULL);
+
+ /* Check to see if we did remove a job from the next registers */
+ if (kbase_reg_read(kbdev, JOB_SLOT_REG(js, JSn_HEAD_NEXT_LO), NULL) != 0 || kbase_reg_read(kbdev, JOB_SLOT_REG(js, JSn_HEAD_NEXT_HI), NULL) != 0) {
+ /* The job was successfully cleared from the next registers, requeue it */
+ kbase_jd_atom *dequeued_katom = kbasep_jm_dequeue_tail_submit_slot(slot);
+ KBASE_DEBUG_ASSERT(dequeued_katom == tail);
+
+ /* Set the next registers to NULL */
+ kbase_reg_write(kbdev, JOB_SLOT_REG(js, JSn_HEAD_NEXT_LO), 0, NULL);
+ kbase_reg_write(kbdev, JOB_SLOT_REG(js, JSn_HEAD_NEXT_HI), 0, NULL);
+
+ KBASE_TRACE_ADD_SLOT(kbdev, JM_SLOT_EVICT, dequeued_katom->kctx, dequeued_katom, dequeued_katom->jc, js);
+
+ /* Complete the job, indicate that it took no time, and don't start
+ * new atoms */
+ kbase_jd_done(dequeued_katom, js, NULL, KBASE_JS_ATOM_DONE_EVICTED_FROM_NEXT);
+ }
+ KBASE_TIMELINE_TRY_SOFT_STOP(kctx, js, 0);
+ }
+ spin_unlock_irqrestore(&js_devdata->runpool_irq.lock, flags);
+}
+
+/**
+ * Fast-start a higher priority job.
+ * If the runpool is full, a lower priority context with no running jobs
+ * will be evicted from the runpool.
+ *
+ * If \a kctx_new is NULL, the first context with no running jobs will be evicted
+ *
+ * The following locking conditions are made on the caller:
+ * - The caller must \b not hold \a kctx_new's
+ * kbasep_js_kctx_info::ctx::jsctx_mutex, or that mutex of any ctx in the
+ * runpool. This is because \a kctx_new's jsctx_mutex and one of the other
+ * scheduled ctx's jsctx_mutex will be obtained internally.
+ * - it must \em not hold kbasep_js_device_data::runpool_irq::lock (as this will be
+ * obtained internally)
+ * - it must \em not hold kbasep_js_device_data::runpool_mutex (as this will be
+ * obtained internally)
+ * - it must \em not hold kbasep_js_device_data::queue_mutex (again, it's used
+ * internally).
+ */
+STATIC void kbasep_js_runpool_attempt_fast_start_ctx(kbase_device *kbdev, kbase_context *kctx_new)
+{
+ unsigned long flags;
+ kbasep_js_device_data *js_devdata;
+ kbasep_js_kctx_info *js_kctx_new;
+ kbasep_js_policy *js_policy;
+ kbasep_js_per_as_data *js_per_as_data;
+ int evict_as_nr;
+ kbasep_js_atom_retained_state katom_retained_state;
+
+ KBASE_DEBUG_ASSERT(kbdev != NULL);
+
+ js_devdata = &kbdev->js_data;
+ js_policy = &kbdev->js_data.policy;
+
+ if (kctx_new != NULL) {
+ js_kctx_new = &kctx_new->jctx.sched_info;
+ mutex_lock(&js_kctx_new->ctx.jsctx_mutex);
+ } else {
+ js_kctx_new = NULL;
+ CSTD_UNUSED(js_kctx_new);
+ }
+
+ /* Setup a dummy katom_retained_state */
+ kbasep_js_atom_retained_state_init_invalid(&katom_retained_state);
+
+ mutex_lock(&js_devdata->runpool_mutex);
+
+ /* If the runpool is full and the new context (if any) is not dying, attempt to fast-start our context */
+ if (check_is_runpool_full(kbdev, kctx_new) != MALI_FALSE
+ && (kctx_new == NULL || !js_kctx_new->ctx.is_dying)) {
+ /* No free address spaces - attempt to evict non-running lower priority context */
+ spin_lock_irqsave(&js_devdata->runpool_irq.lock, flags);
+ for (evict_as_nr = 0; evict_as_nr < kbdev->nr_hw_address_spaces; evict_as_nr++) {
+ kbase_context *kctx_evict;
+ js_per_as_data = &js_devdata->runpool_irq.per_as_data[evict_as_nr];
+ kctx_evict = js_per_as_data->kctx;
+
+ /* Look for the AS which is not currently running */
+ if (0 == js_per_as_data->as_busy_refcount && kctx_evict != NULL) {
+ /* Now compare the scheduled priority we are considering evicting with the new ctx priority
+ * and take into consideration if the scheduled priority is a realtime policy or not.
+ * Note that the lower the number, the higher the priority
+ */
+ if ((kctx_new == NULL) || kbasep_js_policy_ctx_has_priority(js_policy, kctx_evict, kctx_new)) {
+ mali_bool retain_result;
+ kbasep_js_release_result release_result;
+ KBASE_TRACE_ADD(kbdev, JS_FAST_START_EVICTS_CTX, kctx_evict, NULL, 0u, (u32) kctx_new);
+
+ /* Retain the ctx to work on it - this shouldn't be able to fail */
+ retain_result = kbasep_js_runpool_retain_ctx_nolock(kbdev, kctx_evict);
+ KBASE_DEBUG_ASSERT(retain_result != MALI_FALSE);
+ CSTD_UNUSED(retain_result);
+
+ /* This will cause the context to be scheduled out on the next runpool_release_ctx(),
+ * and also stop its refcount increasing */
+ kbasep_js_clear_submit_allowed(js_devdata, kctx_evict);
+
+ spin_unlock_irqrestore(&js_devdata->runpool_irq.lock, flags);
+ mutex_unlock(&js_devdata->runpool_mutex);
+ if (kctx_new != NULL)
+ mutex_unlock(&js_kctx_new->ctx.jsctx_mutex);
+
+ /* Stop working on the target context, start working on the kctx_evict context */
+
+ mutex_lock(&kctx_evict->jctx.sched_info.ctx.jsctx_mutex);
+ mutex_lock(&js_devdata->runpool_mutex);
+ release_result = kbasep_js_runpool_release_ctx_internal(kbdev, kctx_evict, &katom_retained_state);
+ mutex_unlock(&js_devdata->runpool_mutex);
+ /* Only requeue if actually descheduled, which is more robust in case
+ * something else retains it (e.g. two high priority contexts racing
+ * to evict the same lower priority context) */
+ if ((release_result & KBASEP_JS_RELEASE_RESULT_WAS_DESCHEDULED) != 0u)
+ kbasep_js_runpool_requeue_or_kill_ctx(kbdev, kctx_evict, MALI_TRUE);
+
+ mutex_unlock(&kctx_evict->jctx.sched_info.ctx.jsctx_mutex);
+
+ /* release_result isn't propagated further:
+ * - the caller will be scheduling in a context anyway
+ * - which will also cause new jobs to run */
+
+ /* ctx fast start has taken place */
+ return;
+ }
+ }
+ }
+ spin_unlock_irqrestore(&js_devdata->runpool_irq.lock, flags);
+ }
+
+ /* ctx fast start has not taken place */
+ mutex_unlock(&js_devdata->runpool_mutex);
+ if (kctx_new != NULL)
+ mutex_unlock(&js_kctx_new->ctx.jsctx_mutex);
+}
+
+mali_bool kbasep_js_add_job(kbase_context *kctx, kbase_jd_atom *atom)
+{
+ unsigned long flags;
+ kbasep_js_kctx_info *js_kctx_info;
+ kbase_device *kbdev;
+ kbasep_js_device_data *js_devdata;
+ kbasep_js_policy *js_policy;
+
+ mali_bool policy_queue_updated = MALI_FALSE;
+
+ KBASE_DEBUG_ASSERT(kctx != NULL);
+ KBASE_DEBUG_ASSERT(atom != NULL);
+ lockdep_assert_held(&kctx->jctx.lock);
+
+ kbdev = kctx->kbdev;
+ js_devdata = &kbdev->js_data;
+ js_policy = &kbdev->js_data.policy;
+ js_kctx_info = &kctx->jctx.sched_info;
+
+ KBASE_TIMELINE_ATOM_READY(kctx, kbase_jd_atom_id(kctx, atom));
+
+ mutex_lock(&js_kctx_info->ctx.jsctx_mutex);
+ /* Policy-specific initialization of atoms (which cannot fail). Anything that
+ * could've failed must've been done at kbasep_js_policy_init_job() time. */
+ kbasep_js_policy_register_job(js_policy, kctx, atom);
+
+ /*
+ * Begin Runpool transaction
+ */
+ mutex_lock(&js_devdata->runpool_mutex);
+ KBASE_TRACE_ADD_REFCOUNT(kbdev, JS_ADD_JOB, kctx, atom, atom->jc, kbasep_js_trace_get_refcnt(kbdev, kctx));
+
+ /* Refcount ctx.nr_jobs */
+ KBASE_DEBUG_ASSERT(js_kctx_info->ctx.nr_jobs < U32_MAX);
+ ++(js_kctx_info->ctx.nr_jobs);
+
+ /* Setup any scheduling information */
+ kbasep_js_clear_job_retry_submit(atom);
+
+ /* Lock for state available during IRQ */
+ spin_lock_irqsave(&js_devdata->runpool_irq.lock, flags);
+
+ /* Context Attribute Refcounting */
+ kbasep_js_ctx_attr_ctx_retain_atom(kbdev, kctx, atom);
+
+ /* Enqueue the job in the policy, causing it to be scheduled if the
+ * parent context gets scheduled */
+ kbasep_js_policy_enqueue_job(js_policy, atom);
+
+ if (js_kctx_info->ctx.is_scheduled != MALI_FALSE) {
+ /* Handle an already running context - try to run the new job, in case it
+ * matches requirements that aren't matched by any other job in the Run
+ * Pool */
+ kbasep_js_try_run_next_job_nolock(kbdev);
+ }
+ spin_unlock_irqrestore(&js_devdata->runpool_irq.lock, flags);
+ mutex_unlock(&js_devdata->runpool_mutex);
+ /* End runpool transaction */
+
+ if (js_kctx_info->ctx.is_scheduled == MALI_FALSE) {
+ if (js_kctx_info->ctx.is_dying) {
+ /* A job got added while/after kbase_job_zap_context() was called
+ * on a non-scheduled context (e.g. KDS dependency resolved). Kill
+ * that job by killing the context. */
+ kbasep_js_runpool_requeue_or_kill_ctx(kbdev, kctx, MALI_FALSE);
+ } else if (js_kctx_info->ctx.nr_jobs == 1) {
+ /* Handle Refcount going from 0 to 1: schedule the context on the Policy Queue */
+ KBASE_DEBUG_ASSERT(js_kctx_info->ctx.is_scheduled == MALI_FALSE);
+ KBASE_DEBUG_PRINT_INFO(KBASE_JM, "JS: Enqueue Context %p", kctx);
+
+ mutex_lock(&js_devdata->queue_mutex);
+ kbasep_js_policy_enqueue_ctx(js_policy, kctx);
+ mutex_unlock(&js_devdata->queue_mutex);
+
+ /* Policy Queue was updated - caller must try to schedule the head context
+ * We also try to encourage a fast-start from here. */
+ policy_queue_updated = MALI_TRUE;
+ }
+ }
+ mutex_unlock(&js_kctx_info->ctx.jsctx_mutex);
+
+ /* If the runpool is full and this job has a higher priority than the
+ * non-running job in the runpool - evict it so this higher priority job
+ * starts faster. Fast-starting requires the jsctx_mutex to be dropped,
+ * because it works on multiple ctxs
+ *
+ * Note: If the context is being killed with kbase_job_zap_context(), then
+ * kctx can't disappear after the jsctx_mutex was dropped. This is because
+ * the caller holds kctx->jctx.lock */
+ if (policy_queue_updated)
+ kbasep_js_runpool_attempt_fast_start_ctx(kbdev, kctx);
+
+ return policy_queue_updated;
+}
+
+void kbasep_js_remove_job(kbase_device *kbdev, kbase_context *kctx, kbase_jd_atom *atom)
+{
+ kbasep_js_kctx_info *js_kctx_info;
+ kbasep_js_device_data *js_devdata;
+ kbasep_js_policy *js_policy;
+
+ KBASE_DEBUG_ASSERT(kbdev != NULL);
+ KBASE_DEBUG_ASSERT(kctx != NULL);
+ KBASE_DEBUG_ASSERT(atom != NULL);
+
+ js_devdata = &kbdev->js_data;
+ js_policy = &kbdev->js_data.policy;
+ js_kctx_info = &kctx->jctx.sched_info;
+
+ KBASE_TRACE_ADD_REFCOUNT(kbdev, JS_REMOVE_JOB, kctx, atom, atom->jc, kbasep_js_trace_get_refcnt(kbdev, kctx));
+
+ /* De-refcount ctx.nr_jobs */
+ KBASE_DEBUG_ASSERT(js_kctx_info->ctx.nr_jobs > 0);
+ --(js_kctx_info->ctx.nr_jobs);
+
+ /* De-register the job from the system */
+ kbasep_js_policy_deregister_job(js_policy, kctx, atom);
+}
+
+void kbasep_js_remove_cancelled_job(kbase_device *kbdev, kbase_context *kctx, kbase_jd_atom *katom)
+{
+ unsigned long flags;
+ kbasep_js_atom_retained_state katom_retained_state;
+ kbasep_js_device_data *js_devdata;
+ mali_bool attr_state_changed;
+
+ KBASE_DEBUG_ASSERT(kbdev != NULL);
+ KBASE_DEBUG_ASSERT(kctx != NULL);
+ KBASE_DEBUG_ASSERT(katom != NULL);
+
+ js_devdata = &kbdev->js_data;
+
+ kbasep_js_atom_retained_state_copy(&katom_retained_state, katom);
+ kbasep_js_remove_job(kbdev, kctx, katom);
+
+ spin_lock_irqsave(&js_devdata->runpool_irq.lock, flags);
+
+ /* The atom has 'finished' (will not be re-run), so no need to call
+ * kbasep_js_has_atom_finished().
+ *
+ * This is because it returns MALI_FALSE for soft-stopped atoms, but we
+ * want to override that, because we're cancelling an atom regardless of
+ * whether it was soft-stopped or not */
+ attr_state_changed = kbasep_js_ctx_attr_ctx_release_atom(kbdev, kctx, &katom_retained_state);
+
+ spin_unlock_irqrestore(&js_devdata->runpool_irq.lock, flags);
+
+ if (attr_state_changed != MALI_FALSE) {
+ /* A change in runpool ctx attributes might mean we can run more jobs
+ * than before. */
+ kbase_js_try_run_jobs(kbdev);
+ }
+}
+
+mali_bool kbasep_js_runpool_retain_ctx(kbase_device *kbdev, kbase_context *kctx)
+{
+ unsigned long flags;
+ kbasep_js_device_data *js_devdata;
+ mali_bool result;
+ KBASE_DEBUG_ASSERT(kbdev != NULL);
+ js_devdata = &kbdev->js_data;
+
+ /* KBASE_TRACE_ADD_REFCOUNT( kbdev, JS_RETAIN_CTX, kctx, NULL, 0,
+ kbasep_js_trace_get_refcnt(kbdev, kctx)); */
+ spin_lock_irqsave(&js_devdata->runpool_irq.lock, flags);
+ result = kbasep_js_runpool_retain_ctx_nolock(kbdev, kctx);
+ spin_unlock_irqrestore(&js_devdata->runpool_irq.lock, flags);
+
+ return result;
+}
+
+kbase_context *kbasep_js_runpool_lookup_ctx(kbase_device *kbdev, int as_nr)
+{
+ unsigned long flags;
+ kbasep_js_device_data *js_devdata;
+ kbase_context *found_kctx = NULL;
+ kbasep_js_per_as_data *js_per_as_data;
+
+ KBASE_DEBUG_ASSERT(kbdev != NULL);
+ KBASE_DEBUG_ASSERT(0 <= as_nr && as_nr < BASE_MAX_NR_AS);
+ js_devdata = &kbdev->js_data;
+ js_per_as_data = &js_devdata->runpool_irq.per_as_data[as_nr];
+
+ spin_lock_irqsave(&js_devdata->runpool_irq.lock, flags);
+
+ found_kctx = js_per_as_data->kctx;
+
+ if (found_kctx != NULL)
+ ++(js_per_as_data->as_busy_refcount);
+
+ spin_unlock_irqrestore(&js_devdata->runpool_irq.lock, flags);
+
+ return found_kctx;
+}
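+
+/* Illustrative usage sketch (hypothetical caller, not from this file): the
+ * lookup takes a refcount on the context, which must later be dropped:
+ *
+ *   kbase_context *kctx = kbasep_js_runpool_lookup_ctx(kbdev, as_nr);
+ *   if (kctx != NULL) {
+ *           ... use kctx while the refcount keeps it in the runpool ...
+ *           kbasep_js_runpool_release_ctx(kbdev, kctx);
+ *   }
+ */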
+
+/**
+ * @brief Try running more jobs after releasing a context and/or atom
+ *
+ * This collates a set of actions that must happen whilst
+ * kbasep_js_device_data::runpool_irq::lock is held.
+ *
+ * This includes running more jobs when:
+ * - The previously released kctx caused a ctx attribute change
+ * - The released atom caused a ctx attribute change
+ * - Slots were previously blocked due to affinity restrictions
+ * - Submission during IRQ handling failed
+ */
+STATIC void kbasep_js_run_jobs_after_ctx_and_atom_release(kbase_device *kbdev, kbase_context *kctx, kbasep_js_atom_retained_state *katom_retained_state, mali_bool runpool_ctx_attr_change)
+{
+ kbasep_js_device_data *js_devdata;
+
+ KBASE_DEBUG_ASSERT(kbdev != NULL);
+ KBASE_DEBUG_ASSERT(kctx != NULL);
+ KBASE_DEBUG_ASSERT(katom_retained_state != NULL);
+ js_devdata = &kbdev->js_data;
+
+ lockdep_assert_held(&kctx->jctx.sched_info.ctx.jsctx_mutex);
+ lockdep_assert_held(&js_devdata->runpool_mutex);
+ lockdep_assert_held(&js_devdata->runpool_irq.lock);
+
+ if (js_devdata->nr_user_contexts_running != 0) {
+ mali_bool retry_submit;
+ int retry_jobslot;
+
+ retry_submit = kbasep_js_get_atom_retry_submit_slot(katom_retained_state, &retry_jobslot);
+
+ if (runpool_ctx_attr_change != MALI_FALSE) {
+ /* A change in runpool ctx attributes might mean we can run more jobs
+ * than before */
+ kbasep_js_try_run_next_job_nolock(kbdev);
+
+ /* A retry submit on all slots has now happened, so don't need to do it again */
+ retry_submit = MALI_FALSE;
+ }
+
+ /* Submit on any slots that might've had atoms blocked by the affinity of
+ * a completed atom.
+ *
+ * If no atom has recently completed, then this is harmless */
+ kbase_js_affinity_submit_to_blocked_slots(kbdev);
+
+ /* If the IRQ handler failed to get a job from the policy, try again from
+ * outside the IRQ handler.
+ * NOTE: We may have already cleared retry_submit by submitting above */
+ if (retry_submit != MALI_FALSE) {
+ KBASE_TRACE_ADD_SLOT(kbdev, JD_DONE_TRY_RUN_NEXT_JOB, kctx, NULL, 0u, retry_jobslot);
+ kbasep_js_try_run_next_job_on_slot_nolock(kbdev, retry_jobslot);
+ }
+ }
+}
+
+/**
+ * Internal function to release the reference on a ctx and an atom's "retained
+ * state", only taking the runpool and as transaction mutexes
+ *
+ * This also starts more jobs running in the case of a ctx-attribute state change
+ *
+ * This does none of the followup actions for scheduling:
+ * - It does not schedule in a new context
+ * - It does not requeue or handle dying contexts
+ *
+ * For those tasks, just call kbasep_js_runpool_release_ctx() instead
+ *
+ * Requires:
+ * - Context is scheduled in, and kctx->as_nr matches kctx_as_nr
+ * - Context has a non-zero refcount
+ * - Caller holds js_kctx_info->ctx.jsctx_mutex
+ * - Caller holds js_devdata->runpool_mutex
+ */
+STATIC kbasep_js_release_result kbasep_js_runpool_release_ctx_internal(kbase_device *kbdev, kbase_context *kctx, kbasep_js_atom_retained_state *katom_retained_state)
+{
+ unsigned long flags;
+ kbasep_js_device_data *js_devdata;
+ kbasep_js_kctx_info *js_kctx_info;
+ kbasep_js_policy *js_policy;
+ kbasep_js_per_as_data *js_per_as_data;
+
+ kbasep_js_release_result release_result = 0u;
+ mali_bool runpool_ctx_attr_change = MALI_FALSE;
+ int kctx_as_nr;
+ kbase_as *current_as;
+ int new_ref_count;
+
+ KBASE_DEBUG_ASSERT(kbdev != NULL);
+ KBASE_DEBUG_ASSERT(kctx != NULL);
+ js_kctx_info = &kctx->jctx.sched_info;
+ js_devdata = &kbdev->js_data;
+ js_policy = &kbdev->js_data.policy;
+
+ /* Ensure context really is scheduled in */
+ KBASE_DEBUG_ASSERT(js_kctx_info->ctx.is_scheduled != MALI_FALSE);
+
+ /* kctx->as_nr and js_per_as_data are only read from here. The caller's
+ * jsctx_mutex provides a barrier that ensures they are up-to-date.
+ *
+ * They will not change whilst we're reading them, because the refcount
+ * is non-zero (and we ASSERT on that last fact).
+ */
+ kctx_as_nr = kctx->as_nr;
+ KBASE_DEBUG_ASSERT(kctx_as_nr != KBASEP_AS_NR_INVALID);
+ js_per_as_data = &js_devdata->runpool_irq.per_as_data[kctx_as_nr];
+ KBASE_DEBUG_ASSERT(js_per_as_data->as_busy_refcount > 0);
+
+ /*
+ * Transaction begins on AS and runpool_irq
+ *
+ * Assert about our calling contract
+ */
+ current_as = &kbdev->as[kctx_as_nr];
+ mutex_lock(&current_as->transaction_mutex);
+ spin_lock_irqsave(&js_devdata->runpool_irq.lock, flags);
+ KBASE_DEBUG_ASSERT(kctx_as_nr == kctx->as_nr);
+ KBASE_DEBUG_ASSERT(js_per_as_data->as_busy_refcount > 0);
+
+ /* Update refcount */
+ new_ref_count = --(js_per_as_data->as_busy_refcount);
+
+ /* Release the atom if it finished (i.e. wasn't soft-stopped) */
+ if (kbasep_js_has_atom_finished(katom_retained_state) != MALI_FALSE)
+ runpool_ctx_attr_change |= kbasep_js_ctx_attr_ctx_release_atom(kbdev, kctx, katom_retained_state);
+
+ KBASE_TRACE_ADD_REFCOUNT(kbdev, JS_RELEASE_CTX, kctx, NULL, 0u, new_ref_count);
+
+ if (new_ref_count == 1 && (kctx->jctx.sched_info.ctx.flags & KBASE_CTX_FLAG_PRIVILEGED)
+ && !kbase_pm_is_suspending(kbdev)) {
+ /* Context is kept scheduled into an address space even when there are no jobs; in this case we have
+ * to handle the situation where all jobs have been evicted from the GPU and submission is disabled.
+ *
+ * At this point we re-enable submission to allow further jobs to be executed
+ */
+ kbasep_js_set_submit_allowed(js_devdata, kctx);
+ }
+
+ /* Make a set of checks to see if the context should be scheduled out */
+ if (new_ref_count == 0 && (kctx->jctx.sched_info.ctx.nr_jobs == 0 || kbasep_js_is_submit_allowed(js_devdata, kctx) == MALI_FALSE)) {
+ /* Last reference, and we've been told to remove this context from the Run Pool */
+ KBASE_DEBUG_PRINT_INFO(KBASE_JM, "JS: RunPool Remove Context %p because as_busy_refcount=%d, jobs=%d, allowed=%d", kctx, new_ref_count, js_kctx_info->ctx.nr_jobs, kbasep_js_is_submit_allowed(js_devdata, kctx));
+
+ kbasep_js_policy_runpool_remove_ctx(js_policy, kctx);
+
+ /* Stop any more refcounts occurring on the context */
+ js_per_as_data->kctx = NULL;
+
+ /* Ensure we prevent the context from submitting any new jobs
+ * e.g. from kbasep_js_try_run_next_job_on_slot_irq_nolock() */
+ kbasep_js_clear_submit_allowed(js_devdata, kctx);
+
+ /* Disable the MMU on the affected address space, and indicate it's invalid */
+ kbase_mmu_disable(kctx);
+
+#ifdef CONFIG_MALI_GATOR_SUPPORT
+ kbase_trace_mali_mmu_as_released(kctx->as_nr);
+#endif /* CONFIG_MALI_GATOR_SUPPORT */
+
+ kctx->as_nr = KBASEP_AS_NR_INVALID;
+
+ /* Ctx Attribute handling
+ *
+ * Releasing atoms' attributes must either happen before this, or after
+ * 'is_scheduled' is changed, otherwise we double-decrement the attributes */
+ runpool_ctx_attr_change |= kbasep_js_ctx_attr_runpool_release_ctx(kbdev, kctx);
+
+ /* Early update of context count, to optimize the
+ * kbasep_js_run_jobs_after_ctx_and_atom_release() call */
+ runpool_dec_context_count(kbdev, kctx);
+
+ /* Releasing the context and katom retained state can allow more jobs to run */
+ kbasep_js_run_jobs_after_ctx_and_atom_release(kbdev, kctx, katom_retained_state, runpool_ctx_attr_change);
+
+ /*
+ * Transaction ends on AS and runpool_irq:
+ *
+ * By this point, the AS-related data is now clear and ready for re-use.
+ *
+ * Since releases only occur once for each previous successful retain, and no more
+ * retains are allowed on this context, no other thread will be operating in this
+		 * code whilst we are.
+ */
+ spin_unlock_irqrestore(&js_devdata->runpool_irq.lock, flags);
+ mutex_unlock(&current_as->transaction_mutex);
+
+ /* Free up the address space */
+ release_addr_space(kbdev, kctx_as_nr);
+ /* Note: Don't reuse kctx_as_nr now */
+
+ /* Synchronize with any policy timers */
+ kbasep_js_policy_runpool_timers_sync(js_policy);
+
+ /* update book-keeping info */
+ js_kctx_info->ctx.is_scheduled = MALI_FALSE;
+ /* Signal any waiter that the context is not scheduled, so is safe for
+ * termination - once the jsctx_mutex is also dropped, and jobs have
+ * finished. */
+ wake_up(&js_kctx_info->ctx.is_scheduled_wait);
+
+ /* Queue an action to occur after we've dropped the lock */
+ release_result |= KBASEP_JS_RELEASE_RESULT_WAS_DESCHEDULED;
+
+ } else {
+ kbasep_js_run_jobs_after_ctx_and_atom_release(kbdev, kctx, katom_retained_state, runpool_ctx_attr_change);
+
+ spin_unlock_irqrestore(&js_devdata->runpool_irq.lock, flags);
+ mutex_unlock(&current_as->transaction_mutex);
+ }
+
+ return release_result;
+}
+
+void kbasep_js_runpool_requeue_or_kill_ctx(kbase_device *kbdev, kbase_context *kctx, mali_bool has_pm_ref)
+{
+ kbasep_js_device_data *js_devdata;
+ kbasep_js_policy *js_policy;
+ kbasep_js_kctx_info *js_kctx_info;
+
+ KBASE_DEBUG_ASSERT(kbdev != NULL);
+ KBASE_DEBUG_ASSERT(kctx != NULL);
+ js_kctx_info = &kctx->jctx.sched_info;
+ js_policy = &kbdev->js_data.policy;
+ js_devdata = &kbdev->js_data;
+
+	/* This is called if and only if you've detached the context from
+ * the Runpool or the Policy Queue, and not added it back to the Runpool */
+ KBASE_DEBUG_ASSERT(js_kctx_info->ctx.is_scheduled == MALI_FALSE);
+
+ if (js_kctx_info->ctx.is_dying != MALI_FALSE) {
+ /* Dying: don't requeue, but kill all jobs on the context. This happens
+ * asynchronously */
+ KBASE_DEBUG_PRINT_INFO(KBASE_JM, "JS: ** Killing Context %p on RunPool Remove **", kctx);
+ kbasep_js_policy_foreach_ctx_job(js_policy, kctx, &kbase_jd_cancel, MALI_TRUE);
+ } else if (js_kctx_info->ctx.nr_jobs > 0) {
+		/* Not dying, has jobs: de-ref core counts from each job before adding
+ * back to the queue */
+ kbasep_js_policy_foreach_ctx_job(js_policy, kctx, &kbasep_js_job_check_deref_cores, MALI_FALSE);
+
+ KBASE_DEBUG_PRINT_INFO(KBASE_JM, "JS: Requeue Context %p", kctx);
+ mutex_lock(&js_devdata->queue_mutex);
+ kbasep_js_policy_enqueue_ctx(js_policy, kctx);
+ mutex_unlock(&js_devdata->queue_mutex);
+ } else {
+ /* Not dying, no jobs: don't add back to the queue */
+ KBASE_DEBUG_PRINT_INFO(KBASE_JM, "JS: Idling Context %p (not requeued)", kctx);
+ }
+
+ if (has_pm_ref) {
+ /* In all cases where we had a pm active refcount, release it */
+ kbase_pm_context_idle(kbdev);
+ }
+}
+
+void kbasep_js_runpool_release_ctx_and_katom_retained_state(kbase_device *kbdev, kbase_context *kctx, kbasep_js_atom_retained_state *katom_retained_state)
+{
+ kbasep_js_device_data *js_devdata;
+ kbasep_js_kctx_info *js_kctx_info;
+ kbasep_js_release_result release_result;
+
+ KBASE_DEBUG_ASSERT(kbdev != NULL);
+ KBASE_DEBUG_ASSERT(kctx != NULL);
+ js_kctx_info = &kctx->jctx.sched_info;
+ js_devdata = &kbdev->js_data;
+
+ mutex_lock(&js_kctx_info->ctx.jsctx_mutex);
+ mutex_lock(&js_devdata->runpool_mutex);
+ release_result = kbasep_js_runpool_release_ctx_internal(kbdev, kctx, katom_retained_state);
+
+	/* Drop the runpool mutex to allow requeuing kctx */
+ mutex_unlock(&js_devdata->runpool_mutex);
+ if ((release_result & KBASEP_JS_RELEASE_RESULT_WAS_DESCHEDULED) != 0u)
+ kbasep_js_runpool_requeue_or_kill_ctx(kbdev, kctx, MALI_TRUE);
+
+ /* Drop the jsctx_mutex to allow scheduling in a new context */
+ mutex_unlock(&js_kctx_info->ctx.jsctx_mutex);
+ if ((release_result & KBASEP_JS_RELEASE_RESULT_WAS_DESCHEDULED) != 0u) {
+ /* We've freed up an address space, so let's try to schedule in another
+ * context
+ *
+ * Note: if there's a context to schedule in, then it also tries to run
+ * another job, in case the new context has jobs satisfying requirements
+ * that no other context/job in the runpool does */
+ kbasep_js_try_schedule_head_ctx(kbdev);
+ }
+}
+
+void kbasep_js_runpool_release_ctx(kbase_device *kbdev, kbase_context *kctx)
+{
+ kbasep_js_atom_retained_state katom_retained_state;
+
+ kbasep_js_atom_retained_state_init_invalid(&katom_retained_state);
+
+ kbasep_js_runpool_release_ctx_and_katom_retained_state(kbdev, kctx, &katom_retained_state);
+}
+
+/** Variant of kbasep_js_runpool_release_ctx() that doesn't call into
+ * kbasep_js_try_schedule_head_ctx() */
+STATIC void kbasep_js_runpool_release_ctx_no_schedule(kbase_device *kbdev, kbase_context *kctx)
+{
+ kbasep_js_device_data *js_devdata;
+ kbasep_js_kctx_info *js_kctx_info;
+ kbasep_js_release_result release_result;
+ kbasep_js_atom_retained_state katom_retained_state_struct;
+ kbasep_js_atom_retained_state *katom_retained_state = &katom_retained_state_struct;
+
+ KBASE_DEBUG_ASSERT(kbdev != NULL);
+ KBASE_DEBUG_ASSERT(kctx != NULL);
+ js_kctx_info = &kctx->jctx.sched_info;
+ js_devdata = &kbdev->js_data;
+ kbasep_js_atom_retained_state_init_invalid(katom_retained_state);
+
+ mutex_lock(&js_kctx_info->ctx.jsctx_mutex);
+ mutex_lock(&js_devdata->runpool_mutex);
+ release_result = kbasep_js_runpool_release_ctx_internal(kbdev, kctx, katom_retained_state);
+
+	/* Drop the runpool mutex to allow requeuing kctx */
+ mutex_unlock(&js_devdata->runpool_mutex);
+ if ((release_result & KBASEP_JS_RELEASE_RESULT_WAS_DESCHEDULED) != 0u)
+ kbasep_js_runpool_requeue_or_kill_ctx(kbdev, kctx, MALI_TRUE);
+
+ /* Drop the jsctx_mutex to allow scheduling in a new context */
+ mutex_unlock(&js_kctx_info->ctx.jsctx_mutex);
+
+ /* NOTE: could return release_result if the caller would like to know
+ * whether it should schedule a new context, but currently no callers do */
+}
+
+
+/**
+ * @brief Handle retaining cores for power management and affinity management,
+ * ensuring that cores are powered up and won't violate affinity restrictions.
+ *
+ * This function enters at the following @ref kbase_atom_coreref_state states:
+ *
+ * - NO_CORES_REQUESTED,
+ * - WAITING_FOR_REQUESTED_CORES,
+ * - RECHECK_AFFINITY,
+ *
+ * The transitions are as follows:
+ * - NO_CORES_REQUESTED -> WAITING_FOR_REQUESTED_CORES
+ * - WAITING_FOR_REQUESTED_CORES -> ( WAITING_FOR_REQUESTED_CORES or RECHECK_AFFINITY )
+ * - RECHECK_AFFINITY -> ( WAITING_FOR_REQUESTED_CORES or CHECK_AFFINITY_VIOLATIONS )
+ * - CHECK_AFFINITY_VIOLATIONS -> ( RECHECK_AFFINITY or READY )
+ *
+ * The caller must hold:
+ * - kbasep_js_device_data::runpool_irq::lock
+ *
+ * @return MALI_FALSE when the function makes a transition to the same or lower state, indicating
+ * that the cores are not ready.
+ * @return MALI_TRUE once READY state is reached, indicating that the cores are 'ready' and won't
+ * violate affinity restrictions.
+ *
+ */
+STATIC mali_bool kbasep_js_job_check_ref_cores(kbase_device *kbdev, int js, kbase_jd_atom *katom)
+{
+ /* The most recently checked affinity. Having this at this scope allows us
+ * to guarantee that we've checked the affinity in this function call. */
+ u64 recently_chosen_affinity = 0;
+ mali_bool chosen_affinity = MALI_FALSE;
+ mali_bool retry;
+
+ do {
+ retry = MALI_FALSE;
+
+ /* NOTE: The following uses a number of FALLTHROUGHs to optimize the
+ * calls to this function. Ending of the function is indicated by BREAK OUT */
+ switch (katom->coreref_state) {
+ /* State when job is first attempted to be run */
+ case KBASE_ATOM_COREREF_STATE_NO_CORES_REQUESTED:
+ KBASE_DEBUG_ASSERT(katom->affinity == 0);
+
+ /* Compute affinity */
+ if (MALI_FALSE == kbase_js_choose_affinity(&recently_chosen_affinity, kbdev, katom, js)) {
+ /* No cores are currently available */
+ /* *** BREAK OUT: No state transition *** */
+ break;
+ }
+
+ chosen_affinity = MALI_TRUE;
+
+ /* Request the cores */
+ kbase_pm_request_cores(kbdev, katom->core_req & BASE_JD_REQ_T, recently_chosen_affinity);
+
+ katom->affinity = recently_chosen_affinity;
+
+ /* Proceed to next state */
+ katom->coreref_state = KBASE_ATOM_COREREF_STATE_WAITING_FOR_REQUESTED_CORES;
+
+ /* ***FALLTHROUGH: TRANSITION TO HIGHER STATE*** */
+
+ case KBASE_ATOM_COREREF_STATE_WAITING_FOR_REQUESTED_CORES:
+ {
+ kbase_pm_cores_ready cores_ready;
+ KBASE_DEBUG_ASSERT(katom->affinity != 0 || (katom->core_req & BASE_JD_REQ_T));
+
+ cores_ready = kbase_pm_register_inuse_cores(kbdev, katom->core_req & BASE_JD_REQ_T, katom->affinity);
+ if (cores_ready == KBASE_NEW_AFFINITY) {
+ /* Affinity no longer valid - return to previous state */
+ kbasep_js_job_check_deref_cores(kbdev, katom);
+ KBASE_TRACE_ADD_SLOT_INFO(kbdev, JS_CORE_REF_REGISTER_INUSE_FAILED, katom->kctx, katom, katom->jc, js, (u32) katom->affinity);
+ /* *** BREAK OUT: Return to previous state, retry *** */
+ retry = MALI_TRUE;
+ break;
+ }
+ if (cores_ready == KBASE_CORES_NOT_READY) {
+ /* Stay in this state and return, to retry at this state later */
+ KBASE_TRACE_ADD_SLOT_INFO(kbdev, JS_CORE_REF_REGISTER_INUSE_FAILED, katom->kctx, katom, katom->jc, js, (u32) katom->affinity);
+ /* *** BREAK OUT: No state transition *** */
+ break;
+ }
+ /* Proceed to next state */
+ katom->coreref_state = KBASE_ATOM_COREREF_STATE_RECHECK_AFFINITY;
+ }
+
+ /* ***FALLTHROUGH: TRANSITION TO HIGHER STATE*** */
+
+ case KBASE_ATOM_COREREF_STATE_RECHECK_AFFINITY:
+ KBASE_DEBUG_ASSERT(katom->affinity != 0 || (katom->core_req & BASE_JD_REQ_T));
+
+ /* Optimize out choosing the affinity twice in the same function call */
+ if (chosen_affinity == MALI_FALSE) {
+ /* See if the affinity changed since a previous call. */
+ if (MALI_FALSE == kbase_js_choose_affinity(&recently_chosen_affinity, kbdev, katom, js)) {
+ /* No cores are currently available */
+ kbasep_js_job_check_deref_cores(kbdev, katom);
+ KBASE_TRACE_ADD_SLOT_INFO(kbdev, JS_CORE_REF_REQUEST_ON_RECHECK_FAILED, katom->kctx, katom, katom->jc, js, (u32) recently_chosen_affinity);
+ /* *** BREAK OUT: Transition to lower state *** */
+ break;
+ }
+ chosen_affinity = MALI_TRUE;
+ }
+
+ /* Now see if this requires a different set of cores */
+ if (recently_chosen_affinity != katom->affinity) {
+ kbase_pm_cores_ready cores_ready;
+
+ kbase_pm_request_cores(kbdev, katom->core_req & BASE_JD_REQ_T, recently_chosen_affinity);
+
+ /* Register new cores whilst we still hold the old ones, to minimize power transitions */
+ cores_ready = kbase_pm_register_inuse_cores(kbdev, katom->core_req & BASE_JD_REQ_T, recently_chosen_affinity);
+ kbasep_js_job_check_deref_cores(kbdev, katom);
+
+ /* Fixup the state that was reduced by deref_cores: */
+ katom->coreref_state = KBASE_ATOM_COREREF_STATE_RECHECK_AFFINITY;
+ katom->affinity = recently_chosen_affinity;
+ if (cores_ready == KBASE_NEW_AFFINITY) {
+ /* Affinity no longer valid - return to previous state */
+ katom->coreref_state = KBASE_ATOM_COREREF_STATE_WAITING_FOR_REQUESTED_CORES;
+ kbasep_js_job_check_deref_cores(kbdev, katom);
+ KBASE_TRACE_ADD_SLOT_INFO(kbdev, JS_CORE_REF_REGISTER_INUSE_FAILED, katom->kctx, katom, katom->jc, js, (u32) katom->affinity);
+ /* *** BREAK OUT: Return to previous state, retry *** */
+ retry = MALI_TRUE;
+ break;
+ }
+ /* Now might be waiting for powerup again, with a new affinity */
+ if (cores_ready == KBASE_CORES_NOT_READY) {
+ /* Return to previous state */
+ katom->coreref_state = KBASE_ATOM_COREREF_STATE_WAITING_FOR_REQUESTED_CORES;
+ KBASE_TRACE_ADD_SLOT_INFO(kbdev, JS_CORE_REF_REGISTER_ON_RECHECK_FAILED, katom->kctx, katom, katom->jc, js, (u32) katom->affinity);
+ /* *** BREAK OUT: Transition to lower state *** */
+ break;
+ }
+ }
+ /* Proceed to next state */
+ katom->coreref_state = KBASE_ATOM_COREREF_STATE_CHECK_AFFINITY_VIOLATIONS;
+
+ /* ***FALLTHROUGH: TRANSITION TO HIGHER STATE*** */
+ case KBASE_ATOM_COREREF_STATE_CHECK_AFFINITY_VIOLATIONS:
+ KBASE_DEBUG_ASSERT(katom->affinity != 0 || (katom->core_req & BASE_JD_REQ_T));
+ KBASE_DEBUG_ASSERT(katom->affinity == recently_chosen_affinity);
+
+ /* Note: this is where the caller must've taken the runpool_irq.lock */
+
+ /* Check for affinity violations - if there are any, then we just ask
+ * the caller to requeue and try again later */
+ if (kbase_js_affinity_would_violate(kbdev, js, katom->affinity) != MALI_FALSE) {
+ /* Cause a re-attempt to submit from this slot on the next job complete */
+ kbase_js_affinity_slot_blocked_an_atom(kbdev, js);
+ /* Return to previous state */
+ katom->coreref_state = KBASE_ATOM_COREREF_STATE_RECHECK_AFFINITY;
+ /* *** BREAK OUT: Transition to lower state *** */
+ KBASE_TRACE_ADD_SLOT_INFO(kbdev, JS_CORE_REF_AFFINITY_WOULD_VIOLATE, katom->kctx, katom, katom->jc, js, (u32) katom->affinity);
+ break;
+ }
+
+ /* No affinity violations would result, so the cores are ready */
+ katom->coreref_state = KBASE_ATOM_COREREF_STATE_READY;
+ /* *** BREAK OUT: Cores Ready *** */
+ break;
+
+ default:
+ KBASE_DEBUG_ASSERT_MSG(MALI_FALSE, "Unhandled kbase_atom_coreref_state %d", katom->coreref_state);
+ break;
+ }
+ } while (retry != MALI_FALSE);
+
+ return (katom->coreref_state == KBASE_ATOM_COREREF_STATE_READY);
+}
+
+void kbasep_js_job_check_deref_cores(kbase_device *kbdev, struct kbase_jd_atom *katom)
+{
+ KBASE_DEBUG_ASSERT(kbdev != NULL);
+ KBASE_DEBUG_ASSERT(katom != NULL);
+
+ switch (katom->coreref_state) {
+ case KBASE_ATOM_COREREF_STATE_READY:
+ /* State where atom was submitted to the HW - just proceed to power-down */
+ KBASE_DEBUG_ASSERT(katom->affinity != 0 || (katom->core_req & BASE_JD_REQ_T));
+
+ /* *** FALLTHROUGH *** */
+
+ case KBASE_ATOM_COREREF_STATE_RECHECK_AFFINITY:
+ /* State where cores were registered */
+ KBASE_DEBUG_ASSERT(katom->affinity != 0 || (katom->core_req & BASE_JD_REQ_T));
+ kbase_pm_release_cores(kbdev, katom->core_req & BASE_JD_REQ_T, katom->affinity);
+
+ /* Note: We do not clear the state for kbase_js_affinity_slot_blocked_an_atom().
+ * That is handled after finishing the job. This might be slightly
+ * suboptimal for some corner cases, but is otherwise not a problem
+ * (and resolves itself after the next job completes). */
+
+ break;
+
+ case KBASE_ATOM_COREREF_STATE_WAITING_FOR_REQUESTED_CORES:
+ /* State where cores were requested, but not registered */
+ KBASE_DEBUG_ASSERT(katom->affinity != 0 || (katom->core_req & BASE_JD_REQ_T));
+ kbase_pm_unrequest_cores(kbdev, katom->core_req & BASE_JD_REQ_T, katom->affinity);
+ break;
+
+ case KBASE_ATOM_COREREF_STATE_NO_CORES_REQUESTED:
+ /* Initial state - nothing required */
+ KBASE_DEBUG_ASSERT(katom->affinity == 0);
+ break;
+
+ default:
+ KBASE_DEBUG_ASSERT_MSG(MALI_FALSE, "Unhandled coreref_state: %d", katom->coreref_state);
+ break;
+ }
+
+ katom->affinity = 0;
+ katom->coreref_state = KBASE_ATOM_COREREF_STATE_NO_CORES_REQUESTED;
+}
+
+/*
+ * Note: this function is quite similar to kbasep_js_try_run_next_job_on_slot()
+ */
+mali_bool kbasep_js_try_run_next_job_on_slot_irq_nolock(kbase_device *kbdev, int js, s8 *submit_count)
+{
+ kbasep_js_device_data *js_devdata;
+ mali_bool cores_ready;
+
+ KBASE_DEBUG_ASSERT(kbdev != NULL);
+
+ js_devdata = &kbdev->js_data;
+
+ /* The caller of this function may not be aware of Ctx Attribute state changes so we
+ * must recheck if the given slot is still valid. Otherwise do not try to run.
+ */
+ if (kbase_js_can_run_job_on_slot_no_lock(kbdev, js)) {
+ /* Keep submitting while there's space to run a job on this job-slot,
+ * and there are jobs to get that match its requirements (see 'break'
+ * statement below) */
+ while (*submit_count < KBASE_JS_MAX_JOB_SUBMIT_PER_SLOT_PER_IRQ && kbasep_jm_is_submit_slots_free(kbdev, js, NULL) != MALI_FALSE) {
+ kbase_jd_atom *dequeued_atom;
+ mali_bool has_job = MALI_FALSE;
+
+ /* Dequeue a job that matches the requirements */
+ has_job = kbasep_js_policy_dequeue_job(kbdev, js, &dequeued_atom);
+
+ if (has_job != MALI_FALSE) {
+ /* NOTE: since the runpool_irq lock is currently held and acts across
+ * all address spaces, any context whose busy refcount has reached
+ * zero won't yet be scheduled out whilst we're trying to run jobs
+ * from it */
+ kbase_context *parent_ctx = dequeued_atom->kctx;
+ mali_bool retain_success;
+
+ /* Retain/power up the cores it needs, check if cores are ready */
+ cores_ready = kbasep_js_job_check_ref_cores(kbdev, js, dequeued_atom);
+
+ if (cores_ready != MALI_TRUE && dequeued_atom->event_code != BASE_JD_EVENT_PM_EVENT) {
+ /* The job can't be submitted until the cores are ready, requeue the job */
+ kbasep_js_policy_enqueue_job(&kbdev->js_data.policy, dequeued_atom);
+ break;
+ }
+
+ /* ASSERT that the Policy picked a job from an allowed context */
+ KBASE_DEBUG_ASSERT(kbasep_js_is_submit_allowed(js_devdata, parent_ctx));
+
+ /* Retain the context to stop it from being scheduled out
+ * This is released when the job finishes */
+ retain_success = kbasep_js_runpool_retain_ctx_nolock(kbdev, parent_ctx);
+ KBASE_DEBUG_ASSERT(retain_success != MALI_FALSE);
+ CSTD_UNUSED(retain_success);
+
+ /* Retain the affinity on the slot */
+ kbase_js_affinity_retain_slot_cores(kbdev, js, dequeued_atom->affinity);
+
+ /* Check if this job needs the cycle counter enabled before submission */
+ kbasep_js_ref_permon_check_and_enable_cycle_counter(kbdev, dequeued_atom);
+
+ if (dequeued_atom->event_code == BASE_JD_EVENT_PM_EVENT) {
+ KBASE_DEBUG_PRINT_WARN(KBASE_PM, "Rejecting atom due to BASE_JD_EVENT_PM_EVENT\n");
+ /* The job has failed due to the specified core group being unavailable */
+ kbase_jd_done(dequeued_atom, js, NULL, 0);
+ } else {
+ /* Submit the job */
+ kbase_job_submit_nolock(kbdev, dequeued_atom, js);
+
+ ++(*submit_count);
+ }
+ } else {
+ /* No more jobs - stop submitting for this slot */
+ break;
+ }
+ }
+ }
+
+	/* Indicate whether submission should be retried via a different
+	 * dequeue function. These are the reasons why it *must* happen:
+ * - the KBASE_JS_MAX_JOB_SUBMIT_PER_SLOT_PER_IRQ threshold was reached
+ * and new scheduling must be performed outside of IRQ mode.
+ *
+ * Failure to indicate this correctly could stop further jobs being processed.
+ *
+ * However, we do not _need_ to indicate a retry for the following:
+ * - kbasep_js_policy_dequeue_job() couldn't get a job. In which case,
+ * there's no point re-trying outside of IRQ, because the result will be
+ * the same until job dependencies are resolved, or user-space provides
+ * more jobs. In both those cases, we try to run jobs anyway, so
+ * processing does not stop.
+ * - kbasep_jm_is_submit_slots_free() was MALI_FALSE, indicating jobs were
+ * already running. When those jobs complete, that will still cause events
+ * that cause us to resume job submission.
+ * - kbase_js_can_run_job_on_slot_no_lock() was MALI_FALSE - this is for
+ * Ctx Attribute handling. That _can_ change outside of IRQ context, but
+ * is handled explicitly by kbasep_js_runpool_release_ctx_and_katom_retained_state().
+ */
+ return (mali_bool) (*submit_count >= KBASE_JS_MAX_JOB_SUBMIT_PER_SLOT_PER_IRQ);
+}
+
+void kbasep_js_try_run_next_job_on_slot_nolock(kbase_device *kbdev, int js)
+{
+ kbasep_js_device_data *js_devdata;
+ mali_bool has_job;
+ mali_bool cores_ready;
+
+ KBASE_DEBUG_ASSERT(kbdev != NULL);
+
+ js_devdata = &kbdev->js_data;
+
+ KBASE_DEBUG_ASSERT(js_devdata->nr_user_contexts_running > 0);
+
+ /* Keep submitting while there's space to run a job on this job-slot,
+ * and there are jobs to get that match its requirements (see 'break'
+ * statement below) */
+ if (kbasep_jm_is_submit_slots_free(kbdev, js, NULL) != MALI_FALSE) {
+ /* The caller of this function may not be aware of Ctx Attribute state changes so we
+ * must recheck if the given slot is still valid. Otherwise do not try to run.
+ */
+ if (kbase_js_can_run_job_on_slot_no_lock(kbdev, js)) {
+ do {
+ kbase_jd_atom *dequeued_atom;
+
+ /* Dequeue a job that matches the requirements */
+ has_job = kbasep_js_policy_dequeue_job(kbdev, js, &dequeued_atom);
+
+ if (has_job != MALI_FALSE) {
+ /* NOTE: since the runpool_irq lock is currently held and acts across
+ * all address spaces, any context whose busy refcount has reached
+ * zero won't yet be scheduled out whilst we're trying to run jobs
+ * from it */
+ kbase_context *parent_ctx = dequeued_atom->kctx;
+ mali_bool retain_success;
+
+ /* Retain/power up the cores it needs, check if cores are ready */
+ cores_ready = kbasep_js_job_check_ref_cores(kbdev, js, dequeued_atom);
+
+ if (cores_ready != MALI_TRUE && dequeued_atom->event_code != BASE_JD_EVENT_PM_EVENT) {
+ /* The job can't be submitted until the cores are ready, requeue the job */
+ kbasep_js_policy_enqueue_job(&kbdev->js_data.policy, dequeued_atom);
+ break;
+ }
+ /* ASSERT that the Policy picked a job from an allowed context */
+ KBASE_DEBUG_ASSERT(kbasep_js_is_submit_allowed(js_devdata, parent_ctx));
+
+ /* Retain the context to stop it from being scheduled out
+ * This is released when the job finishes */
+ retain_success = kbasep_js_runpool_retain_ctx_nolock(kbdev, parent_ctx);
+ KBASE_DEBUG_ASSERT(retain_success != MALI_FALSE);
+ CSTD_UNUSED(retain_success);
+
+ /* Retain the affinity on the slot */
+ kbase_js_affinity_retain_slot_cores(kbdev, js, dequeued_atom->affinity);
+
+ /* Check if this job needs the cycle counter enabled before submission */
+ kbasep_js_ref_permon_check_and_enable_cycle_counter(kbdev, dequeued_atom);
+
+ if (dequeued_atom->event_code == BASE_JD_EVENT_PM_EVENT) {
+ KBASE_DEBUG_PRINT_WARN(KBASE_PM, "Rejecting atom due to BASE_JD_EVENT_PM_EVENT\n");
+ /* The job has failed due to the specified core group being unavailable */
+ kbase_jd_done(dequeued_atom, js, NULL, 0);
+ } else {
+ /* Submit the job */
+ kbase_job_submit_nolock(kbdev, dequeued_atom, js);
+ }
+ }
+
+ } while (kbasep_jm_is_submit_slots_free(kbdev, js, NULL) != MALI_FALSE && has_job != MALI_FALSE);
+ }
+ }
+}
+
+void kbasep_js_try_schedule_head_ctx(kbase_device *kbdev)
+{
+ kbasep_js_device_data *js_devdata;
+ mali_bool has_kctx;
+ kbase_context *head_kctx;
+ kbasep_js_kctx_info *js_kctx_info;
+ mali_bool is_runpool_full;
+ kbase_as *new_address_space;
+ unsigned long flags;
+ mali_bool head_kctx_suspended = MALI_FALSE;
+ int pm_active_err;
+
+ KBASE_DEBUG_ASSERT(kbdev != NULL);
+
+ js_devdata = &kbdev->js_data;
+
+ /* We *don't* make a speculative check on whether we can fit a context in the
+ * runpool, because most of our use-cases assume 2 or fewer contexts, and
+ * so we will usually have enough address spaces free.
+ *
+ * In any case, the check will be done later on once we have a context */
+
+ /* Grab the context off head of queue - if there is one */
+ mutex_lock(&js_devdata->queue_mutex);
+ has_kctx = kbasep_js_policy_dequeue_head_ctx(&js_devdata->policy, &head_kctx);
+ mutex_unlock(&js_devdata->queue_mutex);
+
+ if (has_kctx == MALI_FALSE) {
+ /* No ctxs to run - nothing to do */
+ return;
+ }
+ js_kctx_info = &head_kctx->jctx.sched_info;
+
+ KBASE_DEBUG_PRINT_INFO(KBASE_JM, "JS: Dequeue Context %p", head_kctx);
+
+ pm_active_err = kbase_pm_context_active_handle_suspend(kbdev, KBASE_PM_SUSPEND_HANDLER_DONT_INCREASE);
+
+ /*
+ * Atomic transaction on the Context and Run Pool begins
+ */
+ mutex_lock(&js_kctx_info->ctx.jsctx_mutex);
+ mutex_lock(&js_devdata->runpool_mutex);
+
+	/* Check to see if we shouldn't add the context to the Run Pool:
+	 * - it can't take the specified context, and so is 'full'. This may be
+	 * 'full' even when there are address spaces available, since some contexts
+	 * are allowed in whereas others may not due to HW workarounds
+ * - A suspend is taking place
+ * - The context is dying due to kbase_job_zap_context() */
+ is_runpool_full = check_is_runpool_full(kbdev, head_kctx);
+ if (is_runpool_full || pm_active_err || js_kctx_info->ctx.is_dying) {
+ /* Roll back the transaction so far and return */
+ mutex_unlock(&js_devdata->runpool_mutex);
+
+ /* Note: If a Power Management active reference was taken, it's released by
+ * this: */
+ kbasep_js_runpool_requeue_or_kill_ctx(kbdev, head_kctx, !pm_active_err);
+
+ mutex_unlock(&js_kctx_info->ctx.jsctx_mutex);
+ return;
+ }
+
+	/* From this point on, the Power Management active reference is released
+ * only if kbasep_js_runpool_release_ctx() causes the context to be removed
+ * from the runpool */
+
+ KBASE_TRACE_ADD_REFCOUNT(kbdev, JS_TRY_SCHEDULE_HEAD_CTX, head_kctx, NULL, 0u, kbasep_js_trace_get_refcnt(kbdev, head_kctx));
+
+#if MALI_CUSTOMER_RELEASE == 0
+ if (js_devdata->nr_user_contexts_running == 0) {
+ /* Only when there are no other contexts submitting jobs:
+ * Latch in run-time job scheduler timeouts that were set through js_timeouts sysfs file */
+ if (kbdev->js_soft_stop_ticks != 0)
+ js_devdata->soft_stop_ticks = kbdev->js_soft_stop_ticks;
+
+ if (kbdev->js_hard_stop_ticks_ss != 0)
+ js_devdata->hard_stop_ticks_ss = kbdev->js_hard_stop_ticks_ss;
+
+ if (kbdev->js_hard_stop_ticks_nss != 0)
+ js_devdata->hard_stop_ticks_nss = kbdev->js_hard_stop_ticks_nss;
+
+ if (kbdev->js_reset_ticks_ss != 0)
+ js_devdata->gpu_reset_ticks_ss = kbdev->js_reset_ticks_ss;
+
+ if (kbdev->js_reset_ticks_nss != 0)
+ js_devdata->gpu_reset_ticks_nss = kbdev->js_reset_ticks_nss;
+ }
+#endif
+
+ runpool_inc_context_count(kbdev, head_kctx);
+ /* Cause any future waiter-on-termination to wait until the context is
+ * descheduled */
+ js_kctx_info->ctx.is_scheduled = MALI_TRUE;
+ wake_up(&js_kctx_info->ctx.is_scheduled_wait);
+
+	/* Pick the free address space (guaranteed free by check_is_runpool_full()) */
+ new_address_space = pick_free_addr_space(kbdev);
+
+ /* Lock the address space whilst working on it */
+ mutex_lock(&new_address_space->transaction_mutex);
+ spin_lock_irqsave(&js_devdata->runpool_irq.lock, flags);
+
+ /* Do all the necessaries to assign the address space (inc. update book-keeping info)
+ * Add the context to the Run Pool, and allow it to run jobs */
+ assign_and_activate_kctx_addr_space(kbdev, head_kctx, new_address_space);
+
+ /* NOTE: If Linux allows, then we can drop the new_address_space->transaction mutex here */
+
+ if ((js_kctx_info->ctx.flags & KBASE_CTX_FLAG_PRIVILEGED) != 0) {
+ /* We need to retain it to keep the corresponding address space */
+ kbasep_js_runpool_retain_ctx_nolock(kbdev, head_kctx);
+ }
+
+ /* Re-check for suspending: a suspend could've occurred after we
+ * pm_context_active'd, and all the contexts could've been removed from the
+ * runpool before we took this lock. In this case, we don't want to allow
+ * this context to run jobs, we just want it out immediately.
+ *
+ * The DMB required to read the suspend flag was issued recently as part of
+ * the runpool_irq locking. If a suspend occurs *after* that lock was taken
+ * (i.e. this condition doesn't execute), then the kbasep_js_suspend() code
+ * will cleanup this context instead (by virtue of it being called strictly
+ * after the suspend flag is set, and will wait for this lock to drop) */
+ if (kbase_pm_is_suspending(kbdev)) {
+ /* Cause it to leave at some later point */
+ mali_bool retained;
+ retained = kbasep_js_runpool_retain_ctx_nolock(kbdev, head_kctx);
+ KBASE_DEBUG_ASSERT(retained);
+ kbasep_js_clear_submit_allowed(js_devdata, head_kctx);
+ head_kctx_suspended = MALI_TRUE;
+ }
+
+ /* Try to run the next job, in case this context has jobs that match the
+ * job slot requirements, but none of the other currently running contexts
+ * do */
+ kbasep_js_try_run_next_job_nolock(kbdev);
+
+ /* Transaction complete */
+ spin_unlock_irqrestore(&js_devdata->runpool_irq.lock, flags);
+ mutex_unlock(&new_address_space->transaction_mutex);
+ mutex_unlock(&js_devdata->runpool_mutex);
+ mutex_unlock(&js_kctx_info->ctx.jsctx_mutex);
+ /* Note: after this point, the context could potentially get scheduled out immediately */
+
+ if (head_kctx_suspended) {
+		/* Finish forcing out the context due to a suspend. Use a variant of
+ * kbasep_js_runpool_release_ctx() that doesn't schedule a new context,
+ * to prevent a risk of recursion back into this function */
+ kbasep_js_runpool_release_ctx_no_schedule(kbdev, head_kctx);
+ }
+ return;
+}
+
+void kbasep_js_schedule_privileged_ctx(kbase_device *kbdev, kbase_context *kctx)
+{
+ kbasep_js_kctx_info *js_kctx_info;
+ kbasep_js_device_data *js_devdata;
+ mali_bool is_scheduled;
+
+ KBASE_DEBUG_ASSERT(kbdev != NULL);
+ KBASE_DEBUG_ASSERT(kctx != NULL);
+
+ js_devdata = &kbdev->js_data;
+ js_kctx_info = &kctx->jctx.sched_info;
+
+ /* This must never be attempted whilst suspending - i.e. it should only
+ * happen in response to a syscall from a user-space thread */
+ BUG_ON(kbase_pm_is_suspending(kbdev));
+
+ kbase_pm_request_l2_caches(kbdev);
+
+ mutex_lock(&js_kctx_info->ctx.jsctx_mutex);
+ /* Mark the context as privileged */
+ js_kctx_info->ctx.flags |= KBASE_CTX_FLAG_PRIVILEGED;
+
+ is_scheduled = js_kctx_info->ctx.is_scheduled;
+ if (is_scheduled == MALI_FALSE) {
+ mali_bool is_runpool_full;
+
+ /* Add the context to the runpool */
+ mutex_lock(&js_devdata->queue_mutex);
+ kbasep_js_policy_enqueue_ctx(&js_devdata->policy, kctx);
+ mutex_unlock(&js_devdata->queue_mutex);
+
+ mutex_lock(&js_devdata->runpool_mutex);
+ {
+ is_runpool_full = check_is_runpool_full(kbdev, kctx);
+ if (is_runpool_full != MALI_FALSE) {
+ /* Evict jobs from the NEXT registers to free an AS asap */
+ kbasep_js_runpool_evict_next_jobs(kbdev, kctx);
+ }
+ }
+ mutex_unlock(&js_devdata->runpool_mutex);
+ mutex_unlock(&js_kctx_info->ctx.jsctx_mutex);
+ /* Fast-starting requires the jsctx_mutex to be dropped, because it works on multiple ctxs */
+
+ if (is_runpool_full != MALI_FALSE) {
+ /* Evict non-running contexts from the runpool */
+ kbasep_js_runpool_attempt_fast_start_ctx(kbdev, NULL);
+ }
+ /* Try to schedule the context in */
+ kbasep_js_try_schedule_head_ctx(kbdev);
+
+ /* Wait for the context to be scheduled in */
+ wait_event(kctx->jctx.sched_info.ctx.is_scheduled_wait, kctx->jctx.sched_info.ctx.is_scheduled == MALI_TRUE);
+ } else {
+ /* Already scheduled in - We need to retain it to keep the corresponding address space */
+ kbasep_js_runpool_retain_ctx(kbdev, kctx);
+ mutex_unlock(&js_kctx_info->ctx.jsctx_mutex);
+	}
+}
+
+void kbasep_js_release_privileged_ctx(kbase_device *kbdev, kbase_context *kctx)
+{
+ kbasep_js_kctx_info *js_kctx_info;
+ KBASE_DEBUG_ASSERT(kctx != NULL);
+ js_kctx_info = &kctx->jctx.sched_info;
+
+ /* We don't need to use the address space anymore */
+ mutex_lock(&js_kctx_info->ctx.jsctx_mutex);
+ js_kctx_info->ctx.flags &= (~KBASE_CTX_FLAG_PRIVILEGED);
+ mutex_unlock(&js_kctx_info->ctx.jsctx_mutex);
+
+ kbase_pm_release_l2_caches(kbdev);
+
+ /* Release the context - it will be scheduled out if there is no pending job */
+ kbasep_js_runpool_release_ctx(kbdev, kctx);
+}
+
+void kbasep_js_job_done_slot_irq(kbase_jd_atom *katom, int slot_nr,
+ ktime_t *end_timestamp,
+ kbasep_js_atom_done_code done_code)
+{
+ kbase_device *kbdev;
+ kbasep_js_policy *js_policy;
+ kbasep_js_device_data *js_devdata;
+ mali_bool submit_retry_needed = MALI_TRUE; /* If we don't start jobs here, start them from the workqueue */
+ ktime_t tick_diff;
+ u64 microseconds_spent = 0u;
+ kbase_context *parent_ctx;
+
+ KBASE_DEBUG_ASSERT(katom);
+ parent_ctx = katom->kctx;
+ KBASE_DEBUG_ASSERT(parent_ctx);
+ kbdev = parent_ctx->kbdev;
+ KBASE_DEBUG_ASSERT(kbdev);
+
+ js_devdata = &kbdev->js_data;
+ js_policy = &kbdev->js_data.policy;
+
+ lockdep_assert_held(&js_devdata->runpool_irq.lock);
+
+ /*
+ * Release resources before submitting new jobs (bounds the refcount of
+ * the resource to BASE_JM_SUBMIT_SLOTS)
+ */
+#ifdef CONFIG_MALI_GATOR_SUPPORT
+ kbase_trace_mali_job_slots_event(GATOR_MAKE_EVENT(GATOR_JOB_SLOT_STOP, slot_nr), NULL);
+#endif /* CONFIG_MALI_GATOR_SUPPORT */
+
+ /* Check if submitted jobs no longer require the cycle counter to be enabled */
+ kbasep_js_deref_permon_check_and_disable_cycle_counter(kbdev, katom);
+
+ /* Release the affinity from the slot - must happen before next submission to this slot */
+ kbase_js_affinity_release_slot_cores(kbdev, slot_nr, katom->affinity);
+ kbase_js_debug_log_current_affinities(kbdev);
+ /* Calculate the job's time used */
+ if (end_timestamp != NULL) {
+		/* Only calculate it for jobs that really ran on the HW (e.g. jobs
+		 * evicted from the NEXT registers never actually ran, so really did
+		 * take zero time) */
+ tick_diff = ktime_sub(*end_timestamp, katom->start_timestamp);
+
+ microseconds_spent = ktime_to_ns(tick_diff);
+ do_div(microseconds_spent, 1000);
+
+ /* Round up time spent to the minimum timer resolution */
+ if (microseconds_spent < KBASEP_JS_TICK_RESOLUTION_US)
+ microseconds_spent = KBASEP_JS_TICK_RESOLUTION_US;
+ }
+
+ /* Log the result of the job (completion status, and time spent). */
+ kbasep_js_policy_log_job_result(js_policy, katom, microseconds_spent);
+ /* Determine whether the parent context's timeslice is up */
+ if (kbasep_js_policy_should_remove_ctx(js_policy, parent_ctx) != MALI_FALSE)
+ kbasep_js_clear_submit_allowed(js_devdata, parent_ctx);
+
+ if (done_code & KBASE_JS_ATOM_DONE_START_NEW_ATOMS) {
+ /* Submit a new job (if there is one) to help keep the GPU's HEAD and NEXT registers full */
+ KBASE_TRACE_ADD_SLOT(kbdev, JS_JOB_DONE_TRY_RUN_NEXT_JOB, parent_ctx, katom, katom->jc, slot_nr);
+
+ submit_retry_needed = kbasep_js_try_run_next_job_on_slot_irq_nolock(kbdev, slot_nr, &kbdev->slot_submit_count_irq[slot_nr]);
+ }
+
+ if (submit_retry_needed != MALI_FALSE || katom->event_code == BASE_JD_EVENT_STOPPED) {
+ /* The extra condition on STOPPED jobs is needed because they may be
+ * the only job present, but they won't get re-run until the JD work
+ * queue activates. Crucially, work queues can run items out of order
+ * e.g. on different CPUs, so being able to submit from the IRQ handler
+ * is not a good indication that we don't need to run jobs; the
+ * submitted job could be processed on the work-queue *before* the
+ * stopped job, even though it was submitted after.
+ *
+ * Therefore, we must try to run it, otherwise it might not get run at
+ * all after this. */
+
+ KBASE_TRACE_ADD_SLOT(kbdev, JS_JOB_DONE_RETRY_NEEDED, parent_ctx, katom, katom->jc, slot_nr);
+ kbasep_js_set_job_retry_submit_slot(katom, slot_nr);
+ }
+}
+
+void kbasep_js_suspend(kbase_device *kbdev)
+{
+ unsigned long flags;
+ kbasep_js_device_data *js_devdata;
+ int i;
+ u16 retained = 0u;
+ int nr_privileged_ctx = 0;
+ KBASE_DEBUG_ASSERT(kbdev);
+ KBASE_DEBUG_ASSERT(kbase_pm_is_suspending(kbdev));
+ js_devdata = &kbdev->js_data;
+
+ spin_lock_irqsave(&js_devdata->runpool_irq.lock, flags);
+
+ /* Prevent all contexts from submitting */
+ js_devdata->runpool_irq.submit_allowed = 0;
+
+ /* Retain each of the contexts, so we can cause it to leave even if it had
+ * no refcount to begin with */
+ for (i = BASE_MAX_NR_AS - 1; i >= 0; --i) {
+ kbasep_js_per_as_data *js_per_as_data = &js_devdata->runpool_irq.per_as_data[i];
+ kbase_context *kctx = js_per_as_data->kctx;
+ retained = retained << 1;
+
+ if (kctx) {
+ ++(js_per_as_data->as_busy_refcount);
+ retained |= 1u;
+ /* We can only cope with up to 1 privileged context - the
+ * instrumented context. It'll be suspended by disabling
+ * instrumentation */
+ if (kctx->jctx.sched_info.ctx.flags & KBASE_CTX_FLAG_PRIVILEGED)
+ KBASE_DEBUG_ASSERT(++nr_privileged_ctx == 1);
+ }
+ }
+ CSTD_UNUSED(nr_privileged_ctx);
+ spin_unlock_irqrestore(&js_devdata->runpool_irq.lock, flags);
+
+ /* De-ref the previous retain to ensure each context gets pulled out
+ * sometime later. */
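+	/* Note: the first loop visited the address spaces in descending order,
+	 * so bit i of 'retained' now corresponds to address space i */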
+ for (i = 0;
+ i < BASE_MAX_NR_AS;
+ ++i, retained = retained >> 1) {
+ kbasep_js_per_as_data *js_per_as_data = &js_devdata->runpool_irq.per_as_data[i];
+ kbase_context *kctx = js_per_as_data->kctx;
+
+ if (retained & 1u)
+			kbasep_js_runpool_release_ctx(kbdev, kctx);
+ }
+
+ /* Caller must wait for all Power Manager active references to be dropped */
+}
+
+void kbasep_js_resume(kbase_device *kbdev)
+{
+ kbasep_js_device_data *js_devdata;
+ int i;
+ KBASE_DEBUG_ASSERT(kbdev);
+ js_devdata = &kbdev->js_data;
+
+ KBASE_DEBUG_ASSERT(!kbase_pm_is_suspending(kbdev));
+
+ /* Schedule in as many contexts as address spaces. This also starts atoms. */
+	for (i = 0; i < kbdev->nr_hw_address_spaces; ++i)
+		kbasep_js_try_schedule_head_ctx(kbdev);
+ /* JS Resume complete */
+}
diff --git a/drivers/gpu/arm/t6xx/kbase/src/common/mali_kbase_js.h b/drivers/gpu/arm/t6xx/kbase/src/common/mali_kbase_js.h
new file mode 100755
index 00000000000..20abb90d58a
--- /dev/null
+++ b/drivers/gpu/arm/t6xx/kbase/src/common/mali_kbase_js.h
@@ -0,0 +1,927 @@
+/*
+ *
+ * (C) COPYRIGHT 2011-2013 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained
+ * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
+ * Boston, MA 02110-1301, USA.
+ *
+ */
+
+
+
+/**
+ * @file mali_kbase_js.h
+ * Job Scheduler APIs.
+ */
+
+#ifndef _KBASE_JS_H_
+#define _KBASE_JS_H_
+
+#include <malisw/mali_malisw.h>
+
+#include "mali_kbase_js_defs.h"
+#include "mali_kbase_js_policy.h"
+#include "mali_kbase_defs.h"
+
+#include "mali_kbase_js_ctx_attr.h"
+
+/**
+ * @addtogroup base_api
+ * @{
+ */
+
+/**
+ * @addtogroup base_kbase_api
+ * @{
+ */
+
+/**
+ * @addtogroup kbase_js Job Scheduler Internal APIs
+ * @{
+ *
+ * These APIs are Internal to KBase and are available for use by the
+ * @ref kbase_js_policy "Job Scheduler Policy APIs"
+ */
+
+/**
+ * @brief Initialize the Job Scheduler
+ *
+ * The kbasep_js_device_data sub-structure of \a kbdev must be zero
+ * initialized before passing to the kbasep_js_devdata_init() function. This is
+ * to give efficient error path code.
+ */
+mali_error kbasep_js_devdata_init(kbase_device * const kbdev);
+
+/**
+ * @brief Halt the Job Scheduler.
+ *
+ * It is safe to call this on \a kbdev even if the kbasep_js_device_data
+ * sub-structure was never initialized/failed initialization, to give efficient
+ * error-path code.
+ *
+ * For this to work, the kbasep_js_device_data sub-structure of \a kbdev must
+ * be zero initialized before passing to the kbasep_js_devdata_init()
+ * function. This is to give efficient error path code.
+ *
+ * It is a Programming Error to call this whilst there are still kbase_context
+ * structures registered with this scheduler.
+ *
+ */
+void kbasep_js_devdata_halt(kbase_device *kbdev);
+
+/**
+ * @brief Terminate the Job Scheduler
+ *
+ * It is safe to call this on \a kbdev even if the kbasep_js_device_data
+ * sub-structure was never initialized/failed initialization, to give efficient
+ * error-path code.
+ *
+ * For this to work, the kbasep_js_device_data sub-structure of \a kbdev must
+ * be zero initialized before passing to the kbasep_js_devdata_init()
+ * function. This is to give efficient error path code.
+ *
+ * It is a Programming Error to call this whilst there are still kbase_context
+ * structures registered with this scheduler.
+ */
+void kbasep_js_devdata_term(kbase_device *kbdev);
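+
+/* A sketch of the init/halt/term lifecycle implied by the zero-initialization
+ * requirement above (illustrative only; assumes kbdev was allocated zeroed,
+ * e.g. via kzalloc(), so the error path below is always safe):
+ *
+ * @code
+ * if (kbasep_js_devdata_init(kbdev) != MALI_ERROR_NONE)
+ *         goto fail;  // halt/term remain safe on the zeroed sub-structure
+ * ...
+ * kbasep_js_devdata_halt(kbdev);
+ * kbasep_js_devdata_term(kbdev);
+ * @endcode
+ */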
+
+/**
+ * @brief Initialize the Scheduling Component of a kbase_context on the Job Scheduler.
+ *
+ * This effectively registers a kbase_context with a Job Scheduler.
+ *
+ * It does not register any jobs owned by the kbase_context with the scheduler.
+ * Those must be separately registered by kbasep_js_add_job().
+ *
+ * The kbase_context must be zero initialized before passing to the
+ * kbasep_js_kctx_init() function. This is to give efficient error path code.
+ */
+mali_error kbasep_js_kctx_init(kbase_context * const kctx);
+
+/**
+ * @brief Terminate the Scheduling Component of a kbase_context on the Job Scheduler
+ *
+ * This effectively de-registers a kbase_context from its Job Scheduler
+ *
+ * It is safe to call this on a kbase_context that has never had or failed
+ * initialization of its jctx.sched_info member, to give efficient error-path
+ * code.
+ *
+ * For this to work, the kbase_context must be zero initialized before passing
+ * to the kbasep_js_kctx_init() function.
+ *
+ * It is a Programming Error to call this whilst there are still jobs
+ * registered with this context.
+ */
+void kbasep_js_kctx_term(kbase_context *kctx);
+
+/**
+ * @brief Add a job chain to the Job Scheduler, and take necessary actions to
+ * schedule the context/run the job.
+ *
+ * This atomically does the following:
+ * - Update the numbers of jobs information
+ * - Add the job to the run pool if necessary (part of init_job)
+ *
+ * Once this is done, then an appropriate action is taken:
+ * - If the ctx is scheduled, it attempts to start the next job (which might be
+ * this added job)
+ * - Otherwise, and if this is the first job on the context, it enqueues it on
+ * the Policy Queue
+ *
+ * The Policy's Queue can be updated by this in the following ways:
+ * - In the above case that this is the first job on the context
+ * - If the job is high priority and the context is not scheduled, then it
+ * could cause the Policy to schedule out a low-priority context, allowing
+ * this context to be scheduled in.
+ *
+ * If the context is already scheduled on the RunPool, then adding a job to it
+ * is guaranteed not to update the Policy Queue. And so, the caller is
+ * guaranteed not to need to try scheduling a context from the Run Pool - it
+ * can safely assert that the result is MALI_FALSE.
+ *
+ * It is a programming error to have more than U32_MAX jobs in flight at a time.
+ *
+ * The following locking conditions are made on the caller:
+ * - it must \em not hold kbasep_js_kctx_info::ctx::jsctx_mutex.
+ * - it must \em not hold kbasep_js_device_data::runpool_irq::lock (as this will be
+ * obtained internally)
+ * - it must \em not hold kbasep_js_device_data::runpool_mutex (as this will be
+ * obtained internally)
+ * - it must \em not hold kbasep_jd_device_data::queue_mutex (again, it's used internally).
+ *
+ * @return MALI_TRUE indicates that the Policy Queue was updated, and so the
+ * caller will need to try scheduling a context onto the Run Pool.
+ * @return MALI_FALSE indicates that no updates were made to the Policy Queue,
+ * so no further action is required from the caller. This is \b always returned
+ * when the context is currently scheduled.
+ */
+mali_bool kbasep_js_add_job(kbase_context *kctx, kbase_jd_atom *atom);
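+
+/* A minimal caller sketch implied by the return value documentation above
+ * (illustrative only, not part of the driver):
+ *
+ * @code
+ * if (kbasep_js_add_job(kctx, katom) != MALI_FALSE) {
+ *         // The Policy Queue was updated - try to schedule a context in
+ *         kbasep_js_try_schedule_head_ctx(kbdev);
+ * }
+ * @endcode
+ */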
+
+/**
+ * @brief Remove a job chain from the Job Scheduler, except for its 'retained state'.
+ *
+ * Completely removing a job requires several calls:
+ * - kbasep_js_copy_atom_retained_state(), to capture the 'retained state' of
+ * the atom
+ * - kbasep_js_remove_job(), to partially remove the atom from the Job Scheduler
+ * - kbasep_js_runpool_release_ctx_and_katom_retained_state(), to release the
+ * remaining state held as part of the job having been run.
+ *
+ * In the common case of atoms completing normally, this set of actions is
+ * more optimal for spinlock purposes than having kbasep_js_remove_job()
+ * handle all of the actions.
+ *
+ * In the case of cancelling atoms, it is easier to call
+ * kbasep_js_remove_cancelled_job(), which handles all the necessary actions.
+ *
+ * It is a programming error to call this when:
+ * - \a atom is not a job belonging to kctx.
+ * - \a atom has already been removed from the Job Scheduler.
+ * - \a atom is still in the runpool:
+ * - it has not been removed with kbasep_js_policy_dequeue_job()
+ * - or, it has not been removed with kbasep_js_policy_dequeue_job_irq()
+ *
+ * Do not use this for removing jobs being killed by kbase_jd_cancel() - use
+ * kbasep_js_remove_cancelled_job() instead.
+ *
+ * The following locking conditions are made on the caller:
+ * - it must hold kbasep_js_kctx_info::ctx::jsctx_mutex.
+ *
+ */
+void kbasep_js_remove_job(kbase_device *kbdev, kbase_context *kctx, kbase_jd_atom *atom);
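+
+/* A sketch of the removal sequence described above (illustrative only; the
+ * exact signature of kbasep_js_copy_atom_retained_state() is assumed here to
+ * take the destination retained-state structure and the atom):
+ *
+ * @code
+ * kbasep_js_atom_retained_state retained;
+ *
+ * mutex_lock(&kctx->jctx.sched_info.ctx.jsctx_mutex);
+ * kbasep_js_copy_atom_retained_state(&retained, katom);  // capture state
+ * kbasep_js_remove_job(kbdev, kctx, katom);              // partial removal
+ * mutex_unlock(&kctx->jctx.sched_info.ctx.jsctx_mutex);
+ *
+ * // Release the remaining state (takes the locks it needs internally)
+ * kbasep_js_runpool_release_ctx_and_katom_retained_state(kbdev, kctx, &retained);
+ * @endcode
+ */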
+
+/**
+ * @brief Completely remove a job chain from the Job Scheduler, in the case
+ * where the job chain was cancelled.
+ *
+ * This is a variant of kbasep_js_remove_job() that takes care of removing all
+ * of the retained state too. This is generally useful for cancelled atoms,
+ * which need not be handled in an optimal way.
+ *
+ * It is a programming error to call this when:
+ * - \a atom is not a job belonging to kctx.
+ * - \a atom has already been removed from the Job Scheduler.
+ * - \a atom is still in the runpool:
+ * - it is not being killed with kbasep_jd_cancel()
+ * - or, it has not been removed with kbasep_js_policy_dequeue_job()
+ * - or, it has not been removed with kbasep_js_policy_dequeue_job_irq()
+ *
+ * The following locking conditions are made on the caller:
+ * - it must hold kbasep_js_kctx_info::ctx::jsctx_mutex.
+ * - it must \em not hold the kbasep_js_device_data::runpool_irq::lock, (as this will be
+ * obtained internally)
+ * - it must \em not hold kbasep_js_device_data::runpool_mutex (as this could be
+ * obtained internally)
+ */
+void kbasep_js_remove_cancelled_job(kbase_device *kbdev, kbase_context *kctx, kbase_jd_atom *katom);
+
+/**
+ * @brief Refcount a context as being busy, preventing it from being scheduled
+ * out.
+ *
+ * @note This function can safely be called from IRQ context.
+ *
+ * The following locking conditions are made on the caller:
+ * - it must \em not hold the kbasep_js_device_data::runpool_irq::lock, because
+ * it will be used internally.
+ *
+ * @return value != MALI_FALSE if the retain succeeded, and the context will not be scheduled out.
+ * @return MALI_FALSE if the retain failed (because the context is being/has been scheduled out).
+ */
+mali_bool kbasep_js_runpool_retain_ctx(kbase_device *kbdev, kbase_context *kctx);
+
+/**
+ * @brief Refcount a context as being busy, preventing it from being scheduled
+ * out.
+ *
+ * @note This function can safely be called from IRQ context.
+ *
+ * The following locks must be held by the caller:
+ * - kbasep_js_device_data::runpool_irq::lock
+ *
+ * @return value != MALI_FALSE if the retain succeeded, and the context will not be scheduled out.
+ * @return MALI_FALSE if the retain failed (because the context is being/has been scheduled out).
+ */
+mali_bool kbasep_js_runpool_retain_ctx_nolock(kbase_device *kbdev, kbase_context *kctx);
+
+/**
+ * @brief Look up a context in the Run Pool based upon its current address
+ * space and ensure that it stays scheduled in.
+ *
+ * The context is refcounted as being busy to prevent it from scheduling
+ * out. It must be released with kbasep_js_runpool_release_ctx() when it is no
+ * longer required to stay scheduled in.
+ *
+ * @note This function can safely be called from IRQ context.
+ *
+ * The following locking conditions are made on the caller:
+ * - it must \em not hold the kbasep_js_device_data::runpool_irq::lock, because
+ * it will be used internally.
+ *
+ * @return a valid kbase_context on success, which has been refcounted as being busy.
+ * @return NULL on failure, indicating that no context was found in \a as_nr
+ */
+kbase_context *kbasep_js_runpool_lookup_ctx(kbase_device *kbdev, int as_nr);
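+
+/* A sketch of the lookup/refcount pattern described above (illustrative only;
+ * shown for a non-IRQ caller, since the release below must not be called from
+ * IRQ context):
+ *
+ * @code
+ * kbase_context *kctx = kbasep_js_runpool_lookup_ctx(kbdev, as_nr);
+ * if (kctx != NULL) {
+ *         // ... use kctx; it cannot be scheduled out whilst refcounted ...
+ *         kbasep_js_runpool_release_ctx(kbdev, kctx);
+ * }
+ * @endcode
+ */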
+
+/**
+ * @brief Handle the requeuing/killing of a context that was evicted from the
+ * policy queue or runpool.
+ *
+ * This should be used whenever handing off a context that has been evicted
+ * from the policy queue or the runpool:
+ * - If the context is not dying and has jobs, it gets re-added to the policy
+ * queue
+ * - Otherwise, it is not added
+ *
+ * In addition, if the context is dying the jobs are killed asynchronously.
+ *
+ * In all cases, the Power Manager active reference is released
+ * (kbase_pm_context_idle()) whenever the has_pm_ref parameter is true. \a
+ * has_pm_ref must be set to false whenever the context was not previously in
+ * the runpool and does not hold a Power Manager active refcount. Note that
+ * contexts in a rollback of kbasep_js_try_schedule_head_ctx() might have an
+ * active refcount even though they weren't in the runpool.
+ *
+ * The following locking conditions are made on the caller:
+ * - it must hold kbasep_js_kctx_info::ctx::jsctx_mutex.
+ * - it must \em not hold kbasep_jd_device_data::queue_mutex (as this will be
+ * obtained internally)
+ */
+void kbasep_js_runpool_requeue_or_kill_ctx(kbase_device *kbdev, kbase_context *kctx, mali_bool has_pm_ref);
+
+/**
+ * @brief Release a refcount of a context being busy, allowing it to be
+ * scheduled out.
+ *
+ * When the refcount reaches zero, the context \em might be scheduled out
+ * (depending on whether the Scheduling Policy has deemed it so, or whether it
+ * has run out of jobs).
+ *
+ * If the context does get scheduled out, then the following actions will be
+ * taken as part of descheduling the context:
+ * - For the context being descheduled:
+ *   - If the context is in the process of dying (all the jobs are being
+ * removed from it), then descheduling also kills off any jobs remaining in the
+ * context.
+ * - If the context is not dying, and any jobs remain after descheduling the
+ * context then it is re-enqueued to the Policy's Queue.
+ * - Otherwise, the context is still known to the scheduler, but remains absent
+ * from the Policy Queue until a job is next added to it.
+ * - In all descheduling cases, the Power Manager active reference (obtained
+ * during kbasep_js_try_schedule_head_ctx()) is released (kbase_pm_context_idle()).
+ *
+ * Whilst the context is being descheduled, this also handles actions that
+ * cause more atoms to be run:
+ * - Attempt submitting atoms when the Context Attributes on the Runpool have
+ * changed. This is because the context being scheduled out could mean that
+ * there are more opportunities to run atoms.
+ * - Attempt submitting to a slot that was previously blocked due to affinity
+ * restrictions. This is usually only necessary when releasing a context
+ * happens as part of completing a previous job, but is harmless nonetheless.
+ * - Attempt scheduling in a new context (if one is available), and if necessary,
+ * running a job from that new context.
+ *
+ * Unlike retaining a context in the runpool, this function \b cannot be called
+ * from IRQ context.
+ *
+ * It is a programming error to call this on a \a kctx that is not currently
+ * scheduled, or that already has a zero refcount.
+ *
+ * The following locking conditions are made on the caller:
+ * - it must \em not hold the kbasep_js_device_data::runpool_irq::lock, because
+ * it will be used internally.
+ * - it must \em not hold kbasep_js_kctx_info::ctx::jsctx_mutex.
+ * - it must \em not hold kbasep_js_device_data::runpool_mutex (as this will be
+ * obtained internally)
+ * - it must \em not hold the kbase_device::as[n].transaction_mutex (as this will be obtained internally)
+ * - it must \em not hold kbasep_jd_device_data::queue_mutex (as this will be
+ * obtained internally)
+ *
+ */
+void kbasep_js_runpool_release_ctx(kbase_device *kbdev, kbase_context *kctx);
+
+/**
+ * @brief Variant of kbasep_js_runpool_release_ctx() that handles additional
+ * actions from completing an atom.
+ *
+ * This is usually called as part of completing an atom and releasing the
+ * refcount on the context held by the atom.
+ *
+ * Therefore, the extra actions carried out are part of handling actions queued
+ * on a completed atom, namely:
+ * - Releasing the atom's context attributes
+ * - Retrying the submission on a particular slot, because we couldn't submit
+ * on that slot from an IRQ handler.
+ *
+ * The locking conditions of this function are the same as those for
+ * kbasep_js_runpool_release_ctx()
+ */
+void kbasep_js_runpool_release_ctx_and_katom_retained_state(kbase_device *kbdev, kbase_context *kctx, kbasep_js_atom_retained_state *katom_retained_state);
+
+/**
+ * @brief Try to submit the next job on a \b particular slot whilst in IRQ
+ * context, and whilst the caller already holds the runpool IRQ spinlock.
+ *
+ * \a *submit_count will be checked against
+ * KBASE_JS_MAX_JOB_SUBMIT_PER_SLOT_PER_IRQ to see whether too many jobs have
+ * been submitted. This is to prevent the IRQ handler looping over lots of GPU
+ * NULL jobs, which may complete whilst the IRQ handler is still processing. \a
+ * submit_count itself should point to kbase_device::slot_submit_count_irq[ \a js ],
+ * which is initialized to zero on entry to the IRQ handler.
+ *
+ * The following locks must be held by the caller:
+ * - kbasep_js_device_data::runpool_irq::lock
+ *
+ * @return truthful (i.e. != MALI_FALSE) if too many jobs were submitted from
+ * IRQ. Therefore, this indicates that submission should be retried from a
+ * work-queue, by using
+ * kbasep_js_try_run_next_job_on_slot_nolock()/kbase_js_try_run_jobs_on_slot().
+ * @return MALI_FALSE if submission had no problems: the GPU is either already
+ * full of jobs in the HEAD and NEXT registers, or we were able to get enough
+ * jobs from the Run Pool to fill the GPU's HEAD and NEXT registers.
+ */
+mali_bool kbasep_js_try_run_next_job_on_slot_irq_nolock(kbase_device *kbdev, int js, s8 *submit_count);
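+
+/* A sketch of how an IRQ handler might drive this function (illustrative
+ * only; assumes kbase_device::slot_submit_count_irq[js] was zeroed on entry
+ * to the IRQ handler, as described above):
+ *
+ * @code
+ * if (kbasep_js_try_run_next_job_on_slot_irq_nolock(kbdev, js,
+ *                 &kbdev->slot_submit_count_irq[js]) != MALI_FALSE) {
+ *         // Too many jobs were submitted from IRQ: retry later from a
+ *         // work-queue, e.g. via kbase_js_try_run_jobs_on_slot()
+ * }
+ * @endcode
+ */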
+
+/**
+ * @brief Try to submit the next job on a particular slot, outside of IRQ context
+ *
+ * This obtains the Job Slot lock for the duration of the call only.
+ *
+ * Unlike kbasep_js_try_run_next_job_on_slot_irq_nolock(), there is no limit on
+ * submission, because eventually IRQ_THROTTLE will kick in to prevent us
+ * getting stuck in a loop of submitting GPU NULL jobs. This is because the IRQ
+ * handler will be delayed, and so this function will eventually fill up the
+ * space in our software 'submitted' slot (kbase_jm_slot::submitted).
+ *
+ * In addition, there's no return value - we'll run the maintenance functions
+ * on the Policy's Run Pool, but if there's nothing there after that, then the
+ * Run Pool is truly empty, and so no more action need be taken.
+ *
+ * The following locking conditions are made on the caller:
+ * - it must hold kbasep_js_device_data::runpool_mutex
+ * - it must hold kbasep_js_device_data::runpool_irq::lock
+ *
+ * This must only be called whilst the GPU is powered - for example, when
+ * kbdev->js_data.nr_user_contexts_running > 0.
+ *
+ * @note The caller \em might be holding one of the
+ * kbasep_js_kctx_info::ctx::jsctx_mutex locks.
+ *
+ */
+void kbasep_js_try_run_next_job_on_slot_nolock(kbase_device *kbdev, int js);
+
+/**
+ * @brief Try to submit the next job for each slot in the system, outside of IRQ context
+ *
+ * This will internally call kbasep_js_try_run_next_job_on_slot_nolock(), so similar
+ * locking conditions on the caller are required.
+ *
+ * The following locking conditions are made on the caller:
+ * - it must hold kbasep_js_device_data::runpool_mutex
+ * - it must hold kbasep_js_device_data::runpool_irq::lock
+ *
+ * @note The caller \em might be holding one of the
+ * kbasep_js_kctx_info::ctx::jsctx_mutex locks.
+ *
+ */
+void kbasep_js_try_run_next_job_nolock(kbase_device *kbdev);
+
+/**
+ * @brief Try to schedule the next context onto the Run Pool
+ *
+ * This checks whether there's space in the Run Pool to accommodate a new
+ * context. If so, it attempts to dequeue a context from the Policy Queue, and
+ * submit this to the Run Pool.
+ *
+ * If the scheduling succeeds, then it also makes a call to
+ * kbasep_js_try_run_next_job_nolock(), in case the new context has jobs
+ * matching the job slot requirements, but no other currently scheduled context
+ * has such jobs.
+ *
+ * Whilst attempting to obtain a context from the policy queue, or add a
+ * context to the runpool, this function takes a Power Manager active
+ * reference. If for any reason a context cannot be added to the runpool, any
+ * reference obtained is released once the context is safely back in the policy
+ * queue. If no context was available on the policy queue, any reference
+ * obtained is released too.
+ *
+ * Only if the context gets placed in the runpool does the Power Manager active
+ * reference stay held (and is effectively now owned by the
+ * context/runpool). It is only released once the context is removed
+ * completely, or added back to the policy queue
+ * (e.g. kbasep_js_runpool_release_ctx(),
+ * kbasep_js_runpool_requeue_or_kill_ctx(), etc)
+ *
+ * If any of these actions fail (Run Pool Full, Policy Queue empty, can't get
+ * PM active reference due to a suspend, etc) then any actions taken are rolled
+ * back and the function just returns normally.
+ *
+ * The following locking conditions are made on the caller:
+ * - it must \em not hold the kbasep_js_device_data::runpool_irq::lock, because
+ * it will be used internally.
+ * - it must \em not hold kbasep_js_device_data::runpool_mutex (as this will be
+ * obtained internally)
+ * - it must \em not hold the kbase_device::as[n].transaction_mutex (as this will be obtained internally)
+ * - it must \em not hold kbasep_jd_device_data::queue_mutex (again, it's used internally).
+ * - it must \em not hold kbasep_js_kctx_info::ctx::jsctx_mutex, because it will
+ * be used internally.
+ *
+ */
+void kbasep_js_try_schedule_head_ctx(kbase_device *kbdev);
+
+/**
+ * @brief Schedule in a privileged context
+ *
+ * This schedules a context in regardless of the context priority.
+ * If the runpool is full, a context will be forced out of the runpool and the function will wait
+ * for the new context to be scheduled in.
+ * The context will be kept scheduled in (and the corresponding address space reserved) until
+ * kbasep_js_release_privileged_ctx() is called.
+ *
+ * The following locking conditions are made on the caller:
+ * - it must \em not hold the kbasep_js_device_data::runpool_irq::lock, because
+ * it will be used internally.
+ * - it must \em not hold kbasep_js_device_data::runpool_mutex (as this will be
+ * obtained internally)
+ * - it must \em not hold the kbase_device::as[n].transaction_mutex (as this will be obtained internally)
+ * - it must \em not hold kbasep_jd_device_data::queue_mutex (again, it's used internally).
+ * - it must \em not hold kbasep_js_kctx_info::ctx::jsctx_mutex, because it will
+ * be used internally.
+ *
+ */
+void kbasep_js_schedule_privileged_ctx(kbase_device *kbdev, kbase_context *kctx);
+
+/**
+ * @brief Release a privileged context, allowing it to be scheduled out.
+ *
+ * See kbasep_js_runpool_release_ctx for potential side effects.
+ *
+ * The following locking conditions are made on the caller:
+ * - it must \em not hold the kbasep_js_device_data::runpool_irq::lock, because
+ * it will be used internally.
+ * - it must \em not hold kbasep_js_kctx_info::ctx::jsctx_mutex.
+ * - it must \em not hold kbasep_js_device_data::runpool_mutex (as this will be
+ * obtained internally)
+ * - it must \em not hold the kbase_device::as[n].transaction_mutex (as this will be obtained internally)
+ *
+ */
+void kbasep_js_release_privileged_ctx(kbase_device *kbdev, kbase_context *kctx);
+
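+/* Illustrative sketch (not part of the driver): the two calls above are
+ * intended to bracket work that must run with the context resident in the
+ * runpool; do_privileged_work() is a hypothetical stand-in for that work.
+ *
+ * @code
+ * kbasep_js_schedule_privileged_ctx(kbdev, kctx);
+ * do_privileged_work(kbdev, kctx);
+ * kbasep_js_release_privileged_ctx(kbdev, kctx);
+ * @endcode
+ */
+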
+/**
+ * @brief Handle the Job Scheduler component for the IRQ of a job finishing
+ *
+ * This does the following:
+ * -# Releases resources held by the atom
+ * -# if \a end_timestamp != NULL, updates the runpool's notion of time spent by a running ctx
+ * -# determines whether a context should be marked for scheduling out
+ * -# examines done_code to determine whether to submit the next job on the slot
+ * (picking from all ctxs in the runpool)
+ *
+ * In addition, if submission didn't happen (the submit-from-IRQ function
+ * failed or done_code didn't specify to start new jobs), then this sets a
+ * message on katom that submission needs to be retried from the worker thread.
+ *
+ * Normally, the time calculated from end_timestamp is rounded up to the
+ * minimum time precision. Therefore, to ensure the job is recorded as not
+ * having spent any time, set end_timestamp to NULL. For example, this is
+ * necessary when evicting jobs from JSn_HEAD_NEXT (because they didn't
+ * actually run).
+ *
+ * NOTE: It's possible to move steps (2) and (3) (including calculating the
+ * job's time used) into the worker (outside of IRQ context), but this may
+ * allow a context to use up to twice as much timeslice as is allowed by the
+ * policy. For policies that order by time spent, this is not a problem for
+ * overall 'fairness', but can still increase latency between contexts.
+ *
+ * The following locking conditions are made on the caller:
+ * - it must hold kbasep_js_device_data::runpool_irq::lock
+ */
+void kbasep_js_job_done_slot_irq(kbase_jd_atom *katom, int slot_nr,
+ ktime_t *end_timestamp,
+ kbasep_js_atom_done_code done_code);
+
+/**
+ * @brief Try to submit the next job on each slot
+ *
+ * The following locks may be used:
+ * - kbasep_js_device_data::runpool_mutex
+ * - kbasep_js_device_data::runpool_irq::lock
+ */
+void kbase_js_try_run_jobs(kbase_device *kbdev);
+
+/**
+ * @brief Try to submit the next job on a specific slot
+ *
+ * The following locking conditions are made on the caller:
+ *
+ * - it must \em not hold kbasep_js_device_data::runpool_mutex (as this will be
+ * obtained internally)
+ * - it must \em not hold kbasep_js_device_data::runpool_irq::lock (as this
+ * will be obtained internally)
+ *
+ */
+void kbase_js_try_run_jobs_on_slot(kbase_device *kbdev, int js);
+
+/**
+ * @brief Handle releasing cores for power management and affinity management,
+ * ensuring that cores are powered down and affinity tracking is updated.
+ *
+ * This must only be called on an atom that is not currently running, and has
+ * not been re-queued onto the context (and so does not need locking)
+ *
+ * This function enters at the following @ref kbase_atom_coreref_state states:
+ * - NO_CORES_REQUESTED
+ * - WAITING_FOR_REQUESTED_CORES
+ * - RECHECK_AFFINITY
+ * - READY
+ *
+ * It transitions the above states back to NO_CORES_REQUESTED by the end of the
+ * function call (possibly via intermediate states).
+ *
+ * No locks need be held by the caller, since this takes the necessary Power
+ * Management locks itself. The runpool_irq.lock is not taken (the work that
+ * requires it is handled by kbase_js_affinity_submit_to_blocked_slots() ).
+ *
+ * @note The corresponding kbasep_js_job_check_ref_cores() is private to the
+ * Job Scheduler, and is called automatically when running the next job.
+ */
+void kbasep_js_job_check_deref_cores(kbase_device *kbdev, struct kbase_jd_atom *katom);
+
+/**
+ * @brief Suspend the job scheduler during a Power Management Suspend event.
+ *
+ * Causes all contexts to be removed from the runpool, and prevents any
+ * contexts from (re)entering the runpool.
+ *
+ * This does not handle suspending the one privileged context: the caller must
+ * instead do this by suspending the GPU HW Counter Instrumentation.
+ *
+ * This will eventually cause all Power Management active references held by
+ * contexts on the runpool to be released, without running any more atoms.
+ *
+ * The caller must then wait for all Power Management active refcounts to
+ * become zero before completing the suspend.
+ *
+ * The emptying mechanism may take some time to complete, since it can wait for
+ * jobs to complete naturally instead of forcing them to end quickly. However,
+ * this is bounded by the Job Scheduling Policy's Job Timeouts. Hence, this
+ * function is guaranteed to complete in a finite time whenever the Job
+ * Scheduling Policy implements Job Timeouts (such as those done by CFS).
+ */
+void kbasep_js_suspend(kbase_device *kbdev);
+
+/**
+ * @brief Resume the Job Scheduler after a Power Management Resume event.
+ *
+ * This reverses the actions of kbasep_js_suspend():
+ * - Schedules contexts back into the runpool
+ * - Resumes running atoms on the GPU
+ */
+void kbasep_js_resume(kbase_device *kbdev);
+
+
+/*
+ * Helpers follow
+ */
+
+/**
+ * @brief Check that a context is allowed to submit jobs on this policy
+ *
+ * The purpose of this abstraction is to hide the underlying data size, and to
+ * avoid repeating a long line of code.
+ *
+ * As with any mali_bool, never test the return value with MALI_TRUE.
+ *
+ * The caller must hold kbasep_js_device_data::runpool_irq::lock.
+ */
+static INLINE mali_bool kbasep_js_is_submit_allowed(kbasep_js_device_data *js_devdata, kbase_context *kctx)
+{
+ u16 test_bit;
+
+ /* Ensure context really is scheduled in */
+ KBASE_DEBUG_ASSERT(kctx->as_nr != KBASEP_AS_NR_INVALID);
+ KBASE_DEBUG_ASSERT(kctx->jctx.sched_info.ctx.is_scheduled != MALI_FALSE);
+
+ test_bit = (u16) (1u << kctx->as_nr);
+
+ return (mali_bool) (js_devdata->runpool_irq.submit_allowed & test_bit);
+}
+
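+/* Illustrative sketch (not part of the driver): a caller combining the
+ * submit-allowed helpers in this section under the required runpool_irq
+ * lock. kbdev and kctx are assumed to have been obtained elsewhere, and the
+ * runpool_mutex is assumed to be already held, as required by
+ * kbasep_js_try_run_next_job_nolock().
+ *
+ * @code
+ * unsigned long flags;
+ * kbasep_js_device_data *js_devdata = &kbdev->js_data;
+ *
+ * spin_lock_irqsave(&js_devdata->runpool_irq.lock, flags);
+ * if (kbasep_js_is_submit_allowed(js_devdata, kctx) != MALI_FALSE)
+ *         kbasep_js_try_run_next_job_nolock(kbdev);
+ * spin_unlock_irqrestore(&js_devdata->runpool_irq.lock, flags);
+ * @endcode
+ */
+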
+/**
+ * @brief Allow a context to submit jobs on this policy
+ *
+ * The purpose of this abstraction is to hide the underlying data size, and to
+ * avoid repeating a long line of code.
+ *
+ * The caller must hold kbasep_js_device_data::runpool_irq::lock.
+ */
+static INLINE void kbasep_js_set_submit_allowed(kbasep_js_device_data *js_devdata, kbase_context *kctx)
+{
+ u16 set_bit;
+
+ /* Ensure context really is scheduled in */
+ KBASE_DEBUG_ASSERT(kctx->as_nr != KBASEP_AS_NR_INVALID);
+ KBASE_DEBUG_ASSERT(kctx->jctx.sched_info.ctx.is_scheduled != MALI_FALSE);
+
+ set_bit = (u16) (1u << kctx->as_nr);
+
+ KBASE_DEBUG_PRINT_INFO(KBASE_JM, "JS: Setting Submit Allowed on %p (as=%d)", kctx, kctx->as_nr);
+
+ js_devdata->runpool_irq.submit_allowed |= set_bit;
+}
+
+/**
+ * @brief Prevent a context from submitting more jobs on this policy
+ *
+ * The purpose of this abstraction is to hide the underlying data size, and to
+ * avoid repeating a long line of code.
+ *
+ * The caller must hold kbasep_js_device_data::runpool_irq::lock.
+ */
+static INLINE void kbasep_js_clear_submit_allowed(kbasep_js_device_data *js_devdata, kbase_context *kctx)
+{
+ u16 clear_bit;
+ u16 clear_mask;
+
+ /* Ensure context really is scheduled in */
+ KBASE_DEBUG_ASSERT(kctx->as_nr != KBASEP_AS_NR_INVALID);
+ KBASE_DEBUG_ASSERT(kctx->jctx.sched_info.ctx.is_scheduled != MALI_FALSE);
+
+ clear_bit = (u16) (1u << kctx->as_nr);
+ clear_mask = ~clear_bit;
+
+ KBASE_DEBUG_PRINT_INFO(KBASE_JM, "JS: Clearing Submit Allowed on %p (as=%d)", kctx, kctx->as_nr);
+
+ js_devdata->runpool_irq.submit_allowed &= clear_mask;
+}
+
+/**
+ * @brief Manage the 'retry_submit_on_slot' part of a kbase_jd_atom
+ */
+static INLINE void kbasep_js_clear_job_retry_submit(kbase_jd_atom *atom)
+{
+ atom->retry_submit_on_slot = KBASEP_JS_RETRY_SUBMIT_SLOT_INVALID;
+}
+
+/**
+ * Mark a slot as requiring resubmission by carrying that information on a
+ * completing atom.
+ *
+ * @note This can ASSERT in debug builds if the submit slot has been set to
+ * something other than the current value for @a js. This is because you might
+ * be unintentionally stopping more jobs being submitted on the old submit
+ * slot, and that might cause a scheduling-hang.
+ *
+ * @note If you can guarantee that the atoms for the original slot will be
+ * submitted on some other slot, then call kbasep_js_clear_job_retry_submit()
+ * first to silence the ASSERT.
+ */
+static INLINE void kbasep_js_set_job_retry_submit_slot(kbase_jd_atom *atom, int js)
+{
+ KBASE_DEBUG_ASSERT(0 <= js && js < BASE_JM_MAX_NR_SLOTS);
+ KBASE_DEBUG_ASSERT(atom->retry_submit_on_slot == KBASEP_JS_RETRY_SUBMIT_SLOT_INVALID
+ || atom->retry_submit_on_slot == js);
+
+ atom->retry_submit_on_slot = js;
+}
+
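+/* Illustrative sketch (not part of the driver): silencing the ASSERT noted
+ * above when resubmission is deliberately moved to a different slot; new_js
+ * is a hypothetical replacement slot chosen by the caller.
+ *
+ * @code
+ * kbasep_js_clear_job_retry_submit(katom);
+ * kbasep_js_set_job_retry_submit_slot(katom, new_js);
+ * @endcode
+ */
+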
+/**
+ * Create an initial 'invalid' atom retained state, that requires no
+ * atom-related work to be done on releasing with
+ * kbasep_js_runpool_release_ctx_and_katom_retained_state()
+ */
+static INLINE void kbasep_js_atom_retained_state_init_invalid(kbasep_js_atom_retained_state *retained_state)
+{
+ retained_state->event_code = BASE_JD_EVENT_NOT_STARTED;
+ retained_state->core_req = KBASEP_JS_ATOM_RETAINED_STATE_CORE_REQ_INVALID;
+ retained_state->retry_submit_on_slot = KBASEP_JS_RETRY_SUBMIT_SLOT_INVALID;
+}
+
+/**
+ * Copy atom state that can be made available after jd_done_nolock() is called
+ * on that atom.
+ */
+static INLINE void kbasep_js_atom_retained_state_copy(kbasep_js_atom_retained_state *retained_state, const kbase_jd_atom *katom)
+{
+ retained_state->event_code = katom->event_code;
+ retained_state->core_req = katom->core_req;
+ retained_state->retry_submit_on_slot = katom->retry_submit_on_slot;
+}
+
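+/* Illustrative sketch (not part of the driver): the intended lifecycle of a
+ * retained state - initialize it as invalid, copy from the atom before
+ * jd_done_nolock() runs, then query it afterwards. katom is assumed to be a
+ * completing atom owned by the caller.
+ *
+ * @code
+ * kbasep_js_atom_retained_state retained;
+ *
+ * kbasep_js_atom_retained_state_init_invalid(&retained);
+ * kbasep_js_atom_retained_state_copy(&retained, katom);
+ * ... jd_done_nolock() is called on katom ...
+ * if (kbasep_js_has_atom_finished(&retained) == MALI_FALSE)
+ *         ... the atom was soft-stopped and will be resubmitted later ...
+ * @endcode
+ */
+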
+/**
+ * @brief Determine whether an atom has finished (given its retained state),
+ * and so should be given back to userspace/removed from the system.
+ *
+ * Reasons for an atom not finishing include:
+ * - Being soft-stopped (and so, the atom should be resubmitted sometime later)
+ *
+ * @param[in] katom_retained_state the retained state of the atom to check
+ * @return MALI_FALSE if the atom has not finished
+ * @return !=MALI_FALSE if the atom has finished
+ */
+static INLINE mali_bool kbasep_js_has_atom_finished(const kbasep_js_atom_retained_state *katom_retained_state)
+{
+ return (mali_bool) (katom_retained_state->event_code != BASE_JD_EVENT_STOPPED && katom_retained_state->event_code != BASE_JD_EVENT_REMOVED_FROM_NEXT);
+}
+
+/**
+ * @brief Determine whether a kbasep_js_atom_retained_state is valid
+ *
+ * An invalid kbasep_js_atom_retained_state is allowed, and indicates that the
+ * code should just ignore it.
+ *
+ * @param[in] katom_retained_state the atom's retained state to check
+ * @return MALI_FALSE if the retained state is invalid, and can be ignored
+ * @return !=MALI_FALSE if the retained state is valid
+ */
+static INLINE mali_bool kbasep_js_atom_retained_state_is_valid(const kbasep_js_atom_retained_state *katom_retained_state)
+{
+ return (mali_bool) (katom_retained_state->core_req != KBASEP_JS_ATOM_RETAINED_STATE_CORE_REQ_INVALID);
+}
+
+static INLINE mali_bool kbasep_js_get_atom_retry_submit_slot(const kbasep_js_atom_retained_state *katom_retained_state, int *res)
+{
+ int js = katom_retained_state->retry_submit_on_slot;
+ *res = js;
+ return (mali_bool) (js >= 0);
+}
+
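+/* Illustrative sketch (not part of the driver): querying the retry slot
+ * recorded on a retained state before attempting resubmission; retained is a
+ * kbasep_js_atom_retained_state copied as shown earlier, and no runpool locks
+ * are held, as required by kbase_js_try_run_jobs_on_slot().
+ *
+ * @code
+ * int js;
+ *
+ * if (kbasep_js_get_atom_retry_submit_slot(&retained, &js) != MALI_FALSE)
+ *         kbase_js_try_run_jobs_on_slot(kbdev, js);
+ * @endcode
+ */
+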
+#if KBASE_DEBUG_DISABLE_ASSERTS == 0
+/**
+ * Debug Check the refcount of a context. Only use within ASSERTs
+ *
+ * Obtains kbasep_js_device_data::runpool_irq::lock
+ *
+ * @return negative value if the context is not scheduled in
+ * @return current refcount of the context if it is scheduled in. The refcount
+ * is not guaranteed to be kept constant.
+ */
+static INLINE int kbasep_js_debug_check_ctx_refcount(kbase_device *kbdev, kbase_context *kctx)
+{
+ unsigned long flags;
+ kbasep_js_device_data *js_devdata;
+ int result = -1;
+ int as_nr;
+
+ KBASE_DEBUG_ASSERT(kbdev != NULL);
+ KBASE_DEBUG_ASSERT(kctx != NULL);
+ js_devdata = &kbdev->js_data;
+
+ spin_lock_irqsave(&js_devdata->runpool_irq.lock, flags);
+ as_nr = kctx->as_nr;
+ if (as_nr != KBASEP_AS_NR_INVALID)
+ result = js_devdata->runpool_irq.per_as_data[as_nr].as_busy_refcount;
+
+ spin_unlock_irqrestore(&js_devdata->runpool_irq.lock, flags);
+
+ return result;
+}
+#endif /* KBASE_DEBUG_DISABLE_ASSERTS == 0 */
+
+/**
+ * @brief Variant of kbasep_js_runpool_lookup_ctx() that can be used when the
+ * context is guaranteed to have been previously retained.
+ *
+ * It is a programming error to supply the \a as_nr of a context that has not
+ * been previously retained/has a busy refcount of zero. The only exception is
+ * when there is no ctx in \a as_nr (NULL returned).
+ *
+ * The following locking conditions are made on the caller:
+ * - it must \em not hold the kbasep_js_device_data::runpool_irq::lock, because
+ * it will be used internally.
+ *
+ * @return a valid kbase_context on success, with a refcount that is guaranteed
+ * to be non-zero and unmodified by this function.
+ * @return NULL on failure, indicating that no context was found in \a as_nr
+ */
+static INLINE kbase_context *kbasep_js_runpool_lookup_ctx_noretain(kbase_device *kbdev, int as_nr)
+{
+ unsigned long flags;
+ kbasep_js_device_data *js_devdata;
+ kbase_context *found_kctx;
+ kbasep_js_per_as_data *js_per_as_data;
+
+ KBASE_DEBUG_ASSERT(kbdev != NULL);
+ KBASE_DEBUG_ASSERT(0 <= as_nr && as_nr < BASE_MAX_NR_AS);
+ js_devdata = &kbdev->js_data;
+ js_per_as_data = &js_devdata->runpool_irq.per_as_data[as_nr];
+
+ spin_lock_irqsave(&js_devdata->runpool_irq.lock, flags);
+
+ found_kctx = js_per_as_data->kctx;
+ KBASE_DEBUG_ASSERT(found_kctx == NULL || js_per_as_data->as_busy_refcount > 0);
+
+ spin_unlock_irqrestore(&js_devdata->runpool_irq.lock, flags);
+
+ return found_kctx;
+}
+
+/**
+ * This will provide a conversion from time (us) to ticks of the gpu clock
+ * based on the minimum available gpu frequency.
+ * This is usually good for computing a best/worst case (where using the
+ * current frequency is not valid due to DVFS).
+ * e.g.: when you need the number of cycles to guarantee you won't wait for
+ * longer than 'us' time (you might have a shorter wait).
+ */
+static INLINE u32 kbasep_js_convert_us_to_gpu_ticks_min_freq(kbase_device *kbdev, u32 us)
+{
+ u32 gpu_freq = kbdev->gpu_props.props.core_props.gpu_freq_khz_min;
+ KBASE_DEBUG_ASSERT(0 != gpu_freq);
+ return us * (gpu_freq / 1000);
+}
+
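+/* Worked example (illustrative figures): with gpu_freq_khz_min == 100000
+ * (i.e. 100 MHz) and us == 20, the function above returns
+ * 20 * (100000 / 1000) == 2000 GPU ticks - waiting for 2000 cycles is
+ * guaranteed not to exceed 20us.
+ */
+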
+/**
+ * This will provide a conversion from time (us) to ticks of the gpu clock
+ * based on the maximum available gpu frequency.
+ * This is usually good for computing a best/worst case (where using the
+ * current frequency is not valid due to DVFS).
+ * e.g.: When you need the number of cycles to guarantee you'll wait at least
+ * 'us' amount of time (but you might wait longer).
+ */
+static INLINE u32 kbasep_js_convert_us_to_gpu_ticks_max_freq(kbase_device *kbdev, u32 us)
+{
+ u32 gpu_freq = kbdev->gpu_props.props.core_props.gpu_freq_khz_max;
+ KBASE_DEBUG_ASSERT(0 != gpu_freq);
+ return us * (u32) (gpu_freq / 1000);
+}
+
+/**
+ * This will provide a conversion from ticks of the gpu clock to time (us)
+ * based on the minimum available gpu frequency.
+ * This is usually good for computing a best/worst case (where using the
+ * current frequency is not valid due to DVFS).
+ * e.g.: When you need to know the worst-case wait that 'ticks' cycles will
+ * take (you guarantee that you won't wait any longer than this, but it may
+ * be shorter).
+ */
+static INLINE u32 kbasep_js_convert_gpu_ticks_to_us_min_freq(kbase_device *kbdev, u32 ticks)
+{
+ u32 gpu_freq = kbdev->gpu_props.props.core_props.gpu_freq_khz_min;
+ KBASE_DEBUG_ASSERT(0 != gpu_freq);
+ return ticks / gpu_freq * 1000;
+}
+
+/**
+ * This will provide a conversion from ticks of the gpu clock to time (us)
+ * based on the maximum available gpu frequency.
+ * This is usually good for computing a best/worst case (where using the
+ * current frequency is not valid due to DVFS).
+ * e.g.: When you need to know the best-case wait for 'tick' cycles (you
+ * guarantee to be waiting for at least this long, but it may be longer).
+ */
+static INLINE u32 kbasep_js_convert_gpu_ticks_to_us_max_freq(kbase_device *kbdev, u32 ticks)
+{
+ u32 gpu_freq = kbdev->gpu_props.props.core_props.gpu_freq_khz_max;
+ KBASE_DEBUG_ASSERT(0 != gpu_freq);
+ return ticks / gpu_freq * 1000;
+}
+
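+/* Worked example (illustrative figures): with gpu_freq_khz_max == 500000
+ * (i.e. 500 MHz), 1000000 ticks convert to (1000000 / 500000) * 1000 ==
+ * 2000us in the function above - the shortest time those cycles can take, so
+ * the caller waits at least that long.
+ */
+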
+ /** @} *//* end group kbase_js */
+ /** @} *//* end group base_kbase_api */
+ /** @} *//* end group base_api */
+
+#endif /* _KBASE_JS_H_ */
diff --git a/drivers/gpu/arm/t6xx/kbase/src/common/mali_kbase_js_affinity.c b/drivers/gpu/arm/t6xx/kbase/src/common/mali_kbase_js_affinity.c
new file mode 100755
index 00000000000..79e7035b8d4
--- /dev/null
+++ b/drivers/gpu/arm/t6xx/kbase/src/common/mali_kbase_js_affinity.c
@@ -0,0 +1,407 @@
+/*
+ *
+ * (C) COPYRIGHT 2010-2013 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained
+ * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
+ * Boston, MA 02110-1301, USA.
+ *
+ */
+
+
+
+/**
+ * @file mali_kbase_js_affinity.c
+ * Base kernel affinity manager APIs
+ */
+
+#include <kbase/src/common/mali_kbase.h>
+#include "mali_kbase_js_affinity.h"
+
+#if defined(CONFIG_MALI_DEBUG) && 0 /* disabled to avoid compilation warnings */
+
+STATIC void debug_get_binary_string(const u64 n, char *buff, const int size)
+{
+ unsigned int i;
+ for (i = 0; i < size; i++)
+ buff[i] = ((n >> i) & 1) ? '*' : '-';
+
+ buff[size] = '\0';
+}
+
+#define N_CORES 8
+STATIC void debug_print_affinity_info(const kbase_device *kbdev, const kbase_jd_atom *katom, int js, u64 affinity)
+{
+ char buff[N_CORES + 1];
+ char buff2[N_CORES + 1];
+ base_jd_core_req core_req = katom->core_req;
+ u64 shader_present_bitmap = kbdev->shader_present_bitmap;
+
+ debug_get_binary_string(shader_present_bitmap, buff, N_CORES);
+ debug_get_binary_string(affinity, buff2, N_CORES);
+
+ KBASE_DEBUG_PRINT_INFO(KBASE_JM, "Job: COH FS CS T CF V JS | GPU:12345678 | AFF:12345678");
+ KBASE_DEBUG_PRINT_INFO(KBASE_JM, " %s %s %s %s %s %s %u | %s | %s", core_req & BASE_JD_REQ_COHERENT_GROUP ? "*" : "-", core_req & BASE_JD_REQ_FS ? "*" : "-", core_req & BASE_JD_REQ_CS ? "*" : "-", core_req & BASE_JD_REQ_T ? "*" : "-", core_req & BASE_JD_REQ_CF ? "*" : "-", core_req & BASE_JD_REQ_V ? "*" : "-", js, buff, buff2);
+}
+
+#endif /* CONFIG_MALI_DEBUG */
+
+STATIC INLINE mali_bool affinity_job_uses_high_cores(kbase_device *kbdev, kbase_jd_atom *katom)
+{
+ if (kbase_hw_has_issue(kbdev, BASE_HW_ISSUE_8987)) {
+ kbase_context *kctx;
+ kbase_context_flags ctx_flags;
+
+ kctx = katom->kctx;
+ ctx_flags = kctx->jctx.sched_info.ctx.flags;
+
+ /* In this HW Workaround, compute-only jobs/contexts use the high cores
+ * during a core-split, all other contexts use the low cores. */
+ return (mali_bool) ((katom->core_req & BASE_JD_REQ_ONLY_COMPUTE) != 0 || (ctx_flags & KBASE_CTX_FLAG_HINT_ONLY_COMPUTE) != 0);
+ }
+ return MALI_FALSE;
+}
+
+/**
+ * @brief Decide whether a split in core affinity is required across job slots
+ *
+ * The following locking conditions are made on the caller:
+ * - it must hold kbasep_js_device_data::runpool_irq::lock
+ *
+ * @param kbdev The kbase device structure of the device
+ * @return MALI_FALSE if a core split is not required
+ * @return != MALI_FALSE if a core split is required.
+ */
+STATIC INLINE mali_bool kbase_affinity_requires_split(kbase_device *kbdev)
+{
+ KBASE_DEBUG_ASSERT(kbdev != NULL);
+ lockdep_assert_held(&kbdev->js_data.runpool_irq.lock);
+
+ if (kbase_hw_has_issue(kbdev, BASE_HW_ISSUE_8987)) {
+ s8 nr_compute_ctxs = kbasep_js_ctx_attr_count_on_runpool(kbdev, KBASEP_JS_CTX_ATTR_COMPUTE);
+ s8 nr_noncompute_ctxs = kbasep_js_ctx_attr_count_on_runpool(kbdev, KBASEP_JS_CTX_ATTR_NON_COMPUTE);
+
+ /* In this case, a mix of Compute+Non-Compute determines whether a
+ * core-split is required, to ensure jobs with different numbers of RMUs
+ * don't use the same cores.
+ *
+ * When it's entirely compute, or entirely non-compute, then no split is
+ * required.
+ *
+ * A context can be both Compute and Non-compute, in which case this will
+ * correctly decide that a core-split is required. */
+
+ return (mali_bool) (nr_compute_ctxs > 0 && nr_noncompute_ctxs > 0);
+ }
+ return MALI_FALSE;
+}
+
+mali_bool kbase_js_can_run_job_on_slot_no_lock(kbase_device *kbdev, int js)
+{
+ /*
+ * Here are the reasons for using job slot 2:
+ * - BASE_HW_ISSUE_8987 (which is entirely used for that purpose)
+ * - In absence of the above, then:
+ * - Atoms with BASE_JD_REQ_COHERENT_GROUP
+ * - But, only when there aren't contexts with
+ * KBASEP_JS_CTX_ATTR_COMPUTE_ALL_CORES, because the atoms that run on
+ * all cores on slot 1 could be blocked by those using a coherent group
+ * on slot 2
+ * - And, only when you actually have 2 or more coregroups - if you only
+ * have 1 coregroup, then having jobs for slot 2 implies they'd also be
+ * for slot 1, meaning you'll get interference from them. Jobs able to
+ * run on slot 2 could also block jobs that can only run on slot 1
+ * (tiler jobs)
+ */
+ if (kbase_hw_has_issue(kbdev, BASE_HW_ISSUE_8987))
+ return MALI_TRUE;
+
+ if (js != 2)
+ return MALI_TRUE;
+
+ /* Only deal with js==2 now: */
+ if (kbdev->gpu_props.num_core_groups > 1) {
+ /* Only use slot 2 in the 2+ coregroup case */
+ if (kbasep_js_ctx_attr_is_attr_on_runpool(kbdev, KBASEP_JS_CTX_ATTR_COMPUTE_ALL_CORES) == MALI_FALSE) {
+ /* ...But only when we *don't* have atoms that run on all cores */
+
+ /* No specific check for BASE_JD_REQ_COHERENT_GROUP atoms - the policy will sort that out */
+ return MALI_TRUE;
+ }
+ }
+
+ /* Above checks failed mean we shouldn't use slot 2 */
+ return MALI_FALSE;
+}
+
+/*
+ * Until the deeper modification of what the job scheduler, power manager and
+ * affinity manager will implement has been decided, this function is just an
+ * intermediate step that assumes:
+ * - all working cores will be powered on when this is called.
+ * - the largest current configuration is a T658 (2x4 cores).
+ * - it has been decided not to have hardcoded values, so the low and high
+ *   cores in a core split will be evenly distributed.
+ * - odd combinations of core requirements have been filtered out and do not
+ *   get to this function (e.g. CS+T+NSS is not supported here).
+ * - this function is frequently called and can be optimized (see notes in
+ *   loops), but as the functionality will likely be modified, optimization
+ *   has not been addressed.
+ */
+mali_bool kbase_js_choose_affinity(u64 * const affinity, kbase_device *kbdev, kbase_jd_atom *katom, int js)
+{
+ base_jd_core_req core_req = katom->core_req;
+ unsigned int num_core_groups = kbdev->gpu_props.num_core_groups;
+ u64 core_availability_mask;
+ unsigned long flags;
+
+ spin_lock_irqsave(&kbdev->pm.power_change_lock, flags);
+
+ core_availability_mask = kbase_pm_ca_get_core_mask(kbdev);
+
+ /*
+ * If no cores are currently available (core availability policy is
+ * transitioning) then fail.
+ */
+ if (0 == core_availability_mask)
+ {
+ spin_unlock_irqrestore(&kbdev->pm.power_change_lock, flags);
+ *affinity = 0;
+ return MALI_FALSE;
+ }
+
+ KBASE_DEBUG_ASSERT(js >= 0);
+
+ if ((core_req & (BASE_JD_REQ_FS | BASE_JD_REQ_CS | BASE_JD_REQ_T)) == BASE_JD_REQ_T)
+ {
+ spin_unlock_irqrestore(&kbdev->pm.power_change_lock, flags);
+ /* Tiler only job, bit 0 needed to enable tiler but no shader cores required */
+ *affinity = 1;
+ return MALI_TRUE;
+ }
+
+ if (1 == kbdev->gpu_props.num_cores) {
+ /* trivial case only one core, nothing to do */
+ *affinity = core_availability_mask;
+ } else if (kbase_affinity_requires_split(kbdev) == MALI_FALSE) {
+ if ((core_req & (BASE_JD_REQ_COHERENT_GROUP | BASE_JD_REQ_SPECIFIC_COHERENT_GROUP))) {
+ if (js == 0 || num_core_groups == 1) {
+ /* js[0] and single-core-group systems just get the first core group */
+ *affinity = kbdev->gpu_props.props.coherency_info.group[0].core_mask & core_availability_mask;
+ } else {
+ /* js[1], js[2] use core groups 0, 1 for dual-core-group systems */
+ u32 core_group_idx = ((u32) js) - 1;
+ KBASE_DEBUG_ASSERT(core_group_idx < num_core_groups);
+ *affinity = kbdev->gpu_props.props.coherency_info.group[core_group_idx].core_mask & core_availability_mask;
+
+ /* If the job is specifically targeting core group 1 and the core
+ * availability policy is keeping that core group off, then fail */
+ if (*affinity == 0 && core_group_idx == 1 && kbdev->pm.cg1_disabled == MALI_TRUE)
+ katom->event_code = BASE_JD_EVENT_PM_EVENT;
+ }
+ } else {
+ /* All cores are available when no core split is required */
+ *affinity = core_availability_mask;
+ }
+ } else {
+ /* Core split required - divide cores in two non-overlapping groups */
+ u64 low_bitmap, high_bitmap;
+ int n_high_cores = kbdev->gpu_props.num_cores >> 1;
+ KBASE_DEBUG_ASSERT(1 == num_core_groups);
+ KBASE_DEBUG_ASSERT(0 != n_high_cores);
+
+ /* compute the reserved high cores bitmap */
+ high_bitmap = ~0;
+ /* note: this can take a while, optimization desirable */
+ while (n_high_cores != hweight32(high_bitmap & kbdev->shader_present_bitmap))
+ high_bitmap = high_bitmap << 1;
+
+ high_bitmap &= core_availability_mask;
+ low_bitmap = core_availability_mask ^ high_bitmap;
+
+ if (affinity_job_uses_high_cores(kbdev, katom))
+ *affinity = high_bitmap;
+ else
+ *affinity = low_bitmap;
+ }
+
+ spin_unlock_irqrestore(&kbdev->pm.power_change_lock, flags);
+
+ /*
+ * If no cores are currently available in the desired core group(s)
+ * (core availability policy is transitioning) then fail.
+ */
+ if (*affinity == 0)
+ return MALI_FALSE;
+
+ /* Enable core 0 if tiler required */
+ if (core_req & BASE_JD_REQ_T)
+ *affinity = *affinity | 1;
+
+ return MALI_TRUE;
+}
+
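+/* Worked example (illustrative figures) of the core-split computation above:
+ * with 8 shader cores present (shader_present_bitmap == 0xFF) and all of them
+ * available, n_high_cores == 4; starting from high_bitmap == ~0 and shifting
+ * left until exactly 4 present cores remain set gives high_bitmap == 0xF0,
+ * and low_bitmap == 0xFF ^ 0xF0 == 0x0F - an even split.
+ */
+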
+STATIC INLINE mali_bool kbase_js_affinity_is_violating(kbase_device *kbdev, u64 *affinities)
+{
+ /* This implementation checks whether the two slots involved in Generic thread creation
+ * have intersecting affinity. This is due to micro-architectural issues where a job in
+ * slot A targeting cores used by slot B could prevent the job in slot B from making
+ * progress until the job in slot A has completed.
+ *
+ * @note It just so happens that this restriction also allows
+ * BASE_HW_ISSUE_8987 to be worked around by placing on job slot 2 the
+ * atoms from ctxs with KBASE_CTX_FLAG_HINT_ONLY_COMPUTE flag set
+ */
+ u64 affinity_set_left;
+ u64 affinity_set_right;
+ u64 intersection;
+ KBASE_DEBUG_ASSERT(affinities != NULL);
+
+ affinity_set_left = affinities[1];
+
+ if (kbase_hw_has_issue(kbdev, BASE_HW_ISSUE_8987)) {
+ /* The left set also includes those on the Fragment slot when
+ * we are using the HW workaround for BASE_HW_ISSUE_8987 */
+ affinity_set_left |= affinities[0];
+ }
+
+ affinity_set_right = affinities[2];
+
+ /* A violation occurs when any bit in the left_set is also in the right_set */
+ intersection = affinity_set_left & affinity_set_right;
+
+ return (mali_bool) (intersection != (u64) 0u);
+}
+
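+/* Worked example (illustrative figures): with affinities[1] == 0x0F and
+ * affinities[2] == 0xF0, the intersection is 0 and no violation is reported;
+ * if affinities[2] were 0x1F instead, the intersection 0x0F would report a
+ * violation between the slots.
+ */
+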
+mali_bool kbase_js_affinity_would_violate(kbase_device *kbdev, int js, u64 affinity)
+{
+ kbasep_js_device_data *js_devdata;
+ u64 new_affinities[BASE_JM_MAX_NR_SLOTS];
+
+ KBASE_DEBUG_ASSERT(kbdev != NULL);
+ KBASE_DEBUG_ASSERT(js < BASE_JM_MAX_NR_SLOTS);
+ js_devdata = &kbdev->js_data;
+
+ memcpy(new_affinities, js_devdata->runpool_irq.slot_affinities, sizeof(js_devdata->runpool_irq.slot_affinities));
+
+ new_affinities[js] |= affinity;
+
+ return kbase_js_affinity_is_violating(kbdev, new_affinities);
+}
+
+void kbase_js_affinity_retain_slot_cores(kbase_device *kbdev, int js, u64 affinity)
+{
+ kbasep_js_device_data *js_devdata;
+ u64 cores;
+
+ KBASE_DEBUG_ASSERT(kbdev != NULL);
+ KBASE_DEBUG_ASSERT(js < BASE_JM_MAX_NR_SLOTS);
+ js_devdata = &kbdev->js_data;
+
+ KBASE_DEBUG_ASSERT(kbase_js_affinity_would_violate(kbdev, js, affinity) == MALI_FALSE);
+
+ cores = affinity;
+ while (cores) {
+ int bitnum = fls64(cores) - 1;
+ u64 bit = 1ULL << bitnum;
+ s8 cnt;
+
+ KBASE_DEBUG_ASSERT(js_devdata->runpool_irq.slot_affinity_refcount[js][bitnum] < BASE_JM_SUBMIT_SLOTS);
+
+ cnt = ++(js_devdata->runpool_irq.slot_affinity_refcount[js][bitnum]);
+
+ if (cnt == 1)
+ js_devdata->runpool_irq.slot_affinities[js] |= bit;
+
+ cores &= ~bit;
+ }
+
+}
+
+void kbase_js_affinity_release_slot_cores(kbase_device *kbdev, int js, u64 affinity)
+{
+ kbasep_js_device_data *js_devdata;
+ u64 cores;
+
+ KBASE_DEBUG_ASSERT(kbdev != NULL);
+ KBASE_DEBUG_ASSERT(js < BASE_JM_MAX_NR_SLOTS);
+ js_devdata = &kbdev->js_data;
+
+ cores = affinity;
+ while (cores) {
+ int bitnum = fls64(cores) - 1;
+ u64 bit = 1ULL << bitnum;
+ s8 cnt;
+
+ KBASE_DEBUG_ASSERT(js_devdata->runpool_irq.slot_affinity_refcount[js][bitnum] > 0);
+
+ cnt = --(js_devdata->runpool_irq.slot_affinity_refcount[js][bitnum]);
+
+ if (0 == cnt)
+ js_devdata->runpool_irq.slot_affinities[js] &= ~bit;
+
+ cores &= ~bit;
+ }
+
+}
+
+void kbase_js_affinity_slot_blocked_an_atom(kbase_device *kbdev, int js)
+{
+ kbasep_js_device_data *js_devdata;
+
+ KBASE_DEBUG_ASSERT(kbdev != NULL);
+ KBASE_DEBUG_ASSERT(js < BASE_JM_MAX_NR_SLOTS);
+ js_devdata = &kbdev->js_data;
+
+ js_devdata->runpool_irq.slots_blocked_on_affinity |= 1u << js;
+}
+
+void kbase_js_affinity_submit_to_blocked_slots(kbase_device *kbdev)
+{
+ kbasep_js_device_data *js_devdata;
+ u16 slots;
+
+ KBASE_DEBUG_ASSERT(kbdev != NULL);
+ js_devdata = &kbdev->js_data;
+
+ KBASE_DEBUG_ASSERT(js_devdata->nr_user_contexts_running != 0);
+
+ /* Must take a copy because submitting jobs will update this member. */
+ slots = js_devdata->runpool_irq.slots_blocked_on_affinity;
+
+ while (slots) {
+ int bitnum = fls(slots) - 1;
+ u16 bit = 1u << bitnum;
+ slots &= ~bit;
+
+ KBASE_TRACE_ADD_SLOT(kbdev, JS_AFFINITY_SUBMIT_TO_BLOCKED, NULL, NULL, 0u, bitnum);
+
+ /* must update this before we submit, in case it's set again */
+ js_devdata->runpool_irq.slots_blocked_on_affinity &= ~bit;
+
+ kbasep_js_try_run_next_job_on_slot_nolock(kbdev, bitnum);
+
+ /* Don't re-read slots_blocked_on_affinity after this - it could loop for a long time */
+ }
+}
+
+#if KBASE_TRACE_ENABLE != 0
+void kbase_js_debug_log_current_affinities(kbase_device *kbdev)
+{
+ kbasep_js_device_data *js_devdata;
+ int slot_nr;
+
+ KBASE_DEBUG_ASSERT(kbdev != NULL);
+ js_devdata = &kbdev->js_data;
+
+ for (slot_nr = 0; slot_nr < 3; ++slot_nr)
+ KBASE_TRACE_ADD_SLOT_INFO(kbdev, JS_AFFINITY_CURRENT, NULL, NULL, 0u, slot_nr, (u32) js_devdata->runpool_irq.slot_affinities[slot_nr]);
+}
+#endif /* KBASE_TRACE_ENABLE != 0 */
diff --git a/drivers/gpu/arm/t6xx/kbase/src/common/mali_kbase_js_affinity.h b/drivers/gpu/arm/t6xx/kbase/src/common/mali_kbase_js_affinity.h
new file mode 100755
index 00000000000..3319d74bf0a
--- /dev/null
+++ b/drivers/gpu/arm/t6xx/kbase/src/common/mali_kbase_js_affinity.h
@@ -0,0 +1,154 @@
+/*
+ *
+ * (C) COPYRIGHT 2011-2013 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained
+ * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
+ * Boston, MA 02110-1301, USA.
+ *
+ */
+
+
+
+/**
+ * @file mali_kbase_js_affinity.h
+ * Affinity Manager internal APIs.
+ */
+
+#ifndef _KBASE_JS_AFFINITY_H_
+#define _KBASE_JS_AFFINITY_H_
+
+/**
+ * @addtogroup base_api
+ * @{
+ */
+
+/**
+ * @addtogroup base_kbase_api
+ * @{
+ */
+
+/**
+ * @addtogroup kbase_js_affinity Affinity Manager internal APIs.
+ * @{
+ *
+ */
+
+/**
+ * @brief Decide whether it is possible to submit a job to a particular job slot in the current state
+ *
+ * Will check if submitting to the given job slot is allowed in the current
+ * state. For example, using job slot 2 while in soft-stoppable state and only
+ * having 1 coregroup is not allowed by the policy. This function should be
+ * called prior to submitting a job to a slot to make sure policy rules are not
+ * violated.
+ *
+ * The following locking conditions are made on the caller:
+ * - it must hold kbasep_js_device_data::runpool_irq::lock
+ *
+ * @param kbdev The kbase device structure of the device
+ * @param js Job slot number to check for allowance
+ */
+mali_bool kbase_js_can_run_job_on_slot_no_lock(kbase_device *kbdev, int js);
+
+/**
+ * @brief Compute affinity for a given job.
+ *
+ * Currently assumes an all-on/all-off power management policy.
+ * Also assumes there is at least one core with tiler available.
+ *
+ * Returns MALI_TRUE if a valid affinity was chosen, MALI_FALSE if
+ * no cores were available.
+ *
+ * @param[out] affinity Affinity bitmap computed
+ * @param kbdev The kbase device structure of the device
+ * @param katom Job chain whose affinity is to be found
+ * @param js Slot to which the job chain is being submitted
+ *
+ */
+mali_bool kbase_js_choose_affinity(u64 * const affinity, kbase_device *kbdev, kbase_jd_atom *katom, int js);
+
+/**
+ * @brief Determine whether a proposed \a affinity on job slot \a js would
+ * cause a violation of affinity restrictions.
+ *
+ * The following locks must be held by the caller:
+ * - kbasep_js_device_data::runpool_irq::lock
+ */
+mali_bool kbase_js_affinity_would_violate(kbase_device *kbdev, int js, u64 affinity);
+
+/**
+ * @brief Affinity tracking: retain cores used by a slot
+ *
+ * The following locks must be held by the caller:
+ * - kbasep_js_device_data::runpool_irq::lock
+ */
+void kbase_js_affinity_retain_slot_cores(kbase_device *kbdev, int js, u64 affinity);
+
+/**
+ * @brief Affinity tracking: release cores used by a slot
+ *
+ * Cores \b must be released as soon as a job is dequeued from a slot's 'submit
+ * slots', and before another job is submitted to those slots. Otherwise, the
+ * refcount could exceed the maximum number submittable to a slot,
+ * BASE_JM_SUBMIT_SLOTS.
+ *
+ * The following locks must be held by the caller:
+ * - kbasep_js_device_data::runpool_irq::lock
+ */
+void kbase_js_affinity_release_slot_cores(kbase_device *kbdev, int js, u64 affinity);
+
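+/* Illustrative sketch (not part of the driver): the intended pairing of the
+ * retain/release tracking calls above around a job's time on a slot, with the
+ * runpool_irq lock held throughout; retain requires that no violation would
+ * occur, so the check comes first.
+ *
+ * @code
+ * if (kbase_js_affinity_would_violate(kbdev, js, affinity) == MALI_FALSE)
+ *         kbase_js_affinity_retain_slot_cores(kbdev, js, affinity);
+ * ... the job runs and is later dequeued from the slot ...
+ * kbase_js_affinity_release_slot_cores(kbdev, js, affinity);
+ * @endcode
+ */
+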
+/**
+ * @brief Register a slot as blocking atoms due to affinity violations
+ *
+ * Once a slot has been registered, we must check after every atom completion
+ * (including those on different slots) to see if the slot can be
+ * unblocked. This is done by calling
+ * kbase_js_affinity_submit_to_blocked_slots(), which will also deregister the
+ * slot if it no longer blocks atoms due to affinity violations.
+ *
+ * The following locks must be held by the caller:
+ * - kbasep_js_device_data::runpool_irq::lock
+ */
+void kbase_js_affinity_slot_blocked_an_atom(kbase_device *kbdev, int js);
+
+/**
+ * @brief Submit to job slots that have registered that an atom was blocked on
+ * the slot previously due to affinity violations.
+ *
+ * This submits to all slots registered by
+ * kbase_js_affinity_slot_blocked_an_atom(). If submission succeeded, then the
+ * slot is deregistered as having blocked atoms due to affinity
+ * violations. Otherwise it stays registered, and the next atom to complete
+ * must attempt to submit to the blocked slots again.
+ *
+ * This must only be called whilst the GPU is powered - for example, when
+ * kbdev->jsdata.nr_user_contexts_running > 0.
+ *
+ * The following locking conditions are made on the caller:
+ * - it must hold kbasep_js_device_data::runpool_mutex
+ * - it must hold kbasep_js_device_data::runpool_irq::lock
+ */
+void kbase_js_affinity_submit_to_blocked_slots(kbase_device *kbdev);
+
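+/* Illustrative sketch (not part of the driver): the blocked-slot protocol
+ * described above, with the locks listed in each function's documentation
+ * held. A slot is registered when an affinity violation blocks an atom, and
+ * retried after a later atom completes.
+ *
+ * @code
+ * if (kbase_js_affinity_would_violate(kbdev, js, affinity) != MALI_FALSE)
+ *         kbase_js_affinity_slot_blocked_an_atom(kbdev, js);
+ * ... later, after some atom completes ...
+ * kbase_js_affinity_submit_to_blocked_slots(kbdev);
+ * @endcode
+ */
+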
+/**
+ * @brief Output to the Trace log the current tracked affinities on all slots
+ */
+#if KBASE_TRACE_ENABLE != 0
+void kbase_js_debug_log_current_affinities(kbase_device *kbdev);
+#else /* KBASE_TRACE_ENABLE != 0 */
+static INLINE void kbase_js_debug_log_current_affinities(kbase_device *kbdev)
+{
+}
+#endif /* KBASE_TRACE_ENABLE != 0 */
+
+ /** @} *//* end group kbase_js_affinity */
+ /** @} *//* end group base_kbase_api */
+ /** @} *//* end group base_api */
+
+#endif /* _KBASE_JS_AFFINITY_H_ */
diff --git a/drivers/gpu/arm/t6xx/kbase/src/common/mali_kbase_js_ctx_attr.c b/drivers/gpu/arm/t6xx/kbase/src/common/mali_kbase_js_ctx_attr.c
new file mode 100755
index 00000000000..a9d982775ce
--- /dev/null
+++ b/drivers/gpu/arm/t6xx/kbase/src/common/mali_kbase_js_ctx_attr.c
@@ -0,0 +1,307 @@
+/*
+ *
+ * (C) COPYRIGHT 2012 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained
+ * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
+ * Boston, MA 02110-1301, USA.
+ *
+ */
+
+
+#include <kbase/src/common/mali_kbase.h>
+
+/*
+ * Private functions follow
+ */
+
+/**
+ * @brief Check whether a ctx has a certain attribute, and if so, retain that
+ * attribute on the runpool.
+ *
+ * Requires:
+ * - jsctx mutex
+ * - runpool_irq spinlock
+ * - ctx is scheduled on the runpool
+ *
+ * @return MALI_TRUE indicates a change in ctx attributes state of the runpool.
+ * In this state, the scheduler might be able to submit more jobs than
+ * previously, and so the caller should ensure kbasep_js_try_run_next_job_nolock()
+ * or similar is called sometime later.
+ * @return MALI_FALSE indicates no change in ctx attributes state of the runpool.
+ */
+STATIC mali_bool kbasep_js_ctx_attr_runpool_retain_attr(kbase_device *kbdev, kbase_context *kctx, kbasep_js_ctx_attr attribute)
+{
+ kbasep_js_device_data *js_devdata;
+ kbasep_js_kctx_info *js_kctx_info;
+ mali_bool runpool_state_changed = MALI_FALSE;
+
+ KBASE_DEBUG_ASSERT(kbdev != NULL);
+ KBASE_DEBUG_ASSERT(kctx != NULL);
+ KBASE_DEBUG_ASSERT(attribute < KBASEP_JS_CTX_ATTR_COUNT);
+ js_devdata = &kbdev->js_data;
+ js_kctx_info = &kctx->jctx.sched_info;
+
+ BUG_ON(!mutex_is_locked(&js_kctx_info->ctx.jsctx_mutex));
+ lockdep_assert_held(&kbdev->js_data.runpool_irq.lock);
+
+ KBASE_DEBUG_ASSERT(js_kctx_info->ctx.is_scheduled != MALI_FALSE);
+
+ if (kbasep_js_ctx_attr_is_attr_on_ctx(kctx, attribute) != MALI_FALSE) {
+ KBASE_DEBUG_ASSERT(js_devdata->runpool_irq.ctx_attr_ref_count[attribute] < S8_MAX);
+ ++(js_devdata->runpool_irq.ctx_attr_ref_count[attribute]);
+
+ if (js_devdata->runpool_irq.ctx_attr_ref_count[attribute] == 1) {
+ /* First refcount indicates a state change */
+ runpool_state_changed = MALI_TRUE;
+ KBASE_TRACE_ADD(kbdev, JS_CTX_ATTR_NOW_ON_RUNPOOL, kctx, NULL, 0u, attribute);
+ }
+ }
+
+ return runpool_state_changed;
+}
+
+/**
+ * @brief Check whether a ctx has a certain attribute, and if so, release that
+ * attribute on the runpool.
+ *
+ * Requires:
+ * - jsctx mutex
+ * - runpool_irq spinlock
+ * - ctx is scheduled on the runpool
+ *
+ * @return MALI_TRUE indicates a change in ctx attributes state of the runpool.
+ * In this state, the scheduler might be able to submit more jobs than
+ * previously, and so the caller should ensure kbasep_js_try_run_next_job_nolock()
+ * or similar is called sometime later.
+ * @return MALI_FALSE indicates no change in ctx attributes state of the runpool.
+ */
+STATIC mali_bool kbasep_js_ctx_attr_runpool_release_attr(kbase_device *kbdev, kbase_context *kctx, kbasep_js_ctx_attr attribute)
+{
+ kbasep_js_device_data *js_devdata;
+ kbasep_js_kctx_info *js_kctx_info;
+ mali_bool runpool_state_changed = MALI_FALSE;
+
+ KBASE_DEBUG_ASSERT(kbdev != NULL);
+ KBASE_DEBUG_ASSERT(kctx != NULL);
+ KBASE_DEBUG_ASSERT(attribute < KBASEP_JS_CTX_ATTR_COUNT);
+ js_devdata = &kbdev->js_data;
+ js_kctx_info = &kctx->jctx.sched_info;
+
+ BUG_ON(!mutex_is_locked(&js_kctx_info->ctx.jsctx_mutex));
+ lockdep_assert_held(&kbdev->js_data.runpool_irq.lock);
+ KBASE_DEBUG_ASSERT(js_kctx_info->ctx.is_scheduled != MALI_FALSE);
+
+ if (kbasep_js_ctx_attr_is_attr_on_ctx(kctx, attribute) != MALI_FALSE) {
+ KBASE_DEBUG_ASSERT(js_devdata->runpool_irq.ctx_attr_ref_count[attribute] > 0);
+ --(js_devdata->runpool_irq.ctx_attr_ref_count[attribute]);
+
+ if (js_devdata->runpool_irq.ctx_attr_ref_count[attribute] == 0) {
+ /* Last de-refcount indicates a state change */
+ runpool_state_changed = MALI_TRUE;
+ KBASE_TRACE_ADD(kbdev, JS_CTX_ATTR_NOW_OFF_RUNPOOL, kctx, NULL, 0u, attribute);
+ }
+ }
+
+ return runpool_state_changed;
+}
+
+/**
+ * @brief Retain a certain attribute on a ctx, also retaining it on the runpool
+ * if the context is scheduled.
+ *
+ * Requires:
+ * - jsctx mutex
+ * - If the context is scheduled, then runpool_irq spinlock must also be held
+ *
+ * @return MALI_TRUE indicates a change in ctx attributes state of the runpool.
+ * This may allow the scheduler to submit more jobs than previously.
+ * @return MALI_FALSE indicates no change in ctx attributes state of the runpool.
+ */
+STATIC mali_bool kbasep_js_ctx_attr_ctx_retain_attr(kbase_device *kbdev, kbase_context *kctx, kbasep_js_ctx_attr attribute)
+{
+ kbasep_js_kctx_info *js_kctx_info;
+ mali_bool runpool_state_changed = MALI_FALSE;
+
+ KBASE_DEBUG_ASSERT(kbdev != NULL);
+ KBASE_DEBUG_ASSERT(kctx != NULL);
+ KBASE_DEBUG_ASSERT(attribute < KBASEP_JS_CTX_ATTR_COUNT);
+ js_kctx_info = &kctx->jctx.sched_info;
+
+ BUG_ON(!mutex_is_locked(&js_kctx_info->ctx.jsctx_mutex));
+ KBASE_DEBUG_ASSERT(js_kctx_info->ctx.ctx_attr_ref_count[attribute] < U32_MAX);
+
+ ++(js_kctx_info->ctx.ctx_attr_ref_count[attribute]);
+
+ if (js_kctx_info->ctx.is_scheduled != MALI_FALSE && js_kctx_info->ctx.ctx_attr_ref_count[attribute] == 1) {
+ lockdep_assert_held(&kbdev->js_data.runpool_irq.lock);
+ /* Only ref-count the attribute on the runpool for the first time this context sees this attribute */
+ KBASE_TRACE_ADD(kbdev, JS_CTX_ATTR_NOW_ON_CTX, kctx, NULL, 0u, attribute);
+ runpool_state_changed = kbasep_js_ctx_attr_runpool_retain_attr(kbdev, kctx, attribute);
+ }
+
+ return runpool_state_changed;
+}
+
+/**
+ * @brief Release a certain attribute on a ctx, also releasing it from the runpool
+ * if the context is scheduled.
+ *
+ * Requires:
+ * - jsctx mutex
+ * - If the context is scheduled, then runpool_irq spinlock must also be held
+ *
+ * @return MALI_TRUE indicates a change in ctx attributes state of the runpool.
+ * This may allow the scheduler to submit more jobs than previously.
+ * @return MALI_FALSE indicates no change in ctx attributes state of the runpool.
+ */
+STATIC mali_bool kbasep_js_ctx_attr_ctx_release_attr(kbase_device *kbdev, kbase_context *kctx, kbasep_js_ctx_attr attribute)
+{
+ kbasep_js_kctx_info *js_kctx_info;
+ mali_bool runpool_state_changed = MALI_FALSE;
+
+ KBASE_DEBUG_ASSERT(kbdev != NULL);
+ KBASE_DEBUG_ASSERT(kctx != NULL);
+ KBASE_DEBUG_ASSERT(attribute < KBASEP_JS_CTX_ATTR_COUNT);
+ js_kctx_info = &kctx->jctx.sched_info;
+
+ BUG_ON(!mutex_is_locked(&js_kctx_info->ctx.jsctx_mutex));
+ KBASE_DEBUG_ASSERT(js_kctx_info->ctx.ctx_attr_ref_count[attribute] > 0);
+
+ if (js_kctx_info->ctx.is_scheduled != MALI_FALSE && js_kctx_info->ctx.ctx_attr_ref_count[attribute] == 1) {
+ lockdep_assert_held(&kbdev->js_data.runpool_irq.lock);
+ /* Only de-ref-count the attribute on the runpool when this is the last ctx-reference to it */
+ runpool_state_changed = kbasep_js_ctx_attr_runpool_release_attr(kbdev, kctx, attribute);
+ KBASE_TRACE_ADD(kbdev, JS_CTX_ATTR_NOW_OFF_CTX, kctx, NULL, 0u, attribute);
+ }
+
+ /* De-ref must happen afterwards, because kbasep_js_ctx_attr_runpool_release() needs to check it too */
+ --(js_kctx_info->ctx.ctx_attr_ref_count[attribute]);
+
+ return runpool_state_changed;
+}
+
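+/* Worked example (illustrative figures): if a scheduled context holds three
+ * atoms with BASE_JD_REQ_ONLY_COMPUTE, its ctx_attr_ref_count for
+ * KBASEP_JS_CTX_ATTR_COMPUTE counts 0 -> 1 -> 2 -> 3 as they are added, but
+ * only the 0 -> 1 transition is propagated to the runpool's refcount, and
+ * only the final 1 -> 0 transition on release is propagated back - the
+ * runpool therefore sees at most one reference per context per attribute.
+ */
+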
+/*
+ * More commonly used public functions
+ */
+
+void kbasep_js_ctx_attr_set_initial_attrs(kbase_device *kbdev, kbase_context *kctx)
+{
+ kbasep_js_kctx_info *js_kctx_info;
+ mali_bool runpool_state_changed = MALI_FALSE;
+
+ KBASE_DEBUG_ASSERT(kbdev != NULL);
+ KBASE_DEBUG_ASSERT(kctx != NULL);
+ js_kctx_info = &kctx->jctx.sched_info;
+
+ if ((js_kctx_info->ctx.flags & KBASE_CTX_FLAG_SUBMIT_DISABLED) != MALI_FALSE) {
+ /* This context never submits, so don't track any scheduling attributes */
+ return;
+ }
+
+ /* Transfer attributes held in the context flags for contexts that have submit enabled */
+
+ if ((js_kctx_info->ctx.flags & KBASE_CTX_FLAG_HINT_ONLY_COMPUTE) != MALI_FALSE) {
+ /* Compute context */
+ runpool_state_changed |= kbasep_js_ctx_attr_ctx_retain_attr(kbdev, kctx, KBASEP_JS_CTX_ATTR_COMPUTE);
+ }
+ /* NOTE: Whether this is a non-compute context depends on the jobs being
+ * run, e.g. it might be submitting jobs with BASE_JD_REQ_ONLY_COMPUTE */
+
+ /* ... More attributes can be added here ... */
+
+ /* The context should not have been scheduled yet, so ASSERT if this caused
+ * runpool state changes (note that other threads *can't* affect the value
+ * of runpool_state_changed, due to how it's calculated) */
+ KBASE_DEBUG_ASSERT(runpool_state_changed == MALI_FALSE);
+ CSTD_UNUSED(runpool_state_changed);
+}
+
+void kbasep_js_ctx_attr_runpool_retain_ctx(kbase_device *kbdev, kbase_context *kctx)
+{
+ mali_bool runpool_state_changed;
+ int i;
+
+ /* Retain any existing attributes */
+ for (i = 0; i < KBASEP_JS_CTX_ATTR_COUNT; ++i) {
+ if (kbasep_js_ctx_attr_is_attr_on_ctx(kctx, (kbasep_js_ctx_attr) i) != MALI_FALSE) {
+ /* The context is being scheduled in, so update the runpool with the new attributes */
+ runpool_state_changed = kbasep_js_ctx_attr_runpool_retain_attr(kbdev, kctx, (kbasep_js_ctx_attr) i);
+
+ /* We don't need to know about state changed, because retaining a
+ * context occurs on scheduling it, and that itself will also try
+ * to run new atoms */
+ CSTD_UNUSED(runpool_state_changed);
+ }
+ }
+}
+
+mali_bool kbasep_js_ctx_attr_runpool_release_ctx(kbase_device *kbdev, kbase_context *kctx)
+{
+ mali_bool runpool_state_changed = MALI_FALSE;
+ int i;
+
+ /* Release any existing attributes */
+ for (i = 0; i < KBASEP_JS_CTX_ATTR_COUNT; ++i) {
+ if (kbasep_js_ctx_attr_is_attr_on_ctx(kctx, (kbasep_js_ctx_attr) i) != MALI_FALSE) {
+ /* The context is being scheduled out, so update the runpool on the removed attributes */
+ runpool_state_changed |= kbasep_js_ctx_attr_runpool_release_attr(kbdev, kctx, (kbasep_js_ctx_attr) i);
+ }
+ }
+
+ return runpool_state_changed;
+}
+
+void kbasep_js_ctx_attr_ctx_retain_atom(kbase_device *kbdev, kbase_context *kctx, kbase_jd_atom *katom)
+{
+ mali_bool runpool_state_changed = MALI_FALSE;
+ base_jd_core_req core_req;
+
+ KBASE_DEBUG_ASSERT(katom);
+ core_req = katom->core_req;
+
+ if (core_req & BASE_JD_REQ_ONLY_COMPUTE)
+ runpool_state_changed |= kbasep_js_ctx_attr_ctx_retain_attr(kbdev, kctx, KBASEP_JS_CTX_ATTR_COMPUTE);
+ else
+ runpool_state_changed |= kbasep_js_ctx_attr_ctx_retain_attr(kbdev, kctx, KBASEP_JS_CTX_ATTR_NON_COMPUTE);
+
+ if ((core_req & (BASE_JD_REQ_CS | BASE_JD_REQ_ONLY_COMPUTE | BASE_JD_REQ_T)) != 0 && (core_req & (BASE_JD_REQ_COHERENT_GROUP | BASE_JD_REQ_SPECIFIC_COHERENT_GROUP)) == 0) {
+ /* Atom that can run on slot1 or slot2, and can use all cores */
+ runpool_state_changed |= kbasep_js_ctx_attr_ctx_retain_attr(kbdev, kctx, KBASEP_JS_CTX_ATTR_COMPUTE_ALL_CORES);
+ }
+
+ /* We don't need to know about state changed, because retaining an
+ * atom occurs on adding it, and that itself will also try to run
+ * new atoms */
+ CSTD_UNUSED(runpool_state_changed);
+}
+
+mali_bool kbasep_js_ctx_attr_ctx_release_atom(kbase_device *kbdev, kbase_context *kctx, kbasep_js_atom_retained_state *katom_retained_state)
+{
+ mali_bool runpool_state_changed = MALI_FALSE;
+ base_jd_core_req core_req;
+
+ KBASE_DEBUG_ASSERT(katom_retained_state);
+ core_req = katom_retained_state->core_req;
+
+ /* No-op for invalid atoms */
+ if (kbasep_js_atom_retained_state_is_valid(katom_retained_state) == MALI_FALSE)
+ return MALI_FALSE;
+
+ if (core_req & BASE_JD_REQ_ONLY_COMPUTE)
+ runpool_state_changed |= kbasep_js_ctx_attr_ctx_release_attr(kbdev, kctx, KBASEP_JS_CTX_ATTR_COMPUTE);
+ else
+ runpool_state_changed |= kbasep_js_ctx_attr_ctx_release_attr(kbdev, kctx, KBASEP_JS_CTX_ATTR_NON_COMPUTE);
+
+ if ((core_req & (BASE_JD_REQ_CS | BASE_JD_REQ_ONLY_COMPUTE | BASE_JD_REQ_T)) != 0 && (core_req & (BASE_JD_REQ_COHERENT_GROUP | BASE_JD_REQ_SPECIFIC_COHERENT_GROUP)) == 0) {
+ /* Atom that can run on slot1 or slot2, and can use all cores */
+ runpool_state_changed |= kbasep_js_ctx_attr_ctx_release_attr(kbdev, kctx, KBASEP_JS_CTX_ATTR_COMPUTE_ALL_CORES);
+ }
+
+ return runpool_state_changed;
+}
diff --git a/drivers/gpu/arm/t6xx/kbase/src/common/mali_kbase_js_ctx_attr.h b/drivers/gpu/arm/t6xx/kbase/src/common/mali_kbase_js_ctx_attr.h
new file mode 100755
index 00000000000..df60b7d6479
--- /dev/null
+++ b/drivers/gpu/arm/t6xx/kbase/src/common/mali_kbase_js_ctx_attr.h
@@ -0,0 +1,156 @@
+/*
+ *
+ * (C) COPYRIGHT 2012 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained
+ * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
+ * Boston, MA 02110-1301, USA.
+ *
+ */
+
+
+
+/**
+ * @file mali_kbase_js_ctx_attr.h
+ * Job Scheduler Context Attribute APIs
+ */
+
+#ifndef _KBASE_JS_CTX_ATTR_H_
+#define _KBASE_JS_CTX_ATTR_H_
+
+/**
+ * @addtogroup base_api
+ * @{
+ */
+
+/**
+ * @addtogroup base_kbase_api
+ * @{
+ */
+
+/**
+ * @addtogroup kbase_js
+ * @{
+ */
+
+/**
+ * Set the initial attributes of a context (when context create flags are set)
+ *
+ * Requires:
+ * - Hold the jsctx_mutex
+ */
+void kbasep_js_ctx_attr_set_initial_attrs(kbase_device *kbdev, kbase_context *kctx);
+
+/**
+ * Retain all attributes of a context
+ *
+ * This occurs when the context is scheduled onto the runpool (but after
+ * is_scheduled is set)
+ *
+ * Requires:
+ * - jsctx mutex
+ * - runpool_irq spinlock
+ * - ctx->is_scheduled is true
+ */
+void kbasep_js_ctx_attr_runpool_retain_ctx(kbase_device *kbdev, kbase_context *kctx);
+
+/**
+ * Release all attributes of a context
+ *
+ * This occurs when the context is scheduled out of the runpool (but before
+ * is_scheduled is cleared)
+ *
+ * Requires:
+ * - jsctx mutex
+ * - runpool_irq spinlock
+ * - ctx->is_scheduled is true
+ *
+ * @return MALI_TRUE indicates a change in ctx attributes state of the runpool.
+ * In this state, the scheduler might be able to submit more jobs than
+ * previously, and so the caller should ensure kbasep_js_try_run_next_job_nolock()
+ * or similar is called sometime later.
+ * @return MALI_FALSE indicates no change in ctx attributes state of the runpool.
+ */
+mali_bool kbasep_js_ctx_attr_runpool_release_ctx(kbase_device *kbdev, kbase_context *kctx);
+
+/**
+ * Retain all attributes of an atom
+ *
+ * This occurs on adding an atom to a context
+ *
+ * Requires:
+ * - jsctx mutex
+ * - If the context is scheduled, then runpool_irq spinlock must also be held
+ */
+void kbasep_js_ctx_attr_ctx_retain_atom(kbase_device *kbdev, kbase_context *kctx, kbase_jd_atom *katom);
+
+/**
+ * Release all attributes of an atom, given its retained state.
+ *
+ * This occurs after (permanently) removing an atom from a context
+ *
+ * Requires:
+ * - jsctx mutex
+ * - If the context is scheduled, then runpool_irq spinlock must also be held
+ *
+ * This is a no-op when \a katom_retained_state is invalid.
+ *
+ * @return MALI_TRUE indicates a change in ctx attributes state of the runpool.
+ * In this state, the scheduler might be able to submit more jobs than
+ * previously, and so the caller should ensure kbasep_js_try_run_next_job_nolock()
+ * or similar is called sometime later.
+ * @return MALI_FALSE indicates no change in ctx attributes state of the runpool.
+ */
+mali_bool kbasep_js_ctx_attr_ctx_release_atom(kbase_device *kbdev, kbase_context *kctx, kbasep_js_atom_retained_state *katom_retained_state);
+
+/**
+ * Requires:
+ * - runpool_irq spinlock
+ */
+static INLINE s8 kbasep_js_ctx_attr_count_on_runpool(kbase_device *kbdev, kbasep_js_ctx_attr attribute)
+{
+ kbasep_js_device_data *js_devdata;
+
+ KBASE_DEBUG_ASSERT(kbdev != NULL);
+ KBASE_DEBUG_ASSERT(attribute < KBASEP_JS_CTX_ATTR_COUNT);
+ js_devdata = &kbdev->js_data;
+
+ return js_devdata->runpool_irq.ctx_attr_ref_count[attribute];
+}
+
+/**
+ * Requires:
+ * - runpool_irq spinlock
+ */
+static INLINE mali_bool kbasep_js_ctx_attr_is_attr_on_runpool(kbase_device *kbdev, kbasep_js_ctx_attr attribute)
+{
+ /* In general, attributes are 'on' when they have a non-zero refcount (note: the refcount will never be < 0) */
+ return (mali_bool) kbasep_js_ctx_attr_count_on_runpool(kbdev, attribute);
+}
+
+/**
+ * Requires:
+ * - jsctx mutex
+ */
+static INLINE mali_bool kbasep_js_ctx_attr_is_attr_on_ctx(kbase_context *kctx, kbasep_js_ctx_attr attribute)
+{
+ kbasep_js_kctx_info *js_kctx_info;
+
+ KBASE_DEBUG_ASSERT(kctx != NULL);
+ KBASE_DEBUG_ASSERT(attribute < KBASEP_JS_CTX_ATTR_COUNT);
+ js_kctx_info = &kctx->jctx.sched_info;
+
+ /* In general, attributes are 'on' when they have a refcount (which should never be < 0) */
+ return (mali_bool) (js_kctx_info->ctx.ctx_attr_ref_count[attribute]);
+}
+
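+/* Illustrative sketch (not part of the driver): how the scheduler combines
+ * these queries, mirroring kbase_affinity_requires_split() in
+ * mali_kbase_js_affinity.c - a core split is only needed when compute and
+ * non-compute contexts are both present on the runpool.
+ *
+ * @code
+ * mali_bool split_needed = (mali_bool)
+ *         (kbasep_js_ctx_attr_count_on_runpool(kbdev, KBASEP_JS_CTX_ATTR_COMPUTE) > 0 &&
+ *          kbasep_js_ctx_attr_count_on_runpool(kbdev, KBASEP_JS_CTX_ATTR_NON_COMPUTE) > 0);
+ * @endcode
+ */
+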
+ /** @} *//* end group kbase_js */
+ /** @} *//* end group base_kbase_api */
+ /** @} *//* end group base_api */
+
+#endif /* _KBASE_JS_CTX_ATTR_H_ */
diff --git a/drivers/gpu/arm/t6xx/kbase/src/common/mali_kbase_js_defs.h b/drivers/gpu/arm/t6xx/kbase/src/common/mali_kbase_js_defs.h
new file mode 100755
index 00000000000..1e7e97499bb
--- /dev/null
+++ b/drivers/gpu/arm/t6xx/kbase/src/common/mali_kbase_js_defs.h
@@ -0,0 +1,474 @@
+/*
+ *
+ * (C) COPYRIGHT 2011-2013 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained
+ * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
+ * Boston, MA 02110-1301, USA.
+ *
+ */
+
+
+
+/**
+ * @file mali_kbase_js_defs.h
+ * Job Scheduler Type Definitions
+ */
+
+#ifndef _KBASE_JS_DEFS_H_
+#define _KBASE_JS_DEFS_H_
+
+/**
+ * @addtogroup base_api
+ * @{
+ */
+
+/**
+ * @addtogroup base_kbase_api
+ * @{
+ */
+
+/**
+ * @addtogroup kbase_js
+ * @{
+ */
+/* Forward decls */
+struct kbase_device;
+struct kbase_jd_atom;
+
+
+/* Types used by the policies must go here */
+enum {
+ /** Context will not submit any jobs */
+ KBASE_CTX_FLAG_SUBMIT_DISABLED = (1u << 0),
+
+ /** Set if the context uses an address space and should be kept scheduled in */
+ KBASE_CTX_FLAG_PRIVILEGED = (1u << 1),
+
+ /** Kernel-side equivalent of BASE_CONTEXT_HINT_ONLY_COMPUTE. Not mutable after the context's creation flags are set */
+ KBASE_CTX_FLAG_HINT_ONLY_COMPUTE = (1u << 2)
+
+ /* NOTE: Add flags for other things, such as 'is scheduled', and 'is dying' */
+};
+
+typedef u32 kbase_context_flags;
+
+typedef struct kbasep_atom_req {
+ base_jd_core_req core_req;
+ kbase_context_flags ctx_req;
+ u32 device_nr;
+} kbasep_atom_req;
+
+#include "mali_kbase_js_policy_cfs.h"
+
+/* Wrapper Interface - doxygen is elsewhere */
+typedef union kbasep_js_policy {
+#ifdef KBASE_JS_POLICY_AVAILABLE_FCFS
+ kbasep_js_policy_fcfs fcfs;
+#endif
+#ifdef KBASE_JS_POLICY_AVAILABLE_CFS
+ kbasep_js_policy_cfs cfs;
+#endif
+} kbasep_js_policy;
+
+/* Wrapper Interface - doxygen is elsewhere */
+typedef union kbasep_js_policy_ctx_info {
+#ifdef KBASE_JS_POLICY_AVAILABLE_FCFS
+ kbasep_js_policy_fcfs_ctx fcfs;
+#endif
+#ifdef KBASE_JS_POLICY_AVAILABLE_CFS
+ kbasep_js_policy_cfs_ctx cfs;
+#endif
+} kbasep_js_policy_ctx_info;
+
+/* Wrapper Interface - doxygen is elsewhere */
+typedef union kbasep_js_policy_job_info {
+#ifdef KBASE_JS_POLICY_AVAILABLE_FCFS
+ kbasep_js_policy_fcfs_job fcfs;
+#endif
+#ifdef KBASE_JS_POLICY_AVAILABLE_CFS
+ kbasep_js_policy_cfs_job cfs;
+#endif
+} kbasep_js_policy_job_info;
+
+
+/** Callback function run on all of a context's jobs registered with the Job
+ * Scheduler */
+typedef void (*kbasep_js_policy_ctx_job_cb)(struct kbase_device *kbdev, struct kbase_jd_atom *katom);
+
+/**
+ * @brief Maximum number of jobs that can be submitted to a job slot whilst
+ * inside the IRQ handler.
+ *
+ * This is important because GPU NULL jobs can complete whilst the IRQ handler
+ * is running. Otherwise, it potentially allows an unlimited number of GPU NULL
+ * jobs to be submitted inside the IRQ handler, which increases IRQ latency.
+ */
+#define KBASE_JS_MAX_JOB_SUBMIT_PER_SLOT_PER_IRQ 2
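+
+/* Illustrative sketch (an assumption, not the driver's actual IRQ handler):
+ * how this limit bounds submission per slot per IRQ. slot_has_space() and
+ * try_dequeue_and_submit() are hypothetical helpers. */
+#if 0
+ int nr_submitted = 0;
+
+ while (nr_submitted < KBASE_JS_MAX_JOB_SUBMIT_PER_SLOT_PER_IRQ &&
+        slot_has_space(kbdev, js) &&
+        try_dequeue_and_submit(kbdev, js))
+  ++nr_submitted;
+#endif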
+
+/**
+ * @brief the IRQ_THROTTLE time in microseconds
+ *
+ * This will be converted via the GPU's clock frequency into a cycle-count.
+ *
+ * @note we can make an estimate of the GPU's frequency by periodically
+ * sampling its CYCLE_COUNT register
+ */
+#define KBASE_JS_IRQ_THROTTLE_TIME_US 20
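+
+/* Illustrative arithmetic (an assumption about usage): converting the
+ * throttle time to a cycle-count from a frequency in kHz. For example, at
+ * 500 MHz (500000 kHz), 20 us corresponds to 20 * 500 = 10000 cycles. */
+#if 0
+static inline u32 example_irq_throttle_cycles(u32 gpu_freq_khz)
+{
+ /* cycles = us * (kHz / 1000), i.e. us * cycles-per-microsecond */
+ return (u32)(((u64)KBASE_JS_IRQ_THROTTLE_TIME_US * gpu_freq_khz) / 1000);
+}
+#endif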
+
+/**
+ * @brief Context attributes
+ *
+ * Each context attribute can be thought of as a boolean value that caches some
+ * state information about either the runpool, or the context:
+ * - In the case of the runpool, it is a cache of "Do any contexts owned by
+ * the runpool have attribute X?"
+ * - In the case of a context, it is a cache of "Do any atoms owned by the
+ * context have attribute X?"
+ *
+ * The boolean values of the context attributes often affect scheduling
+ * decisions, such as affinities to use and job slots to use.
+ *
+ * To accommodate changes of state in the context, each attribute is refcounted
+ * in the context, and in the runpool for all running contexts. Specifically:
+ * - The runpool holds a refcount of how many contexts in the runpool have this
+ * attribute.
+ * - The context holds a refcount of how many atoms have this attribute.
+ *
+ * Examples of use:
+ * - Finding out when there is a mix of @ref BASE_CONTEXT_HINT_ONLY_COMPUTE
+ * and ! @ref BASE_CONTEXT_HINT_ONLY_COMPUTE contexts in the runpool
+ */
+typedef enum {
+ /** Attribute indicating a context that contains Compute jobs. That is,
+ * @ref BASE_CONTEXT_HINT_ONLY_COMPUTE is \b set and/or the context has jobs of type
+ * @ref BASE_JD_REQ_ONLY_COMPUTE
+ *
+ * @note A context can be both 'Compute' and 'Non Compute' if it contains
+ * both types of jobs.
+ */
+ KBASEP_JS_CTX_ATTR_COMPUTE,
+
+ /** Attribute indicating a context that contains Non-Compute jobs. That is,
+ * the context has some jobs that are \b not of type @ref
+ * BASE_JD_REQ_ONLY_COMPUTE. The context usually has
+ * BASE_CONTEXT_HINT_ONLY_COMPUTE \b clear, but this depends on the HW
+ * workarounds in use in the Job Scheduling Policy.
+ *
+ * @note A context can be both 'Compute' and 'Non Compute' if it contains
+ * both types of jobs.
+ */
+ KBASEP_JS_CTX_ATTR_NON_COMPUTE,
+
+ /** Attribute indicating that a context contains compute-job atoms that
+ * aren't restricted to a coherent group, and can run on all cores.
+ *
+ * Specifically, this is when the atom's \a core_req satisfy:
+ * - (\a core_req & (BASE_JD_REQ_CS | BASE_JD_REQ_ONLY_COMPUTE | BASE_JD_REQ_T)) // uses slot 1 or slot 2
+ * - && !(\a core_req & BASE_JD_REQ_COHERENT_GROUP) // not restricted to coherent groups
+ *
+ * Such atoms could be blocked from running if one of the coherent groups
+ * is being used by another job slot, so tracking this context attribute
+ * allows us to prevent such situations.
+ *
+ * @note This doesn't take into account the 1-coregroup case, where all
+ * compute atoms would effectively be able to run on 'all cores', but
+ * contexts will still not always get marked with this attribute. Instead,
+ * it is the caller's responsibility to take into account the number of
+ * coregroups when interpreting this attribute.
+ *
+ * @note Whilst Tiler atoms are normally combined with
+ * BASE_JD_REQ_COHERENT_GROUP, it is possible to send such atoms without
+ * BASE_JD_REQ_COHERENT_GROUP set. This is an unlikely case, but it's easy
+ * enough to handle anyway.
+ */
+ KBASEP_JS_CTX_ATTR_COMPUTE_ALL_CORES,
+
+ /** Must be the last in the enum */
+ KBASEP_JS_CTX_ATTR_COUNT
+} kbasep_js_ctx_attr;
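+
+/* Illustrative predicate (a sketch of the core_req test described for
+ * KBASEP_JS_CTX_ATTR_COMPUTE_ALL_CORES above; not necessarily the driver's
+ * exact implementation): */
+#if 0
+static inline mali_bool example_atom_is_compute_all_cores(base_jd_core_req core_req)
+{
+ /* uses slot 1 or 2, and is not restricted to a coherent group */
+ return (mali_bool)((core_req & (BASE_JD_REQ_CS | BASE_JD_REQ_ONLY_COMPUTE | BASE_JD_REQ_T)) != 0 &&
+                    (core_req & BASE_JD_REQ_COHERENT_GROUP) == 0);
+}
+#endif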
+
+enum {
+ /** Bit indicating that new atom should be started because this atom completed */
+ KBASE_JS_ATOM_DONE_START_NEW_ATOMS = (1u << 0),
+ /** Bit indicating that the atom was evicted from the JSn_NEXT registers */
+ KBASE_JS_ATOM_DONE_EVICTED_FROM_NEXT = (1u << 1)
+};
+
+/** Combination of KBASE_JS_ATOM_DONE_<...> bits */
+typedef u32 kbasep_js_atom_done_code;
+
+/**
+ * Data used by the scheduler that is unique for each Address Space.
+ *
+ * This is used in IRQ context and kbasep_js_device_data::runpool_irq::lock
+ * must be held whilst accessing this data (including reads and atomic
+ * decisions based on the read).
+ */
+typedef struct kbasep_js_per_as_data {
+ /**
+ * Ref count of whether this AS is busy, and must not be scheduled out
+ *
+ * When jobs are running this is always positive. However, it can still be
+ * positive when no jobs are running. If all you need is a heuristic to
+ * tell you whether jobs might be running, this should be sufficient.
+ */
+ int as_busy_refcount;
+
+ /** Pointer to the current context on this address space, or NULL for no context */
+ kbase_context *kctx;
+} kbasep_js_per_as_data;
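+
+/* Illustrative heuristic (an assumption about usage): reading the busy
+ * refcount under runpool_irq.lock to decide whether an AS may be running
+ * jobs. 'js_devdata' and 'as_nr' are assumed to be in scope. */
+#if 0
+ kbasep_js_per_as_data *as_data = &js_devdata->runpool_irq.per_as_data[as_nr];
+
+ if (as_data->as_busy_refcount > 0) {
+ /* jobs might be running - do not schedule this AS out */
+ }
+#endif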
+
+/**
+ * @brief KBase Device Data Job Scheduler sub-structure
+ *
+ * This encapsulates the current context of the Job Scheduler on a particular
+ * device. This context is global to the device, and is not tied to any
+ * particular kbase_context running on the device.
+ *
+ * nr_contexts_running and as_free are optimized for packing together (by making
+ * them smaller types than u32). The operations on them should rarely involve
+ * masking. The use of signed types for arithmetic indicates to the compiler that
+ * the value will not rollover (which would be undefined behavior), and so the
+ * compiler is free to make optimizations based on that (i.e. to remove
+ * masking).
+ */
+typedef struct kbasep_js_device_data {
+ /** Sub-structure to collect together Job Scheduling data used in IRQ context */
+ struct runpool_irq {
+ /**
+ * Lock for accessing Job Scheduling data used in IRQ context
+ *
+ * This lock must be held whenever this data is accessed (read, or
+ * write). Even for read-only access, memory barriers would be needed.
+ * In any case, it is likely that decisions based on only reading must
+ * also be atomic with respect to data held here and elsewhere in the
+ * Job Scheduler.
+ *
+ * This lock must also be held for accessing:
+ * - kbase_context::as_nr
+ * - kbase_device::jm_slots
+ * - Parts of the kbasep_js_policy, dependent on the policy (refer to
+ * the policy in question for more information)
+ * - Parts of kbasep_js_policy_ctx_info, dependent on the policy (refer to
+ * the policy in question for more information)
+ */
+ spinlock_t lock;
+
+ /** Bitvector indicating whether a currently scheduled context is allowed to submit jobs.
+ * When bit 'N' is set, the context bound to address space 'N'
+ * (per_as_data[N].kctx) is allowed to submit jobs.
+ *
+ * It is placed here because it's much more memory efficient than having a mali_bool8 in
+ * kbasep_js_per_as_data to store this flag */
+ u16 submit_allowed;
+
+ /** Context Attributes:
+ * Each is large enough to hold a refcount of the number of contexts
+ * that can fit into the runpool. This is currently BASE_MAX_NR_AS
+ *
+ * Note that when BASE_MAX_NR_AS==16 we need 5 bits (not 4) to store
+ * the refcount. Hence, it's not worthwhile reducing this to
+ * bit-manipulation on u32s to save space (where in contrast, 4 bit
+ * sub-fields would be easy to do and would save space).
+ *
+ * Whilst this must not become negative, the sign bit is used for:
+ * - error detection in debug builds
+ * - Optimization: it is undefined for a signed int to overflow, and so
+ * the compiler can optimize for that never happening (thus, no masking
+ * is required on updating the variable) */
+ s8 ctx_attr_ref_count[KBASEP_JS_CTX_ATTR_COUNT];
+
+ /** Data that is unique for each AS */
+ kbasep_js_per_as_data per_as_data[BASE_MAX_NR_AS];
+
+ /*
+ * Affinity management and tracking
+ */
+ /** Bitvector to aid affinity checking. Element 'n' bit 'i' indicates
+ * that slot 'n' is using core i (i.e. slot_affinity_refcount[n][i] > 0) */
+ u64 slot_affinities[BASE_JM_MAX_NR_SLOTS];
+ /** Bitvector indicating which slots \em might have atoms blocked on
+ * them because otherwise they'd violate affinity restrictions */
+ u16 slots_blocked_on_affinity;
+ /** Refcount for each core owned by each slot. Used to generate the
+ * slot_affinities array of bitvectors
+ *
+ * The value of the refcount will not exceed BASE_JM_SUBMIT_SLOTS,
+ * because it is refcounted only when a job is definitely about to be
+ * submitted to a slot, and is de-refcounted immediately after a job
+ * finishes */
+ s8 slot_affinity_refcount[BASE_JM_MAX_NR_SLOTS][64];
+ } runpool_irq;
+
+ /**
+ * Run Pool mutex, for managing contexts within the runpool.
+ * Unless otherwise specified, you must hold this lock whilst accessing any
+ * members that follow
+ *
+ * In addition, this is used to access:
+ * - the kbasep_js_kctx_info::runpool substructure
+ */
+ struct mutex runpool_mutex;
+
+ /**
+ * Queue Lock, used to access the Policy's queue of contexts independently
+ * of the Run Pool.
+ *
+ * Of course, you don't need the Run Pool lock to access this.
+ */
+ struct mutex queue_mutex;
+
+ u16 as_free; /**< Bitpattern of free Address Spaces */
+
+ /** Number of currently scheduled user contexts (excluding ones that are not submitting jobs) */
+ s8 nr_user_contexts_running;
+ /** Number of currently scheduled contexts (including ones that are not submitting jobs) */
+ s8 nr_all_contexts_running;
+
+ /**
+ * Policy-specific information.
+ *
+ * Refer to the structure defined by the current policy to determine which
+ * locks must be held when accessing this.
+ */
+ kbasep_js_policy policy;
+
+ /** Core Requirements to match up with base_js_atom's core_req member
+ * @note This is a write-once member, and so no locking is required to read */
+ base_jd_core_req js_reqs[BASE_JM_MAX_NR_SLOTS];
+
+ u32 scheduling_tick_ns; /**< Value for KBASE_CONFIG_ATTR_JS_SCHEDULING_TICK_NS */
+ u32 soft_stop_ticks; /**< Value for KBASE_CONFIG_ATTR_JS_SOFT_STOP_TICKS */
+ u32 hard_stop_ticks_ss; /**< Value for KBASE_CONFIG_ATTR_JS_HARD_STOP_TICKS_SS */
+ u32 hard_stop_ticks_nss; /**< Value for KBASE_CONFIG_ATTR_JS_HARD_STOP_TICKS_NSS */
+ u32 gpu_reset_ticks_ss; /**< Value for KBASE_CONFIG_ATTR_JS_RESET_TICKS_SS */
+ u32 gpu_reset_ticks_nss; /**< Value for KBASE_CONFIG_ATTR_JS_RESET_TICKS_NSS */
+ u32 ctx_timeslice_ns; /**< Value for KBASE_CONFIG_ATTR_JS_CTX_TIMESLICE_NS */
+ u32 cfs_ctx_runtime_init_slices; /**< Value for KBASE_CONFIG_ATTR_JS_CFS_CTX_RUNTIME_INIT_SLICES */
+ u32 cfs_ctx_runtime_min_slices; /**< Value for KBASE_CONFIG_ATTR_JS_CFS_CTX_RUNTIME_MIN_SLICES */
+
+ /** List of suspended soft jobs */
+ struct list_head suspended_soft_jobs_list;
+
+#ifdef CONFIG_MALI_DEBUG
+ /* Support soft-stop on a single context */
+ mali_bool softstop_always;
+#endif /* CONFIG_MALI_DEBUG */
+ /** The initialized-flag is placed at the end, to avoid cache-pollution (we should
+ * only be using this during init/term paths).
+ * @note This is a write-once member, and so no locking is required to read */
+ int init_status;
+} kbasep_js_device_data;
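+
+/* Illustrative sketch (an assumption; the driver has its own helpers for
+ * this): testing and clearing the submit_allowed bit for address space
+ * 'as_nr', which must be done under runpool_irq.lock. */
+#if 0
+static inline mali_bool example_is_submit_allowed(kbasep_js_device_data *js_devdata, int as_nr)
+{
+ return (mali_bool)((js_devdata->runpool_irq.submit_allowed >> as_nr) & 1u);
+}
+
+static inline void example_clear_submit_allowed(kbasep_js_device_data *js_devdata, int as_nr)
+{
+ js_devdata->runpool_irq.submit_allowed &= (u16)~(1u << as_nr);
+}
+#endif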
+
+/**
+ * @brief KBase Context Job Scheduling information structure
+ *
+ * This is a substructure in the kbase_context that encapsulates all the
+ * scheduling information.
+ */
+typedef struct kbasep_js_kctx_info {
+ /**
+ * Runpool substructure. This must only be accessed whilst the Run Pool
+ * mutex ( kbasep_js_device_data::runpool_mutex ) is held.
+ *
+ * In addition, the kbasep_js_device_data::runpool_irq::lock may need to be
+ * held for certain sub-members.
+ *
+ * @note some of the members could be moved into kbasep_js_device_data for
+ * improved d-cache/tlb efficiency.
+ */
+ struct {
+ kbasep_js_policy_ctx_info policy_ctx; /**< Policy-specific context */
+ } runpool;
+
+ /**
+ * Job Scheduler Context information sub-structure. These members are
+ * accessed regardless of whether the context is:
+ * - In the Policy's Run Pool
+ * - In the Policy's Queue
+ * - Not queued nor in the Run Pool.
+ *
+ * You must obtain the jsctx_mutex before accessing any other members of
+ * this substructure.
+ *
+ * You may not access any of these members from IRQ context.
+ */
+ struct {
+ struct mutex jsctx_mutex; /**< Job Scheduler Context lock */
+
+ /** Number of jobs <b>ready to run</b> - does \em not include the jobs waiting in
+ * the dispatcher, nor dependency-only jobs. See kbase_jd_context::job_nr
+ * for such jobs */
+ u32 nr_jobs;
+
+ /** Context Attributes:
+ * Each is large enough to hold a refcount of the number of atoms on
+ * the context. */
+ u32 ctx_attr_ref_count[KBASEP_JS_CTX_ATTR_COUNT];
+
+ kbase_context_flags flags;
+ /* NOTE: Unify the following flags into kbase_context_flags */
+ /**
+ * Is the context scheduled on the Run Pool?
+ *
+ * This is only ever updated whilst the jsctx_mutex is held.
+ */
+ mali_bool is_scheduled;
+ /**
+ * Wait queue to wait for is_scheduled state changes.
+ */
+ wait_queue_head_t is_scheduled_wait;
+
+ mali_bool is_dying; /**< Is the context in the process of being evicted? */
+ } ctx;
+
+ /* The initialized-flag is placed at the end, to avoid cache-pollution (we should
+ * only be using this during init/term paths) */
+ int init_status;
+} kbasep_js_kctx_info;
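+
+/* Illustrative sketch (an assumption about usage): a thread sleeping until a
+ * context is scheduled out, using the wait queue above. Writers update
+ * is_scheduled under jsctx_mutex and then wake the queue. */
+#if 0
+ kbasep_js_kctx_info *js_kctx_info = &kctx->jctx.sched_info;
+
+ wait_event(js_kctx_info->ctx.is_scheduled_wait,
+            js_kctx_info->ctx.is_scheduled == MALI_FALSE);
+#endif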
+
+/** Subset of atom state that can be available after jd_done_nolock() is called
+ * on that atom. A copy must be taken via kbasep_js_atom_retained_state_copy(),
+ * because the original atom could disappear. */
+typedef struct kbasep_js_atom_retained_state {
+ /** Event code - to determine whether the atom has finished */
+ base_jd_event_code event_code;
+ /** core requirements */
+ base_jd_core_req core_req;
+ /** Job Slot to retry submitting to if submission from IRQ handler failed */
+ int retry_submit_on_slot;
+
+} kbasep_js_atom_retained_state;
+
+/**
+ * Value signifying 'no retry on a slot required' for:
+ * - kbase_js_atom_retained_state::retry_submit_on_slot
+ * - kbase_jd_atom::retry_submit_on_slot
+ */
+#define KBASEP_JS_RETRY_SUBMIT_SLOT_INVALID (-1)
+
+/**
+ * base_jd_core_req value signifying 'invalid' for a kbase_jd_atom_retained_state.
+ *
+ * @see kbase_atom_retained_state_is_valid()
+ */
+#define KBASEP_JS_ATOM_RETAINED_STATE_CORE_REQ_INVALID BASE_JD_REQ_DEP
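+
+/* Illustrative sketch of the validity test implied by the define above (an
+ * assumption; refer to the real validity helper in the driver): */
+#if 0
+static inline mali_bool example_retained_state_is_valid(const kbasep_js_atom_retained_state *rs)
+{
+ return (mali_bool)(rs->core_req != KBASEP_JS_ATOM_RETAINED_STATE_CORE_REQ_INVALID);
+}
+#endif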
+
+/**
+ * @brief The JS timer resolution, in microseconds
+ *
+ * Any non-zero difference in time will be at least this size.
+ */
+#define KBASEP_JS_TICK_RESOLUTION_US 1
+
+ /** @} *//* end group kbase_js */
+ /** @} *//* end group base_kbase_api */
+ /** @} *//* end group base_api */
+
+#endif /* _KBASE_JS_DEFS_H_ */
diff --git a/drivers/gpu/arm/t6xx/kbase/src/common/mali_kbase_js_policy.h b/drivers/gpu/arm/t6xx/kbase/src/common/mali_kbase_js_policy.h
new file mode 100755
index 00000000000..719c43060a1
--- /dev/null
+++ b/drivers/gpu/arm/t6xx/kbase/src/common/mali_kbase_js_policy.h
@@ -0,0 +1,765 @@
+/*
+ *
+ * (C) COPYRIGHT 2011-2013 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained
+ * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
+ * Boston, MA 02110-1301, USA.
+ *
+ */
+
+
+
+/**
+ * @file mali_kbase_js_policy.h
+ * Job Scheduler Policy APIs.
+ */
+
+#ifndef _KBASE_JS_POLICY_H_
+#define _KBASE_JS_POLICY_H_
+
+/**
+ * @page page_kbase_js_policy Job Scheduling Policies
+ * The Job Scheduling system is described in the following:
+ * - @subpage page_kbase_js_policy_overview
+ * - @subpage page_kbase_js_policy_operation
+ *
+ * The API details are as follows:
+ * - @ref kbase_jm
+ * - @ref kbase_js
+ * - @ref kbase_js_policy
+ */
+
+/**
+ * @page page_kbase_js_policy_overview Overview of the Policy System
+ *
+ * The Job Scheduler Policy manages:
+ * - The assigning of KBase Contexts to GPU Address Spaces (\em ASs)
+ * - The choosing of Job Chains (\em Jobs) from a KBase context, to run on the
+ * GPU's Job Slots (\em JSs).
+ * - The amount of \em time a context is assigned to (<em>scheduled on</em>) an
+ * Address Space
+ * - The amount of \em time a Job spends running on the GPU
+ *
+ * The Policy implements this management via 2 components:
+ * - A Policy Queue, which manages a set of contexts that are ready to run,
+ * but not currently running.
+ * - A Policy Run Pool, which manages the currently running contexts (one per Address
+ * Space) and the jobs to run on the Job Slots.
+ *
+ * Each Graphics Process in the system has at least one KBase Context. Therefore,
+ * the Policy Queue can be seen as a queue of Processes waiting to run Jobs on
+ * the GPU.
+ *
+ * <!-- The following needs to be all on one line, due to doxygen's parser -->
+ * @dotfile policy_overview.dot "Diagram showing a very simplified overview of the Policy System. IRQ handling, soft/hard-stopping, contexts re-entering the system and Policy details are omitted"
+ *
+ * The main operations on the queue are:
+ * - Enqueuing a Context to it
+ * - Dequeuing a Context from it, to run it.
+ * - Note: requeuing a context is much the same as enqueuing a context, but
+ * occurs when a context is scheduled out of the system to allow other contexts
+ * to run.
+ *
+ * These operations have much the same meaning for the Run Pool - Jobs are
+ * dequeued to run on a Jobslot, and requeued when they are scheduled out of
+ * the GPU.
+ *
+ * @note This is an over-simplification of the Policy APIs - there are more
+ * operations than 'Enqueue'/'Dequeue', and a Dequeue from the Policy Queue
+ * takes at least two function calls: one to Dequeue from the Queue, one to add
+ * to the Run Pool.
+ *
+ * As indicated on the diagram, Jobs permanently leave the scheduling system
+ * when they are completed, otherwise they get dequeued/requeued until this
+ * happens. Similarly, Contexts leave the scheduling system when their jobs
+ * have all completed. However, Contexts may later return to the scheduling
+ * system (not shown on the diagram) if more Bags of Jobs are submitted to
+ * them.
+ */
+
+/**
+ * @page page_kbase_js_policy_operation Policy Operation
+ *
+ * We describe the actions that the Job Scheduler Core takes on the Policy in
+ * the following cases:
+ * - The IRQ Path
+ * - The Job Submission Path
+ * - The High Priority Job Submission Path
+ *
+ * This shows how the Policy APIs will be used by the Job Scheduler core.
+ *
+ * The following diagram shows an example Policy that contains a Low Priority
+ * queue, and a Real-time (High Priority) Queue. The RT queue is examined
+ * before the LowP one on dequeuing from the head. The Low Priority Queue is
+ * ordered by time, and the RT queue is ordered by RT-priority, and then by
+ * time. In addition, it shows that the Job Scheduler Core will start a
+ * Soft-Stop Timer (SS-Timer) when it dequeue's and submits a job. The
+ * Soft-Stop time is set by a global configuration value, and must be a value
+ * appropriate for the policy. For example, this could include "don't run a
+ * soft-stop timer" for a First-Come-First-Served (FCFS) policy.
+ *
+ * <!-- The following needs to be all on one line, due to doxygen's parser -->
+ * @dotfile policy_operation_diagram.dot "Diagram showing the objects managed by an Example Policy, and the operations made upon these objects by the Job Scheduler Core."
+ *
+ * @section sec_kbase_js_policy_operation_prio Dealing with Priority
+ *
+ * Priority applies both to a context as a whole, and to the jobs within a
+ * context. The jobs specify a priority in the base_jd_atom::prio member, which
+ * is relative to that of the context. A positive setting indicates a reduction
+ * in priority, whereas a negative setting indicates a boost in priority. Of
+ * course, the boost in priority should only be honoured when the originating
+ * process has sufficient priviledges, and should be ignored for unpriviledged
+ * processes. The meaning of the combined priority value is up to the policy
+ * itself, and could be a logarithmic scale instead of a linear scale (e.g. the
+ * policy could implement an increase/decrease in priority by 1 results in an
+ * increase/decrease in \em proportion of time spent scheduled in by 25%, an
+ * effective change in timeslice by 11%).
+ *
+ * It is up to the policy whether a boost in priority boosts the priority of
+ * the entire context (e.g. to such an extent where it may pre-empt other
+ * running contexts). If it chooses to do this, the Policy must make sure that
+ * only the high-priority jobs are run, and that the context is scheduled out
+ * once only low priority jobs remain. This ensures that the low priority jobs
+ * within the context do not gain from the priority boost, yet they still get
+ * scheduled correctly with respect to other low priority contexts.
+ *
+ *
+ * @section sec_kbase_js_policy_operation_irq IRQ Path
+ *
+ * The following happens on the IRQ path from the Job Scheduler Core:
+ * - Note the slot that completed (for later)
+ * - Log the time spent by the job (and implicitly, the time spent by the
+ * context)
+ * - call kbasep_js_policy_log_job_result() <em>in the context of the irq
+ * handler.</em>
+ * - This must happen regardless of whether the job completed successfully or
+ * not (otherwise the context gets away with DoS'ing the system with faulty jobs)
+ * - What was the result of the job?
+ * - If Completed: job is just removed from the system
+ * - If Hard-stop or failure: job is removed from the system
+ * - If Soft-stop: queue the book-keeping work onto a work-queue: have a
+ * work-queue call kbasep_js_policy_enqueue_job()
+ * - Check the timeslice used by the owning context
+ * - call kbasep_js_policy_should_remove_ctx() <em>in the context of the irq
+ * handler.</em>
+ * - If this returns true, clear the "allowed" flag.
+ * - Check the ctx's flags for "allowed", "has jobs to run" and "is running
+ * jobs"
+ * - And so, should the context stay scheduled in?
+ * - If No, push onto a work-queue the work of scheduling out the old context,
+ * and getting a new one. That is:
+ * - kbasep_js_policy_runpool_remove_ctx() on old_ctx
+ * - kbasep_js_policy_enqueue_ctx() on old_ctx
+ * - kbasep_js_policy_dequeue_head_ctx() to get new_ctx
+ * - kbasep_js_policy_runpool_add_ctx() on new_ctx
+ * - (all of this work is deferred on a work-queue to keep the IRQ handler quick)
+ * - If there is space in the completed job slots' HEAD/NEXT registers, run the next job:
+ * - kbasep_js_policy_dequeue_job() <em>in the context of the irq
+ * handler</em> with core_req set to that of the completing slot
+ * - if this returned MALI_TRUE, submit the job to the completed slot.
+ * - This is repeated until kbasep_js_policy_dequeue_job() returns
+ * MALI_FALSE, or the job slot has a job queued on both the HEAD and NEXT registers.
+ * - If kbasep_js_policy_dequeue_job() returned MALI_FALSE, submit some work to
+ * the work-queue to retry from outside of IRQ context (calling
+ * kbasep_js_policy_dequeue_job() from a work-queue).
+ *
+ * Since the IRQ handler submits new jobs \em and re-checks the IRQ_RAWSTAT,
+ * this sequence could loop a large number of times: this could happen if
+ * the jobs submitted completed on the GPU very quickly (in a few cycles), such
+ * as GPU NULL jobs. Then, the HEAD/NEXT registers will always be free to take
+ * more jobs, causing us to loop until we run out of jobs.
+ *
+ * To mitigate this, we must limit the number of jobs submitted per slot during
+ * the IRQ handler - for example, no more than 2 jobs per slot per IRQ should
+ * be sufficient (to fill up the HEAD + NEXT registers in normal cases). For
+ * Mali-T600 with 3 job slots, this means that up to 6 jobs in total could be
+ * submitted per IRQ. Note that IRQ Throttling can make this situation commonplace: 6 jobs
+ * could complete but the IRQ for each of them is delayed by the throttling. By
+ * the time you get the IRQ, all 6 jobs could've completed, meaning you can
+ * submit jobs to fill all 6 HEAD+NEXT registers again.
+ *
+ * @note As much work is deferred as possible, which includes the scheduling
+ * out of a context and scheduling in a new context. However, we can still make
+ * starting a single high-priority context quick despite this:
+ * - On Mali-T600 family, there is one more AS than JSs.
+ * - This means we can very quickly schedule out one AS, no matter what the
+ * situation (because there will always be one AS that's not currently running
+ * on the job slot - it can only have a job in the NEXT register).
+ * - Even with this scheduling out, fair-share can still be guaranteed e.g. by
+ * a timeline-based Completely Fair Scheduler.
+ * - When our high-priority context comes in, we can do this quick-scheduling
+ * out immediately, and then schedule in the high-priority context without having to block.
+ * - This all assumes that the context to schedule out is of lower
+ * priority. Otherwise, we will have to block waiting for some other low
+ * priority context to finish its jobs. Note that it's likely (but not
+ * impossible) that the high-priority context \b is running jobs, by virtue of
+ * it being high priority.
+ * - Therefore, we can give a high likelihood that on Mali-T600 at least one
+ * high-priority context can be started very quickly. For the general case, we
+ * can guarantee starting (no. ASs) - (no. JSs) high priority contexts
+ * quickly. In any case, there is a high likelihood that we're able to start
+ * more than one high priority context quickly.
+ *
+ * In terms of the functions used in the IRQ handler directly, these are the
+ * performance considerations:
+ * - kbasep_js_policy_log_job_result():
+ * - This is just adding to a 64-bit value (possibly even a 32-bit value if we
+ * only store the time the job's recently spent - see below on 'priority weighting')
+ * - For priority weighting, a divide operation ('div') could happen, but
+ * this can happen in a deferred context (outside of IRQ) when scheduling out
+ * the ctx; as per our Engineering Specification, the contexts of different
+ * priority still stay scheduled in for the same timeslice, but higher priority
+ * ones scheduled back in more often.
+ * - That is, the weighted and unweighted times must be stored separately, and
+ * the weighted time is only updated \em outside of IRQ context.
+ * - Of course, this divide is more likely to be a 'multiply by inverse of the
+ * weight', assuming that the weight (priority) doesn't change.
+ * - kbasep_js_policy_should_remove_ctx():
+ * - This is usually just a comparison of the stored time value against some
+ * maximum value.
+ *
+ * @note all deferred work can be wrapped up into one call - we usually need to
+ * indicate that a job/bag is done outside of IRQ context anyway.
+ *
+ *
+ *
+ * @section sec_kbase_js_policy_operation_submit Submission path
+ *
+ * Start with a Context with no jobs present, and assume equal priority of all
+ * contexts in the system. The following work all happens outside of IRQ
+ * Context :
+ * - As soon as a job is made 'ready to run', it must be registered with the Job
+ * Scheduler Policy:
+ * - 'Ready to run' means it has satisfied its dependencies in the
+ * Kernel-side Job Dispatch system.
+ * - Call kbasep_js_policy_enqueue_job()
+ * - This indicates that the job should be scheduled (it is ready to run).
+ * - As soon as a ctx changes from having 0 jobs 'ready to run' to >0 jobs
+ * 'ready to run', we enqueue the context on the policy queue:
+ * - Call kbasep_js_policy_enqueue_ctx()
+ * - This indicates that the \em ctx should be scheduled (it is ready to run)
+ *
+ * Next, we need to handle adding a context to the Run Pool - if it's sensible
+ * to do so. This can happen due to two reasons:
+ * -# A context is enqueued as above, and there are ASs free for it to run on
+ * (e.g. it is the first context to be run, in which case it can be added to
+ * the Run Pool immediately after enqueuing on the Policy Queue)
+ * -# A previous IRQ caused another ctx to be scheduled out, requiring that the
+ * context at the head of the queue be scheduled in. Such steps would happen in
+ * a work queue (work deferred from the IRQ context).
+ *
+ * In both cases, we'd handle it as follows:
+ * - Get the context at the Head of the Policy Queue:
+ * - Call kbasep_js_policy_dequeue_head_ctx()
+ * - Assign the Context an Address Space (Assert that there will be one free,
+ * given the above two reasons)
+ * - Add this context to the Run Pool:
+ * - Call kbasep_js_policy_runpool_add_ctx()
+ * - Now see if a job should be run:
+ * - Mostly, this will be done in the IRQ handler at the completion of a
+ * previous job.
+ * - However, there are two cases where this cannot be done: a) The first job
+ * enqueued to the system (there is no previous IRQ to act upon) b) When jobs
+ * are submitted at a low enough rate to not fill up all Job Slots (or, not to
+ * fill both the 'HEAD' and 'NEXT' registers in the job-slots)
+ * - Hence, on each ctx <b>and job</b> submission we should try to see if we
+ * can run a job:
+ * - For each job slot that has free space (in NEXT or HEAD+NEXT registers):
+ * - Call kbasep_js_policy_dequeue_job() with core_req set to that of the
+ * slot
+ * - if we got one, submit it to the job slot.
+ * - This is repeated until kbasep_js_policy_dequeue_job() returns
+ * MALI_FALSE, or the job slot has a job queued on both the HEAD and NEXT registers.
+ *
+ * The above case shows that we should attempt to run jobs in cases where a) a ctx
+ * has been added to the Run Pool, and b) new jobs have been added to a context
+ * in the Run Pool:
+ * - In the latter case, the context is in the runpool because it's got a job
+ * ready to run, or is already running a job
+ * - We could just wait until the IRQ handler fires, but for certain types of
+ * jobs this can take a comparatively long time to complete, e.g. GLES FS jobs
+ * generally take much longer to run than GLES CS jobs, which are vertex shader
+ * jobs.
+ * - Therefore, when a new job appears in the ctx, we must check the job-slots
+ * to see if they're free, and run the jobs as before.
+ *
+ *
+ *
+ * @section sec_kbase_js_policy_operation_submit_hipri Submission path for High Priority Contexts
+ *
+ * For High Priority Contexts on Mali-T600, we can make sure that at least 1 of
+ * them can be scheduled in immediately to start high priority jobs. In general,
+ * (no. ASs) - (no. JSs) high priority contexts may be started immediately. The
+ * following describes how this happens:
+ *
+ * Similar to the previous section, consider what happens with a high-priority
+ * context (a context with a priority higher than that of any in the Run Pool)
+ * that starts out with no jobs:
+ * - A job becomes ready to run on the context, and so we enqueue the context
+ * on the Policy's Queue.
+ * - However, we'd like to schedule in this context immediately, instead of
+ * waiting for one of the Run Pool contexts' timeslice to expire
+ * - The policy's Enqueue function must detect this (because it is the policy
+ * that embodies the concept of priority), and take appropriate action
+ * - That is, kbasep_js_policy_enqueue_ctx() should check the Policy's Run
+ * Pool to see if a lower priority context should be scheduled out, and then
+ * schedule in the High Priority context.
+ * - For Mali-T600, we can always pick a context to schedule out immediately
+ * (because there are more ASs than JSs), and so scheduling out a victim context
+ * and scheduling in the high priority context can happen immediately.
+ * - If a policy implements fair-sharing, then this can still ensure the
+ * victim later on gets a fair share of the GPU.
+ * - As a note, consider whether the victim can be of equal/higher priority
+ * than the incoming context:
+ * - Usually, higher priority contexts will be the ones currently running
+ * jobs, and so the context with the lowest priority is usually not running
+ * jobs.
+ * - This makes it likely that the victim context is low priority, but
+ * it's not impossible for it to be a high priority one:
+ * - Suppose 3 high priority contexts are submitting only FS jobs, and one low
+ * priority context submitting CS jobs. Then, the context not running jobs will
+ * be one of the high priority contexts (because only 2 FS jobs can be
+ * queued/running on the GPU HW for Mali-T600).
+ * - The problem can be mitigated by extra action, but it's questionable
+ * whether we need to: we already have a high likelihood that there's at least
+ * one high priority context - that should be good enough.
+ * - And so, this method makes sure that at least one high priority context
+ * can be started very quickly, but more than one high priority context could be
+ * delayed (up to one timeslice).
+ * - To improve this, use a GPU with a higher number of Address Spaces vs Job
+ * Slots.
+ * - At this point, let's assume this high priority context has been scheduled
+ * in immediately. The next step is to ensure it can start some jobs quickly.
+ * - It must do this by Soft-Stopping jobs on any of the Job Slots that it can
+ * submit to.
+ * - The rest of the logic for starting the jobs is taken care of by the IRQ
+ * handler. All the policy needs to do is ensure that
+ * kbasep_js_policy_dequeue_job() will return the jobs from the high priority
+ * context.
+ *
+ * @note in SS state, we currently only use 2 job-slots (even for T608, but
+ * this might change in future). In this case, it's always possible to schedule
+ * out 2 ASs quickly (their jobs won't be in the HEAD registers). At the same
+ * time, this maximizes usage of the job-slots (only 2 are in use), because you
+ * can guarantee starting of the jobs from the High Priority contexts immediately too.
+ *
+ *
+ *
+ * @section sec_kbase_js_policy_operation_notes Notes
+ *
+ * - In this design, a separate 'init' is needed from dequeue/requeue, so that
+ * information can be retained between the dequeue/requeue calls. For example,
+ * the total time spent for a context/job could be logged between
+ * dequeue/requeuing, to implement Fair Sharing. In this case, 'init' just
+ * initializes that information to some known state.
+ *
+ *
+ *
+ */
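+
+/* Illustrative flow sketch (an assumption, with locking and error handling
+ * omitted): how the Job Scheduler Core drives the Policy APIs declared below
+ * on the submission path. slot_has_space() and submit_to_slot() are
+ * hypothetical helpers, and the locals are assumed to be in scope. */
+#if 0
+ /* A job becomes ready to run */
+ kbasep_js_policy_init_job(js_policy, kctx, katom);
+ kbasep_js_policy_register_job(js_policy, kctx, katom);
+ kbasep_js_policy_enqueue_job(js_policy, katom);
+
+ /* First ready-to-run job on the ctx: make the ctx schedulable */
+ kbasep_js_policy_enqueue_ctx(js_policy, kctx);
+
+ /* An AS is free: schedule in the context at the head of the queue */
+ if (kbasep_js_policy_dequeue_head_ctx(js_policy, &new_kctx))
+  kbasep_js_policy_runpool_add_ctx(js_policy, new_kctx);
+
+ /* Try to fill any free job slots */
+ while (slot_has_space(kbdev, js) &&
+        kbasep_js_policy_dequeue_job(kbdev, js, &katom))
+  submit_to_slot(kbdev, js, katom);
+#endif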
+
+/**
+ * @addtogroup base_api
+ * @{
+ */
+
+/**
+ * @addtogroup base_kbase_api
+ * @{
+ */
+
+/**
+ * @addtogroup kbase_js_policy Job Scheduler Policy APIs
+ * @{
+ *
+ * <b>Refer to @ref page_kbase_js_policy for an overview and detailed operation of
+ * the Job Scheduler Policy and its use from the Job Scheduler Core.</b>
+ */
+
+/**
+ * @brief Job Scheduler Policy structure
+ */
+union kbasep_js_policy;
+
+/**
+ * @brief Initialize the Job Scheduler Policy
+ */
+mali_error kbasep_js_policy_init(kbase_device *kbdev);
+
+/**
+ * @brief Terminate the Job Scheduler Policy
+ */
+void kbasep_js_policy_term(kbasep_js_policy *js_policy);
+
+/**
+ * @addtogroup kbase_js_policy_ctx Job Scheduler Policy, Context Management API
+ * @{
+ *
+ * <b>Refer to @ref page_kbase_js_policy for an overview and detailed operation of
+ * the Job Scheduler Policy and its use from the Job Scheduler Core.</b>
+ */
+
+/**
+ * @brief Job Scheduler Policy Ctx Info structure
+ *
+ * This structure is embedded in the kbase_context structure. It is used to:
+ * - track information needed for the policy to schedule the context (e.g. time
+ * used, OS priority etc.)
+ * - link together kbase_contexts into a queue, so that a kbase_context can be
+ * obtained as the container of the policy ctx info. This allows the API to
+ * return what "the next context" should be.
+ * - obtain other information already stored in the kbase_context for
+ * scheduling purposes (e.g. process ID to get the priority of the originating
+ * process)
+ */
+union kbasep_js_policy_ctx_info;
+
+/**
+ * @brief Initialize a ctx for use with the Job Scheduler Policy
+ *
+ * This effectively initializes the kbasep_js_policy_ctx_info structure within
+ * the kbase_context (itself located within the kctx->jctx.sched_info structure).
+ */
+mali_error kbasep_js_policy_init_ctx(kbase_device *kbdev, kbase_context *kctx);
+
+/**
+ * @brief Terminate resources associated with using a ctx in the Job Scheduler
+ * Policy.
+ */
+void kbasep_js_policy_term_ctx(kbasep_js_policy *js_policy, kbase_context *kctx);
+
+/**
+ * @brief Enqueue a context onto the Job Scheduler Policy Queue
+ *
+ * If the context enqueued has a priority higher than any in the Run Pool, then
+ * it is the Policy's responsibility to decide whether to schedule out a low
+ * priority context from the Run Pool to allow the high priority context to be
+ * scheduled in.
+ *
+ * If the context has the privileged flag set, it will always be kept at the
+ * head of the queue.
+ *
+ * The caller will be holding kbasep_js_kctx_info::ctx::jsctx_mutex.
+ * The caller will be holding kbasep_js_device_data::queue_mutex.
+ */
+void kbasep_js_policy_enqueue_ctx(kbasep_js_policy *js_policy, kbase_context *kctx);
+
+/**
+ * @brief Dequeue a context from the Head of the Job Scheduler Policy Queue
+ *
+ * The caller will be holding kbasep_js_device_data::queue_mutex.
+ *
+ * @return MALI_TRUE if a context was available, and *kctx_ptr points to
+ * the kctx dequeued.
+ * @return MALI_FALSE if no contexts were available.
+ */
+mali_bool kbasep_js_policy_dequeue_head_ctx(kbasep_js_policy *js_policy, kbase_context ** const kctx_ptr);
+
+/**
+ * @brief Evict a context from the Job Scheduler Policy Queue
+ *
+ * This is only called as part of destroying a kbase_context.
+ *
+ * There are many reasons why this might fail during the lifetime of a
+ * context. For example, the context is in the process of being scheduled. In
+ * that case a thread doing the scheduling might have a pointer to it, but the
+ * context is neither in the Policy Queue, nor is it in the Run
+ * Pool. Crucially, neither the Policy Queue, Run Pool, or the Context itself
+ * are locked.
+ *
+ * Hence to find out where in the system the context is, it is important to do
+ * more than just check the kbasep_js_kctx_info::ctx::is_scheduled member.
+ *
+ * The caller will be holding kbasep_js_device_data::queue_mutex.
+ *
+ * @return MALI_TRUE if the context was evicted from the Policy Queue
+ * @return MALI_FALSE if the context was not found in the Policy Queue
+ */
+mali_bool kbasep_js_policy_try_evict_ctx(kbasep_js_policy *js_policy, kbase_context *kctx);
+
+/**
+ * @brief Call a function on all jobs belonging to a non-queued, non-running
+ * context, optionally detaching the jobs from the context as it goes.
+ *
+ * At the time of the call, the context is guaranteed not to be currently
+ * scheduled on the Run Pool (is_scheduled == MALI_FALSE), and not present in
+ * the Policy Queue. This is because one of the following functions was used
+ * recently on the context:
+ * - kbasep_js_policy_try_evict_ctx()
+ * - kbasep_js_policy_runpool_remove_ctx()
+ *
+ * In both cases, no subsequent call was made on the context to any of:
+ * - kbasep_js_policy_runpool_add_ctx()
+ * - kbasep_js_policy_enqueue_ctx()
+ *
+ * Due to the locks that might be held at the time of the call, the callback
+ * may need to defer work on a workqueue to complete its actions (e.g. when
+ * cancelling jobs)
+ *
+ * \a detach_jobs must only be set when cancelling jobs (which occurs as part
+ * of context destruction).
+ *
+ * The locking conditions on the caller are as follows:
+ * - it will be holding kbasep_js_kctx_info::ctx::jsctx_mutex.
+ */
+void kbasep_js_policy_foreach_ctx_job(kbasep_js_policy *js_policy, kbase_context *kctx,
+ kbasep_js_policy_ctx_job_cb callback, mali_bool detach_jobs);
+
+/**
+ * @brief Add a context to the Job Scheduler Policy's Run Pool
+ *
+ * If the context enqueued has a priority higher than any in the Run Pool, then
+ * it is the Policy's responsibility to decide whether to schedule out low
+ * priority jobs that are currently running on the GPU.
+ *
+ * The number of contexts present in the Run Pool will never be more than the
+ * number of Address Spaces.
+ *
+ * The following guarantees are made about the state of the system when this
+ * is called:
+ * - kctx->as_nr member is valid
+ * - the context has its submit_allowed flag set
+ * - kbasep_js_device_data::runpool_irq::per_as_data[kctx->as_nr] is valid
+ * - The refcount of the context is guaranteed to be zero.
+ * - kbasep_js_kctx_info::ctx::is_scheduled will be MALI_TRUE.
+ *
+ * The locking conditions on the caller are as follows:
+ * - it will be holding kbasep_js_kctx_info::ctx::jsctx_mutex.
+ * - it will be holding kbasep_js_device_data::runpool_mutex.
+ * - it will be holding kbasep_js_device_data::runpool_irq::lock (a spinlock)
+ *
+ * Due to a spinlock being held, this function must not call any APIs that sleep.
+ */
+void kbasep_js_policy_runpool_add_ctx(kbasep_js_policy *js_policy, kbase_context *kctx);
+
+/**
+ * @brief Remove a context from the Job Scheduler Policy's Run Pool
+ *
+ * The kctx->as_nr member is valid and the context has its submit_allowed flag
+ * set when this is called. The state of
+ * kbasep_js_device_data::runpool_irq::per_as_data[kctx->as_nr] is also
+ * valid. The refcount of the context is guaranteed to be zero.
+ *
+ * The locking conditions on the caller are as follows:
+ * - it will be holding kbasep_js_kctx_info::ctx::jsctx_mutex.
+ * - it will be holding kbasep_js_device_data::runpool_mutex.
+ * - it will be holding kbasep_js_device_data::runpool_irq::lock (a spinlock)
+ *
+ * Due to a spinlock being held, this function must not call any APIs that sleep.
+ */
+void kbasep_js_policy_runpool_remove_ctx(kbasep_js_policy *js_policy, kbase_context *kctx);
+
+/**
+ * @brief Indicate whether a context should be removed from the Run Pool
+ * (should be scheduled out).
+ *
+ * The kbasep_js_device_data::runpool_irq::lock will be held by the caller.
+ *
+ * @note This API is called from IRQ context.
+ */
+mali_bool kbasep_js_policy_should_remove_ctx(kbasep_js_policy *js_policy, kbase_context *kctx);
+
+/**
+ * @brief Synchronize with any timers acting upon the runpool
+ *
+ * The policy should check whether any timers it owns should be running. If
+ * they should not, the policy must cancel such timers and ensure they are not
+ * re-run by the time this function finishes.
+ *
+ * In particular, the timers must not be running when there are no more contexts
+ * on the runpool, because the GPU could be powered off soon after this call.
+ *
+ * The locking conditions on the caller are as follows:
+ * - it will be holding kbasep_js_kctx_info::ctx::jsctx_mutex.
+ * - it will be holding kbasep_js_device_data::runpool_mutex.
+ */
+void kbasep_js_policy_runpool_timers_sync(kbasep_js_policy *js_policy);
+
+
+/**
+ * @brief Indicate whether a new context has a higher priority than the current context.
+ *
+ *
+ * The caller has the following conditions on locking:
+ * - kbasep_js_kctx_info::ctx::jsctx_mutex will be held for \a new_ctx
+ *
+ * This function must not sleep, because an IRQ spinlock might be held whilst
+ * this is called.
+ *
+ * @note There is nothing to stop the priority of \a current_ctx changing
+ * during or immediately after this function is called (because its jsctx_mutex
+ * cannot be held). Therefore, this function should only be seen as a heuristic
+ * guide as to whether \a new_ctx is higher priority than \a current_ctx
+ */
+mali_bool kbasep_js_policy_ctx_has_priority(kbasep_js_policy *js_policy, kbase_context *current_ctx, kbase_context *new_ctx);
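+
+/* Illustrative usage sketch (an assumption): during enqueue of a
+ * high-priority context, deciding whether to evict a running one. */
+#if 0
+ if (kbasep_js_policy_ctx_has_priority(js_policy, running_kctx, new_kctx)) {
+ /* schedule out running_kctx, then schedule in new_kctx */
+ }
+#endif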
+
+ /** @} *//* end group kbase_js_policy_ctx */
+
+/**
+ * @addtogroup kbase_js_policy_job Job Scheduler Policy, Job Chain Management API
+ * @{
+ *
+ * <b>Refer to @ref page_kbase_js_policy for an overview and detailed operation of
+ * the Job Scheduler Policy and its use from the Job Scheduler Core.</b>
+ */
+
+/**
+ * @brief Job Scheduler Policy Job Info structure
+ *
+ * This structure is embedded in the kbase_jd_atom structure. It is used to:
+ * - track information needed for the policy to schedule the job (e.g. time
+ * used, OS priority etc.)
+ * - link together jobs into a queue/buffer, so that a kbase_jd_atom can be
+ * obtained as the container of the policy job info. This allows the API to
+ * return what "the next job" should be.
+ * - obtain other information already stored in the kbase_context for
+ * scheduling purposes (e.g. user-side relative priority)
+ */
+union kbasep_js_policy_job_info;
+
+/**
+ * @brief Initialize a job for use with the Job Scheduler Policy
+ *
+ * This function initializes the kbasep_js_policy_job_info structure within the
+ * kbase_jd_atom. It will only initialize/allocate resources that are specific
+ * to the job.
+ *
+ * That is, this function makes \b no attempt to:
+ * - initialize any context/policy-wide information
+ * - enqueue the job on the policy.
+ *
+ * At some later point, the following functions must be called on the job, in this order:
+ * - kbasep_js_policy_register_job() to register the job and initialize policy/context wide data.
+ * - kbasep_js_policy_enqueue_job() to enqueue the job
+ *
+ * A job must only ever be initialized on the Policy once, and must be
+ * terminated on the Policy before the job is freed.
+ *
+ * The caller will not be holding any locks, and so this function will not
+ * modify any information in \a kctx or \a js_policy.
+ *
+ * @return MALI_ERROR_NONE if initialization was correct.
+ */
+mali_error kbasep_js_policy_init_job(const kbasep_js_policy *js_policy, const kbase_context *kctx, kbase_jd_atom *katom);
+
+/**
+ * @brief Register context/policy-wide information for a job on the Job Scheduler Policy.
+ *
+ * Registers the job with the policy. This is used to track the job before it
+ * has been enqueued/requeued by kbasep_js_policy_enqueue_job(). Specifically,
+ * it is used to update information under a lock that could not be updated at
+ * kbasep_js_policy_init_job() time (such as context/policy-wide data).
+ *
+ * @note This function will not fail, and hence does not allocate any
+ * resources. Any failures that could occur on registration will be caught
+ * during kbasep_js_policy_init_job() instead.
+ *
+ * A job must only ever be registered on the Policy once, and must be
+ * deregistered on the Policy on completion (whether or not that completion was
+ * success/failure).
+ *
+ * The caller has the following conditions on locking:
+ * - kbasep_js_kctx_info::ctx::jsctx_mutex will be held.
+ */
+void kbasep_js_policy_register_job(kbasep_js_policy *js_policy, kbase_context *kctx, kbase_jd_atom *katom);
+
+/**
+ * @brief De-register context/policy-wide information for a job on the Job Scheduler Policy.
+ *
+ * This must be used before terminating the resources associated with using a
+ * job in the Job Scheduler Policy. This function does not itself terminate any
+ * resources, at most it just updates information in the policy and context.
+ *
+ * The caller has the following conditions on locking:
+ * - kbasep_js_kctx_info::ctx::jsctx_mutex will be held.
+ */
+void kbasep_js_policy_deregister_job(kbasep_js_policy *js_policy, kbase_context *kctx, kbase_jd_atom *katom);
+
+/**
+ * @brief Dequeue a Job for a job slot from the Job Scheduler Policy Run Pool
+ *
+ * The job returned by the policy will match at least one of the bits in the
+ * job slot's core requirements (but it may match more than one, or all @ref
+ * base_jd_core_req bits supported by the job slot).
+ *
+ * In addition, the requirements of the job returned will be a subset of those
+ * requested - the job returned will not have requirements that \a job_slot_idx
+ * cannot satisfy.
+ *
+ * The caller will submit the job to the GPU as soon as the GPU's NEXT register
+ * for the corresponding slot is empty. Of course, the GPU will then only run
+ * this new job when the currently executing job (in the jobslot's HEAD
+ * register) has completed.
+ *
+ * @return MALI_TRUE if a job was available, and *katom_ptr points to
+ * the atom dequeued.
+ * @return MALI_FALSE if no jobs were available among all ctxs in the Run Pool.
+ *
+ * @note base_jd_core_req is currently a u8 - beware of type conversion.
+ *
+ * The caller has the following conditions on locking:
+ * - kbasep_js_device_data::runpool_irq::lock will be held.
+ * - kbasep_js_device_data::runpool_mutex will be held.
+ * - kbasep_js_kctx_info::ctx::jsctx_mutex will be held.
+ */
+mali_bool kbasep_js_policy_dequeue_job(kbase_device *kbdev, int job_slot_idx, kbase_jd_atom ** const katom_ptr);
+
+/**
+ * @brief Requeue a Job back into the Job Scheduler Policy Run Pool
+ *
+ * This will be used to enqueue a job after its creation and also to requeue
+ * a job into the Run Pool that was previously dequeued (running). It notifies
+ * the policy that the job should be run again at some point later.
+ *
+ * The caller has the following conditions on locking:
+ * - kbasep_js_device_data::runpool_irq::lock (a spinlock) will be held.
+ * - kbasep_js_device_data::runpool_mutex will be held.
+ * - kbasep_js_kctx_info::ctx::jsctx_mutex will be held.
+ */
+void kbasep_js_policy_enqueue_job(kbasep_js_policy *js_policy, kbase_jd_atom *katom);
+
+/**
+ * @brief Log the result of a job: the time spent on a job/context, and whether
+ * the job failed or not.
+ *
+ * Since a kbase_jd_atom contains a pointer to the kbase_context owning it,
+ * then this can also be used to log time on either/both the job and the
+ * containing context.
+ *
+ * The completion state of the job can be found by examining \a katom->event.event_code
+ *
+ * If the Job failed and the policy is implementing fair-sharing, then the
+ * policy must penalize the failing job/context:
+ * - At the very least, it should penalize the time taken by the amount of
+ * time spent processing the IRQ in SW. This is because a job in the NEXT slot
+ * waiting to run will be delayed until the failing job has had the IRQ
+ * cleared.
+ * - \b Optionally, the policy could apply other penalties. For example, based
+ * on a threshold of a number of failing jobs, after which a large penalty is
+ * applied.
+ *
+ * The kbasep_js_device_data::runpool_mutex will be held by the caller.
+ *
+ * @note This API is called from IRQ context.
+ *
+ * The caller has the following conditions on locking:
+ * - kbasep_js_device_data::runpool_irq::lock will be held.
+ *
+ * @param js_policy job scheduler policy
+ * @param katom job dispatch atom
+ * @param time_spent_us the time spent by the job, in microseconds (10^-6 seconds).
+ */
+void kbasep_js_policy_log_job_result(kbasep_js_policy *js_policy, kbase_jd_atom *katom, u64 time_spent_us);
+
+ /** @} *//* end group kbase_js_policy_job */
+
+ /** @} *//* end group kbase_js_policy */
+ /** @} *//* end group base_kbase_api */
+ /** @} *//* end group base_api */
+
+#endif /* _KBASE_JS_POLICY_H_ */
diff --git a/drivers/gpu/arm/t6xx/kbase/src/common/mali_kbase_js_policy_cfs.c b/drivers/gpu/arm/t6xx/kbase/src/common/mali_kbase_js_policy_cfs.c
new file mode 100755
index 00000000000..38917ce8a2d
--- /dev/null
+++ b/drivers/gpu/arm/t6xx/kbase/src/common/mali_kbase_js_policy_cfs.c
@@ -0,0 +1,1436 @@
+/*
+ *
+ * (C) COPYRIGHT 2011-2013 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained
+ * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
+ * Boston, MA 02110-1301, USA.
+ *
+ */
+
+
+
+/*
+ * Job Scheduler: Completely Fair Policy Implementation
+ */
+
+#include <kbase/src/common/mali_kbase.h>
+#include <kbase/src/common/mali_kbase_jm.h>
+#include <kbase/src/common/mali_kbase_js.h>
+#include <kbase/src/common/mali_kbase_js_policy_cfs.h>
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 9, 0)
+#include <linux/sched/rt.h>
+#endif
+
+/**
+ * Define for when dumping is enabled.
+ * This should not be based on the instrumentation level, as whether dumping is
+ * enabled for a particular level is down to the integrator. However, this is
+ * used for now, as otherwise the cinstr headers would be needed.
+ */
+#define CINSTR_DUMPING_ENABLED (2 == MALI_INSTRUMENTATION_LEVEL)
+
+/** Fixed point constants used for runtime weight calculations */
+#define WEIGHT_FIXEDPOINT_SHIFT 10
+#define WEIGHT_TABLE_SIZE 40
+#define WEIGHT_0_NICE (WEIGHT_TABLE_SIZE/2)
+#define WEIGHT_0_VAL (1 << WEIGHT_FIXEDPOINT_SHIFT)
+
+#define LOOKUP_VARIANT_MASK ((1u<<KBASEP_JS_MAX_NR_CORE_REQ_VARIANTS) - 1u)
+
+#define PROCESS_PRIORITY_MIN (-20)
+#define PROCESS_PRIORITY_MAX (19)
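+
+/* Illustrative fixed-point arithmetic (an assumption about how the constants
+ * above combine): with Q10 weights (WEIGHT_FIXEDPOINT_SHIFT == 10), a
+ * weighted runtime is (time * weight) >> shift, so the nice-0 weight
+ * WEIGHT_0_VAL (1 << 10) leaves the time unchanged. */
+#if 0
+static inline u64 example_weighted_time(u64 time_us, u32 weight)
+{
+ return (time_us * weight) >> WEIGHT_FIXEDPOINT_SHIFT;
+}
+#endif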
+
+/** Core requirements that all the variants support */
+#define JS_CORE_REQ_ALL_OTHERS \
+ (BASE_JD_REQ_CF | BASE_JD_REQ_V | BASE_JD_REQ_PERMON | BASE_JD_REQ_EXTERNAL_RESOURCES)
+
+/** Context requirements that all the variants support */
+
+/* In HW issue 8987 workaround, restrict Compute-only contexts and Compute jobs onto job slot[2],
+ * which will ensure their affinity does not intersect GLES jobs */
+#define JS_CTX_REQ_ALL_OTHERS_8987 \
+ (KBASE_CTX_FLAG_PRIVILEGED)
+#define JS_CORE_REQ_COMPUTE_SLOT_8987 \
+ (BASE_JD_REQ_CS)
+#define JS_CORE_REQ_ONLY_COMPUTE_SLOT_8987 \
+ (BASE_JD_REQ_ONLY_COMPUTE)
+
+/* Otherwise, compute-only contexts/compute jobs can use any job slot */
+#define JS_CTX_REQ_ALL_OTHERS \
+ (KBASE_CTX_FLAG_PRIVILEGED | KBASE_CTX_FLAG_HINT_ONLY_COMPUTE)
+#define JS_CORE_REQ_COMPUTE_SLOT \
+ (BASE_JD_REQ_CS | BASE_JD_REQ_ONLY_COMPUTE)
+
+/* core_req variants are ordered by least restrictive first, so that our
+ * algorithm in cached_variant_idx_init picks the least restrictive variant for
+ * each job. Note that the coherent_group requirement is added to all CS variants as the
+ * selection of job-slot does not depend on the coherency requirement. */
+static const kbasep_atom_req core_req_variants[] = {
+ {
+ /* 0: Fragment variant */
+ (JS_CORE_REQ_ALL_OTHERS | BASE_JD_REQ_FS | BASE_JD_REQ_COHERENT_GROUP),
+ (JS_CTX_REQ_ALL_OTHERS),
+ 0},
+ {
+ /* 1: Compute variant, can use all coregroups */
+ (JS_CORE_REQ_ALL_OTHERS | JS_CORE_REQ_COMPUTE_SLOT),
+ (JS_CTX_REQ_ALL_OTHERS),
+ 0},
+ {
+ /* 2: Compute variant, uses only coherent coregroups */
+ (JS_CORE_REQ_ALL_OTHERS | JS_CORE_REQ_COMPUTE_SLOT | BASE_JD_REQ_COHERENT_GROUP),
+ (JS_CTX_REQ_ALL_OTHERS),
+ 0},
+ {
+ /* 3: Compute variant, might only use coherent coregroup, and must use tiling */
+ (JS_CORE_REQ_ALL_OTHERS | JS_CORE_REQ_COMPUTE_SLOT | BASE_JD_REQ_COHERENT_GROUP | BASE_JD_REQ_T),
+ (JS_CTX_REQ_ALL_OTHERS),
+ 0},
+
+ {
+ /* 4: Unused */
+ 0,
+ 0,
+ 0},
+
+ {
+ /* 5: Compute variant for specific-coherent-group targeting CoreGroup 0 */
+ (JS_CORE_REQ_ALL_OTHERS | JS_CORE_REQ_COMPUTE_SLOT | BASE_JD_REQ_COHERENT_GROUP | BASE_JD_REQ_SPECIFIC_COHERENT_GROUP),
+ (JS_CTX_REQ_ALL_OTHERS),
+ 0 /* device_nr */
+ },
+ {
+ /* 6: Compute variant for specific-coherent-group targeting CoreGroup 1 */
+ (JS_CORE_REQ_ALL_OTHERS | JS_CORE_REQ_COMPUTE_SLOT | BASE_JD_REQ_COHERENT_GROUP | BASE_JD_REQ_SPECIFIC_COHERENT_GROUP),
+ (JS_CTX_REQ_ALL_OTHERS),
+ 1 /* device_nr */
+ },
+
+ /* Unused core_req variants, to bring the total up to a power of 2 */
+ {
+ /* 7 */
+ 0,
+ 0,
+ 0},
+};
+
+static const kbasep_atom_req core_req_variants_8987[] = {
+ {
+ /* 0: Fragment variant */
+ (JS_CORE_REQ_ALL_OTHERS | BASE_JD_REQ_FS | BASE_JD_REQ_COHERENT_GROUP),
+ (JS_CTX_REQ_ALL_OTHERS_8987),
+ 0},
+ {
+ /* 1: Compute variant, can use all coregroups */
+ (JS_CORE_REQ_ALL_OTHERS | JS_CORE_REQ_COMPUTE_SLOT_8987),
+ (JS_CTX_REQ_ALL_OTHERS_8987),
+ 0},
+ {
+ /* 2: Compute variant, uses only coherent coregroups */
+ (JS_CORE_REQ_ALL_OTHERS | JS_CORE_REQ_COMPUTE_SLOT_8987 | BASE_JD_REQ_COHERENT_GROUP),
+ (JS_CTX_REQ_ALL_OTHERS_8987),
+ 0},
+ {
+ /* 3: Compute variant, might only use coherent coregroup, and must use tiling */
+ (JS_CORE_REQ_ALL_OTHERS | JS_CORE_REQ_COMPUTE_SLOT_8987 | BASE_JD_REQ_COHERENT_GROUP | BASE_JD_REQ_T),
+ (JS_CTX_REQ_ALL_OTHERS_8987),
+ 0},
+
+ {
+	 /* 4: Variant guaranteed to support Compute contexts/atoms
+ *
+ * In the case of a context that's specified as 'Only Compute', it'll
+ * not allow Tiler or Fragment atoms, and so those get rejected */
+ (JS_CORE_REQ_ALL_OTHERS | JS_CORE_REQ_ONLY_COMPUTE_SLOT_8987 | BASE_JD_REQ_COHERENT_GROUP),
+ (JS_CTX_REQ_ALL_OTHERS_8987 | KBASE_CTX_FLAG_HINT_ONLY_COMPUTE),
+ 0},
+
+ {
+	 /* 5: Compute variant for specific-coherent-group targeting CoreGroup 0
+ * Specifically, this only allows 'Only Compute' contexts/atoms */
+ (JS_CORE_REQ_ALL_OTHERS | JS_CORE_REQ_ONLY_COMPUTE_SLOT_8987 | BASE_JD_REQ_COHERENT_GROUP | BASE_JD_REQ_SPECIFIC_COHERENT_GROUP),
+ (JS_CTX_REQ_ALL_OTHERS_8987 | KBASE_CTX_FLAG_HINT_ONLY_COMPUTE),
+ 0 /* device_nr */
+ },
+ {
+	 /* 6: Compute variant for specific-coherent-group targeting CoreGroup 1
+ * Specifically, this only allows 'Only Compute' contexts/atoms */
+ (JS_CORE_REQ_ALL_OTHERS | JS_CORE_REQ_ONLY_COMPUTE_SLOT_8987 | BASE_JD_REQ_COHERENT_GROUP | BASE_JD_REQ_SPECIFIC_COHERENT_GROUP),
+ (JS_CTX_REQ_ALL_OTHERS_8987 | KBASE_CTX_FLAG_HINT_ONLY_COMPUTE),
+ 1 /* device_nr */
+ },
+ /* Unused core_req variants, to bring the total up to a power of 2 */
+ {
+ /* 7 */
+ 0,
+ 0,
+ 0},
+};
+
+#define CORE_REQ_VARIANT_FRAGMENT 0
+#define CORE_REQ_VARIANT_COMPUTE_ALL_CORES 1
+#define CORE_REQ_VARIANT_COMPUTE_ONLY_COHERENT_GROUP 2
+#define CORE_REQ_VARIANT_COMPUTE_OR_TILING 3
+#define CORE_REQ_VARIANT_COMPUTE_SPECIFIC_COHERENT_0 5
+#define CORE_REQ_VARIANT_COMPUTE_SPECIFIC_COHERENT_1 6
+
+#define CORE_REQ_VARIANT_ONLY_COMPUTE_8987 4
+#define CORE_REQ_VARIANT_ONLY_COMPUTE_8987_SPECIFIC_COHERENT_0 5
+#define CORE_REQ_VARIANT_ONLY_COMPUTE_8987_SPECIFIC_COHERENT_1 6
+
+#define NUM_CORE_REQ_VARIANTS NELEMS(core_req_variants)
+#define NUM_CORE_REQ_VARIANTS_8987 NELEMS(core_req_variants_8987)
+
+/** Mappings between job slot and variant lists for Soft-Stoppable State */
+static const u32 variants_supported_ss_state[] = {
+ /* js[0] uses Fragment only */
+ (1u << CORE_REQ_VARIANT_FRAGMENT),
+
+ /* js[1] uses: Compute-all-cores, Compute-only-coherent, Compute-or-Tiling,
+ * compute-specific-coregroup-0 */
+ (1u << CORE_REQ_VARIANT_COMPUTE_ALL_CORES)
+ | (1u << CORE_REQ_VARIANT_COMPUTE_ONLY_COHERENT_GROUP)
+ | (1u << CORE_REQ_VARIANT_COMPUTE_OR_TILING)
+ | (1u << CORE_REQ_VARIANT_COMPUTE_SPECIFIC_COHERENT_0),
+
+ /* js[2] uses: Compute-only-coherent, compute-specific-coregroup-1 */
+ (1u << CORE_REQ_VARIANT_COMPUTE_ONLY_COHERENT_GROUP)
+ | (1u << CORE_REQ_VARIANT_COMPUTE_SPECIFIC_COHERENT_1)
+};
+
+/** Mappings between job slot and variant lists for Soft-Stoppable State, when
+ * we have atoms that can use all the cores (KBASEP_JS_CTX_ATTR_COMPUTE_ALL_CORES)
+ * and there's more than one coregroup */
+static const u32 variants_supported_ss_allcore_state[] = {
+ /* js[0] uses Fragment only */
+ (1u << CORE_REQ_VARIANT_FRAGMENT),
+
+ /* js[1] uses: Compute-all-cores, Compute-only-coherent, Compute-or-Tiling,
+ * compute-specific-coregroup-0, compute-specific-coregroup-1 */
+ (1u << CORE_REQ_VARIANT_COMPUTE_ALL_CORES)
+ | (1u << CORE_REQ_VARIANT_COMPUTE_ONLY_COHERENT_GROUP)
+ | (1u << CORE_REQ_VARIANT_COMPUTE_OR_TILING)
+ | (1u << CORE_REQ_VARIANT_COMPUTE_SPECIFIC_COHERENT_0)
+ | (1u << CORE_REQ_VARIANT_COMPUTE_SPECIFIC_COHERENT_1),
+
+ /* js[2] not used */
+ 0
+};
+
+/** Mappings between job slot and variant lists for Soft-Stoppable State for
+ * BASE_HW_ISSUE_8987
+ *
+ * @note There is no 'allcores' variant of this, because this HW issue forces all
+ * atoms with BASE_JD_CORE_REQ_SPECIFIC_COHERENT_GROUP to use slot 2 anyway -
+ * hence regardless of whether a specific coregroup is targeted, those atoms
+ * still make progress. */
+static const u32 variants_supported_ss_state_8987[] = {
+ /* js[0] uses Fragment only */
+ (1u << CORE_REQ_VARIANT_FRAGMENT),
+
+ /* js[1] uses: Compute-all-cores, Compute-only-coherent, Compute-or-Tiling */
+ (1u << CORE_REQ_VARIANT_COMPUTE_ALL_CORES)
+ | (1u << CORE_REQ_VARIANT_COMPUTE_ONLY_COHERENT_GROUP)
+ | (1u << CORE_REQ_VARIANT_COMPUTE_OR_TILING),
+
+	/* js[2] uses: All Only-compute atoms (including those targeting a
+ * specific coregroup), and nothing else. This is because their affinity
+ * must not intersect with non-only-compute atoms.
+ *
+	 * As a side effect, this causes the 'device_nr' for atoms targeting a
+ * specific coregroup to be ignored */
+ (1u << CORE_REQ_VARIANT_ONLY_COMPUTE_8987)
+ | (1u << CORE_REQ_VARIANT_ONLY_COMPUTE_8987_SPECIFIC_COHERENT_0)
+ | (1u << CORE_REQ_VARIANT_ONLY_COMPUTE_8987_SPECIFIC_COHERENT_1)
+};
+
+/* Defines for easy asserts 'is scheduled'/'is queued'/'is neither queued nor scheduled' */
+#define KBASEP_JS_CHECKFLAG_QUEUED (1u << 0) /**< Check the queued state */
+#define KBASEP_JS_CHECKFLAG_SCHEDULED (1u << 1) /**< Check the scheduled state */
+#define KBASEP_JS_CHECKFLAG_IS_QUEUED (1u << 2) /**< Expect queued state to be set */
+#define KBASEP_JS_CHECKFLAG_IS_SCHEDULED (1u << 3) /**< Expect scheduled state to be set */
+
+enum {
+ KBASEP_JS_CHECK_NOTQUEUED = KBASEP_JS_CHECKFLAG_QUEUED,
+ KBASEP_JS_CHECK_NOTSCHEDULED = KBASEP_JS_CHECKFLAG_SCHEDULED,
+ KBASEP_JS_CHECK_QUEUED = KBASEP_JS_CHECKFLAG_QUEUED | KBASEP_JS_CHECKFLAG_IS_QUEUED,
+ KBASEP_JS_CHECK_SCHEDULED = KBASEP_JS_CHECKFLAG_SCHEDULED | KBASEP_JS_CHECKFLAG_IS_SCHEDULED
+};
+
+typedef u32 kbasep_js_check;
+
+/*
+ * Private Functions
+ */
+
+/* Table autogenerated using util built from: kbase/scripts/gen_cfs_weight_of_prio.c */
+
+/* weight = 1.25 */
+static const int weight_of_priority[] = {
+ /* -20 */ 11, 14, 18, 23,
+ /* -16 */ 29, 36, 45, 56,
+ /* -12 */ 70, 88, 110, 137,
+ /* -8 */ 171, 214, 268, 335,
+ /* -4 */ 419, 524, 655, 819,
+ /* 0 */ 1024, 1280, 1600, 2000,
+ /* 4 */ 2500, 3125, 3906, 4883,
+ /* 8 */ 6104, 7630, 9538, 11923,
+ /* 12 */ 14904, 18630, 23288, 29110,
+ /* 16 */ 36388, 45485, 56856, 71070
+};
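+
+/* Illustrative sketch of how the table above can be regenerated: each entry
+ * is the previous one scaled by 1.25 (rounded to the nearest integer), with
+ * index WEIGHT_0_NICE (nice 0) anchored at WEIGHT_0_VAL == 1024. The helper
+ * below is a hypothetical reconstruction of the generator's arithmetic, not
+ * part of the driver, and is compiled out. */
+#if 0
+static int approx_weight_for_nice(int nice)
+{
+	int w = WEIGHT_0_VAL;	/* 1024 represents 1.0 in 10-bit fixed point */
+
+	while (nice > 0) {	/* lower priority: each level costs 25% more */
+		w = (w * 5 + 2) / 4;
+		nice--;
+	}
+	while (nice < 0) {	/* higher priority: each level costs 25% less */
+		w = (w * 4 + 2) / 5;
+		nice++;
+	}
+	return w;
+}
+#endif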
+
+/**
+ * @note There is nothing to stop the priority of the ctx containing \a
+ * ctx_info changing during or immediately after this function is called
+ * (because its jsctx_mutex cannot be held during IRQ). Therefore, this
+ * function should only be seen as a heuristic guide as to the priority weight
+ * of the context.
+ */
+STATIC u64 priority_weight(kbasep_js_policy_cfs_ctx *ctx_info, u64 time_us)
+{
+ u64 time_delta_us;
+ int priority;
+ priority = ctx_info->process_priority + ctx_info->bag_priority;
+
+ /* Adjust runtime_us using priority weight if required */
+ if (priority != 0 && time_us != 0) {
+ int clamped_priority;
+
+ /* Clamp values to min..max weights */
+ if (priority > PROCESS_PRIORITY_MAX)
+ clamped_priority = PROCESS_PRIORITY_MAX;
+ else if (priority < PROCESS_PRIORITY_MIN)
+ clamped_priority = PROCESS_PRIORITY_MIN;
+ else
+ clamped_priority = priority;
+
+ /* Fixed point multiplication */
+ time_delta_us = (time_us * weight_of_priority[WEIGHT_0_NICE + clamped_priority]);
+ /* Remove fraction */
+ time_delta_us = time_delta_us >> WEIGHT_FIXEDPOINT_SHIFT;
+ /* Make sure the time always increases */
+ if (0 == time_delta_us)
+ time_delta_us++;
+ } else {
+ time_delta_us = time_us;
+ }
+
+ return time_delta_us;
+}
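+
+/* Worked example for priority_weight(): with time_us == 1000 and a combined
+ * priority of +4, weight_of_priority[24] == 2500, so the charged time is
+ * (1000 * 2500) >> 10 == 2441us; at priority -4 the weight is 419, giving
+ * (1000 * 419) >> 10 == 409us. Higher-priority (more negative nice)
+ * contexts therefore accrue runtime more slowly and stay nearer the head of
+ * the queue for longer. */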
+
+#if KBASE_TRACE_ENABLE != 0
+STATIC int kbasep_js_policy_trace_get_refcnt_nolock(kbase_device *kbdev, kbase_context *kctx)
+{
+ kbasep_js_device_data *js_devdata;
+ int as_nr;
+ int refcnt = 0;
+
+ js_devdata = &kbdev->js_data;
+
+ as_nr = kctx->as_nr;
+ if (as_nr != KBASEP_AS_NR_INVALID) {
+ kbasep_js_per_as_data *js_per_as_data;
+ js_per_as_data = &js_devdata->runpool_irq.per_as_data[as_nr];
+
+ refcnt = js_per_as_data->as_busy_refcount;
+ }
+
+ return refcnt;
+}
+
+STATIC INLINE int kbasep_js_policy_trace_get_refcnt(kbase_device *kbdev, kbase_context *kctx)
+{
+ unsigned long flags;
+ kbasep_js_device_data *js_devdata;
+ int refcnt = 0;
+
+ js_devdata = &kbdev->js_data;
+
+ spin_lock_irqsave(&js_devdata->runpool_irq.lock, flags);
+ refcnt = kbasep_js_policy_trace_get_refcnt_nolock(kbdev, kctx);
+ spin_unlock_irqrestore(&js_devdata->runpool_irq.lock, flags);
+
+ return refcnt;
+}
+#else /* KBASE_TRACE_ENABLE != 0 */
+STATIC int kbasep_js_policy_trace_get_refcnt_nolock(kbase_device *kbdev, kbase_context *kctx)
+{
+ CSTD_UNUSED(kbdev);
+ CSTD_UNUSED(kctx);
+ return 0;
+}
+
+STATIC INLINE int kbasep_js_policy_trace_get_refcnt(kbase_device *kbdev, kbase_context *kctx)
+{
+ CSTD_UNUSED(kbdev);
+ CSTD_UNUSED(kctx);
+ return 0;
+}
+#endif /* KBASE_TRACE_ENABLE != 0 */
+
+#ifdef CONFIG_MALI_DEBUG
+STATIC void kbasep_js_debug_check(kbasep_js_policy_cfs *policy_info, kbase_context *kctx, kbasep_js_check check_flag)
+{
+ /* This function uses the ternary operator and non-explicit comparisons,
+ * because it makes for much shorter, easier to read code */
+
+ if (check_flag & KBASEP_JS_CHECKFLAG_QUEUED) {
+ mali_bool is_queued;
+ mali_bool expect_queued;
+ is_queued = (kbasep_list_member_of(&policy_info->ctx_queue_head, &kctx->jctx.sched_info.runpool.policy_ctx.cfs.list)) ? MALI_TRUE : MALI_FALSE;
+
+ if (!is_queued)
+ is_queued = (kbasep_list_member_of(&policy_info->ctx_rt_queue_head, &kctx->jctx.sched_info.runpool.policy_ctx.cfs.list)) ? MALI_TRUE : MALI_FALSE;
+
+ expect_queued = (check_flag & KBASEP_JS_CHECKFLAG_IS_QUEUED) ? MALI_TRUE : MALI_FALSE;
+
+ KBASE_DEBUG_ASSERT_MSG(expect_queued == is_queued, "Expected context %p to be %s but it was %s\n", kctx, (expect_queued) ? "queued" : "not queued", (is_queued) ? "queued" : "not queued");
+
+ }
+
+ if (check_flag & KBASEP_JS_CHECKFLAG_SCHEDULED) {
+ mali_bool is_scheduled;
+ mali_bool expect_scheduled;
+ is_scheduled = (kbasep_list_member_of(&policy_info->scheduled_ctxs_head, &kctx->jctx.sched_info.runpool.policy_ctx.cfs.list)) ? MALI_TRUE : MALI_FALSE;
+
+ expect_scheduled = (check_flag & KBASEP_JS_CHECKFLAG_IS_SCHEDULED) ? MALI_TRUE : MALI_FALSE;
+ KBASE_DEBUG_ASSERT_MSG(expect_scheduled == is_scheduled, "Expected context %p to be %s but it was %s\n", kctx, (expect_scheduled) ? "scheduled" : "not scheduled", (is_scheduled) ? "scheduled" : "not scheduled");
+
+ }
+
+}
+#else /* CONFIG_MALI_DEBUG */
+STATIC void kbasep_js_debug_check(kbasep_js_policy_cfs *policy_info, kbase_context *kctx, kbasep_js_check check_flag)
+{
+ CSTD_UNUSED(policy_info);
+ CSTD_UNUSED(kctx);
+ CSTD_UNUSED(check_flag);
+ return;
+}
+#endif /* CONFIG_MALI_DEBUG */
+
+STATIC INLINE void set_slot_to_variant_lookup(u32 *bit_array, u32 slot_idx, u32 variants_supported)
+{
+ u32 overall_bit_idx = slot_idx * KBASEP_JS_MAX_NR_CORE_REQ_VARIANTS;
+ u32 word_idx = overall_bit_idx / 32;
+ u32 bit_idx = overall_bit_idx % 32;
+
+ KBASE_DEBUG_ASSERT(slot_idx < BASE_JM_MAX_NR_SLOTS);
+ KBASE_DEBUG_ASSERT((variants_supported & ~LOOKUP_VARIANT_MASK) == 0);
+
+ bit_array[word_idx] |= variants_supported << bit_idx;
+}
+
+STATIC INLINE u32 get_slot_to_variant_lookup(u32 *bit_array, u32 slot_idx)
+{
+ u32 overall_bit_idx = slot_idx * KBASEP_JS_MAX_NR_CORE_REQ_VARIANTS;
+ u32 word_idx = overall_bit_idx / 32;
+ u32 bit_idx = overall_bit_idx % 32;
+
+ u32 res;
+
+ KBASE_DEBUG_ASSERT(slot_idx < BASE_JM_MAX_NR_SLOTS);
+
+ res = bit_array[word_idx] >> bit_idx;
+ res &= LOOKUP_VARIANT_MASK;
+
+ return res;
+}
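+
+/* Worked example of the lookup packing above: with
+ * KBASEP_JS_MAX_NR_CORE_REQ_VARIANTS == 8, each slot owns an 8-bit field
+ * and four slots pack into each u32:
+ *
+ *   bit_array[0] = [ slot 3 | slot 2 | slot 1 | slot 0 ]
+ *                    31..24   23..16   15..8    7..0
+ *
+ * so set_slot_to_variant_lookup(arr, 2, 0x06) ORs 0x06 into bits 23..16 of
+ * arr[0], and get_slot_to_variant_lookup(arr, 2) shifts right by 16 and
+ * masks with LOOKUP_VARIANT_MASK (0xff) to recover 0x06. */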
+
+/* Check the core_req_variants: make sure that every job slot is satisfied by
+ * one of the variants. This checks that cached_variant_idx_init will produce a
+ * valid result for jobs that make maximum use of the job slots.
+ *
+ * @note The checks are limited to the job slots - this does not check that
+ * every context requirement is covered (because some are intentionally not
+ * supported, such as KBASE_CTX_FLAG_SUBMIT_DISABLED) */
+#ifdef CONFIG_MALI_DEBUG
+STATIC void debug_check_core_req_variants(kbase_device *kbdev, kbasep_js_policy_cfs *policy_info)
+{
+ kbasep_js_device_data *js_devdata;
+ u32 i;
+ int j;
+
+ js_devdata = &kbdev->js_data;
+
+ for (j = 0; j < kbdev->gpu_props.num_job_slots; ++j) {
+ base_jd_core_req job_core_req;
+ mali_bool found = MALI_FALSE;
+
+ job_core_req = js_devdata->js_reqs[j];
+ for (i = 0; i < policy_info->num_core_req_variants; ++i) {
+ base_jd_core_req var_core_req;
+ var_core_req = policy_info->core_req_variants[i].core_req;
+
+ if ((var_core_req & job_core_req) == job_core_req) {
+ found = MALI_TRUE;
+ break;
+ }
+ }
+
+ /* Early-out on any failure */
+		KBASE_DEBUG_ASSERT_MSG(found != MALI_FALSE, "Job slot %d features 0x%x not matched by core_req_variants. " "Rework core_req_variants and variants_supported_<...>_state[] to match\n", j, job_core_req);
+ }
+}
+#endif
+
+STATIC void build_core_req_variants(kbase_device *kbdev, kbasep_js_policy_cfs *policy_info)
+{
+ KBASE_DEBUG_ASSERT(kbdev != NULL);
+ KBASE_DEBUG_ASSERT(policy_info != NULL);
+
+ if (kbase_hw_has_issue(kbdev, BASE_HW_ISSUE_8987)) {
+ KBASE_DEBUG_ASSERT(NUM_CORE_REQ_VARIANTS_8987 <= KBASEP_JS_MAX_NR_CORE_REQ_VARIANTS);
+
+ /* Assume a static set of variants */
+ memcpy(policy_info->core_req_variants, core_req_variants_8987, sizeof(core_req_variants_8987));
+
+ policy_info->num_core_req_variants = NUM_CORE_REQ_VARIANTS_8987;
+ } else {
+ KBASE_DEBUG_ASSERT(NUM_CORE_REQ_VARIANTS <= KBASEP_JS_MAX_NR_CORE_REQ_VARIANTS);
+
+ /* Assume a static set of variants */
+ memcpy(policy_info->core_req_variants, core_req_variants, sizeof(core_req_variants));
+
+ policy_info->num_core_req_variants = NUM_CORE_REQ_VARIANTS;
+ }
+
+ KBASE_DEBUG_CODE(debug_check_core_req_variants(kbdev, policy_info));
+}
+
+STATIC void build_slot_lookups(kbase_device *kbdev, kbasep_js_policy_cfs *policy_info)
+{
+ u8 i;
+ const u32 *variants_supported_ss_for_this_hw = variants_supported_ss_state;
+
+ KBASE_DEBUG_ASSERT(kbdev != NULL);
+ KBASE_DEBUG_ASSERT(policy_info != NULL);
+
+ KBASE_DEBUG_ASSERT(kbdev->gpu_props.num_job_slots <= NELEMS(variants_supported_ss_state));
+ KBASE_DEBUG_ASSERT(kbdev->gpu_props.num_job_slots <= NELEMS(variants_supported_ss_allcore_state));
+ KBASE_DEBUG_ASSERT(kbdev->gpu_props.num_job_slots <= NELEMS(variants_supported_ss_state_8987));
+
+ if (kbase_hw_has_issue(kbdev, BASE_HW_ISSUE_8987))
+ variants_supported_ss_for_this_hw = variants_supported_ss_state_8987;
+
+ /* Given the static set of variants, provide a static set of lookups */
+ for (i = 0; i < kbdev->gpu_props.num_job_slots; ++i) {
+ set_slot_to_variant_lookup(policy_info->slot_to_variant_lookup_ss_state, i, variants_supported_ss_for_this_hw[i]);
+
+ set_slot_to_variant_lookup(policy_info->slot_to_variant_lookup_ss_allcore_state, i, variants_supported_ss_allcore_state[i]);
+ }
+
+}
+
+STATIC mali_error cached_variant_idx_init(const kbasep_js_policy_cfs *policy_info, const kbase_context *kctx, kbase_jd_atom *atom)
+{
+ kbasep_js_policy_cfs_job *job_info;
+ u32 i;
+ base_jd_core_req job_core_req;
+ u32 job_device_nr;
+ kbase_context_flags ctx_flags;
+ const kbasep_js_kctx_info *js_kctx_info;
+ const kbase_device *kbdev;
+
+ KBASE_DEBUG_ASSERT(policy_info != NULL);
+ KBASE_DEBUG_ASSERT(kctx != NULL);
+ KBASE_DEBUG_ASSERT(atom != NULL);
+
+ kbdev = container_of(policy_info, const kbase_device, js_data.policy.cfs);
+ job_info = &atom->sched_info.cfs;
+ job_core_req = atom->core_req;
+ job_device_nr = atom->device_nr;
+ js_kctx_info = &kctx->jctx.sched_info;
+ ctx_flags = js_kctx_info->ctx.flags;
+
+	/* Initial check for atoms targeting a specific coregroup */
+ if ((job_core_req & BASE_JD_REQ_SPECIFIC_COHERENT_GROUP) != MALI_FALSE && job_device_nr >= kbdev->gpu_props.num_core_groups) {
+ /* device_nr exceeds the number of coregroups - not allowed by
+ * @ref base_jd_atom API contract */
+ return MALI_ERROR_FUNCTION_FAILED;
+ }
+
+ /* Pick a core_req variant that matches us. Since they're ordered by least
+ * restrictive first, it picks the least restrictive variant */
+ for (i = 0; i < policy_info->num_core_req_variants; ++i) {
+ base_jd_core_req var_core_req;
+ kbase_context_flags var_ctx_req;
+ u32 var_device_nr;
+ var_core_req = policy_info->core_req_variants[i].core_req;
+ var_ctx_req = policy_info->core_req_variants[i].ctx_req;
+ var_device_nr = policy_info->core_req_variants[i].device_nr;
+
+ if ((var_core_req & job_core_req) == job_core_req && (var_ctx_req & ctx_flags) == ctx_flags && ((var_core_req & BASE_JD_REQ_SPECIFIC_COHERENT_GROUP) == MALI_FALSE || var_device_nr == job_device_nr)) {
+ job_info->cached_variant_idx = i;
+ return MALI_ERROR_NONE;
+ }
+ }
+
+	/* Could not find a matching requirement; this should only be caused by an
+ * attempt to attack the driver. */
+ return MALI_ERROR_FUNCTION_FAILED;
+}
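+
+/* Worked example of the matching above (illustrative, non-8987 hardware,
+ * and assuming the context's flags are covered by JS_CTX_REQ_ALL_OTHERS):
+ * an atom with core_req == (BASE_JD_REQ_CS | BASE_JD_REQ_COHERENT_GROUP)
+ * fails variant 0 (no BASE_JD_REQ_CS there), fails variant 1 (no
+ * BASE_JD_REQ_COHERENT_GROUP there), and matches variant 2, whose core_req
+ * is a superset of the atom's bits - so cached_variant_idx becomes 2. */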
+
+STATIC mali_bool dequeue_job(kbase_device *kbdev,
+ kbase_context *kctx,
+ u32 variants_supported,
+ kbase_jd_atom ** const katom_ptr,
+ int job_slot_idx)
+{
+ kbasep_js_device_data *js_devdata;
+ kbasep_js_policy_cfs *policy_info;
+ kbasep_js_policy_cfs_ctx *ctx_info;
+
+ KBASE_DEBUG_ASSERT(kbdev != NULL);
+ KBASE_DEBUG_ASSERT(katom_ptr != NULL);
+ KBASE_DEBUG_ASSERT(kctx != NULL);
+
+ js_devdata = &kbdev->js_data;
+ policy_info = &js_devdata->policy.cfs;
+ ctx_info = &kctx->jctx.sched_info.runpool.policy_ctx.cfs;
+
+ /* Only submit jobs from contexts that are allowed */
+ if (kbasep_js_is_submit_allowed(js_devdata, kctx) != MALI_FALSE) {
+ /* Check each variant in turn */
+ while (variants_supported != 0) {
+ long variant_idx;
+ struct list_head *job_list;
+ variant_idx = ffs(variants_supported) - 1;
+ job_list = &ctx_info->job_list_head[variant_idx];
+
+ if (!list_empty(job_list)) {
+ /* Found a context with a matching job */
+ {
+ kbase_jd_atom *front_atom = list_entry(job_list->next, kbase_jd_atom, sched_info.cfs.list);
+ KBASE_TRACE_ADD_SLOT(kbdev, JS_POLICY_DEQUEUE_JOB, front_atom->kctx, front_atom, front_atom->jc, job_slot_idx);
+ }
+ *katom_ptr = list_entry(job_list->next, kbase_jd_atom, sched_info.cfs.list);
+ list_del(job_list->next);
+
+ (*katom_ptr)->sched_info.cfs.ticks = 0;
+
+ /* Put this context at the back of the Run Pool */
+ list_del(&kctx->jctx.sched_info.runpool.policy_ctx.cfs.list);
+ list_add_tail(&kctx->jctx.sched_info.runpool.policy_ctx.cfs.list, &policy_info->scheduled_ctxs_head);
+
+ return MALI_TRUE;
+ }
+
+ variants_supported &= ~(1u << variant_idx);
+ }
+ /* All variants checked by here */
+ }
+
+ /* The context does not have a matching job */
+
+ return MALI_FALSE;
+}
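+
+/* Illustrative walk-through of the variant scan above: for
+ * variants_supported == 0x06 (variants 1 and 2), ffs(0x06) == 2, so variant
+ * 1's job list is checked first; if it is empty, the bit is cleared to give
+ * 0x04 and variant 2 is checked next. The scan therefore visits variants in
+ * ascending order, i.e. least restrictive first. */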
+
+/**
+ * The caller must hold the runpool_irq spinlock when calling this function.
+ */
+STATIC INLINE mali_bool timer_callback_should_run(kbase_device *kbdev)
+{
+ kbasep_js_device_data *js_devdata;
+ s8 nr_running_ctxs;
+
+ KBASE_DEBUG_ASSERT(kbdev != NULL);
+ js_devdata = &kbdev->js_data;
+
+ /* nr_user_contexts_running is updated with the runpool_mutex. However, the
+	 * locking in the caller gives us a barrier that ensures
+	 * nr_user_contexts_running is up-to-date for reading */
+ nr_running_ctxs = js_devdata->nr_user_contexts_running;
+
+#ifdef CONFIG_MALI_DEBUG
+ if (js_devdata->softstop_always && nr_running_ctxs > 0) {
+ /* Debug support for allowing soft-stop on a single context */
+ return MALI_TRUE;
+ }
+#endif /* CONFIG_MALI_DEBUG */
+
+ if (kbase_hw_has_issue(kbdev, BASE_HW_ISSUE_9435)) {
+ /* Timeouts would have to be 4x longer (due to micro-architectural design)
+ * to support OpenCL conformance tests, so only run the timer when there's:
+ * - 2 or more CL contexts
+ * - 1 or more GLES contexts
+ *
+		 * NOTE: A context that has both Compute and Non-Compute jobs will
+		 * be treated as an OpenCL context (hence, we don't check
+ * KBASEP_JS_CTX_ATTR_NON_COMPUTE).
+ */
+ {
+ s8 nr_compute_ctxs = kbasep_js_ctx_attr_count_on_runpool(kbdev, KBASEP_JS_CTX_ATTR_COMPUTE);
+ s8 nr_noncompute_ctxs = nr_running_ctxs - nr_compute_ctxs;
+
+ return (mali_bool) (nr_compute_ctxs >= 2 || nr_noncompute_ctxs > 0);
+ }
+ } else {
+ /* Run the timer callback whenever you have at least 1 context */
+ return (mali_bool) (nr_running_ctxs > 0);
+ }
+}
+
+static enum hrtimer_restart timer_callback(struct hrtimer *timer)
+{
+ unsigned long flags;
+ kbase_device *kbdev;
+ kbasep_js_device_data *js_devdata;
+ kbasep_js_policy_cfs *policy_info;
+ int s;
+ mali_bool reset_needed = MALI_FALSE;
+
+ KBASE_DEBUG_ASSERT(timer != NULL);
+
+ policy_info = container_of(timer, kbasep_js_policy_cfs, scheduling_timer);
+ kbdev = container_of(policy_info, kbase_device, js_data.policy.cfs);
+ js_devdata = &kbdev->js_data;
+
+ /* Loop through the slots */
+ spin_lock_irqsave(&js_devdata->runpool_irq.lock, flags);
+ for (s = 0; s < kbdev->gpu_props.num_job_slots; s++) {
+ kbase_jm_slot *slot = &kbdev->jm_slots[s];
+ kbase_jd_atom *atom = NULL;
+
+ if (kbasep_jm_nr_jobs_submitted(slot) > 0) {
+ atom = kbasep_jm_peek_idx_submit_slot(slot, 0);
+ KBASE_DEBUG_ASSERT(atom != NULL);
+
+ if (kbasep_jm_is_dummy_workaround_job(kbdev, atom) != MALI_FALSE) {
+ /* Prevent further use of the atom - never cause a soft-stop, hard-stop, or a GPU reset due to it. */
+ atom = NULL;
+ }
+ }
+
+ if (atom != NULL) {
+ /* The current version of the model doesn't support Soft-Stop */
+ if (!kbase_hw_has_issue(kbdev, BASE_HW_ISSUE_5736)) {
+ u32 ticks = atom->sched_info.cfs.ticks++;
+
+#if !CINSTR_DUMPING_ENABLED
+ /* Job is Soft-Stoppable */
+ if (ticks == js_devdata->soft_stop_ticks) {
+ /* Job has been scheduled for at least js_devdata->soft_stop_ticks ticks.
+ * Soft stop the slot so we can run other jobs.
+ */
+ KBASE_DEBUG_PRINT_INFO(KBASE_JM, "Soft-stop");
+
+#if KBASE_DISABLE_SCHEDULING_SOFT_STOPS == 0
+ kbase_job_slot_softstop(kbdev, s, atom);
+#endif
+ } else if (ticks == js_devdata->hard_stop_ticks_ss) {
+ /* Job has been scheduled for at least js_devdata->hard_stop_ticks_ss ticks.
+ * It should have been soft-stopped by now. Hard stop the slot.
+ */
+#if KBASE_DISABLE_SCHEDULING_HARD_STOPS == 0
+ KBASE_DEBUG_PRINT_WARN(KBASE_JM, "JS: Job Hard-Stopped (took more than %lu ticks at %lu ms/tick)", (unsigned long)ticks, (unsigned long)(js_devdata->scheduling_tick_ns / 1000000u));
+ kbase_job_slot_hardstop(atom->kctx, s, atom);
+#endif
+ } else if (ticks == js_devdata->gpu_reset_ticks_ss) {
+ /* Job has been scheduled for at least js_devdata->gpu_reset_ticks_ss ticks.
+ * It should have left the GPU by now. Signal that the GPU needs to be reset.
+ */
+ reset_needed = MALI_TRUE;
+ }
+#else /* !CINSTR_DUMPING_ENABLED */
+ /* NOTE: During CINSTR_DUMPING_ENABLED, we use the alternate timeouts, which
+ * makes the hard-stop and GPU reset timeout much longer. We also ensure that
+ * we don't soft-stop at all. */
+ if (ticks == js_devdata->soft_stop_ticks) {
+ /* Job has been scheduled for at least js_devdata->soft_stop_ticks.
+ * We do not soft-stop during CINSTR_DUMPING_ENABLED, however.
+ */
+ KBASE_DEBUG_PRINT_INFO(KBASE_JM, "Soft-stop");
+ } else if (ticks == js_devdata->hard_stop_ticks_nss) {
+ /* Job has been scheduled for at least js_devdata->hard_stop_ticks_nss ticks.
+ * Hard stop the slot.
+ */
+#if KBASE_DISABLE_SCHEDULING_HARD_STOPS == 0
+ KBASE_DEBUG_PRINT_WARN(KBASE_JM, "JS: Job Hard-Stopped (took more than %lu ticks at %lu ms/tick)", (unsigned long)ticks, (unsigned long)(js_devdata->scheduling_tick_ns / 1000000u));
+ kbase_job_slot_hardstop(atom->kctx, s, atom);
+#endif
+ } else if (ticks == js_devdata->gpu_reset_ticks_nss) {
+ /* Job has been scheduled for at least js_devdata->gpu_reset_ticks_nss ticks.
+ * It should have left the GPU by now. Signal that the GPU needs to be reset.
+ */
+ reset_needed = MALI_TRUE;
+ }
+#endif /* !CINSTR_DUMPING_ENABLED */
+ }
+ }
+ }
+
+ if (reset_needed) {
+		KBASE_DEBUG_PRINT_ERROR(KBASE_JM, "JS: Job has been on the GPU for too long (KBASE_CONFIG_ATTR_JS_RESET_TICKS_SS/NSS timeout hit). Issuing GPU soft-reset to resolve.");
+
+ if (kbase_prepare_to_reset_gpu_locked(kbdev))
+ kbase_reset_gpu_locked(kbdev);
+ }
+
+	/* The timer is re-issued if there are contexts in the run-pool */
+
+ if (timer_callback_should_run(kbdev) != MALI_FALSE) {
+ hrtimer_start(&policy_info->scheduling_timer, HR_TIMER_DELAY_NSEC(js_devdata->scheduling_tick_ns), HRTIMER_MODE_REL);
+ } else {
+ KBASE_TRACE_ADD(kbdev, JS_POLICY_TIMER_END, NULL, NULL, 0u, 0u);
+ /* timer_running state is updated by kbasep_js_policy_runpool_timers_sync() */
+ }
+
+ spin_unlock_irqrestore(&js_devdata->runpool_irq.lock, flags);
+
+ return HRTIMER_NORESTART;
+}
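+
+/* Illustrative timeline for the escalation in timer_callback() (the tick
+ * values here are hypothetical; the real ones come from the js_devdata
+ * configuration): with soft_stop_ticks == 1, hard_stop_ticks_ss == 12 and
+ * gpu_reset_ticks_ss == 18, an atom still occupying a slot is soft-stopped
+ * on its 1st tick, hard-stopped on its 12th, and triggers a GPU reset on
+ * its 18th. When CINSTR_DUMPING_ENABLED, the longer _nss timeouts are used
+ * and no soft-stop is ever issued. */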
+
+/*
+ * Non-private functions
+ */
+
+mali_error kbasep_js_policy_init(kbase_device *kbdev)
+{
+ kbasep_js_device_data *js_devdata;
+ kbasep_js_policy_cfs *policy_info;
+
+ KBASE_DEBUG_ASSERT(kbdev != NULL);
+ js_devdata = &kbdev->js_data;
+ policy_info = &js_devdata->policy.cfs;
+
+ INIT_LIST_HEAD(&policy_info->ctx_queue_head);
+ INIT_LIST_HEAD(&policy_info->scheduled_ctxs_head);
+ INIT_LIST_HEAD(&policy_info->ctx_rt_queue_head);
+
+ atomic64_set(&policy_info->least_runtime_us, KBASEP_JS_RUNTIME_EMPTY);
+ atomic64_set(&policy_info->rt_least_runtime_us, KBASEP_JS_RUNTIME_EMPTY);
+
+ hrtimer_init(&policy_info->scheduling_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
+ policy_info->scheduling_timer.function = timer_callback;
+
+ policy_info->timer_running = MALI_FALSE;
+ policy_info->head_runtime_us = 0;
+
+ /* Build up the core_req variants */
+ build_core_req_variants(kbdev, policy_info);
+ /* Build the slot to variant lookups */
+ build_slot_lookups(kbdev, policy_info);
+
+ return MALI_ERROR_NONE;
+}
+
+void kbasep_js_policy_term(kbasep_js_policy *js_policy)
+{
+ kbasep_js_policy_cfs *policy_info;
+ KBASE_DEBUG_ASSERT(js_policy != NULL);
+ policy_info = &js_policy->cfs;
+
+ /* ASSERT that there are no contexts queued */
+ KBASE_DEBUG_ASSERT(list_empty(&policy_info->ctx_queue_head));
+ KBASE_DEBUG_ASSERT(KBASEP_JS_RUNTIME_EMPTY == atomic64_read(&policy_info->least_runtime_us));
+
+ /* ASSERT that there are no contexts scheduled */
+ KBASE_DEBUG_ASSERT(list_empty(&policy_info->scheduled_ctxs_head));
+
+	/* ASSERT that there are no contexts queued in the realtime queue */
+ KBASE_DEBUG_ASSERT(list_empty(&policy_info->ctx_rt_queue_head));
+ KBASE_DEBUG_ASSERT(KBASEP_JS_RUNTIME_EMPTY == atomic64_read(&policy_info->rt_least_runtime_us));
+
+ hrtimer_cancel(&policy_info->scheduling_timer);
+}
+
+mali_error kbasep_js_policy_init_ctx(kbase_device *kbdev, kbase_context *kctx)
+{
+ kbasep_js_device_data *js_devdata;
+ kbasep_js_policy_cfs_ctx *ctx_info;
+ kbasep_js_policy_cfs *policy_info;
+ u32 i;
+ int policy;
+
+ KBASE_DEBUG_ASSERT(kbdev != NULL);
+ KBASE_DEBUG_ASSERT(kctx != NULL);
+
+ js_devdata = &kbdev->js_data;
+ policy_info = &kbdev->js_data.policy.cfs;
+ ctx_info = &kctx->jctx.sched_info.runpool.policy_ctx.cfs;
+
+ KBASE_TRACE_ADD_REFCOUNT(kbdev, JS_POLICY_INIT_CTX, kctx, NULL, 0u, kbasep_js_policy_trace_get_refcnt(kbdev, kctx));
+
+ for (i = 0; i < policy_info->num_core_req_variants; ++i)
+ INIT_LIST_HEAD(&ctx_info->job_list_head[i]);
+
+ policy = current->policy;
+ if (policy == SCHED_FIFO || policy == SCHED_RR) {
+ ctx_info->process_rt_policy = MALI_TRUE;
+ ctx_info->process_priority = (((MAX_RT_PRIO - 1) - current->rt_priority) / 5) - 20;
+ } else {
+ ctx_info->process_rt_policy = MALI_FALSE;
+ ctx_info->process_priority = (current->static_prio - MAX_RT_PRIO) - 20;
+ }
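+
+	/* Worked example of the mapping above: with MAX_RT_PRIO == 100 (the
+	 * mainline value), an RT task with rt_priority 99 maps to
+	 * ((99 - 99) / 5) - 20 == -20 and one with rt_priority 0 maps to
+	 * (99 / 5) - 20 == -1, so realtime tasks occupy the -20..-1 band. For
+	 * normal tasks, static_prio == 120 + nice, so the second expression
+	 * simply recovers the task's nice value. */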
+
+ ctx_info->bag_total_priority = 0;
+ ctx_info->bag_total_nr_atoms = 0;
+
+ /* Initial runtime (relative to least-run context runtime)
+ *
+ * This uses the Policy Queue's most up-to-date head_runtime_us by using the
+ * queue mutex to issue memory barriers - also ensure future updates to
+ * head_runtime_us occur strictly after this context is initialized */
+ mutex_lock(&js_devdata->queue_mutex);
+
+	/* No need to hold the runpool_irq.lock here, because we're initializing
+ * the value, and the context is definitely not being updated in the
+ * runpool at this point. The queue_mutex ensures the memory barrier. */
+ ctx_info->runtime_us = policy_info->head_runtime_us + priority_weight(ctx_info, (u64) js_devdata->cfs_ctx_runtime_init_slices * (u64) (js_devdata->ctx_timeslice_ns / 1000u));
+
+ mutex_unlock(&js_devdata->queue_mutex);
+
+ return MALI_ERROR_NONE;
+}
+
+void kbasep_js_policy_term_ctx(kbasep_js_policy *js_policy, kbase_context *kctx)
+{
+ kbasep_js_policy_cfs_ctx *ctx_info;
+ kbasep_js_policy_cfs *policy_info;
+ u32 i;
+
+ KBASE_DEBUG_ASSERT(js_policy != NULL);
+ KBASE_DEBUG_ASSERT(kctx != NULL);
+
+ policy_info = &js_policy->cfs;
+ ctx_info = &kctx->jctx.sched_info.runpool.policy_ctx.cfs;
+
+ {
+ kbase_device *kbdev = container_of(js_policy, kbase_device, js_data.policy);
+ KBASE_TRACE_ADD_REFCOUNT(kbdev, JS_POLICY_TERM_CTX, kctx, NULL, 0u, kbasep_js_policy_trace_get_refcnt(kbdev, kctx));
+ }
+
+ /* ASSERT that no jobs are present */
+ for (i = 0; i < policy_info->num_core_req_variants; ++i)
+ KBASE_DEBUG_ASSERT(list_empty(&ctx_info->job_list_head[i]));
+
+ /* No work to do */
+}
+
+/*
+ * Context Management
+ */
+
+void kbasep_js_policy_enqueue_ctx(kbasep_js_policy *js_policy, kbase_context *kctx)
+{
+ kbasep_js_policy_cfs *policy_info;
+ kbasep_js_policy_cfs_ctx *ctx_info;
+ kbase_context *head_ctx;
+ kbase_context *list_kctx = NULL;
+ kbasep_js_device_data *js_devdata;
+ struct list_head *queue_head;
+ struct list_head *pos;
+ kbase_device *kbdev;
+ atomic64_t *least_runtime_us;
+ u64 head_runtime;
+
+ KBASE_DEBUG_ASSERT(js_policy != NULL);
+ KBASE_DEBUG_ASSERT(kctx != NULL);
+
+ policy_info = &js_policy->cfs;
+ ctx_info = &kctx->jctx.sched_info.runpool.policy_ctx.cfs;
+ kbdev = container_of(js_policy, kbase_device, js_data.policy);
+ js_devdata = &kbdev->js_data;
+
+ KBASE_TRACE_ADD_REFCOUNT(kbdev, JS_POLICY_ENQUEUE_CTX, kctx, NULL, 0u, kbasep_js_policy_trace_get_refcnt(kbdev, kctx));
+
+ /* ASSERT about scheduled-ness/queued-ness */
+ kbasep_js_debug_check(policy_info, kctx, KBASEP_JS_CHECK_NOTQUEUED);
+
+ /* Clamp the runtime to prevent DoS attacks through "stored-up" runtime */
+ if (policy_info->head_runtime_us > ctx_info->runtime_us + (u64) js_devdata->cfs_ctx_runtime_min_slices * (u64) (js_devdata->ctx_timeslice_ns / 1000u)) {
+		/* No need to hold the runpool_irq.lock here, because we're essentially
+ * initializing the value, and the context is definitely not being updated in the
+ * runpool at this point. The queue_mutex held by the caller ensures the memory
+ * barrier. */
+ ctx_info->runtime_us = policy_info->head_runtime_us - (u64) js_devdata->cfs_ctx_runtime_min_slices * (u64) (js_devdata->ctx_timeslice_ns / 1000u);
+ }
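+
+	/* Worked example of the clamp above (illustrative values): if
+	 * head_runtime_us is 10,000,000 and cfs_ctx_runtime_min_slices *
+	 * (ctx_timeslice_ns / 1000u) amounts to 100,000us, a context re-entering
+	 * the queue with a stored runtime_us of 2,000,000 is raised to
+	 * 9,900,000, bounding its scheduling advantage to one "min slices"
+	 * window instead of letting it monopolize the GPU with banked runtime. */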
+
+ /* Find the position where the context should be enqueued */
+ if (ctx_info->process_rt_policy) {
+ queue_head = &policy_info->ctx_rt_queue_head;
+ least_runtime_us = &policy_info->rt_least_runtime_us;
+ } else {
+ queue_head = &policy_info->ctx_queue_head;
+ least_runtime_us = &policy_info->least_runtime_us;
+ }
+
+ if (list_empty(queue_head)) {
+ list_add_tail(&kctx->jctx.sched_info.runpool.policy_ctx.cfs.list, queue_head);
+ } else {
+ list_for_each(pos, queue_head) {
+ kbasep_js_policy_cfs_ctx *list_ctx_info;
+
+ list_kctx = list_entry(pos, kbase_context, jctx.sched_info.runpool.policy_ctx.cfs.list);
+ list_ctx_info = &list_kctx->jctx.sched_info.runpool.policy_ctx.cfs;
+
+ if ((kctx->jctx.sched_info.ctx.flags & KBASE_CTX_FLAG_PRIVILEGED) != 0)
+ break;
+
+ if ((list_ctx_info->runtime_us > ctx_info->runtime_us) && ((list_kctx->jctx.sched_info.ctx.flags & KBASE_CTX_FLAG_PRIVILEGED) == 0))
+ break;
+ }
+
+ /* Add the context to the queue */
+ list_add_tail(&kctx->jctx.sched_info.runpool.policy_ctx.cfs.list, &list_kctx->jctx.sched_info.runpool.policy_ctx.cfs.list);
+ }
+
+	/* Ensure least_runtime_us is up to date */
+ head_ctx = list_entry(queue_head->next, kbase_context, jctx.sched_info.runpool.policy_ctx.cfs.list);
+ head_runtime = head_ctx->jctx.sched_info.runpool.policy_ctx.cfs.runtime_us;
+ atomic64_set(least_runtime_us, head_runtime);
+}
+
+mali_bool kbasep_js_policy_dequeue_head_ctx(kbasep_js_policy *js_policy, kbase_context ** const kctx_ptr)
+{
+ kbasep_js_policy_cfs *policy_info;
+ kbase_context *head_ctx;
+ struct list_head *queue_head;
+ atomic64_t *least_runtime_us;
+ kbase_device *kbdev;
+
+ KBASE_DEBUG_ASSERT(js_policy != NULL);
+ KBASE_DEBUG_ASSERT(kctx_ptr != NULL);
+
+ policy_info = &js_policy->cfs;
+ kbdev = container_of(js_policy, kbase_device, js_data.policy);
+
+	/* attempt to dequeue from the 'realtime' queue first */
+ if (list_empty(&policy_info->ctx_rt_queue_head)) {
+ if (list_empty(&policy_info->ctx_queue_head)) {
+ /* Nothing to dequeue */
+ return MALI_FALSE;
+ } else {
+ queue_head = &policy_info->ctx_queue_head;
+ least_runtime_us = &policy_info->least_runtime_us;
+ }
+ } else {
+ queue_head = &policy_info->ctx_rt_queue_head;
+ least_runtime_us = &policy_info->rt_least_runtime_us;
+ }
+
+ /* Contexts are dequeued from the front of the queue */
+ *kctx_ptr = list_entry(queue_head->next, kbase_context, jctx.sched_info.runpool.policy_ctx.cfs.list);
+ /* If dequeuing will empty the list, then set least_runtime_us prior to deletion */
+ if (queue_head->next->next == queue_head)
+ atomic64_set(least_runtime_us, KBASEP_JS_RUNTIME_EMPTY);
+ list_del(queue_head->next);
+
+ KBASE_TRACE_ADD_REFCOUNT(kbdev, JS_POLICY_DEQUEUE_HEAD_CTX, *kctx_ptr, NULL, 0u, kbasep_js_policy_trace_get_refcnt(kbdev, *kctx_ptr));
+
+ /* Update the head runtime */
+ if (!list_empty(queue_head)) {
+ u64 head_runtime;
+
+ head_ctx = list_entry(queue_head->next, kbase_context, jctx.sched_info.runpool.policy_ctx.cfs.list);
+
+		/* No need to hold the runpool_irq.lock here for reading - the
+ * context is definitely not being updated in the runpool at this
+ * point. The queue_mutex held by the caller ensures the memory barrier. */
+ head_runtime = head_ctx->jctx.sched_info.runpool.policy_ctx.cfs.runtime_us;
+
+ if (head_runtime > policy_info->head_runtime_us)
+ policy_info->head_runtime_us = head_runtime;
+
+ atomic64_set(least_runtime_us, head_runtime);
+ }
+
+ return MALI_TRUE;
+}
+
+mali_bool kbasep_js_policy_try_evict_ctx(kbasep_js_policy *js_policy, kbase_context *kctx)
+{
+ kbasep_js_policy_cfs_ctx *ctx_info;
+ kbasep_js_policy_cfs *policy_info;
+ mali_bool is_present;
+ struct list_head *queue_head;
+ atomic64_t *least_runtime_us;
+ struct list_head *qhead;
+ kbase_device *kbdev;
+
+ KBASE_DEBUG_ASSERT(js_policy != NULL);
+ KBASE_DEBUG_ASSERT(kctx != NULL);
+
+ policy_info = &js_policy->cfs;
+ ctx_info = &kctx->jctx.sched_info.runpool.policy_ctx.cfs;
+ kbdev = container_of(js_policy, kbase_device, js_data.policy);
+
+ if (ctx_info->process_rt_policy) {
+ queue_head = &policy_info->ctx_rt_queue_head;
+ least_runtime_us = &policy_info->rt_least_runtime_us;
+ } else {
+ queue_head = &policy_info->ctx_queue_head;
+ least_runtime_us = &policy_info->least_runtime_us;
+ }
+
+ qhead = queue_head;
+
+ is_present = kbasep_list_member_of(qhead, &kctx->jctx.sched_info.runpool.policy_ctx.cfs.list);
+
+ KBASE_TRACE_ADD_REFCOUNT_INFO(kbdev, JS_POLICY_TRY_EVICT_CTX, kctx, NULL, 0u, kbasep_js_policy_trace_get_refcnt(kbdev, kctx), is_present);
+
+ if (is_present != MALI_FALSE) {
+ kbase_context *head_ctx;
+
+ /* If dequeuing will empty the list, then set least_runtime_us prior to deletion */
+ if (queue_head->next->next == queue_head)
+ atomic64_set(least_runtime_us, KBASEP_JS_RUNTIME_EMPTY);
+
+ /* Remove the context */
+ list_del(&kctx->jctx.sched_info.runpool.policy_ctx.cfs.list);
+
+ /* Update the head runtime */
+ if (!list_empty(qhead)) {
+ u64 head_runtime;
+
+ head_ctx = list_entry(qhead->next, kbase_context, jctx.sched_info.runpool.policy_ctx.cfs.list);
+
+			/* No need to hold the runpool_irq.lock here for reading - the
+ * context is definitely not being updated in the runpool at this
+ * point. The queue_mutex held by the caller ensures the memory barrier. */
+ head_runtime = head_ctx->jctx.sched_info.runpool.policy_ctx.cfs.runtime_us;
+
+ if (head_runtime > policy_info->head_runtime_us)
+ policy_info->head_runtime_us = head_runtime;
+
+ atomic64_set(least_runtime_us, head_runtime);
+ }
+ }
+
+ return is_present;
+}
+
+void kbasep_js_policy_foreach_ctx_job(kbasep_js_policy *js_policy, kbase_context *kctx,
+ kbasep_js_policy_ctx_job_cb callback, mali_bool detach_jobs)
+{
+ kbasep_js_policy_cfs *policy_info;
+ kbasep_js_policy_cfs_ctx *ctx_info;
+ kbase_device *kbdev;
+ u32 i;
+
+ KBASE_DEBUG_ASSERT(js_policy != NULL);
+ KBASE_DEBUG_ASSERT(kctx != NULL);
+
+ kbdev = container_of(js_policy, kbase_device, js_data.policy);
+ policy_info = &js_policy->cfs;
+ ctx_info = &kctx->jctx.sched_info.runpool.policy_ctx.cfs;
+
+ KBASE_TRACE_ADD_REFCOUNT(kbdev, JS_POLICY_FOREACH_CTX_JOBS, kctx, NULL, 0u, kbasep_js_policy_trace_get_refcnt(kbdev, kctx));
+
+ /* Invoke callback on jobs on each variant in turn */
+ for (i = 0; i < policy_info->num_core_req_variants; ++i) {
+ struct list_head *job_list;
+ struct kbase_jd_atom *atom;
+ struct kbase_jd_atom *tmp_iter;
+ job_list = &ctx_info->job_list_head[i];
+ /* Invoke callback on all kbase_jd_atoms in this list, optionally
+ * removing them from the list */
+ list_for_each_entry_safe(atom, tmp_iter, job_list, sched_info.cfs.list) {
+ if (detach_jobs)
+ list_del(&atom->sched_info.cfs.list);
+ callback(kbdev, atom);
+ }
+ }
+
+}
+
+void kbasep_js_policy_runpool_add_ctx(kbasep_js_policy *js_policy, kbase_context *kctx)
+{
+ kbasep_js_policy_cfs *policy_info;
+ kbasep_js_device_data *js_devdata;
+ kbase_device *kbdev;
+
+ KBASE_DEBUG_ASSERT(js_policy != NULL);
+ KBASE_DEBUG_ASSERT(kctx != NULL);
+
+ policy_info = &js_policy->cfs;
+ js_devdata = container_of(js_policy, kbasep_js_device_data, policy);
+
+ kbdev = kctx->kbdev;
+
+ {
+ KBASE_TRACE_ADD_REFCOUNT(kbdev, JS_POLICY_RUNPOOL_ADD_CTX, kctx, NULL, 0u, kbasep_js_policy_trace_get_refcnt_nolock(kbdev, kctx));
+ }
+
+ /* ASSERT about scheduled-ness/queued-ness */
+ kbasep_js_debug_check(policy_info, kctx, KBASEP_JS_CHECK_NOTSCHEDULED);
+
+ /* All enqueued contexts go to the back of the runpool */
+ list_add_tail(&kctx->jctx.sched_info.runpool.policy_ctx.cfs.list, &policy_info->scheduled_ctxs_head);
+
+ if (timer_callback_should_run(kbdev) != MALI_FALSE && policy_info->timer_running == MALI_FALSE) {
+ hrtimer_start(&policy_info->scheduling_timer, HR_TIMER_DELAY_NSEC(js_devdata->scheduling_tick_ns), HRTIMER_MODE_REL);
+
+ KBASE_TRACE_ADD(kbdev, JS_POLICY_TIMER_START, NULL, NULL, 0u, 0u);
+ policy_info->timer_running = MALI_TRUE;
+ }
+}
+
+void kbasep_js_policy_runpool_remove_ctx(kbasep_js_policy *js_policy, kbase_context *kctx)
+{
+ kbasep_js_policy_cfs *policy_info;
+
+ KBASE_DEBUG_ASSERT(js_policy != NULL);
+ KBASE_DEBUG_ASSERT(kctx != NULL);
+
+ policy_info = &js_policy->cfs;
+
+ {
+ kbase_device *kbdev = container_of(js_policy, kbase_device, js_data.policy);
+ KBASE_TRACE_ADD_REFCOUNT(kbdev, JS_POLICY_RUNPOOL_REMOVE_CTX, kctx, NULL, 0u, kbasep_js_policy_trace_get_refcnt_nolock(kbdev, kctx));
+ }
+
+ /* ASSERT about scheduled-ness/queued-ness */
+ kbasep_js_debug_check(policy_info, kctx, KBASEP_JS_CHECK_SCHEDULED);
+
+ /* No searching or significant list maintenance required to remove this context */
+ list_del(&kctx->jctx.sched_info.runpool.policy_ctx.cfs.list);
+
+}
+
+mali_bool kbasep_js_policy_should_remove_ctx(kbasep_js_policy *js_policy, kbase_context *kctx)
+{
+ kbasep_js_policy_cfs_ctx *ctx_info;
+ kbasep_js_policy_cfs *policy_info;
+ kbasep_js_device_data *js_devdata;
+ u64 least_runtime_us;
+
+ KBASE_DEBUG_ASSERT(js_policy != NULL);
+ KBASE_DEBUG_ASSERT(kctx != NULL);
+
+ policy_info = &js_policy->cfs;
+ ctx_info = &kctx->jctx.sched_info.runpool.policy_ctx.cfs;
+ js_devdata = container_of(js_policy, kbasep_js_device_data, policy);
+
+ if (ctx_info->process_rt_policy)
+ least_runtime_us = atomic64_read(&policy_info->rt_least_runtime_us);
+ else
+ least_runtime_us = atomic64_read(&policy_info->least_runtime_us);
+
+ if (KBASEP_JS_RUNTIME_EMPTY == least_runtime_us) {
+ /* Queue is empty */
+ return MALI_FALSE;
+ }
+
+ if ((least_runtime_us + priority_weight(ctx_info, (u64) (js_devdata->ctx_timeslice_ns / 1000u)))
+ < ctx_info->runtime_us) {
+		/* The context is scheduled out if it's not the least-run context anymore.
+		 * The "real" head runtime is used instead of the cached runtime so the current
+		 * context is not scheduled out when there are fewer contexts than address spaces.
+ */
+ return MALI_TRUE;
+ }
+
+ return MALI_FALSE;
+}
+
+void kbasep_js_policy_runpool_timers_sync(kbasep_js_policy *js_policy)
+{
+ kbasep_js_policy_cfs *policy_info;
+ kbase_device *kbdev;
+ kbasep_js_device_data *js_devdata;
+
+ KBASE_DEBUG_ASSERT(js_policy != NULL);
+
+ policy_info = &js_policy->cfs;
+ kbdev = container_of(js_policy, kbase_device, js_data.policy);
+ js_devdata = &kbdev->js_data;
+
+ if (!timer_callback_should_run(kbdev)) {
+ unsigned long flags;
+
+ /* If the timer is running now, synchronize with it by
+ * locking/unlocking its spinlock, to ensure it's not using an old value
+ * from timer_callback_should_run() */
+ spin_lock_irqsave(&js_devdata->runpool_irq.lock, flags);
+ spin_unlock_irqrestore(&js_devdata->runpool_irq.lock, flags);
+
+ /* From now on, return value of timer_callback_should_run() will also
+ * cause the timer to not requeue itself. Its return value cannot
+ * change, because it depends on variables updated with the
+ * runpool_mutex held, which the caller of this must also hold */
+ hrtimer_cancel(&policy_info->scheduling_timer);
+
+ policy_info->timer_running = MALI_FALSE;
+ }
+}
+
+/*
+ * Job Chain Management
+ */
+
+mali_error kbasep_js_policy_init_job(const kbasep_js_policy *js_policy, const kbase_context *kctx, kbase_jd_atom *katom)
+{
+ const kbasep_js_policy_cfs *policy_info;
+
+ KBASE_DEBUG_ASSERT(js_policy != NULL);
+ KBASE_DEBUG_ASSERT(katom != NULL);
+ KBASE_DEBUG_ASSERT(kctx != NULL);
+
+ policy_info = &js_policy->cfs;
+
+	/* Determine the job's index into the job list head; this returns an error
+	 * if the atom is malformed, so that the failure is reported. */
+ return cached_variant_idx_init(policy_info, kctx, katom);
+}
+
+void kbasep_js_policy_register_job(kbasep_js_policy *js_policy, kbase_context *kctx, kbase_jd_atom *katom)
+{
+ kbasep_js_policy_cfs_ctx *ctx_info;
+
+ KBASE_DEBUG_ASSERT(js_policy != NULL);
+ KBASE_DEBUG_ASSERT(katom != NULL);
+ KBASE_DEBUG_ASSERT(kctx != NULL);
+
+ ctx_info = &kctx->jctx.sched_info.runpool.policy_ctx.cfs;
+
+ /* Adjust context priority to include the new job */
+ ctx_info->bag_total_nr_atoms++;
+ ctx_info->bag_total_priority += katom->nice_prio;
+
+ /* Get average priority and convert to NICE range -20..19 */
+ if (ctx_info->bag_total_nr_atoms)
+ ctx_info->bag_priority = (ctx_info->bag_total_priority / ctx_info->bag_total_nr_atoms) - 20;
+}
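+
+/* Worked example: registering two atoms with nice_prio values of 20 and 30
+ * (nice_prio is assumed here to carry the atom priority biased into the
+ * 0..39 range, given the "- 20" conversion above) gives bag_total_priority
+ * == 50 and bag_total_nr_atoms == 2, so bag_priority becomes
+ * (50 / 2) - 20 == +5, i.e. a slightly lower-than-default priority. */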
+
+void kbasep_js_policy_deregister_job(kbasep_js_policy *js_policy, kbase_context *kctx, kbase_jd_atom *katom)
+{
+ kbasep_js_policy_cfs_ctx *ctx_info;
+
+ KBASE_DEBUG_ASSERT(js_policy != NULL);
+ CSTD_UNUSED(js_policy);
+ KBASE_DEBUG_ASSERT(katom != NULL);
+ KBASE_DEBUG_ASSERT(kctx != NULL);
+
+ ctx_info = &kctx->jctx.sched_info.runpool.policy_ctx.cfs;
+
+ /* Adjust context priority to no longer include removed job */
+ KBASE_DEBUG_ASSERT(ctx_info->bag_total_nr_atoms > 0);
+ ctx_info->bag_total_nr_atoms--;
+ ctx_info->bag_total_priority -= katom->nice_prio;
+ KBASE_DEBUG_ASSERT(ctx_info->bag_total_priority >= 0);
+
+ /* Get average priority and convert to NICE range -20..19 */
+ if (ctx_info->bag_total_nr_atoms)
+ ctx_info->bag_priority = (ctx_info->bag_total_priority / ctx_info->bag_total_nr_atoms) - 20;
+}
+KBASE_EXPORT_TEST_API(kbasep_js_policy_deregister_job)
+
+mali_bool kbasep_js_policy_dequeue_job(kbase_device *kbdev,
+ int job_slot_idx,
+ kbase_jd_atom ** const katom_ptr)
+{
+ kbasep_js_device_data *js_devdata;
+ kbasep_js_policy_cfs *policy_info;
+ kbase_context *kctx;
+ u32 variants_supported;
+ struct list_head *pos;
+
+ KBASE_DEBUG_ASSERT(kbdev != NULL);
+ KBASE_DEBUG_ASSERT(katom_ptr != NULL);
+ KBASE_DEBUG_ASSERT(job_slot_idx < BASE_JM_MAX_NR_SLOTS);
+
+ js_devdata = &kbdev->js_data;
+ policy_info = &js_devdata->policy.cfs;
+
+ /* Get the variants for this slot */
+ if (kbdev->gpu_props.num_core_groups > 1 && kbasep_js_ctx_attr_is_attr_on_runpool(kbdev, KBASEP_JS_CTX_ATTR_COMPUTE_ALL_CORES) != MALI_FALSE) {
+ /* SS-allcore state, and there's more than one coregroup */
+ variants_supported = get_slot_to_variant_lookup(policy_info->slot_to_variant_lookup_ss_allcore_state, job_slot_idx);
+ } else {
+ /* SS-state */
+ variants_supported = get_slot_to_variant_lookup(policy_info->slot_to_variant_lookup_ss_state, job_slot_idx);
+ }
+
+	/* On the first pass through the runpool we consider the realtime priority jobs */
+ list_for_each(pos, &policy_info->scheduled_ctxs_head) {
+ kctx = list_entry(pos, kbase_context, jctx.sched_info.runpool.policy_ctx.cfs.list);
+ if (kctx->jctx.sched_info.runpool.policy_ctx.cfs.process_rt_policy) {
+ if (dequeue_job(kbdev, kctx, variants_supported, katom_ptr, job_slot_idx)) {
+ /* Realtime policy job matched */
+ return MALI_TRUE;
+ }
+ }
+ }
+
+	/* On the second pass through the runpool we consider the non-realtime priority jobs */
+ list_for_each(pos, &policy_info->scheduled_ctxs_head) {
+ kctx = list_entry(pos, kbase_context, jctx.sched_info.runpool.policy_ctx.cfs.list);
+ if (kctx->jctx.sched_info.runpool.policy_ctx.cfs.process_rt_policy == MALI_FALSE) {
+ if (dequeue_job(kbdev, kctx, variants_supported, katom_ptr, job_slot_idx)) {
+ /* Non-realtime policy job matched */
+ return MALI_TRUE;
+ }
+ }
+ }
+
+ /* By this point, no contexts had a matching job */
+ return MALI_FALSE;
+}
+
+void kbasep_js_policy_enqueue_job(kbasep_js_policy *js_policy, kbase_jd_atom *katom)
+{
+ kbasep_js_policy_cfs_job *job_info;
+ kbasep_js_policy_cfs_ctx *ctx_info;
+ kbase_context *parent_ctx;
+
+ KBASE_DEBUG_ASSERT(js_policy != NULL);
+ KBASE_DEBUG_ASSERT(katom != NULL);
+ parent_ctx = katom->kctx;
+ KBASE_DEBUG_ASSERT(parent_ctx != NULL);
+
+ job_info = &katom->sched_info.cfs;
+ ctx_info = &parent_ctx->jctx.sched_info.runpool.policy_ctx.cfs;
+
+ {
+ kbase_device *kbdev = container_of(js_policy, kbase_device, js_data.policy);
+ KBASE_TRACE_ADD(kbdev, JS_POLICY_ENQUEUE_JOB, katom->kctx, katom, katom->jc, 0);
+ }
+ list_add_tail(&katom->sched_info.cfs.list, &ctx_info->job_list_head[job_info->cached_variant_idx]);
+}
+
+void kbasep_js_policy_log_job_result(kbasep_js_policy *js_policy, kbase_jd_atom *katom, u64 time_spent_us)
+{
+ kbasep_js_policy_cfs_ctx *ctx_info;
+ kbase_context *parent_ctx;
+ KBASE_DEBUG_ASSERT(js_policy != NULL);
+ KBASE_DEBUG_ASSERT(katom != NULL);
+ CSTD_UNUSED(js_policy);
+
+ parent_ctx = katom->kctx;
+ KBASE_DEBUG_ASSERT(parent_ctx != NULL);
+
+ ctx_info = &parent_ctx->jctx.sched_info.runpool.policy_ctx.cfs;
+
+ ctx_info->runtime_us += priority_weight(ctx_info, time_spent_us);
+}
+
+mali_bool kbasep_js_policy_ctx_has_priority(kbasep_js_policy *js_policy, kbase_context *current_ctx, kbase_context *new_ctx)
+{
+ kbasep_js_policy_cfs_ctx *current_ctx_info;
+ kbasep_js_policy_cfs_ctx *new_ctx_info;
+
+ KBASE_DEBUG_ASSERT(current_ctx != NULL);
+ KBASE_DEBUG_ASSERT(new_ctx != NULL);
+ CSTD_UNUSED(js_policy);
+
+ current_ctx_info = &current_ctx->jctx.sched_info.runpool.policy_ctx.cfs;
+ new_ctx_info = &new_ctx->jctx.sched_info.runpool.policy_ctx.cfs;
+
+ if ((current_ctx_info->process_rt_policy == MALI_FALSE) && (new_ctx_info->process_rt_policy == MALI_TRUE))
+ return MALI_TRUE;
+
+ if ((current_ctx_info->process_rt_policy == new_ctx_info->process_rt_policy) && (current_ctx_info->bag_priority > new_ctx_info->bag_priority))
+ return MALI_TRUE;
+
+ return MALI_FALSE;
+}
diff --git a/drivers/gpu/arm/t6xx/kbase/src/common/mali_kbase_js_policy_cfs.h b/drivers/gpu/arm/t6xx/kbase/src/common/mali_kbase_js_policy_cfs.h
new file mode 100755
index 00000000000..45616628253
--- /dev/null
+++ b/drivers/gpu/arm/t6xx/kbase/src/common/mali_kbase_js_policy_cfs.h
@@ -0,0 +1,165 @@
+/*
+ *
+ * (C) COPYRIGHT 2011-2013 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained
+ * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
+ * Boston, MA 02110-1301, USA.
+ *
+ */
+
+
+
+/**
+ * @file mali_kbase_js_policy_cfs.h
+ * Completely Fair Job Scheduler Policy structure definitions
+ */
+
+#ifndef _KBASE_JS_POLICY_CFS_H_
+#define _KBASE_JS_POLICY_CFS_H_
+
+#define KBASE_JS_POLICY_AVAILABLE_CFS
+
+/** @addtogroup base_api
+ * @{ */
+/** @addtogroup base_kbase_api
+ * @{ */
+/** @addtogroup kbase_js_policy
+ * @{ */
+
+/**
+ * Internally, this policy keeps a few queues for different variants
+ * of core requirements, which are used to decide how to schedule onto the
+ * different job slots.
+ *
+ * Must be a power of 2 to keep the lookup math simple
+ */
+#define KBASEP_JS_MAX_NR_CORE_REQ_VARIANTS_LOG2 3
+#define KBASEP_JS_MAX_NR_CORE_REQ_VARIANTS (1u << KBASEP_JS_MAX_NR_CORE_REQ_VARIANTS_LOG2)
+
+/** Bits needed in the lookup to support all slots */
+#define KBASEP_JS_VARIANT_LOOKUP_BITS_NEEDED (BASE_JM_MAX_NR_SLOTS * KBASEP_JS_MAX_NR_CORE_REQ_VARIANTS)
+/** Number of u32s needed in the lookup array to support all slots */
+#define KBASEP_JS_VARIANT_LOOKUP_WORDS_NEEDED ((KBASEP_JS_VARIANT_LOOKUP_BITS_NEEDED + 31) / 32)
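+
+/* Sizing example (assuming BASE_JM_MAX_NR_SLOTS == 16, which is defined
+ * elsewhere): the lookup then needs 16 * 8 == 128 bits, so
+ * KBASEP_JS_VARIANT_LOOKUP_WORDS_NEEDED == 4 u32 words, with each slot
+ * owning an 8-bit field inside them. */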
+
+#define KBASEP_JS_RUNTIME_EMPTY ((u64)-1)
+
+typedef struct kbasep_js_policy_cfs {
+ /** List of all contexts in the context queue. Hold
+ * kbasep_js_device_data::queue_mutex whilst accessing. */
+ struct list_head ctx_queue_head;
+
+ /** List of all contexts in the realtime (priority) context queue */
+ struct list_head ctx_rt_queue_head;
+
+	/** List of scheduled contexts. Hold kbasep_js_device_data::runpool_irq::lock
+ * whilst accessing, which is a spinlock */
+ struct list_head scheduled_ctxs_head;
+
+ /** Number of valid elements in the core_req_variants member, and the
+	 * kbasep_js_policy_cfs_ctx::job_list_head array */
+ u32 num_core_req_variants;
+
+ /** Variants of the core requirements */
+ kbasep_atom_req core_req_variants[KBASEP_JS_MAX_NR_CORE_REQ_VARIANTS];
+
+ /* Lookups per job slot against which core_req_variants match it */
+ u32 slot_to_variant_lookup_ss_state[KBASEP_JS_VARIANT_LOOKUP_WORDS_NEEDED];
+ u32 slot_to_variant_lookup_ss_allcore_state[KBASEP_JS_VARIANT_LOOKUP_WORDS_NEEDED];
+
+ /* The timer tick used for rescheduling jobs */
+ struct hrtimer scheduling_timer;
+
+ /* Is the timer running?
+ *
+ * The kbasep_js_device_data::runpool_mutex must be held whilst modifying this. */
+ mali_bool timer_running;
+
+ /* Number of us the least-run context has been running for
+ *
+ * The kbasep_js_device_data::queue_mutex must be held whilst updating this
+ * Reads are possible without this mutex, but an older value might be read
+ * if no memory barriers are issued beforehand. */
+ u64 head_runtime_us;
+
+ /* Number of us the least-run context in the context queue has been running for.
+ * -1 if context queue is empty. */
+ atomic64_t least_runtime_us;
+
+ /* Number of us the least-run context in the realtime (priority) context queue
+ * has been running for. -1 if realtime context queue is empty. */
+ atomic64_t rt_least_runtime_us;
+} kbasep_js_policy_cfs;
+
+/**
+ * This policy contains a single linked list of all contexts.
+ */
+typedef struct kbasep_js_policy_cfs_ctx {
+ /** Link implementing the Policy's Queue, and Currently Scheduled list */
+ struct list_head list;
+
+ /** Job lists for use when in the Run Pool - only using
+	 * kbasep_js_policy_cfs::num_core_req_variants of them. We still need to track
+ * the jobs when we're not in the runpool, so this member is accessed from
+ * outside the policy queue (for the first job), inside the policy queue,
+ * and inside the runpool.
+ *
+ * If the context is in the runpool, then this must only be accessed with
+ * kbasep_js_device_data::runpool_irq::lock held
+ *
+ * Jobs are still added to this list even when the context is not in the
+ * runpool. In that case, the kbasep_js_kctx_info::ctx::jsctx_mutex must be
+ * held before accessing this. */
+ struct list_head job_list_head[KBASEP_JS_MAX_NR_CORE_REQ_VARIANTS];
+
+ /** Number of us this context has been running for
+ *
+ * The kbasep_js_device_data::runpool_irq::lock (a spinlock) must be held
+ * whilst updating this. Initializing will occur on context init and
+ * context enqueue (which can only occur in one thread at a time), but
+ * multi-thread access only occurs while the context is in the runpool.
+ *
+ * Reads are possible without this spinlock, but an older value might be read
+ * if no memory barriers are issued beforehand */
+ u64 runtime_us;
+
+	/* The calling process's scheduling policy is realtime, so the context will use the priority queue.
+ * Non-mutable after ctx init */
+ mali_bool process_rt_policy;
+ /* Calling process NICE priority */
+ int process_priority;
+ /* Average NICE priority of all atoms in bag:
+ * Hold the kbasep_js_kctx_info::ctx::jsctx_mutex when accessing */
+ int bag_priority;
+ /* Total NICE priority of all atoms in bag
+ * Hold the kbasep_js_kctx_info::ctx::jsctx_mutex when accessing */
+ int bag_total_priority;
+ /* Total number of atoms in the bag
+ * Hold the kbasep_js_kctx_info::ctx::jsctx_mutex when accessing */
+ int bag_total_nr_atoms;
+
+} kbasep_js_policy_cfs_ctx;
+
+/**
+ * In this policy, each Job is part of at most one of the per_corereq lists
+ */
+typedef struct kbasep_js_policy_cfs_job {
+ struct list_head list; /**< Link implementing the Run Pool list/Jobs owned by the ctx */
+ u32 cached_variant_idx; /**< Cached index of the list this should be entered into on re-queue */
+
+ /** Number of ticks that this job has been executing for
+ *
+ * To access this, the kbasep_js_device_data::runpool_irq::lock must be held */
+ u32 ticks;
+} kbasep_js_policy_cfs_job;
+
+ /** @} *//* end group kbase_js_policy */
+ /** @} *//* end group base_kbase_api */
+ /** @} *//* end group base_api */
+
+#endif
diff --git a/drivers/gpu/arm/t6xx/kbase/src/common/mali_kbase_mem.c b/drivers/gpu/arm/t6xx/kbase/src/common/mali_kbase_mem.c
new file mode 100755
index 00000000000..0e4fbd854b5
--- /dev/null
+++ b/drivers/gpu/arm/t6xx/kbase/src/common/mali_kbase_mem.c
@@ -0,0 +1,2034 @@
+/*
+ *
+ * (C) COPYRIGHT 2010-2013 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained
+ * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
+ * Boston, MA 02110-1301, USA.
+ *
+ */
+
+
+
+/**
+ * @file mali_kbase_mem.c
+ * Base kernel memory APIs
+ */
+#ifdef CONFIG_DMA_SHARED_BUFFER
+#include <linux/dma-buf.h>
+#endif /* CONFIG_DMA_SHARED_BUFFER */
+
+#include <kbase/mali_kbase_config.h>
+#include <kbase/src/common/mali_kbase.h>
+#include <kbase/src/common/mali_midg_regmap.h>
+#include <kbase/src/common/mali_kbase_cache_policy.h>
+#include <kbase/src/common/mali_kbase_hw.h>
+#include <kbase/src/common/mali_kbase_gator.h>
+
+/**
+ * @brief Check the zone compatibility of two regions.
+ */
+STATIC int kbase_region_tracker_match_zone(struct kbase_va_region *reg1, struct kbase_va_region *reg2)
+{
+ return ((reg1->flags & KBASE_REG_ZONE_MASK) == (reg2->flags & KBASE_REG_ZONE_MASK));
+}
+
+KBASE_EXPORT_TEST_API(kbase_region_tracker_match_zone)
+
+/* This function inserts a region into the tree. */
+static void kbase_region_tracker_insert(struct kbase_context *kctx, struct kbase_va_region *new_reg)
+{
+ u64 start_pfn = new_reg->start_pfn;
+ struct rb_node **link = &(kctx->reg_rbtree.rb_node);
+ struct rb_node *parent = NULL;
+
+ /* Find the right place in the tree using tree search */
+ while (*link) {
+ struct kbase_va_region *old_reg;
+
+ parent = *link;
+ old_reg = rb_entry(parent, struct kbase_va_region, rblink);
+
+ /* RBTree requires no duplicate entries. */
+ KBASE_DEBUG_ASSERT(old_reg->start_pfn != start_pfn);
+
+ if (old_reg->start_pfn > start_pfn)
+ link = &(*link)->rb_left;
+ else
+ link = &(*link)->rb_right;
+ }
+
+ /* Put the new node there, and rebalance tree */
+ rb_link_node(&(new_reg->rblink), parent, link);
+ rb_insert_color(&(new_reg->rblink), &(kctx->reg_rbtree));
+}
+
+/* Find allocated region enclosing range. */
+struct kbase_va_region *kbase_region_tracker_find_region_enclosing_range(kbase_context *kctx, u64 start_pfn, u32 nr_pages)
+{
+ struct rb_node *rbnode;
+ struct kbase_va_region *reg;
+ u64 end_pfn = start_pfn + nr_pages;
+
+ rbnode = kctx->reg_rbtree.rb_node;
+ while (rbnode) {
+ u64 tmp_start_pfn, tmp_end_pfn;
+ reg = rb_entry(rbnode, struct kbase_va_region, rblink);
+ tmp_start_pfn = reg->start_pfn;
+ tmp_end_pfn = reg->start_pfn + reg->nr_alloc_pages;
+
+ /* If start is lower than this, go left. */
+ if (start_pfn < tmp_start_pfn)
+ rbnode = rbnode->rb_left;
+ /* If end is higher than this, then go right. */
+ else if (end_pfn > tmp_end_pfn)
+ rbnode = rbnode->rb_right;
+ else /* Enclosing */
+ return reg;
+ }
+
+ return NULL;
+}
+
+/* Find allocated region enclosing free range. */
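+/* Unlike the lookup above, this variant bounds the search by the region's
+ * full nr_pages extent rather than nr_alloc_pages, since a free region has
+ * no committed pages. */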
+struct kbase_va_region *kbase_region_tracker_find_region_enclosing_range_free(kbase_context *kctx, u64 start_pfn, u32 nr_pages)
+{
+ struct rb_node *rbnode;
+ struct kbase_va_region *reg;
+ u64 end_pfn = start_pfn + nr_pages;
+
+ rbnode = kctx->reg_rbtree.rb_node;
+ while (rbnode) {
+ u64 tmp_start_pfn, tmp_end_pfn;
+ reg = rb_entry(rbnode, struct kbase_va_region, rblink);
+ tmp_start_pfn = reg->start_pfn;
+ tmp_end_pfn = reg->start_pfn + reg->nr_pages;
+
+ /* If start is lower than this, go left. */
+ if (start_pfn < tmp_start_pfn)
+ rbnode = rbnode->rb_left;
+ /* If end is higher than this, then go right. */
+ else if (end_pfn > tmp_end_pfn)
+ rbnode = rbnode->rb_right;
+ else /* Enclosing */
+ return reg;
+ }
+
+ return NULL;
+}
+
+/* Find region enclosing given address. */
+kbase_va_region *kbase_region_tracker_find_region_enclosing_address(kbase_context *kctx, mali_addr64 gpu_addr)
+{
+ struct rb_node *rbnode;
+ struct kbase_va_region *reg;
+ u64 gpu_pfn = gpu_addr >> PAGE_SHIFT;
+
+ KBASE_DEBUG_ASSERT(NULL != kctx);
+
+ rbnode = kctx->reg_rbtree.rb_node;
+ while (rbnode) {
+ u64 tmp_start_pfn, tmp_end_pfn;
+ reg = rb_entry(rbnode, struct kbase_va_region, rblink);
+ tmp_start_pfn = reg->start_pfn;
+ tmp_end_pfn = reg->start_pfn + reg->nr_pages;
+
+ /* If start is lower than this, go left. */
+ if (gpu_pfn < tmp_start_pfn)
+ rbnode = rbnode->rb_left;
+ /* If end is higher than this, then go right. */
+ else if (gpu_pfn >= tmp_end_pfn)
+ rbnode = rbnode->rb_right;
+ else /* Enclosing */
+ return reg;
+ }
+
+ return NULL;
+}
+
+KBASE_EXPORT_TEST_API(kbase_region_tracker_find_region_enclosing_address)
+
+/* Find region with given base address */
+kbase_va_region *kbase_region_tracker_find_region_base_address(kbase_context *kctx, mali_addr64 gpu_addr)
+{
+ u64 gpu_pfn = gpu_addr >> PAGE_SHIFT;
+ struct rb_node *rbnode;
+ struct kbase_va_region *reg;
+
+ KBASE_DEBUG_ASSERT(NULL != kctx);
+
+ rbnode = kctx->reg_rbtree.rb_node;
+ while (rbnode) {
+ reg = rb_entry(rbnode, struct kbase_va_region, rblink);
+ if (reg->start_pfn > gpu_pfn)
+ rbnode = rbnode->rb_left;
+ else if (reg->start_pfn < gpu_pfn)
+ rbnode = rbnode->rb_right;
+ else /* gpu_pfn == reg->start_pfn */
+ return reg;
+ }
+
+ return NULL;
+}
+
+KBASE_EXPORT_TEST_API(kbase_region_tracker_find_region_base_address)
+
+/* Find region meeting given requirements */
+static struct kbase_va_region *kbase_region_tracker_find_region_meeting_reqs(kbase_context *kctx, struct kbase_va_region *reg_reqs, u32 nr_pages, u32 align)
+{
+ struct rb_node *rbnode;
+ struct kbase_va_region *reg;
+
+ /* Note: this is a linear scan. With no target address in mind, the
+ * search cannot exploit the rbtree ordering. */
+ rbnode = rb_first(&(kctx->reg_rbtree));
+ while (rbnode) {
+ reg = rb_entry(rbnode, struct kbase_va_region, rblink);
+ if ((reg->nr_pages >= nr_pages) && (reg->flags & KBASE_REG_FREE) && kbase_region_tracker_match_zone(reg, reg_reqs)) {
+
+ /* Check alignment */
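+ /* Round start_pfn up to the next align boundary (align is a power of
+ * two); e.g. start_pfn 0x1003 with align 0x10 rounds up to 0x1010. The
+ * aligned range must still fit inside the candidate region. */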
+ u64 start_pfn = (reg->start_pfn + align - 1) & ~(align - 1);
+ if ((start_pfn >= reg->start_pfn) && (start_pfn <= (reg->start_pfn + reg->nr_pages - 1)) && ((start_pfn + nr_pages - 1) <= (reg->start_pfn + reg->nr_pages - 1)))
+ return reg;
+ }
+ rbnode = rb_next(rbnode);
+ }
+
+ return NULL;
+}
+
+/**
+ * @brief Remove a region object from the global list.
+ *
+ * The region reg is removed, possibly by merging with other free and
+ * compatible adjacent regions. It must be called with the context
+ * region lock held. The associated memory is not released (see
+ * kbase_free_alloced_region). Internal use only.
+ */
+STATIC mali_error kbase_remove_va_region(kbase_context *kctx, struct kbase_va_region *reg)
+{
+ struct rb_node *rbprev;
+ struct kbase_va_region *prev = NULL;
+ struct rb_node *rbnext;
+ struct kbase_va_region *next = NULL;
+
+ int merged_front = 0;
+ int merged_back = 0;
+ mali_error err = MALI_ERROR_NONE;
+
+ /* Try to merge with the previous block first */
+ rbprev = rb_prev(&(reg->rblink));
+ if (rbprev) {
+ prev = rb_entry(rbprev, struct kbase_va_region, rblink);
+ if ((prev->flags & KBASE_REG_FREE) && kbase_region_tracker_match_zone(prev, reg)) {
+ /* We're compatible with the previous VMA, merge with it */
+ prev->nr_pages += reg->nr_pages;
+ rb_erase(&(reg->rblink), &kctx->reg_rbtree);
+ reg = prev;
+ merged_front = 1;
+ }
+ }
+
+ /* Try to merge with the next block second */
+ /* Note we do the lookup here as the tree may have been rebalanced. */
+ rbnext = rb_next(&(reg->rblink));
+ if (rbnext) {
+ /* We're compatible with the next VMA, merge with it */
+ next = rb_entry(rbnext, struct kbase_va_region, rblink);
+ if ((next->flags & KBASE_REG_FREE) && kbase_region_tracker_match_zone(next, reg)) {
+ next->start_pfn = reg->start_pfn;
+ next->nr_pages += reg->nr_pages;
+ rb_erase(&(reg->rblink), &kctx->reg_rbtree);
+ merged_back = 1;
+ if (merged_front) {
+ /* We already merged with prev, free it */
+ kbase_free_alloced_region(reg);
+ }
+ }
+ }
+
+ /* If we failed to merge then we need to add a new block */
+ if (!(merged_front || merged_back)) {
+ /*
+ * We didn't merge anything. Add a new free
+ * placeholder and remove the original one.
+ */
+ struct kbase_va_region *free_reg;
+
+ free_reg = kbase_alloc_free_region(kctx, reg->start_pfn, reg->nr_pages, reg->flags & KBASE_REG_ZONE_MASK);
+ if (!free_reg) {
+ err = MALI_ERROR_OUT_OF_MEMORY;
+ goto out;
+ }
+
+ rb_replace_node(&(reg->rblink), &(free_reg->rblink), &(kctx->reg_rbtree));
+ }
+
+ out:
+ return err;
+}
+
+KBASE_EXPORT_TEST_API(kbase_remove_va_region)
+
+/**
+ * @brief Insert a VA region into the tracker, replacing part or all of the free region at_reg.
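+ *
+ * There are four cases: the new region replaces at_reg exactly, carves
+ * pages off its start, carves pages off its end, or splits it in two,
+ * which requires allocating an extra free placeholder for the front.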
+ */
+static mali_error kbase_insert_va_region_nolock(kbase_context *kctx, struct kbase_va_region *new_reg, struct kbase_va_region *at_reg, u64 start_pfn, u32 nr_pages)
+{
+ mali_error err = MALI_ERROR_NONE;
+
+ /* Must be a free region */
+ KBASE_DEBUG_ASSERT((at_reg->flags & KBASE_REG_FREE) != 0);
+ /* start_pfn should be contained within at_reg */
+ KBASE_DEBUG_ASSERT((start_pfn >= at_reg->start_pfn) && (start_pfn < at_reg->start_pfn + at_reg->nr_pages));
+ /* at least nr_pages from start_pfn should be contained within at_reg */
+ KBASE_DEBUG_ASSERT(start_pfn + nr_pages <= at_reg->start_pfn + at_reg->nr_pages);
+
+ new_reg->start_pfn = start_pfn;
+ new_reg->nr_pages = nr_pages;
+
+ /* New region matches the old one exactly, so swap it in and delete the old one. */
+ if (at_reg->start_pfn == start_pfn && at_reg->nr_pages == nr_pages) {
+ rb_replace_node(&(at_reg->rblink), &(new_reg->rblink), &(kctx->reg_rbtree));
+ kbase_free_alloced_region(at_reg);
+ }
+ /* New region replaces the start of the old one, so insert before. */
+ else if (at_reg->start_pfn == start_pfn) {
+ at_reg->start_pfn += nr_pages;
+ KBASE_DEBUG_ASSERT(at_reg->nr_pages >= nr_pages);
+ at_reg->nr_pages -= nr_pages;
+
+ kbase_region_tracker_insert(kctx, new_reg);
+ }
+ /* New region replaces the end of the old one, so insert after. */
+ else if ((at_reg->start_pfn + at_reg->nr_pages) == (start_pfn + nr_pages)) {
+ at_reg->nr_pages -= nr_pages;
+
+ kbase_region_tracker_insert(kctx, new_reg);
+ }
+ /* New region splits the old one, so insert and create new */
+ else {
+ struct kbase_va_region *new_front_reg = kbase_alloc_free_region(kctx, at_reg->start_pfn, start_pfn - at_reg->start_pfn, at_reg->flags & KBASE_REG_ZONE_MASK);
+ if (new_front_reg) {
+ at_reg->nr_pages -= nr_pages + new_front_reg->nr_pages;
+ at_reg->start_pfn = start_pfn + nr_pages;
+
+ kbase_region_tracker_insert(kctx, new_front_reg);
+ kbase_region_tracker_insert(kctx, new_reg);
+ } else {
+ err = MALI_ERROR_OUT_OF_MEMORY;
+ }
+ }
+
+ return err;
+}
+
+/**
+ * @brief Add a VA region to the list.
+ */
+mali_error kbase_add_va_region(kbase_context *kctx, struct kbase_va_region *reg, mali_addr64 addr, u32 nr_pages, u32 align)
+{
+ struct kbase_va_region *tmp;
+ u64 gpu_pfn = addr >> PAGE_SHIFT;
+ mali_error err = MALI_ERROR_NONE;
+
+ KBASE_DEBUG_ASSERT(NULL != kctx);
+ KBASE_DEBUG_ASSERT(NULL != reg);
+
+ if (!align)
+ align = 1;
+
+ /* must be a power of 2 */
+ KBASE_DEBUG_ASSERT((align & (align - 1)) == 0);
+ KBASE_DEBUG_ASSERT(nr_pages > 0);
+
+ /* Path 1: Map a specific address. Find the enclosing region, which *must* be free. */
+ if (gpu_pfn) {
+ KBASE_DEBUG_ASSERT(!(gpu_pfn & (align - 1)));
+
+ tmp = kbase_region_tracker_find_region_enclosing_range_free(kctx, gpu_pfn, nr_pages);
+ if (!tmp) {
+ KBASE_DEBUG_PRINT_WARN(KBASE_MEM, "Enclosing region not found: %08x gpu_pfn, %u nr_pages", (unsigned int)gpu_pfn, nr_pages);
+ err = MALI_ERROR_OUT_OF_GPU_MEMORY;
+ goto exit;
+ }
+
+ if ((!kbase_region_tracker_match_zone(tmp, reg)) || (!(tmp->flags & KBASE_REG_FREE))) {
+ KBASE_DEBUG_PRINT_WARN(KBASE_MEM, "Zone mismatch: %lu != %lu", tmp->flags & KBASE_REG_ZONE_MASK, reg->flags & KBASE_REG_ZONE_MASK);
+ KBASE_DEBUG_PRINT_WARN(KBASE_MEM, "!(tmp->flags & KBASE_REG_FREE): tmp->start_pfn=0x%llx tmp->flags=0x%lx tmp->nr_pages=0x%x gpu_pfn=0x%llx nr_pages=0x%x\n", tmp->start_pfn, tmp->flags, tmp->nr_pages, gpu_pfn, nr_pages);
+ KBASE_DEBUG_PRINT_WARN(KBASE_MEM, "in function %s (%p, %p, 0x%llx, 0x%x, 0x%x)\n", __func__, kctx, reg, addr, nr_pages, align);
+ err = MALI_ERROR_OUT_OF_GPU_MEMORY;
+ goto exit;
+ }
+
+ err = kbase_insert_va_region_nolock(kctx, reg, tmp, gpu_pfn, nr_pages);
+ if (err) {
+ KBASE_DEBUG_PRINT_WARN(KBASE_MEM, "Failed to insert va region");
+ err = MALI_ERROR_OUT_OF_GPU_MEMORY;
+ goto exit;
+ }
+
+ goto exit;
+ }
+
+ /* Path 2: Map any free address which meets the requirements. */
+ {
+ u64 start_pfn;
+ tmp = kbase_region_tracker_find_region_meeting_reqs(kctx, reg, nr_pages, align);
+ if (!tmp) {
+ err = MALI_ERROR_OUT_OF_GPU_MEMORY;
+ goto exit;
+ }
+ start_pfn = (tmp->start_pfn + align - 1) & ~(align - 1);
+ err = kbase_insert_va_region_nolock(kctx, reg, tmp, start_pfn, nr_pages);
+ }
+
+ exit:
+ return err;
+}
+
+KBASE_EXPORT_TEST_API(kbase_add_va_region)
+
+/**
+ * @brief Initialize the internal region tracker data structure.
+ */
+static void kbase_region_tracker_ds_init(kbase_context *kctx, struct kbase_va_region *pmem_reg, struct kbase_va_region *exec_reg, struct kbase_va_region *tmem_reg)
+{
+ kctx->reg_rbtree = RB_ROOT;
+ kbase_region_tracker_insert(kctx, pmem_reg);
+ kbase_region_tracker_insert(kctx, exec_reg);
+ kbase_region_tracker_insert(kctx, tmem_reg);
+}
+
+void kbase_region_tracker_term(kbase_context *kctx)
+{
+ struct rb_node *rbnode;
+ struct kbase_va_region *reg;
+ do {
+ rbnode = rb_first(&(kctx->reg_rbtree));
+ if (rbnode) {
+ rb_erase(rbnode, &(kctx->reg_rbtree));
+ reg = rb_entry(rbnode, struct kbase_va_region, rblink);
+ kbase_free_alloced_region(reg);
+ }
+ } while (rbnode);
+}
+
+/**
+ * Initialize the region tracker data structure.
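+ *
+ * Seeds the tracker with one free placeholder region per GPU VA zone
+ * (PMEM, EXEC, TMEM); all later allocations are carved out of these.
+ * PMEM deliberately starts at pfn 1 so that GPU page 0 is never mapped.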
+ */
+mali_error kbase_region_tracker_init(kbase_context *kctx)
+{
+ struct kbase_va_region *pmem_reg;
+ struct kbase_va_region *exec_reg;
+ struct kbase_va_region *tmem_reg;
+
+ /* Make sure page 0 is not used... */
+ pmem_reg = kbase_alloc_free_region(kctx, 1, KBASE_REG_ZONE_EXEC_BASE - 1, KBASE_REG_ZONE_PMEM);
+ if (!pmem_reg)
+ return MALI_ERROR_OUT_OF_MEMORY;
+
+ exec_reg = kbase_alloc_free_region(kctx, KBASE_REG_ZONE_EXEC_BASE, KBASE_REG_ZONE_EXEC_SIZE, KBASE_REG_ZONE_EXEC);
+ if (!exec_reg) {
+ kbase_free_alloced_region(pmem_reg);
+ return MALI_ERROR_OUT_OF_MEMORY;
+ }
+
+ tmem_reg = kbase_alloc_free_region(kctx, KBASE_REG_ZONE_TMEM_BASE, KBASE_REG_ZONE_TMEM_SIZE, KBASE_REG_ZONE_TMEM);
+ if (!tmem_reg) {
+ kbase_free_alloced_region(pmem_reg);
+ kbase_free_alloced_region(exec_reg);
+ return MALI_ERROR_OUT_OF_MEMORY;
+ }
+
+ kbase_region_tracker_ds_init(kctx, pmem_reg, exec_reg, tmem_reg);
+
+ return MALI_ERROR_NONE;
+}
+
+mali_error kbase_mem_init(struct kbase_device *kbdev)
+{
+ kbasep_mem_device *memdev;
+ u32 max_shared_memory;
+ KBASE_DEBUG_ASSERT(kbdev);
+
+ memdev = &kbdev->memdev;
+
+ max_shared_memory = (u32) kbasep_get_config_value(kbdev, kbdev->config_attributes, KBASE_CONFIG_ATTR_MEMORY_OS_SHARED_MAX);
+
+ if (MALI_ERROR_NONE != kbase_mem_usage_init(&memdev->usage, max_shared_memory >> PAGE_SHIFT)) {
+ return MALI_ERROR_FUNCTION_FAILED;
+ }
+
+ /* nothing to do, zero-inited when kbase_device was created */
+ return MALI_ERROR_NONE;
+}
+
+void kbase_mem_halt(kbase_device *kbdev)
+{
+ CSTD_UNUSED(kbdev);
+}
+
+void kbase_mem_term(kbase_device *kbdev)
+{
+ kbasep_mem_device *memdev;
+ KBASE_DEBUG_ASSERT(kbdev);
+
+ memdev = &kbdev->memdev;
+
+ kbase_mem_usage_term(&memdev->usage);
+}
+
+KBASE_EXPORT_TEST_API(kbase_mem_term)
+
+mali_error kbase_mem_usage_init(struct kbasep_mem_usage *usage, u32 max_pages)
+{
+ KBASE_DEBUG_ASSERT(usage);
+
+ atomic_set(&usage->cur_pages, 0);
+ /* query the max page count */
+ usage->max_pages = max_pages;
+
+ return MALI_ERROR_NONE;
+}
+
+void kbase_mem_usage_term(kbasep_mem_usage *usage)
+{
+ KBASE_DEBUG_ASSERT(usage);
+ /* No memory should be in use now */
+ KBASE_DEBUG_ASSERT(0 == atomic_read(&usage->cur_pages));
+ /* So any new alloc requests will fail */
+ usage->max_pages = 0;
+ /* So we assert on double term */
+ atomic_set(&usage->cur_pages, INT_MAX);
+}
+
+mali_error kbase_mem_usage_request_pages(kbasep_mem_usage *usage, u32 nr_pages)
+{
+ int cur_pages;
+ int old_cur_pages;
+
+ KBASE_DEBUG_ASSERT(usage);
+ KBASE_DEBUG_ASSERT(nr_pages); /* 0 pages would be an error in the calling code */
+
+ /*
+ * Fetch the initial cur_pages value
+ * each loop iteration below fetches
+ * it as part of the store attempt
+ */
+ cur_pages = atomic_read(&usage->cur_pages);
+
+ /* this check allows the simple if test in the loop below */
+ if (usage->max_pages < nr_pages)
+ goto usage_cap_exceeded;
+
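+ /* Standard compare-and-swap retry loop: if another thread changed
+ * cur_pages between our read and the cmpxchg, atomic_cmpxchg() returns
+ * the value it actually observed and we retry with that. */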
+ do {
+ u32 new_cur_pages;
+
+ /* enough pages to fulfil the request? */
+ if (usage->max_pages - nr_pages < cur_pages) {
+ usage_cap_exceeded:
+ KBASE_DEBUG_PRINT_WARN(KBASE_MEM, "Memory usage cap has been reached:\n" "\t%lu pages currently used\n" "\t%lu pages usage cap\n" "\t%lu new pages requested\n" "\twould result in %lu pages over the cap\n", (unsigned long)cur_pages, (unsigned long)usage->max_pages, (unsigned long)nr_pages, (unsigned long)(cur_pages + nr_pages - usage->max_pages));
+ return MALI_ERROR_OUT_OF_MEMORY;
+ }
+
+ /* try to atomically commit the new count */
+ old_cur_pages = cur_pages;
+ new_cur_pages = cur_pages + nr_pages;
+ cur_pages = atomic_cmpxchg(&usage->cur_pages, old_cur_pages, new_cur_pages);
+ /* cur_pages will be like old_cur_pages if there was no race */
+ } while (cur_pages != old_cur_pages);
+
+#ifdef CONFIG_MALI_GATOR_SUPPORT
+ kbase_trace_mali_total_alloc_pages_change((long long int)cur_pages);
+#endif /* CONFIG_MALI_GATOR_SUPPORT */
+
+ return MALI_ERROR_NONE;
+}
+
+KBASE_EXPORT_TEST_API(kbase_mem_usage_request_pages)
+
+void kbase_mem_usage_release_pages(kbasep_mem_usage *usage, u32 nr_pages)
+{
+ KBASE_DEBUG_ASSERT(usage);
+ KBASE_DEBUG_ASSERT(nr_pages <= atomic_read(&usage->cur_pages));
+
+ atomic_sub(nr_pages, &usage->cur_pages);
+#ifdef CONFIG_MALI_GATOR_SUPPORT
+ kbase_trace_mali_total_alloc_pages_change((long long int)atomic_read(&usage->cur_pages));
+#endif /* CONFIG_MALI_GATOR_SUPPORT */
+}
+
+KBASE_EXPORT_TEST_API(kbase_mem_usage_release_pages)
+
+/**
+ * @brief Wait for GPU write flush - only in use for BASE_HW_ISSUE_6367
+ *
+ * Wait 1000 GPU clock cycles. This delay is known to give the GPU time to flush its write buffer.
+ * @note If a GPU reset occurs, the cycle counters restart from zero, so the delay may not be as long as expected.
+ */
+#ifdef CONFIG_MALI_NO_MALI
+static void kbase_wait_write_flush(kbase_context *kctx)
+{
+}
+#else /* CONFIG_MALI_NO_MALI */
+static void kbase_wait_write_flush(kbase_context *kctx)
+{
+ u32 base_count = 0;
+ /* A suspend won't happen here, because we're in a syscall from a userspace thread */
+ kbase_pm_context_active(kctx->kbdev);
+ kbase_pm_request_gpu_cycle_counter(kctx->kbdev);
+ while (MALI_TRUE) {
+ u32 new_count;
+ new_count = kbase_reg_read(kctx->kbdev, GPU_CONTROL_REG(CYCLE_COUNT_LO), NULL);
+ /* First time around, just store the count. */
+ if (base_count == 0) {
+ base_count = new_count;
+ continue;
+ }
+
+ /* No need to handle wrapping, unsigned maths works for this. */
+ if ((new_count - base_count) > 1000)
+ break;
+ }
+ kbase_pm_release_gpu_cycle_counter(kctx->kbdev);
+ kbase_pm_context_idle(kctx->kbdev);
+}
+#endif /* CONFIG_MALI_NO_MALI */
+
+/**
+ * @brief Allocate a free region object.
+ *
+ * The allocated object is not part of any list yet, and is flagged as
+ * KBASE_REG_FREE. No mapping is allocated yet.
+ *
+ * zone is KBASE_REG_ZONE_TMEM, KBASE_REG_ZONE_PMEM, or KBASE_REG_ZONE_EXEC
+ *
+ */
+struct kbase_va_region *kbase_alloc_free_region(kbase_context *kctx, u64 start_pfn, u32 nr_pages, u32 zone)
+{
+ struct kbase_va_region *new_reg;
+
+ KBASE_DEBUG_ASSERT(kctx != NULL);
+
+ /* zone argument should only contain zone related region flags */
+ KBASE_DEBUG_ASSERT((zone & ~KBASE_REG_ZONE_MASK) == 0);
+ KBASE_DEBUG_ASSERT(nr_pages > 0);
+ KBASE_DEBUG_ASSERT(start_pfn + nr_pages <= (UINT64_MAX / PAGE_SIZE)); /* 64-bit address range is the max */
+
+ new_reg = kzalloc(sizeof(*new_reg), GFP_KERNEL);
+
+ if (!new_reg) {
+ KBASE_DEBUG_PRINT_WARN(KBASE_MEM, "kzalloc failed");
+ return NULL;
+ }
+
+ new_reg->kctx = kctx;
+ new_reg->flags = zone | KBASE_REG_FREE;
+
+ if (KBASE_REG_ZONE_TMEM == zone || KBASE_REG_ZONE_EXEC == zone)
+ new_reg->flags |= KBASE_REG_GROWABLE;
+
+ /* not imported by default */
+ new_reg->imported_type = BASE_TMEM_IMPORT_TYPE_INVALID;
+
+ new_reg->start_pfn = start_pfn;
+ new_reg->nr_pages = nr_pages;
+ new_reg->nr_alloc_pages = 0;
+ INIT_LIST_HEAD(&new_reg->map_list);
+
+ return new_reg;
+}
+
+KBASE_EXPORT_TEST_API(kbase_alloc_free_region)
+
+/**
+ * @brief Free a region object.
+ *
+ * The described region must be freed of any mapping.
+ *
+ * If the region is not flagged as KBASE_REG_FREE, the destructor
+ * kbase_free_phy_pages() will be called.
+ */
+void kbase_free_alloced_region(struct kbase_va_region *reg)
+{
+ KBASE_DEBUG_ASSERT(NULL != reg);
+ KBASE_DEBUG_ASSERT(list_empty(&reg->map_list));
+ if (!(reg->flags & KBASE_REG_FREE)) {
+ kbase_free_phy_pages(reg);
+ KBASE_DEBUG_CODE(
+ /* To detect use-after-free in debug builds */
+ reg->flags |= KBASE_REG_FREE);
+ }
+ kfree(reg);
+}
+
+KBASE_EXPORT_TEST_API(kbase_free_alloced_region)
+
+void kbase_mmu_update(kbase_context *kctx)
+{
+ /* Use GPU implementation-defined caching policy. */
+ u32 memattr = ASn_MEMATTR_IMPL_DEF_CACHE_POLICY;
+ u32 pgd_high;
+
+ KBASE_DEBUG_ASSERT(NULL != kctx);
+ /* ASSERT that the context has a valid as_nr, which is only the case
+ * when it's scheduled in.
+ *
+ * as_nr won't change because the caller has the runpool_irq lock */
+ KBASE_DEBUG_ASSERT(kctx->as_nr != KBASEP_AS_NR_INVALID);
+
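+ /* Program the address space registers: TRANSTAB_LO/HI point at the
+ * top-level page table and select table-walk mode, MEMATTR sets the
+ * memory attributes, and COMMAND_UPDATE then applies the new setup. */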
+ kbase_reg_write(kctx->kbdev, MMU_AS_REG(kctx->as_nr, ASn_TRANSTAB_LO), (kctx->pgd & ASn_TRANSTAB_ADDR_SPACE_MASK) | ASn_TRANSTAB_READ_INNER | ASn_TRANSTAB_ADRMODE_TABLE, kctx);
+
+ /* A conditional expression is used here to avoid a "right shift count
+ * >= width of type" error when using an if statement: although the
+ * sizeof condition is evaluated at compile time, the unused branch is
+ * not removed until after it has been type-checked, by which point the
+ * error has already been produced.
+ */
+ pgd_high = sizeof(kctx->pgd) > 4 ? (kctx->pgd >> 32) : 0;
+
+ kbase_reg_write(kctx->kbdev, MMU_AS_REG(kctx->as_nr, ASn_TRANSTAB_HI), pgd_high, kctx);
+
+ kbase_reg_write(kctx->kbdev, MMU_AS_REG(kctx->as_nr, ASn_MEMATTR_LO), memattr, kctx);
+ kbase_reg_write(kctx->kbdev, MMU_AS_REG(kctx->as_nr, ASn_MEMATTR_HI), memattr, kctx);
+ kbase_reg_write(kctx->kbdev, MMU_AS_REG(kctx->as_nr, ASn_COMMAND), ASn_COMMAND_UPDATE, kctx);
+}
+
+KBASE_EXPORT_TEST_API(kbase_mmu_update)
+
+void kbase_mmu_disable(kbase_context *kctx)
+{
+ KBASE_DEBUG_ASSERT(NULL != kctx);
+ /* ASSERT that the context has a valid as_nr, which is only the case
+ * when it's scheduled in.
+ *
+ * as_nr won't change because the caller has the runpool_irq lock */
+ KBASE_DEBUG_ASSERT(kctx->as_nr != KBASEP_AS_NR_INVALID);
+
+ kbase_reg_write(kctx->kbdev, MMU_AS_REG(kctx->as_nr, ASn_TRANSTAB_LO), 0, kctx);
+ kbase_reg_write(kctx->kbdev, MMU_AS_REG(kctx->as_nr, ASn_TRANSTAB_HI), 0, kctx);
+ kbase_reg_write(kctx->kbdev, MMU_AS_REG(kctx->as_nr, ASn_COMMAND), ASn_COMMAND_UPDATE, kctx);
+}
+
+KBASE_EXPORT_TEST_API(kbase_mmu_disable)
+
+mali_error kbase_gpu_mmap(kbase_context *kctx, struct kbase_va_region *reg, mali_addr64 addr, u32 nr_pages, u32 align)
+{
+ mali_error err;
+ KBASE_DEBUG_ASSERT(NULL != kctx);
+ KBASE_DEBUG_ASSERT(NULL != reg);
+
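+ /* Reserve the GPU VA range first; if inserting the MMU mappings then
+ * fails, release the range again so the region tracker stays consistent. */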
+ err = kbase_add_va_region(kctx, reg, addr, nr_pages, align);
+ if (MALI_ERROR_NONE != err)
+ return err;
+
+ err = kbase_mmu_insert_pages(kctx, reg->start_pfn, kbase_get_phy_pages(reg), reg->nr_alloc_pages, reg->flags & ((1 << KBASE_REG_FLAGS_NR_BITS) - 1));
+ if (MALI_ERROR_NONE != err)
+ kbase_remove_va_region(kctx, reg);
+
+ return err;
+}
+
+KBASE_EXPORT_TEST_API(kbase_gpu_mmap)
+
+mali_error kbase_gpu_munmap(kbase_context *kctx, struct kbase_va_region *reg)
+{
+ mali_error err;
+
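+ /* A region with start_pfn 0 has never been mapped on the GPU (page 0 is
+ * never handed out), so there is nothing to tear down. */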
+ if (reg->start_pfn == 0)
+ return MALI_ERROR_NONE;
+
+ err = kbase_mmu_teardown_pages(kctx, reg->start_pfn, reg->nr_alloc_pages);
+ if (MALI_ERROR_NONE != err)
+ return err;
+
+ err = kbase_remove_va_region(kctx, reg);
+ return err;
+}
+
+/**
+ * @brief Find a mapping keyed with ptr in region reg
+ */
+STATIC struct kbase_cpu_mapping *kbase_find_cpu_mapping(struct kbase_va_region *reg, struct vm_area_struct * vma)
+{
+ struct kbase_cpu_mapping *map;
+ struct list_head *pos;
+
+ KBASE_DEBUG_ASSERT(NULL != reg);
+
+ list_for_each(pos, &reg->map_list) {
+ map = list_entry(pos, kbase_cpu_mapping, link);
+ if (map->private == vma)
+ return map;
+ }
+
+ return NULL;
+}
+
+KBASE_EXPORT_TEST_API(kbase_find_cpu_mapping)
+
+STATIC struct kbase_cpu_mapping *kbasep_find_enclosing_cpu_mapping_of_region(const struct kbase_va_region *reg, void *uaddr, size_t size)
+{
+ struct kbase_cpu_mapping *map;
+ struct list_head *pos;
+
+ KBASE_DEBUG_ASSERT(NULL != reg);
+
+ if ((uintptr_t) uaddr + size < (uintptr_t) uaddr) /* overflow check */
+ return NULL;
+
+ list_for_each(pos, &reg->map_list) {
+ map = list_entry(pos, kbase_cpu_mapping, link);
+ if (map->uaddr <= uaddr && ((uintptr_t) map->uaddr + (map->nr_pages << PAGE_SHIFT)) >= ((uintptr_t) uaddr + size))
+ return map;
+ }
+
+ return NULL;
+}
+
+KBASE_EXPORT_TEST_API(kbasep_find_enclosing_cpu_mapping_of_region)
+
+static void kbase_dump_mappings(struct kbase_va_region *reg)
+{
+ struct kbase_cpu_mapping *map;
+ struct list_head *pos;
+
+ KBASE_DEBUG_ASSERT(NULL != reg);
+
+ list_for_each(pos, &reg->map_list) {
+ map = list_entry(pos, kbase_cpu_mapping, link);
+ KBASE_DEBUG_PRINT_WARN(KBASE_MEM, "uaddr %p nr_pages %d page_off %016llx vma %p\n", map->uaddr, map->nr_pages, map->page_off, map->private);
+ }
+}
+
+/**
+ * @brief Delete a mapping keyed with ptr in region reg
+ */
+mali_error kbase_cpu_free_mapping(struct kbase_va_region *reg, struct vm_area_struct * vma)
+{
+ struct kbase_cpu_mapping *map;
+ mali_error err = MALI_ERROR_NONE;
+ KBASE_DEBUG_ASSERT(NULL != reg);
+ map = kbase_find_cpu_mapping(reg, vma);
+ if (!map) {
+ KBASE_DEBUG_PRINT_WARN(KBASE_MEM, "Freeing unknown mapping %p in region %p\n", vma, (void *)reg);
+ kbase_dump_mappings(reg);
+ err = MALI_ERROR_FUNCTION_FAILED;
+ goto out;
+ }
+
+ /* As the tmem is being unmapped, update the process's page usage accounting. */
+ if ((reg->flags & KBASE_REG_ZONE_MASK) == KBASE_REG_ZONE_TMEM)
+ kbase_process_page_usage_inc(reg->kctx, map->nr_pages);
+
+ list_del(&map->link);
+ kfree(map);
+
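+ /* If the region was freed while still mapped, the free was deferred
+ * (KBASE_REG_DELAYED_FREE); the last unmap performs the real free. */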
+ if ((reg->flags & KBASE_REG_DELAYED_FREE) && list_empty(&reg->map_list))
+ err = kbase_mem_free_region(reg->kctx, reg);
+
+ out:
+ return err;
+}
+
+KBASE_EXPORT_TEST_API(kbase_cpu_free_mapping)
+
+struct kbase_cpu_mapping *kbasep_find_enclosing_cpu_mapping(kbase_context *kctx, mali_addr64 gpu_addr, void *uaddr, size_t size)
+{
+ struct kbase_cpu_mapping *map = NULL;
+ const struct kbase_va_region *reg;
+
+ KBASE_DEBUG_ASSERT(kctx != NULL);
+
+ kbase_gpu_vm_lock(kctx);
+
+ reg = kbase_region_tracker_find_region_enclosing_address(kctx, gpu_addr);
+ if (NULL != reg)
+ map = kbasep_find_enclosing_cpu_mapping_of_region(reg, uaddr, size);
+
+ kbase_gpu_vm_unlock(kctx);
+
+ return map;
+}
+
+KBASE_EXPORT_TEST_API(kbasep_find_enclosing_cpu_mapping)
+
+static mali_error kbase_do_syncset(kbase_context *kctx, struct base_syncset *set, kbase_sync_kmem_fn sync_fn)
+{
+ mali_error err = MALI_ERROR_NONE;
+ struct basep_syncset *sset = &set->basep_sset;
+ struct kbase_va_region *reg;
+ struct kbase_cpu_mapping *map;
+ phys_addr_t *pa;
+ u64 page_off, page_count;
+ void *start;
+ size_t size;
+ u64 i;
+ u32 offset_within_page;
+ phys_addr_t base_phy_addr = 0;
+ void *base_virt_addr = NULL;
+ size_t area_size = 0;
+
+ kbase_os_mem_map_lock(kctx);
+
+ kbase_gpu_vm_lock(kctx);
+
+ /* find the region where the virtual address is contained */
+ reg = kbase_region_tracker_find_region_enclosing_address(kctx, sset->mem_handle);
+ if (!reg) {
+ err = MALI_ERROR_FUNCTION_FAILED;
+ goto out_unlock;
+ }
+
+ if (!(reg->flags & KBASE_REG_CPU_CACHED))
+ goto out_unlock;
+
+ start = (void *)(uintptr_t) sset->user_addr;
+ size = (size_t) sset->size;
+
+ map = kbasep_find_enclosing_cpu_mapping_of_region(reg, start, size);
+ if (!map) {
+ err = MALI_ERROR_FUNCTION_FAILED;
+ goto out_unlock;
+ }
+
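+ /* Convert the user range into region page terms: page_off is the first
+ * page of the range relative to the region, and page_count rounds the
+ * byte range up to whole pages (e.g. 100 bytes starting 50 bytes into a
+ * page still touches one page; 5000 bytes would touch two 4K pages). */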
+ offset_within_page = (uintptr_t) start & (PAGE_SIZE - 1);
+ page_off = map->page_off + (((uintptr_t) start - (uintptr_t) map->uaddr) >> PAGE_SHIFT);
+ page_count = ((size + offset_within_page + (PAGE_SIZE - 1)) & PAGE_MASK) >> PAGE_SHIFT;
+ pa = kbase_get_phy_pages(reg);
+
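+ /* Walk the pages, coalescing physically and virtually contiguous runs
+ * into a single sync_fn() call to minimise per-call cache maintenance
+ * overhead; any remaining run is flushed after the loop. */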
+ for (i = 0; i < page_count; i++) {
+ u32 offset = (uintptr_t) start & (PAGE_SIZE - 1);
+ phys_addr_t paddr = pa[page_off + i] + offset;
+ size_t sz = MIN(((size_t) PAGE_SIZE - offset), size);
+
+ if (paddr == base_phy_addr + area_size && start == (void *)((uintptr_t) base_virt_addr + area_size)) {
+ area_size += sz;
+ } else if (area_size > 0) {
+ sync_fn(base_phy_addr, base_virt_addr, area_size);
+ area_size = 0;
+ }
+
+ if (area_size == 0) {
+ base_phy_addr = paddr;
+ base_virt_addr = start;
+ area_size = sz;
+ }
+
+ start = (void *)((uintptr_t) start + sz);
+ size -= sz;
+ }
+
+ if (area_size > 0)
+ sync_fn(base_phy_addr, base_virt_addr, area_size);
+
+ KBASE_DEBUG_ASSERT(size == 0);
+
+ out_unlock:
+ kbase_gpu_vm_unlock(kctx);
+ kbase_os_mem_map_unlock(kctx);
+ return err;
+}
+
+mali_error kbase_sync_now(kbase_context *kctx, struct base_syncset *syncset)
+{
+ mali_error err = MALI_ERROR_FUNCTION_FAILED;
+ struct basep_syncset *sset;
+
+ KBASE_DEBUG_ASSERT(NULL != kctx);
+ KBASE_DEBUG_ASSERT(NULL != syncset);
+
+ sset = &syncset->basep_sset;
+
+ switch (sset->type) {
+ case BASE_SYNCSET_OP_MSYNC:
+ err = kbase_do_syncset(kctx, syncset, kbase_sync_to_memory);
+ break;
+
+ case BASE_SYNCSET_OP_CSYNC:
+ err = kbase_do_syncset(kctx, syncset, kbase_sync_to_cpu);
+ break;
+
+ default:
+ KBASE_DEBUG_PRINT_WARN(KBASE_MEM, "Unknown msync op %d\n", sset->type);
+ break;
+ }
+
+ return err;
+}
+
+KBASE_EXPORT_TEST_API(kbase_sync_now)
+#ifdef CONFIG_DMA_SHARED_BUFFER
+static mali_bool is_actively_imported_umm(kbase_va_region *reg)
+{
+ if (reg->imported_type == BASE_TMEM_IMPORT_TYPE_UMM) {
+ if (reg->imported_metadata.umm.current_mapping_usage_count > 0)
+ return MALI_TRUE;
+ }
+ return MALI_FALSE;
+}
+#else
+static mali_bool is_actively_imported_umm(kbase_va_region *reg)
+{
+ return MALI_FALSE;
+}
+#endif /* CONFIG_DMA_SHARED_BUFFER */
+
+/* vm lock must be held */
+mali_error kbase_mem_free_region(kbase_context *kctx, kbase_va_region *reg)
+{
+ mali_error err;
+ KBASE_DEBUG_ASSERT(NULL != kctx);
+ KBASE_DEBUG_ASSERT(NULL != reg);
+ BUG_ON(!mutex_is_locked(&kctx->reg_lock));
+
+ if (!list_empty(&reg->map_list) || is_actively_imported_umm(reg)) {
+ /*
+ * We still have mappings, can't free memory. This also handles the race condition with the unmap code
+ * (see kbase_cpu_vm_close()).
+ */
+ err = MALI_ERROR_NONE;
+ reg->flags |= KBASE_REG_DELAYED_FREE;
+ goto out;
+ }
+
+ err = kbase_gpu_munmap(kctx, reg);
+ if (err) {
+ KBASE_DEBUG_PRINT_WARN(KBASE_MEM, "Could not unmap from the GPU...\n");
+ goto out;
+ }
+
+ if (kbase_hw_has_issue(kctx->kbdev, BASE_HW_ISSUE_6367)) {
+ /* Wait for GPU to flush write buffer before freeing physical pages */
+ kbase_wait_write_flush(kctx);
+ }
+
+ /* This will also free the physical pages */
+ kbase_free_alloced_region(reg);
+
+ out:
+ return err;
+}
+
+KBASE_EXPORT_TEST_API(kbase_mem_free_region)
+
+/**
+ * @brief Free the region from the GPU and unregister it.
+ *
+ * This function implements the free operation on a memory segment.
+ * It will loudly fail if called with outstanding mappings.
+ */
+mali_error kbase_mem_free(kbase_context *kctx, mali_addr64 gpu_addr)
+{
+ mali_error err = MALI_ERROR_NONE;
+ struct kbase_va_region *reg;
+
+ KBASE_DEBUG_ASSERT(kctx != NULL);
+
+ if (0 == gpu_addr) {
+ KBASE_DEBUG_PRINT_WARN(KBASE_MEM, "gpu_addr 0 is reserved for the ringbuffer and it's an error to try to free it using kbase_mem_free\n");
+ return MALI_ERROR_FUNCTION_FAILED;
+ }
+ kbase_gpu_vm_lock(kctx);
+
+ if (gpu_addr < PAGE_SIZE) {
+ /* an OS specific cookie, ask the OS specific code to validate it */
+ reg = kbase_lookup_cookie(kctx, gpu_addr);
+ if (!reg) {
+ err = MALI_ERROR_FUNCTION_FAILED;
+ goto out_unlock;
+ }
+
+ /* ask to unlink the cookie as we'll free it */
+ kbase_unlink_cookie(kctx, gpu_addr, reg);
+
+ kbase_free_alloced_region(reg);
+ } else {
+ /* A real GPU va */
+
+ /* Validate the region */
+ reg = kbase_region_tracker_find_region_base_address(kctx, gpu_addr);
+ if (!reg) {
+ KBASE_DEBUG_ASSERT_MSG(0, "Trying to free nonexistent region 0x%llX\n", gpu_addr);
+ err = MALI_ERROR_FUNCTION_FAILED;
+ goto out_unlock;
+ }
+
+ err = kbase_mem_free_region(kctx, reg);
+ }
+
+ out_unlock:
+ kbase_gpu_vm_unlock(kctx);
+ return err;
+}
+
+KBASE_EXPORT_TEST_API(kbase_mem_free)
+
+void kbase_update_region_flags(struct kbase_va_region *reg, u32 flags, mali_bool is_growable)
+{
+ KBASE_DEBUG_ASSERT(NULL != reg);
+ KBASE_DEBUG_ASSERT((flags & ~((1 << BASE_MEM_FLAGS_NR_BITS) - 1)) == 0);
+
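+ /* Translate the user-visible BASE_MEM_* allocation flags into the
+ * kernel's internal KBASE_REG_* region flags. */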
+ reg->flags |= kbase_cache_enabled(flags, reg->nr_pages);
+
+ if ((flags & BASE_MEM_GROW_ON_GPF) || is_growable) {
+ reg->flags |= KBASE_REG_GROWABLE;
+
+ if (flags & BASE_MEM_GROW_ON_GPF)
+ reg->flags |= KBASE_REG_PF_GROW;
+ } else {
+ /* As this region is not growable but the default is growable, we
+ explicitly clear the growable flag. */
+ reg->flags &= ~KBASE_REG_GROWABLE;
+ }
+
+ if (flags & BASE_MEM_PROT_CPU_WR)
+ reg->flags |= KBASE_REG_CPU_WR;
+
+ if (flags & BASE_MEM_PROT_CPU_RD)
+ reg->flags |= KBASE_REG_CPU_RD;
+
+ if (flags & BASE_MEM_PROT_GPU_WR)
+ reg->flags |= KBASE_REG_GPU_WR;
+
+ if (flags & BASE_MEM_PROT_GPU_RD)
+ reg->flags |= KBASE_REG_GPU_RD;
+
+ if (0 == (flags & BASE_MEM_PROT_GPU_EX))
+ reg->flags |= KBASE_REG_GPU_NX;
+
+ if (flags & BASE_MEM_COHERENT_LOCAL)
+ reg->flags |= KBASE_REG_SHARE_IN;
+ else if (flags & BASE_MEM_COHERENT_SYSTEM)
+ reg->flags |= KBASE_REG_SHARE_BOTH;
+
+}
+KBASE_EXPORT_TEST_API(kbase_update_region_flags)
+
+/**
+ * @brief Allocate physical pages for a region; memory allocated by this helper is always cleared to zero.
+ */
+mali_error kbase_alloc_phy_pages_helper(struct kbase_va_region *reg, u32 nr_pages_requested)
+{
+ kbase_context *kctx;
+ phys_addr_t *page_array;
+
+ KBASE_DEBUG_ASSERT(reg);
+ KBASE_DEBUG_ASSERT(reg->kctx);
+
+ /* Can't call this on TB buffers */
+ KBASE_DEBUG_ASSERT(0 == (reg->flags & KBASE_REG_IS_TB));
+ /* can't be called on imported types */
+ KBASE_DEBUG_ASSERT(BASE_TMEM_IMPORT_TYPE_INVALID == reg->imported_type);
+ /* Growth of too many pages attempted! (written this way to catch overflow) */
+ KBASE_DEBUG_ASSERT(reg->nr_pages - reg->nr_alloc_pages >= nr_pages_requested);
+ /* A complete commit is required if not marked as growable */
+ KBASE_DEBUG_ASSERT((reg->flags & KBASE_REG_GROWABLE) || (reg->nr_pages == nr_pages_requested));
+
+ if (0 == nr_pages_requested) {
+ /* early out if nothing to do */
+ return MALI_ERROR_NONE;
+ }
+
+ kctx = reg->kctx;
+ KBASE_DEBUG_ASSERT(kctx);
+
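+ /* Reserve against the per-context usage cap first, then the device-wide
+ * cap; on failure any reservations already taken are released again. */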
+ if (MALI_ERROR_NONE != kbase_mem_usage_request_pages(&kctx->usage, nr_pages_requested))
+ return MALI_ERROR_OUT_OF_MEMORY;
+
+ if (MALI_ERROR_NONE != kbase_mem_usage_request_pages(&kctx->kbdev->memdev.usage, nr_pages_requested)) {
+ kbase_mem_usage_release_pages(&kctx->usage, nr_pages_requested);
+ return MALI_ERROR_OUT_OF_MEMORY;
+ }
+
+ page_array = kbase_get_phy_pages(reg);
+
+ if (MALI_ERROR_NONE != kbase_mem_allocator_alloc(&kctx->osalloc, nr_pages_requested, page_array + reg->nr_alloc_pages, reg->flags)) {
+ kbase_mem_usage_release_pages(&kctx->usage, nr_pages_requested);
+ kbase_mem_usage_release_pages(&kctx->kbdev->memdev.usage, nr_pages_requested);
+ return MALI_ERROR_OUT_OF_MEMORY;
+ }
+
+ reg->nr_alloc_pages += nr_pages_requested;
+
+ if ((reg->flags & KBASE_REG_ZONE_MASK) == KBASE_REG_ZONE_TMEM)
+ kbase_process_page_usage_inc(reg->kctx, nr_pages_requested);
+
+ return MALI_ERROR_NONE;
+}
+
+void kbase_free_phy_pages_helper(struct kbase_va_region * reg, u32 nr_pages_to_free)
+{
+ kbase_context *kctx;
+ phys_addr_t *page_array;
+ mali_bool sync_back;
+ KBASE_DEBUG_ASSERT(reg);
+ /* Can't call this on TB buffers */
+ KBASE_DEBUG_ASSERT(0 == (reg->flags & KBASE_REG_IS_TB));
+ /* can't be called on imported types */
+ KBASE_DEBUG_ASSERT(BASE_TMEM_IMPORT_TYPE_INVALID == reg->imported_type);
+ /* Free of too many pages attempted! */
+ KBASE_DEBUG_ASSERT(reg->nr_alloc_pages >= nr_pages_to_free);
+ /* A complete free is required if not marked as growable */
+ KBASE_DEBUG_ASSERT((reg->flags & KBASE_REG_GROWABLE) || (reg->nr_alloc_pages == nr_pages_to_free));
+
+ /* early out if nothing to do */
+ if (0 == nr_pages_to_free)
+ return;
+
+ kctx = reg->kctx;
+ KBASE_DEBUG_ASSERT(kctx);
+
+ page_array = kbase_get_phy_pages(reg);
+
+ sync_back = (reg->flags & KBASE_REG_CPU_CACHED) ? MALI_TRUE : MALI_FALSE;
+ kbase_mem_allocator_free(&kctx->osalloc, nr_pages_to_free, page_array + reg->nr_alloc_pages - nr_pages_to_free, sync_back);
+