diff options
Diffstat (limited to 'drivers/gpu/arm/utgard/common')
66 files changed, 23384 insertions, 0 deletions
diff --git a/drivers/gpu/arm/utgard/common/mali_broadcast.c b/drivers/gpu/arm/utgard/common/mali_broadcast.c new file mode 100644 index 000000000000..136db61ace4a --- /dev/null +++ b/drivers/gpu/arm/utgard/common/mali_broadcast.c @@ -0,0 +1,142 @@ +/* + * Copyright (C) 2012-2015 ARM Limited. All rights reserved. + * + * This program is free software and is provided to you under the terms of the GNU General Public License version 2 + * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence. + * + * A copy of the licence is included with the program, and can also be obtained from Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. + */ + +#include "mali_broadcast.h" +#include "mali_kernel_common.h" +#include "mali_osk.h" + +#define MALI_BROADCAST_REGISTER_SIZE 0x1000 +#define MALI_BROADCAST_REG_BROADCAST_MASK 0x0 +#define MALI_BROADCAST_REG_INTERRUPT_MASK 0x4 + +struct mali_bcast_unit { + struct mali_hw_core hw_core; + u32 current_mask; +}; + +struct mali_bcast_unit *mali_bcast_unit_create(const _mali_osk_resource_t *resource) +{ + struct mali_bcast_unit *bcast_unit = NULL; + + MALI_DEBUG_ASSERT_POINTER(resource); + MALI_DEBUG_PRINT(2, ("Broadcast: Creating Mali Broadcast unit: %s\n", + resource->description)); + + bcast_unit = _mali_osk_malloc(sizeof(struct mali_bcast_unit)); + if (NULL == bcast_unit) { + MALI_PRINT_ERROR(("Broadcast: Failed to allocate memory for Broadcast unit\n")); + return NULL; + } + + if (_MALI_OSK_ERR_OK == mali_hw_core_create(&bcast_unit->hw_core, + resource, MALI_BROADCAST_REGISTER_SIZE)) { + bcast_unit->current_mask = 0; + mali_bcast_reset(bcast_unit); + + return bcast_unit; + } else { + MALI_PRINT_ERROR(("Broadcast: Failed map broadcast unit\n")); + } + + _mali_osk_free(bcast_unit); + + return NULL; +} + +void mali_bcast_unit_delete(struct mali_bcast_unit *bcast_unit) +{ + MALI_DEBUG_ASSERT_POINTER(bcast_unit); + 
mali_hw_core_delete(&bcast_unit->hw_core); + _mali_osk_free(bcast_unit); +} + +/* Call this function to add the @group's id into bcast mask + * Note: redundant calling this function with same @group + * doesn't make any difference as calling it once + */ +void mali_bcast_add_group(struct mali_bcast_unit *bcast_unit, + struct mali_group *group) +{ + u32 bcast_id; + u32 broadcast_mask; + + MALI_DEBUG_ASSERT_POINTER(bcast_unit); + MALI_DEBUG_ASSERT_POINTER(group); + + bcast_id = mali_pp_core_get_bcast_id(mali_group_get_pp_core(group)); + + broadcast_mask = bcast_unit->current_mask; + + broadcast_mask |= (bcast_id); /* add PP core to broadcast */ + broadcast_mask |= (bcast_id << 16); /* add MMU to broadcast */ + + /* store mask so we can restore on reset */ + bcast_unit->current_mask = broadcast_mask; +} + +/* Call this function to remove @group's id from bcast mask + * Note: redundant calling this function with same @group + * doesn't make any difference as calling it once + */ +void mali_bcast_remove_group(struct mali_bcast_unit *bcast_unit, + struct mali_group *group) +{ + u32 bcast_id; + u32 broadcast_mask; + + MALI_DEBUG_ASSERT_POINTER(bcast_unit); + MALI_DEBUG_ASSERT_POINTER(group); + + bcast_id = mali_pp_core_get_bcast_id(mali_group_get_pp_core(group)); + + broadcast_mask = bcast_unit->current_mask; + + broadcast_mask &= ~((bcast_id << 16) | bcast_id); + + /* store mask so we can restore on reset */ + bcast_unit->current_mask = broadcast_mask; +} + +void mali_bcast_reset(struct mali_bcast_unit *bcast_unit) +{ + MALI_DEBUG_ASSERT_POINTER(bcast_unit); + + MALI_DEBUG_PRINT(4, + ("Broadcast: setting mask 0x%08X + 0x%08X (reset)\n", + bcast_unit->current_mask, + bcast_unit->current_mask & 0xFF)); + + /* set broadcast mask */ + mali_hw_core_register_write(&bcast_unit->hw_core, + MALI_BROADCAST_REG_BROADCAST_MASK, + bcast_unit->current_mask); + + /* set IRQ override mask */ + mali_hw_core_register_write(&bcast_unit->hw_core, + MALI_BROADCAST_REG_INTERRUPT_MASK, + 
bcast_unit->current_mask & 0xFF); +} + +void mali_bcast_disable(struct mali_bcast_unit *bcast_unit) +{ + MALI_DEBUG_ASSERT_POINTER(bcast_unit); + + MALI_DEBUG_PRINT(4, ("Broadcast: setting mask 0x0 + 0x0 (disable)\n")); + + /* set broadcast mask */ + mali_hw_core_register_write(&bcast_unit->hw_core, + MALI_BROADCAST_REG_BROADCAST_MASK, + 0x0); + + /* set IRQ override mask */ + mali_hw_core_register_write(&bcast_unit->hw_core, + MALI_BROADCAST_REG_INTERRUPT_MASK, + 0x0); +} diff --git a/drivers/gpu/arm/utgard/common/mali_broadcast.h b/drivers/gpu/arm/utgard/common/mali_broadcast.h new file mode 100644 index 000000000000..efce44142ee9 --- /dev/null +++ b/drivers/gpu/arm/utgard/common/mali_broadcast.h @@ -0,0 +1,57 @@ +/* + * Copyright (C) 2012-2015 ARM Limited. All rights reserved. + * + * This program is free software and is provided to you under the terms of the GNU General Public License version 2 + * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence. + * + * A copy of the licence is included with the program, and can also be obtained from Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. + */ + +#ifndef __MALI_BROADCAST_H__ +#define __MALI_BROADCAST_H__ + +/* + * Interface for the broadcast unit on Mali-450. + * + * - Represents up to 8 × (MMU + PP) pairs. + * - Supports dynamically changing which (MMU + PP) pairs receive the broadcast by + * setting a mask. + */ + +#include "mali_hw_core.h" +#include "mali_group.h" + +struct mali_bcast_unit; + +struct mali_bcast_unit *mali_bcast_unit_create(const _mali_osk_resource_t *resource); +void mali_bcast_unit_delete(struct mali_bcast_unit *bcast_unit); + +/* Add a group to the list of (MMU + PP) pairs broadcasts go out to. */ +void mali_bcast_add_group(struct mali_bcast_unit *bcast_unit, struct mali_group *group); + +/* Remove a group to the list of (MMU + PP) pairs broadcasts go out to. 
*/ +void mali_bcast_remove_group(struct mali_bcast_unit *bcast_unit, struct mali_group *group); + +/* Re-set cached mask. This needs to be called after having been suspended. */ +void mali_bcast_reset(struct mali_bcast_unit *bcast_unit); + +/** + * Disable broadcast unit + * + * mali_bcast_enable must be called to re-enable the unit. Cores may not be + * added or removed when the unit is disabled. + */ +void mali_bcast_disable(struct mali_bcast_unit *bcast_unit); + +/** + * Re-enable broadcast unit + * + * This resets the masks to include the cores present when mali_bcast_disable was called. + */ +MALI_STATIC_INLINE void mali_bcast_enable(struct mali_bcast_unit *bcast_unit) +{ + mali_bcast_reset(bcast_unit); +} + +#endif /* __MALI_BROADCAST_H__ */ diff --git a/drivers/gpu/arm/utgard/common/mali_control_timer.c b/drivers/gpu/arm/utgard/common/mali_control_timer.c new file mode 100644 index 000000000000..d0dd95ac1b39 --- /dev/null +++ b/drivers/gpu/arm/utgard/common/mali_control_timer.c @@ -0,0 +1,128 @@ +/* + * Copyright (C) 2010-2012, 2014-2015 ARM Limited. All rights reserved. + * + * This program is free software and is provided to you under the terms of the GNU General Public License version 2 + * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence. + * + * A copy of the licence is included with the program, and can also be obtained from Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. 
+ */ + +#include "mali_kernel_utilization.h" +#include "mali_osk.h" +#include "mali_osk_mali.h" +#include "mali_kernel_common.h" +#include "mali_session.h" +#include "mali_dvfs_policy.h" +#include "mali_control_timer.h" + +static u64 period_start_time = 0; + +static _mali_osk_timer_t *mali_control_timer = NULL; +static mali_bool timer_running = MALI_FALSE; + +static u32 mali_control_timeout = 1000; + +void mali_control_timer_add(u32 timeout) +{ + _mali_osk_timer_add(mali_control_timer, _mali_osk_time_mstoticks(timeout)); +} + +static void mali_control_timer_callback(void *arg) +{ + if (mali_utilization_enabled()) { + struct mali_gpu_utilization_data *util_data = NULL; + u64 time_period = 0; + mali_bool need_add_timer = MALI_TRUE; + + /* Calculate gpu utilization */ + util_data = mali_utilization_calculate(&period_start_time, &time_period, &need_add_timer); + + if (util_data) { +#if defined(CONFIG_MALI_DVFS) + mali_dvfs_policy_realize(util_data, time_period); +#else + mali_utilization_platform_realize(util_data); +#endif + + if (MALI_TRUE == need_add_timer) { + mali_control_timer_add(mali_control_timeout); + } + } + } +} + +/* Init a timer (for now it is used for GPU utilization and dvfs) */ +_mali_osk_errcode_t mali_control_timer_init(void) +{ + _mali_osk_device_data data; + + if (_MALI_OSK_ERR_OK == _mali_osk_device_data_get(&data)) { + /* Use device specific settings (if defined) */ + if (0 != data.control_interval) { + mali_control_timeout = data.control_interval; + MALI_DEBUG_PRINT(2, ("Mali GPU Timer: %u\n", mali_control_timeout)); + } + } + + mali_control_timer = _mali_osk_timer_init(); + if (NULL == mali_control_timer) { + return _MALI_OSK_ERR_FAULT; + } + _mali_osk_timer_setcallback(mali_control_timer, mali_control_timer_callback, NULL); + + return _MALI_OSK_ERR_OK; +} + +void mali_control_timer_term(void) +{ + if (NULL != mali_control_timer) { + _mali_osk_timer_del(mali_control_timer); + timer_running = MALI_FALSE; + 
_mali_osk_timer_term(mali_control_timer); + mali_control_timer = NULL; + } +} + +mali_bool mali_control_timer_resume(u64 time_now) +{ + mali_utilization_data_assert_locked(); + + if (timer_running != MALI_TRUE) { + timer_running = MALI_TRUE; + + period_start_time = time_now; + + mali_utilization_reset(); + + return MALI_TRUE; + } + + return MALI_FALSE; +} + +void mali_control_timer_pause(void) +{ + mali_utilization_data_assert_locked(); + if (timer_running == MALI_TRUE) { + timer_running = MALI_FALSE; + } +} + +void mali_control_timer_suspend(mali_bool suspend) +{ + mali_utilization_data_lock(); + + if (timer_running == MALI_TRUE) { + timer_running = MALI_FALSE; + + mali_utilization_data_unlock(); + + if (suspend == MALI_TRUE) { + _mali_osk_timer_del(mali_control_timer); + mali_utilization_reset(); + } + } else { + mali_utilization_data_unlock(); + } +} diff --git a/drivers/gpu/arm/utgard/common/mali_control_timer.h b/drivers/gpu/arm/utgard/common/mali_control_timer.h new file mode 100644 index 000000000000..4f919ecfd70a --- /dev/null +++ b/drivers/gpu/arm/utgard/common/mali_control_timer.h @@ -0,0 +1,28 @@ +/* + * Copyright (C) 2010-2012, 2014-2015 ARM Limited. All rights reserved. + * + * This program is free software and is provided to you under the terms of the GNU General Public License version 2 + * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence. + * + * A copy of the licence is included with the program, and can also be obtained from Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. 
+ */ + +#ifndef __MALI_CONTROL_TIMER_H__ +#define __MALI_CONTROL_TIMER_H__ + +#include "mali_osk.h" + +_mali_osk_errcode_t mali_control_timer_init(void); + +void mali_control_timer_term(void); + +mali_bool mali_control_timer_resume(u64 time_now); + +void mali_control_timer_suspend(mali_bool suspend); +void mali_control_timer_pause(void); + +void mali_control_timer_add(u32 timeout); + +#endif /* __MALI_CONTROL_TIMER_H__ */ + diff --git a/drivers/gpu/arm/utgard/common/mali_dlbu.c b/drivers/gpu/arm/utgard/common/mali_dlbu.c new file mode 100644 index 000000000000..efe1ab3ab5cc --- /dev/null +++ b/drivers/gpu/arm/utgard/common/mali_dlbu.c @@ -0,0 +1,213 @@ +/* + * Copyright (C) 2012-2015 ARM Limited. All rights reserved. + * + * This program is free software and is provided to you under the terms of the GNU General Public License version 2 + * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence. + * + * A copy of the licence is included with the program, and can also be obtained from Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. + */ + +#include "mali_dlbu.h" +#include "mali_memory.h" +#include "mali_pp.h" +#include "mali_group.h" +#include "mali_osk.h" +#include "mali_hw_core.h" + +/** + * Size of DLBU registers in bytes + */ +#define MALI_DLBU_SIZE 0x400 + +mali_dma_addr mali_dlbu_phys_addr = 0; +static mali_io_address mali_dlbu_cpu_addr = NULL; + +/** + * DLBU register numbers + * Used in the register read/write routines. 
+ * See the hardware documentation for more information about each register + */ +typedef enum mali_dlbu_register { + MALI_DLBU_REGISTER_MASTER_TLLIST_PHYS_ADDR = 0x0000, /**< Master tile list physical base address; + 31:12 Physical address to the page used for the DLBU + 0 DLBU enable - set this bit to 1 enables the AXI bus + between PPs and L2s, setting to 0 disables the router and + no further transactions are sent to DLBU */ + MALI_DLBU_REGISTER_MASTER_TLLIST_VADDR = 0x0004, /**< Master tile list virtual base address; + 31:12 Virtual address to the page used for the DLBU */ + MALI_DLBU_REGISTER_TLLIST_VBASEADDR = 0x0008, /**< Tile list virtual base address; + 31:12 Virtual address to the tile list. This address is used when + calculating the call address sent to PP.*/ + MALI_DLBU_REGISTER_FB_DIM = 0x000C, /**< Framebuffer dimension; + 23:16 Number of tiles in Y direction-1 + 7:0 Number of tiles in X direction-1 */ + MALI_DLBU_REGISTER_TLLIST_CONF = 0x0010, /**< Tile list configuration; + 29:28 select the size of each allocated block: 0=128 bytes, 1=256, 2=512, 3=1024 + 21:16 2^n number of tiles to be binned to one tile list in Y direction + 5:0 2^n number of tiles to be binned to one tile list in X direction */ + MALI_DLBU_REGISTER_START_TILE_POS = 0x0014, /**< Start tile positions; + 31:24 start position in Y direction for group 1 + 23:16 start position in X direction for group 1 + 15:8 start position in Y direction for group 0 + 7:0 start position in X direction for group 0 */ + MALI_DLBU_REGISTER_PP_ENABLE_MASK = 0x0018, /**< PP enable mask; + 7 enable PP7 for load balancing + 6 enable PP6 for load balancing + 5 enable PP5 for load balancing + 4 enable PP4 for load balancing + 3 enable PP3 for load balancing + 2 enable PP2 for load balancing + 1 enable PP1 for load balancing + 0 enable PP0 for load balancing */ +} mali_dlbu_register; + +typedef enum { + PP0ENABLE = 0, + PP1ENABLE, + PP2ENABLE, + PP3ENABLE, + PP4ENABLE, + PP5ENABLE, + PP6ENABLE, + PP7ENABLE 
+} mali_dlbu_pp_enable; + +struct mali_dlbu_core { + struct mali_hw_core hw_core; /**< Common for all HW cores */ + u32 pp_cores_mask; /**< This is a mask for the PP cores whose operation will be controlled by LBU + see MALI_DLBU_REGISTER_PP_ENABLE_MASK register */ +}; + +_mali_osk_errcode_t mali_dlbu_initialize(void) +{ + MALI_DEBUG_PRINT(2, ("Mali DLBU: Initializing\n")); + + if (_MALI_OSK_ERR_OK == + mali_mmu_get_table_page(&mali_dlbu_phys_addr, + &mali_dlbu_cpu_addr)) { + return _MALI_OSK_ERR_OK; + } + + return _MALI_OSK_ERR_FAULT; +} + +void mali_dlbu_terminate(void) +{ + MALI_DEBUG_PRINT(3, ("Mali DLBU: terminating\n")); + + if (0 != mali_dlbu_phys_addr && 0 != mali_dlbu_cpu_addr) { + mali_mmu_release_table_page(mali_dlbu_phys_addr, + mali_dlbu_cpu_addr); + mali_dlbu_phys_addr = 0; + mali_dlbu_cpu_addr = 0; + } +} + +struct mali_dlbu_core *mali_dlbu_create(const _mali_osk_resource_t *resource) +{ + struct mali_dlbu_core *core = NULL; + + MALI_DEBUG_PRINT(2, ("Mali DLBU: Creating Mali dynamic load balancing unit: %s\n", resource->description)); + + core = _mali_osk_malloc(sizeof(struct mali_dlbu_core)); + if (NULL != core) { + if (_MALI_OSK_ERR_OK == mali_hw_core_create(&core->hw_core, resource, MALI_DLBU_SIZE)) { + core->pp_cores_mask = 0; + if (_MALI_OSK_ERR_OK == mali_dlbu_reset(core)) { + return core; + } + MALI_PRINT_ERROR(("Failed to reset DLBU %s\n", core->hw_core.description)); + mali_hw_core_delete(&core->hw_core); + } + + _mali_osk_free(core); + } else { + MALI_PRINT_ERROR(("Mali DLBU: Failed to allocate memory for DLBU core\n")); + } + + return NULL; +} + +void mali_dlbu_delete(struct mali_dlbu_core *dlbu) +{ + MALI_DEBUG_ASSERT_POINTER(dlbu); + mali_hw_core_delete(&dlbu->hw_core); + _mali_osk_free(dlbu); +} + +_mali_osk_errcode_t mali_dlbu_reset(struct mali_dlbu_core *dlbu) +{ + u32 dlbu_registers[7]; + _mali_osk_errcode_t err = _MALI_OSK_ERR_FAULT; + MALI_DEBUG_ASSERT_POINTER(dlbu); + + MALI_DEBUG_PRINT(4, ("Mali DLBU: mali_dlbu_reset: %s\n", 
dlbu->hw_core.description)); + + dlbu_registers[0] = mali_dlbu_phys_addr | 1; /* bit 0 enables the whole core */ + dlbu_registers[1] = MALI_DLBU_VIRT_ADDR; + dlbu_registers[2] = 0; + dlbu_registers[3] = 0; + dlbu_registers[4] = 0; + dlbu_registers[5] = 0; + dlbu_registers[6] = dlbu->pp_cores_mask; + + /* write reset values to core registers */ + mali_hw_core_register_write_array_relaxed(&dlbu->hw_core, MALI_DLBU_REGISTER_MASTER_TLLIST_PHYS_ADDR, dlbu_registers, 7); + + err = _MALI_OSK_ERR_OK; + + return err; +} + +void mali_dlbu_update_mask(struct mali_dlbu_core *dlbu) +{ + MALI_DEBUG_ASSERT_POINTER(dlbu); + + mali_hw_core_register_write(&dlbu->hw_core, MALI_DLBU_REGISTER_PP_ENABLE_MASK, dlbu->pp_cores_mask); +} + +void mali_dlbu_add_group(struct mali_dlbu_core *dlbu, struct mali_group *group) +{ + struct mali_pp_core *pp_core; + u32 bcast_id; + + MALI_DEBUG_ASSERT_POINTER(dlbu); + MALI_DEBUG_ASSERT_POINTER(group); + + pp_core = mali_group_get_pp_core(group); + bcast_id = mali_pp_core_get_bcast_id(pp_core); + + dlbu->pp_cores_mask |= bcast_id; + MALI_DEBUG_PRINT(3, ("Mali DLBU: Adding core[%d] New mask= 0x%02x\n", bcast_id , dlbu->pp_cores_mask)); +} + +/* Remove a group from the DLBU */ +void mali_dlbu_remove_group(struct mali_dlbu_core *dlbu, struct mali_group *group) +{ + struct mali_pp_core *pp_core; + u32 bcast_id; + + MALI_DEBUG_ASSERT_POINTER(dlbu); + MALI_DEBUG_ASSERT_POINTER(group); + + pp_core = mali_group_get_pp_core(group); + bcast_id = mali_pp_core_get_bcast_id(pp_core); + + dlbu->pp_cores_mask &= ~bcast_id; + MALI_DEBUG_PRINT(3, ("Mali DLBU: Removing core[%d] New mask= 0x%02x\n", bcast_id, dlbu->pp_cores_mask)); +} + +/* Configure the DLBU for \a job. This needs to be done before the job is started on the groups in the DLBU. 
*/ +void mali_dlbu_config_job(struct mali_dlbu_core *dlbu, struct mali_pp_job *job) +{ + u32 *registers; + MALI_DEBUG_ASSERT(job); + registers = mali_pp_job_get_dlbu_registers(job); + MALI_DEBUG_PRINT(4, ("Mali DLBU: Starting job\n")); + + /* Writing 4 registers: + * DLBU registers except the first two (written once at DLBU initialisation / reset) and the PP_ENABLE_MASK register */ + mali_hw_core_register_write_array_relaxed(&dlbu->hw_core, MALI_DLBU_REGISTER_TLLIST_VBASEADDR, registers, 4); + +} diff --git a/drivers/gpu/arm/utgard/common/mali_dlbu.h b/drivers/gpu/arm/utgard/common/mali_dlbu.h new file mode 100644 index 000000000000..6b068884bd49 --- /dev/null +++ b/drivers/gpu/arm/utgard/common/mali_dlbu.h @@ -0,0 +1,45 @@ +/* + * Copyright (C) 2012-2015 ARM Limited. All rights reserved. + * + * This program is free software and is provided to you under the terms of the GNU General Public License version 2 + * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence. + * + * A copy of the licence is included with the program, and can also be obtained from Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. 
+ */ + +#ifndef __MALI_DLBU_H__ +#define __MALI_DLBU_H__ + +#define MALI_DLBU_VIRT_ADDR 0xFFF00000 /* master tile virtual address fixed at this value and mapped into every session */ + +#include "mali_osk.h" + +struct mali_pp_job; +struct mali_group; +struct mali_dlbu_core; + +extern mali_dma_addr mali_dlbu_phys_addr; + +_mali_osk_errcode_t mali_dlbu_initialize(void); +void mali_dlbu_terminate(void); + +struct mali_dlbu_core *mali_dlbu_create(const _mali_osk_resource_t *resource); +void mali_dlbu_delete(struct mali_dlbu_core *dlbu); + +_mali_osk_errcode_t mali_dlbu_reset(struct mali_dlbu_core *dlbu); + +void mali_dlbu_add_group(struct mali_dlbu_core *dlbu, struct mali_group *group); +void mali_dlbu_remove_group(struct mali_dlbu_core *dlbu, struct mali_group *group); + +/** @brief Called to update HW after DLBU state changed + * + * This function must be called after \a mali_dlbu_add_group or \a + * mali_dlbu_remove_group to write the updated mask to hardware, unless the + * same is accomplished by calling \a mali_dlbu_reset. + */ +void mali_dlbu_update_mask(struct mali_dlbu_core *dlbu); + +void mali_dlbu_config_job(struct mali_dlbu_core *dlbu, struct mali_pp_job *job); + +#endif /* __MALI_DLBU_H__ */ diff --git a/drivers/gpu/arm/utgard/common/mali_dvfs_policy.c b/drivers/gpu/arm/utgard/common/mali_dvfs_policy.c new file mode 100644 index 000000000000..12ba069ec13b --- /dev/null +++ b/drivers/gpu/arm/utgard/common/mali_dvfs_policy.c @@ -0,0 +1,308 @@ +/* + * Copyright (C) 2010-2012, 2014-2015 ARM Limited. All rights reserved. + * + * This program is free software and is provided to you under the terms of the GNU General Public License version 2 + * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence. + * + * A copy of the licence is included with the program, and can also be obtained from Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. 
+ */ + +#include <linux/mali/mali_utgard.h> +#include "mali_kernel_common.h" +#include "mali_scheduler.h" +#include "mali_dvfs_policy.h" +#include "mali_osk_mali.h" +#include "mali_osk_profiling.h" + +#define CLOCK_TUNING_TIME_DEBUG 0 + +#define MAX_PERFORMANCE_VALUE 256 +#define MALI_PERCENTAGE_TO_UTILIZATION_FRACTION(percent) ((int) ((percent)*(MAX_PERFORMANCE_VALUE)/100.0 + 0.5)) + +/** The max fps the same as display vsync default 60, can set by module insert parameter */ +int mali_max_system_fps = 60; +/** A lower limit on their desired FPS default 58, can set by module insert parameter */ +int mali_desired_fps = 58; + +static int mali_fps_step1 = 0; +static int mali_fps_step2 = 0; + +static int clock_step = -1; +static int cur_clk_step = -1; +static struct mali_gpu_clock *gpu_clk = NULL; + +/*Function prototype */ +static int (*mali_gpu_set_freq)(int) = NULL; +static int (*mali_gpu_get_freq)(void) = NULL; + +static mali_bool mali_dvfs_enabled = MALI_FALSE; + +#define NUMBER_OF_NANOSECONDS_PER_SECOND 1000000000ULL +static u32 calculate_window_render_fps(u64 time_period) +{ + u32 max_window_number; + u64 tmp; + u64 max = time_period; + u32 leading_zeroes; + u32 shift_val; + u32 time_period_shift; + u32 max_window_number_shift; + u32 ret_val; + + max_window_number = mali_session_max_window_num(); + + /* To avoid float division, extend the dividend to ns unit */ + tmp = (u64)max_window_number * NUMBER_OF_NANOSECONDS_PER_SECOND; + if (tmp > time_period) { + max = tmp; + } + + /* + * We may have 64-bit values, a dividend or a divisor or both + * To avoid dependencies to a 64-bit divider, we shift down the two values + * equally first. 
+ */ + leading_zeroes = _mali_osk_clz((u32)(max >> 32)); + shift_val = 32 - leading_zeroes; + + time_period_shift = (u32)(time_period >> shift_val); + max_window_number_shift = (u32)(tmp >> shift_val); + + ret_val = max_window_number_shift / time_period_shift; + + return ret_val; +} + +static bool mali_pickup_closest_avail_clock(int target_clock_mhz, mali_bool pick_clock_up) +{ + int i = 0; + bool clock_changed = false; + + /* Round up the closest available frequency step for target_clock_hz */ + for (i = 0; i < gpu_clk->num_of_steps; i++) { + /* Find the first item > target_clock_hz */ + if (((int)(gpu_clk->item[i].clock) - target_clock_mhz) > 0) { + break; + } + } + + /* If the target clock greater than the maximum clock just pick the maximum one*/ + if (i == gpu_clk->num_of_steps) { + i = gpu_clk->num_of_steps - 1; + } else { + if ((!pick_clock_up) && (i > 0)) { + i = i - 1; + } + } + + clock_step = i; + if (cur_clk_step != clock_step) { + clock_changed = true; + } + + return clock_changed; +} + +void mali_dvfs_policy_realize(struct mali_gpu_utilization_data *data, u64 time_period) +{ + int under_perform_boundary_value = 0; + int over_perform_boundary_value = 0; + int current_fps = 0; + int current_gpu_util = 0; + bool clock_changed = false; +#if CLOCK_TUNING_TIME_DEBUG + struct timeval start; + struct timeval stop; + unsigned int elapse_time; + do_gettimeofday(&start); +#endif + u32 window_render_fps; + + if (NULL == gpu_clk) { + MALI_DEBUG_PRINT(2, ("Enable DVFS but patform doesn't Support freq change. 
\n")); + return; + } + + window_render_fps = calculate_window_render_fps(time_period); + + current_fps = window_render_fps; + current_gpu_util = data->utilization_gpu; + + /* Get the specific under_perform_boundary_value and over_perform_boundary_value */ + if ((mali_desired_fps <= current_fps) && (current_fps < mali_max_system_fps)) { + under_perform_boundary_value = MALI_PERCENTAGE_TO_UTILIZATION_FRACTION(90); + over_perform_boundary_value = MALI_PERCENTAGE_TO_UTILIZATION_FRACTION(70); + } else if ((mali_fps_step1 <= current_fps) && (current_fps < mali_desired_fps)) { + under_perform_boundary_value = MALI_PERCENTAGE_TO_UTILIZATION_FRACTION(55); + over_perform_boundary_value = MALI_PERCENTAGE_TO_UTILIZATION_FRACTION(35); + } else if ((mali_fps_step2 <= current_fps) && (current_fps < mali_fps_step1)) { + under_perform_boundary_value = MALI_PERCENTAGE_TO_UTILIZATION_FRACTION(70); + over_perform_boundary_value = MALI_PERCENTAGE_TO_UTILIZATION_FRACTION(50); + } else { + under_perform_boundary_value = MALI_PERCENTAGE_TO_UTILIZATION_FRACTION(55); + over_perform_boundary_value = MALI_PERCENTAGE_TO_UTILIZATION_FRACTION(35); + } + + MALI_DEBUG_PRINT(5, ("Using ARM power policy: gpu util = %d \n", current_gpu_util)); + MALI_DEBUG_PRINT(5, ("Using ARM power policy: under_perform = %d, over_perform = %d \n", under_perform_boundary_value, over_perform_boundary_value)); + MALI_DEBUG_PRINT(5, ("Using ARM power policy: render fps = %d, pressure render fps = %d \n", current_fps, window_render_fps)); + + /* Get current clock value */ + cur_clk_step = mali_gpu_get_freq(); + + /* Consider offscreen */ + if (0 == current_fps) { + /* GP or PP under perform, need to give full power */ + if (current_gpu_util > over_perform_boundary_value) { + if (cur_clk_step != gpu_clk->num_of_steps - 1) { + clock_changed = true; + clock_step = gpu_clk->num_of_steps - 1; + } + } + + /* If GPU is idle, use lowest power */ + if (0 == current_gpu_util) { + if (cur_clk_step != 0) { + clock_changed = true; + 
clock_step = 0; + } + } + + goto real_setting; + } + + /* 2. Calculate target clock if the GPU clock can be tuned */ + if (-1 != cur_clk_step) { + int target_clk_mhz = -1; + mali_bool pick_clock_up = MALI_TRUE; + + if (current_gpu_util > under_perform_boundary_value) { + /* when under perform, need to consider the fps part */ + target_clk_mhz = gpu_clk->item[cur_clk_step].clock * current_gpu_util * mali_desired_fps / under_perform_boundary_value / current_fps; + pick_clock_up = MALI_TRUE; + } else if (current_gpu_util < over_perform_boundary_value) { + /* when over perform, did't need to consider fps, system didn't want to reach desired fps */ + target_clk_mhz = gpu_clk->item[cur_clk_step].clock * current_gpu_util / under_perform_boundary_value; + pick_clock_up = MALI_FALSE; + } + + if (-1 != target_clk_mhz) { + clock_changed = mali_pickup_closest_avail_clock(target_clk_mhz, pick_clock_up); + } + } + +real_setting: + if (clock_changed) { + mali_gpu_set_freq(clock_step); + + _mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_SINGLE | + MALI_PROFILING_EVENT_CHANNEL_GPU | + MALI_PROFILING_EVENT_REASON_SINGLE_GPU_FREQ_VOLT_CHANGE, + gpu_clk->item[clock_step].clock, + gpu_clk->item[clock_step].vol / 1000, + 0, 0, 0); + } + +#if CLOCK_TUNING_TIME_DEBUG + do_gettimeofday(&stop); + + elapse_time = timeval_to_ns(&stop) - timeval_to_ns(&start); + MALI_DEBUG_PRINT(2, ("Using ARM power policy: eclapse time = %d\n", elapse_time)); +#endif +} + +_mali_osk_errcode_t mali_dvfs_policy_init(void) +{ + _mali_osk_device_data data; + _mali_osk_errcode_t err = _MALI_OSK_ERR_OK; + + if (_MALI_OSK_ERR_OK == _mali_osk_device_data_get(&data)) { + if ((NULL != data.get_clock_info) && (NULL != data.set_freq) && (NULL != data.get_freq)) { + MALI_DEBUG_PRINT(2, ("Mali DVFS init: using arm dvfs policy \n")); + + + mali_fps_step1 = mali_max_system_fps / 3; + mali_fps_step2 = mali_max_system_fps / 5; + + data.get_clock_info(&gpu_clk); + + if (gpu_clk != NULL) { +#ifdef DEBUG + int i; + for (i 
= 0; i < gpu_clk->num_of_steps; i++) { + MALI_DEBUG_PRINT(5, ("mali gpu clock info: step%d clock(%d)Hz,vol(%d) \n", + i, gpu_clk->item[i].clock, gpu_clk->item[i].vol)); + } +#endif + } else { + MALI_DEBUG_PRINT(2, ("Mali DVFS init: platform didn't define enough info for ddk to do DVFS \n")); + } + + mali_gpu_get_freq = data.get_freq; + mali_gpu_set_freq = data.set_freq; + + if ((NULL != gpu_clk) && (gpu_clk->num_of_steps > 0) + && (NULL != mali_gpu_get_freq) && (NULL != mali_gpu_set_freq)) { + mali_dvfs_enabled = MALI_TRUE; + } + } else { + MALI_DEBUG_PRINT(2, ("Mali DVFS init: platform function callback incomplete, need check mali_gpu_device_data in platform .\n")); + } + } else { + err = _MALI_OSK_ERR_FAULT; + MALI_DEBUG_PRINT(2, ("Mali DVFS init: get platform data error .\n")); + } + + return err; +} + +/* + * Always give full power when start a new period, + * if mali dvfs enabled, for performance consideration + */ +void mali_dvfs_policy_new_period(void) +{ + /* Always give full power when start a new period */ + unsigned int cur_clk_step = 0; + + cur_clk_step = mali_gpu_get_freq(); + + if (cur_clk_step != (gpu_clk->num_of_steps - 1)) { + mali_gpu_set_freq(gpu_clk->num_of_steps - 1); + + _mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_SINGLE | + MALI_PROFILING_EVENT_CHANNEL_GPU | + MALI_PROFILING_EVENT_REASON_SINGLE_GPU_FREQ_VOLT_CHANGE, gpu_clk->item[gpu_clk->num_of_steps - 1].clock, + gpu_clk->item[gpu_clk->num_of_steps - 1].vol / 1000, 0, 0, 0); + } +} + +mali_bool mali_dvfs_policy_enabled(void) +{ + return mali_dvfs_enabled; +} + +#if defined(CONFIG_MALI400_PROFILING) +void mali_get_current_gpu_clk_item(struct mali_gpu_clk_item *clk_item) +{ + if (mali_platform_device != NULL) { + + struct mali_gpu_device_data *device_data = NULL; + device_data = (struct mali_gpu_device_data *)mali_platform_device->dev.platform_data; + + if ((NULL != device_data->get_clock_info) && (NULL != device_data->get_freq)) { + + int cur_clk_step = device_data->get_freq(); + 
struct mali_gpu_clock *mali_gpu_clk = NULL; + + device_data->get_clock_info(&mali_gpu_clk); + clk_item->clock = mali_gpu_clk->item[cur_clk_step].clock; + clk_item->vol = mali_gpu_clk->item[cur_clk_step].vol; + } else { + MALI_DEBUG_PRINT(2, ("Mali GPU Utilization: platform function callback incomplete, need check mali_gpu_device_data in platform .\n")); + } + } +} +#endif + diff --git a/drivers/gpu/arm/utgard/common/mali_dvfs_policy.h b/drivers/gpu/arm/utgard/common/mali_dvfs_policy.h new file mode 100644 index 000000000000..55e4b354c1a9 --- /dev/null +++ b/drivers/gpu/arm/utgard/common/mali_dvfs_policy.h @@ -0,0 +1,34 @@ +/* + * Copyright (C) 2010-2012, 2014-2015 ARM Limited. All rights reserved. + * + * This program is free software and is provided to you under the terms of the GNU General Public License version 2 + * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence. + * + * A copy of the licence is included with the program, and can also be obtained from Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. + */ + +#ifndef __MALI_DVFS_POLICY_H__ +#define __MALI_DVFS_POLICY_H__ + +#ifdef __cplusplus +extern "C" { +#endif + +void mali_dvfs_policy_realize(struct mali_gpu_utilization_data *data, u64 time_period); + +_mali_osk_errcode_t mali_dvfs_policy_init(void); + +void mali_dvfs_policy_new_period(void); + +mali_bool mali_dvfs_policy_enabled(void); + +#if defined(CONFIG_MALI400_PROFILING) +void mali_get_current_gpu_clk_item(struct mali_gpu_clk_item *clk_item); +#endif + +#ifdef __cplusplus +} +#endif + +#endif/* __MALI_DVFS_POLICY_H__ */ diff --git a/drivers/gpu/arm/utgard/common/mali_executor.c b/drivers/gpu/arm/utgard/common/mali_executor.c new file mode 100644 index 000000000000..3c2864386798 --- /dev/null +++ b/drivers/gpu/arm/utgard/common/mali_executor.c @@ -0,0 +1,2642 @@ +/* + * Copyright (C) 2012-2015 ARM Limited. All rights reserved. 
+ * + * This program is free software and is provided to you under the terms of the GNU General Public License version 2 + * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence. + * + * A copy of the licence is included with the program, and can also be obtained from Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. + */ + +#include "mali_executor.h" +#include "mali_scheduler.h" +#include "mali_kernel_common.h" +#include "mali_kernel_core.h" +#include "mali_osk.h" +#include "mali_osk_list.h" +#include "mali_pp.h" +#include "mali_pp_job.h" +#include "mali_group.h" +#include "mali_pm.h" +#include "mali_timeline.h" +#include "mali_osk_profiling.h" +#include "mali_session.h" + +/* + * If dma_buf with map on demand is used, we defer job deletion and job queue + * if in atomic context, since both might sleep. + */ +#if defined(CONFIG_DMA_SHARED_BUFFER) && !defined(CONFIG_MALI_DMA_BUF_MAP_ON_ATTACH) +#define MALI_EXECUTOR_USE_DEFERRED_PP_JOB_DELETE 1 +#define MALI_EXECUTOR_USE_DEFERRED_PP_JOB_QUEUE 1 +#endif /* !defined(CONFIG_DMA_SHARED_BUFFER) && !defined(CONFIG_MALI_DMA_BUF_MAP_ON_ATTACH) */ + +/* + * ---------- static type definitions (structs, enums, etc) ---------- + */ + +enum mali_executor_state_t { + EXEC_STATE_NOT_PRESENT, /* Virtual group on Mali-300/400 (do not use) */ + EXEC_STATE_DISABLED, /* Disabled by core scaling (do not use) */ + EXEC_STATE_EMPTY, /* No child groups for virtual group (do not use) */ + EXEC_STATE_INACTIVE, /* Can be used, but must be activate first */ + EXEC_STATE_IDLE, /* Active and ready to be used */ + EXEC_STATE_WORKING, /* Executing a job */ +}; + +/* + * ---------- global variables (exported due to inline functions) ---------- + */ + +/* Lock for this module (protecting all HW access except L2 caches) */ +_mali_osk_spinlock_irq_t *mali_executor_lock_obj = NULL; + +mali_bool 
mali_executor_hints[MALI_EXECUTOR_HINT_MAX]; + +/* + * ---------- static variables ---------- + */ + +/* Used to defer job scheduling */ +static _mali_osk_wq_work_t *executor_wq_high_pri = NULL; + +/* Store version from GP and PP (user space wants to know this) */ +static u32 pp_version = 0; +static u32 gp_version = 0; + +/* List of physical PP groups which are disabled by some external source */ +static _MALI_OSK_LIST_HEAD_STATIC_INIT(group_list_disabled); +static u32 group_list_disabled_count = 0; + +/* List of groups which can be used, but activate first */ +static _MALI_OSK_LIST_HEAD_STATIC_INIT(group_list_inactive); +static u32 group_list_inactive_count = 0; + +/* List of groups which are active and ready to be used */ +static _MALI_OSK_LIST_HEAD_STATIC_INIT(group_list_idle); +static u32 group_list_idle_count = 0; + +/* List of groups which are executing a job */ +static _MALI_OSK_LIST_HEAD_STATIC_INIT(group_list_working); +static u32 group_list_working_count = 0; + +/* Virtual group (if any) */ +static struct mali_group *virtual_group = NULL; + +/* Virtual group state is tracked with a state variable instead of 4 lists */ +static enum mali_executor_state_t virtual_group_state = EXEC_STATE_NOT_PRESENT; + +/* GP group */ +static struct mali_group *gp_group = NULL; + +/* GP group state is tracked with a state variable instead of 4 lists */ +static enum mali_executor_state_t gp_group_state = EXEC_STATE_NOT_PRESENT; + +static u32 gp_returned_cookie = 0; + +/* Total number of physical PP cores present */ +static u32 num_physical_pp_cores_total = 0; + +/* Number of physical cores which are enabled */ +static u32 num_physical_pp_cores_enabled = 0; + +/* Enable or disable core scaling */ +static mali_bool core_scaling_enabled = MALI_TRUE; + +/* Variables to allow safe pausing of the scheduler */ +static _mali_osk_wait_queue_t *executor_working_wait_queue = NULL; +static u32 pause_count = 0; + +/* PP cores haven't been enabled because of some pp cores haven't been 
disabled. */ +static int core_scaling_delay_up_mask[MALI_MAX_NUMBER_OF_DOMAINS] = { 0 }; + +/* Variables used to implement notify pp core changes to userspace when core scaling + * is finished in mali_executor_complete_group() function. */ +static _mali_osk_wq_work_t *executor_wq_notify_core_change = NULL; +static _mali_osk_wait_queue_t *executor_notify_core_change_wait_queue = NULL; + +/* + * ---------- Forward declaration of static functions ---------- + */ +static mali_bool mali_executor_is_suspended(void *data); +static mali_bool mali_executor_is_working(void); +static void mali_executor_disable_empty_virtual(void); +static mali_bool mali_executor_physical_rejoin_virtual(struct mali_group *group); +static mali_bool mali_executor_has_virtual_group(void); +static mali_bool mali_executor_virtual_group_is_usable(void); +static void mali_executor_schedule(void); +static void mali_executor_wq_schedule(void *arg); +static void mali_executor_send_gp_oom_to_user(struct mali_gp_job *job, u32 added_size); +static void mali_executor_complete_group(struct mali_group *group, + mali_bool success, + struct mali_gp_job **gp_job_done, + struct mali_pp_job **pp_job_done); +static void mali_executor_change_state_pp_physical(struct mali_group *group, + _mali_osk_list_t *old_list, + u32 *old_count, + _mali_osk_list_t *new_list, + u32 *new_count); +static mali_bool mali_executor_group_is_in_state(struct mali_group *group, + enum mali_executor_state_t state); + +static void mali_executor_group_enable_internal(struct mali_group *group); +static void mali_executor_group_disable_internal(struct mali_group *group); +static void mali_executor_core_scale(unsigned int target_core_nr); +static void mali_executor_core_scale_in_group_complete(struct mali_group *group); +static void mali_executor_notify_core_change(u32 num_cores); +static void mali_executor_wq_notify_core_change(void *arg); +static void mali_executor_change_group_status_disabled(struct mali_group *group); +static mali_bool 
mali_executor_deactivate_list_idle(mali_bool deactivate_idle_group); +static void mali_executor_set_state_pp_physical(struct mali_group *group, + _mali_osk_list_t *new_list, + u32 *new_count); + +/* + * ---------- Actual implementation ---------- + */ + +_mali_osk_errcode_t mali_executor_initialize(void) +{ + mali_executor_lock_obj = _mali_osk_spinlock_irq_init(_MALI_OSK_LOCKFLAG_ORDERED, _MALI_OSK_LOCK_ORDER_EXECUTOR); + if (NULL == mali_executor_lock_obj) { + mali_executor_terminate(); + return _MALI_OSK_ERR_NOMEM; + } + + executor_wq_high_pri = _mali_osk_wq_create_work_high_pri(mali_executor_wq_schedule, NULL); + if (NULL == executor_wq_high_pri) { + mali_executor_terminate(); + return _MALI_OSK_ERR_NOMEM; + } + + executor_working_wait_queue = _mali_osk_wait_queue_init(); + if (NULL == executor_working_wait_queue) { + mali_executor_terminate(); + return _MALI_OSK_ERR_NOMEM; + } + + executor_wq_notify_core_change = _mali_osk_wq_create_work(mali_executor_wq_notify_core_change, NULL); + if (NULL == executor_wq_notify_core_change) { + mali_executor_terminate(); + return _MALI_OSK_ERR_NOMEM; + } + + executor_notify_core_change_wait_queue = _mali_osk_wait_queue_init(); + if (NULL == executor_notify_core_change_wait_queue) { + mali_executor_terminate(); + return _MALI_OSK_ERR_NOMEM; + } + + return _MALI_OSK_ERR_OK; +} + +void mali_executor_terminate(void) +{ + if (NULL != executor_notify_core_change_wait_queue) { + _mali_osk_wait_queue_term(executor_notify_core_change_wait_queue); + executor_notify_core_change_wait_queue = NULL; + } + + if (NULL != executor_wq_notify_core_change) { + _mali_osk_wq_delete_work(executor_wq_notify_core_change); + executor_wq_notify_core_change = NULL; + } + + if (NULL != executor_working_wait_queue) { + _mali_osk_wait_queue_term(executor_working_wait_queue); + executor_working_wait_queue = NULL; + } + + if (NULL != executor_wq_high_pri) { + _mali_osk_wq_delete_work(executor_wq_high_pri); + executor_wq_high_pri = NULL; + } + + if (NULL != 
mali_executor_lock_obj) { + _mali_osk_spinlock_irq_term(mali_executor_lock_obj); + mali_executor_lock_obj = NULL; + } +} + +void mali_executor_populate(void) +{ + u32 num_groups; + u32 i; + + num_groups = mali_group_get_glob_num_groups(); + + /* Do we have a virtual group? */ + for (i = 0; i < num_groups; i++) { + struct mali_group *group = mali_group_get_glob_group(i); + + if (mali_group_is_virtual(group)) { + virtual_group = group; + virtual_group_state = EXEC_STATE_INACTIVE; + break; + } + } + + /* Find all the available physical GP and PP cores */ + for (i = 0; i < num_groups; i++) { + struct mali_group *group = mali_group_get_glob_group(i); + + if (NULL != group) { + struct mali_pp_core *pp_core = mali_group_get_pp_core(group); + struct mali_gp_core *gp_core = mali_group_get_gp_core(group); + + if (!mali_group_is_virtual(group)) { + if (NULL != pp_core) { + if (0 == pp_version) { + /* Retrieve PP version from the first available PP core */ + pp_version = mali_pp_core_get_version(pp_core); + } + + if (NULL != virtual_group) { + mali_executor_lock(); + mali_group_add_group(virtual_group, group); + mali_executor_unlock(); + } else { + _mali_osk_list_add(&group->executor_list, &group_list_inactive); + group_list_inactive_count++; + } + + num_physical_pp_cores_total++; + } else { + MALI_DEBUG_ASSERT_POINTER(gp_core); + + if (0 == gp_version) { + /* Retrieve GP version */ + gp_version = mali_gp_core_get_version(gp_core); + } + + gp_group = group; + gp_group_state = EXEC_STATE_INACTIVE; + } + + } + } + } + + num_physical_pp_cores_enabled = num_physical_pp_cores_total; +} + +void mali_executor_depopulate(void) +{ + struct mali_group *group; + struct mali_group *temp; + + MALI_DEBUG_ASSERT(EXEC_STATE_WORKING != gp_group_state); + + if (NULL != gp_group) { + mali_group_delete(gp_group); + gp_group = NULL; + } + + MALI_DEBUG_ASSERT(EXEC_STATE_WORKING != virtual_group_state); + + if (NULL != virtual_group) { + mali_group_delete(virtual_group); + virtual_group = NULL; + } 
+ + MALI_DEBUG_ASSERT(_mali_osk_list_empty(&group_list_working)); + + _MALI_OSK_LIST_FOREACHENTRY(group, temp, &group_list_idle, struct mali_group, executor_list) { + mali_group_delete(group); + } + + _MALI_OSK_LIST_FOREACHENTRY(group, temp, &group_list_inactive, struct mali_group, executor_list) { + mali_group_delete(group); + } + + _MALI_OSK_LIST_FOREACHENTRY(group, temp, &group_list_disabled, struct mali_group, executor_list) { + mali_group_delete(group); + } +} + +void mali_executor_suspend(void) +{ + mali_executor_lock(); + + /* Increment the pause_count so that no more jobs will be scheduled */ + pause_count++; + + mali_executor_unlock(); + + _mali_osk_wait_queue_wait_event(executor_working_wait_queue, + mali_executor_is_suspended, NULL); + + /* + * mali_executor_complete_XX() leaves jobs in idle state. + * deactivate option is used when we are going to power down + * the entire GPU (OS suspend) and want a consistent SW vs HW + * state. + */ + mali_executor_lock(); + + mali_executor_deactivate_list_idle(MALI_TRUE); + + /* + * The following steps are used to deactive all of activated + * (MALI_GROUP_STATE_ACTIVE) and activating (MALI_GROUP + * _STAET_ACTIVATION_PENDING) groups, to make sure the variable + * pd_mask_wanted is equal with 0. 
*/ + if (MALI_GROUP_STATE_INACTIVE != mali_group_get_state(gp_group)) { + gp_group_state = EXEC_STATE_INACTIVE; + mali_group_deactivate(gp_group); + } + + if (mali_executor_has_virtual_group()) { + if (MALI_GROUP_STATE_INACTIVE + != mali_group_get_state(virtual_group)) { + virtual_group_state = EXEC_STATE_INACTIVE; + mali_group_deactivate(virtual_group); + } + } + + if (0 < group_list_inactive_count) { + struct mali_group *group; + struct mali_group *temp; + + _MALI_OSK_LIST_FOREACHENTRY(group, temp, + &group_list_inactive, + struct mali_group, executor_list) { + if (MALI_GROUP_STATE_ACTIVATION_PENDING + == mali_group_get_state(group)) { + mali_group_deactivate(group); + } + + /* + * On mali-450 platform, we may have physical group in the group inactive + * list, and its state is MALI_GROUP_STATE_ACTIVATION_PENDING, so we only + * deactivate it is not enough, we still also need add it back to virtual group. + * And now, virtual group must be in INACTIVE state, so it's safe to add + * physical group to virtual group at this point. 
+ */ + if (NULL != virtual_group) { + _mali_osk_list_delinit(&group->executor_list); + group_list_inactive_count--; + + mali_group_add_group(virtual_group, group); + } + } + } + + mali_executor_unlock(); +} + +void mali_executor_resume(void) +{ + mali_executor_lock(); + + /* Decrement pause_count to allow scheduling again (if it reaches 0) */ + pause_count--; + if (0 == pause_count) { + mali_executor_schedule(); + } + + mali_executor_unlock(); +} + +u32 mali_executor_get_num_cores_total(void) +{ + return num_physical_pp_cores_total; +} + +u32 mali_executor_get_num_cores_enabled(void) +{ + return num_physical_pp_cores_enabled; +} + +struct mali_pp_core *mali_executor_get_virtual_pp(void) +{ + MALI_DEBUG_ASSERT_POINTER(virtual_group); + MALI_DEBUG_ASSERT_POINTER(virtual_group->pp_core); + return virtual_group->pp_core; +} + +struct mali_group *mali_executor_get_virtual_group(void) +{ + return virtual_group; +} + +void mali_executor_zap_all_active(struct mali_session_data *session) +{ + struct mali_group *group; + struct mali_group *temp; + mali_bool ret; + + mali_executor_lock(); + + /* + * This function is a bit complicated because + * mali_group_zap_session() can fail. This only happens because the + * group is in an unhandled page fault status. + * We need to make sure this page fault is handled before we return, + * so that we know every single outstanding MMU transactions have + * completed. This will allow caller to safely remove physical pages + * when we have returned. 
+ */ + + MALI_DEBUG_ASSERT(NULL != gp_group); + ret = mali_group_zap_session(gp_group, session); + if (MALI_FALSE == ret) { + struct mali_gp_job *gp_job = NULL; + + mali_executor_complete_group(gp_group, MALI_FALSE, &gp_job, NULL); + + MALI_DEBUG_ASSERT_POINTER(gp_job); + + /* GP job completed, make sure it is freed */ + mali_scheduler_complete_gp_job(gp_job, MALI_FALSE, + MALI_TRUE, MALI_TRUE); + } + + if (mali_executor_has_virtual_group()) { + ret = mali_group_zap_session(virtual_group, session); + if (MALI_FALSE == ret) { + struct mali_pp_job *pp_job = NULL; + + mali_executor_complete_group(virtual_group, MALI_FALSE, NULL, &pp_job); + + if (NULL != pp_job) { + /* PP job completed, make sure it is freed */ + mali_scheduler_complete_pp_job(pp_job, 0, + MALI_FALSE, MALI_TRUE); + } + } + } + + _MALI_OSK_LIST_FOREACHENTRY(group, temp, &group_list_working, + struct mali_group, executor_list) { + ret = mali_group_zap_session(group, session); + if (MALI_FALSE == ret) { + ret = mali_group_zap_session(group, session); + if (MALI_FALSE == ret) { + struct mali_pp_job *pp_job = NULL; + + mali_executor_complete_group(group, MALI_FALSE, NULL, &pp_job); + + if (NULL != pp_job) { + /* PP job completed, free it */ + mali_scheduler_complete_pp_job(pp_job, + 0, MALI_FALSE, + MALI_TRUE); + } + } + } + } + + mali_executor_unlock(); +} + +void mali_executor_schedule_from_mask(mali_scheduler_mask mask, mali_bool deferred_schedule) +{ + if (MALI_SCHEDULER_MASK_EMPTY != mask) { + if (MALI_TRUE == deferred_schedule) { + _mali_osk_wq_schedule_work_high_pri(executor_wq_high_pri); + } else { + /* Schedule from this thread*/ + mali_executor_lock(); + mali_executor_schedule(); + mali_executor_unlock(); + } + } +} + +_mali_osk_errcode_t mali_executor_interrupt_gp(struct mali_group *group, + mali_bool in_upper_half) +{ + enum mali_interrupt_result int_result; + mali_bool time_out = MALI_FALSE; + + MALI_DEBUG_PRINT(4, ("Executor: GP interrupt from %s in %s half\n", + 
mali_group_core_description(group), + in_upper_half ? "upper" : "bottom")); + + mali_executor_lock(); + if (!mali_group_is_working(group)) { + /* Not working, so nothing to do */ + mali_executor_unlock(); + return _MALI_OSK_ERR_FAULT; + } + + MALI_DEBUG_ASSERT_EXECUTOR_LOCK_HELD(); + MALI_DEBUG_ASSERT(mali_group_is_working(group)); + + if (mali_group_has_timed_out(group)) { + int_result = MALI_INTERRUPT_RESULT_ERROR; + time_out = MALI_TRUE; + MALI_PRINT(("Executor GP: Job %d Timeout on %s\n", + mali_gp_job_get_id(group->gp_running_job), + mali_group_core_description(group))); + } else { + int_result = mali_group_get_interrupt_result_gp(group); + if (MALI_INTERRUPT_RESULT_NONE == int_result) { + mali_executor_unlock(); + return _MALI_OSK_ERR_FAULT; + } + } + +#if defined(CONFIG_MALI_SHARED_INTERRUPTS) + if (MALI_INTERRUPT_RESULT_NONE == int_result) { + /* No interrupts signalled, so nothing to do */ + mali_executor_unlock(); + return _MALI_OSK_ERR_FAULT; + } +#else + MALI_DEBUG_ASSERT(MALI_INTERRUPT_RESULT_NONE != int_result); +#endif + + mali_group_mask_all_interrupts_gp(group); + + if (MALI_INTERRUPT_RESULT_SUCCESS_VS == int_result) { + if (mali_group_gp_is_active(group)) { + /* Only VS completed so far, while PLBU is still active */ + + /* Enable all but the current interrupt */ + mali_group_enable_interrupts_gp(group, int_result); + + mali_executor_unlock(); + return _MALI_OSK_ERR_OK; + } + } else if (MALI_INTERRUPT_RESULT_SUCCESS_PLBU == int_result) { + if (mali_group_gp_is_active(group)) { + /* Only PLBU completed so far, while VS is still active */ + + /* Enable all but the current interrupt */ + mali_group_enable_interrupts_gp(group, int_result); + + mali_executor_unlock(); + return _MALI_OSK_ERR_OK; + } + } else if (MALI_INTERRUPT_RESULT_OOM == int_result) { + + mali_executor_unlock(); + + mali_group_schedule_oom_work_handler(group); + + return _MALI_OSK_ERR_OK; + } + + /* We should now have a real interrupt to handle */ + + MALI_DEBUG_PRINT(4, ("Executor: 
Group %s completed with %s\n", + mali_group_core_description(group), + (MALI_INTERRUPT_RESULT_ERROR == int_result) ? + "ERROR" : "success")); + + if (in_upper_half && MALI_INTERRUPT_RESULT_ERROR == int_result) { + /* Don't bother to do processing of errors in upper half */ + mali_executor_unlock(); + + if (MALI_FALSE == time_out) { + mali_group_schedule_bottom_half_gp(group); + } + } else { + struct mali_gp_job *job; + mali_bool success; + + if (MALI_TRUE == time_out) { + mali_group_dump_status(group); + } + + success = (int_result != MALI_INTERRUPT_RESULT_ERROR) ? + MALI_TRUE : MALI_FALSE; + + mali_executor_complete_group(group, success, &job, NULL); + + mali_executor_unlock(); + + /* GP jobs always fully complete */ + MALI_DEBUG_ASSERT(NULL != job); + + /* This will notify user space and close the job object */ + mali_scheduler_complete_gp_job(job, success, + MALI_TRUE, MALI_TRUE); + } + + return _MALI_OSK_ERR_OK; +} + +_mali_osk_errcode_t mali_executor_interrupt_pp(struct mali_group *group, + mali_bool in_upper_half) +{ + enum mali_interrupt_result int_result; + mali_bool time_out = MALI_FALSE; + + MALI_DEBUG_PRINT(4, ("Executor: PP interrupt from %s in %s half\n", + mali_group_core_description(group), + in_upper_half ? 
"upper" : "bottom")); + + mali_executor_lock(); + + if (!mali_group_is_working(group)) { + /* Not working, so nothing to do */ + mali_executor_unlock(); + return _MALI_OSK_ERR_FAULT; + } + + if (in_upper_half) { + if (mali_group_is_in_virtual(group)) { + /* Child groups should never handle PP interrupts */ + MALI_DEBUG_ASSERT(!mali_group_has_timed_out(group)); + mali_executor_unlock(); + return _MALI_OSK_ERR_FAULT; + } + } + MALI_DEBUG_ASSERT_EXECUTOR_LOCK_HELD(); + MALI_DEBUG_ASSERT(mali_group_is_working(group)); + MALI_DEBUG_ASSERT(!mali_group_is_in_virtual(group)); + + if (mali_group_has_timed_out(group)) { + int_result = MALI_INTERRUPT_RESULT_ERROR; + time_out = MALI_TRUE; + MALI_PRINT(("Executor PP: Job %d Timeout on %s\n", + mali_pp_job_get_id(group->pp_running_job), + mali_group_core_description(group))); + } else { + int_result = mali_group_get_interrupt_result_pp(group); + if (MALI_INTERRUPT_RESULT_NONE == int_result) { + mali_executor_unlock(); + return _MALI_OSK_ERR_FAULT; + } + } + +#if defined(CONFIG_MALI_SHARED_INTERRUPTS) + if (MALI_INTERRUPT_RESULT_NONE == int_result) { + /* No interrupts signalled, so nothing to do */ + mali_executor_unlock(); + return _MALI_OSK_ERR_FAULT; + } else if (MALI_INTERRUPT_RESULT_SUCCESS == int_result) { + if (mali_group_is_virtual(group) && mali_group_pp_is_active(group)) { + /* Some child groups are still working, so nothing to do right now */ + mali_executor_unlock(); + return _MALI_OSK_ERR_FAULT; + } + } +#else + MALI_DEBUG_ASSERT(MALI_INTERRUPT_RESULT_NONE != int_result); +#endif + + /* We should now have a real interrupt to handle */ + + MALI_DEBUG_PRINT(4, ("Executor: Group %s completed with %s\n", + mali_group_core_description(group), + (MALI_INTERRUPT_RESULT_ERROR == int_result) ? 
+ "ERROR" : "success")); + + if (in_upper_half && MALI_INTERRUPT_RESULT_ERROR == int_result) { + /* Don't bother to do processing of errors in upper half */ + mali_group_mask_all_interrupts_pp(group); + mali_executor_unlock(); + + if (MALI_FALSE == time_out) { + mali_group_schedule_bottom_half_pp(group); + } + } else { + struct mali_pp_job *job = NULL; + mali_bool success; + + if (MALI_TRUE == time_out) { + mali_group_dump_status(group); + } + + success = (int_result == MALI_INTERRUPT_RESULT_SUCCESS) ? + MALI_TRUE : MALI_FALSE; + + mali_executor_complete_group(group, success, NULL, &job); + + mali_executor_unlock(); + + if (NULL != job) { + /* Notify user space and close the job object */ + mali_scheduler_complete_pp_job(job, + num_physical_pp_cores_total, + MALI_TRUE, MALI_TRUE); + } + } + + return _MALI_OSK_ERR_OK; +} + +_mali_osk_errcode_t mali_executor_interrupt_mmu(struct mali_group *group, + mali_bool in_upper_half) +{ + enum mali_interrupt_result int_result; + + MALI_DEBUG_PRINT(4, ("Executor: MMU interrupt from %s in %s half\n", + mali_group_core_description(group), + in_upper_half ? 
"upper" : "bottom")); + + mali_executor_lock(); + if (!mali_group_is_working(group)) { + /* Not working, so nothing to do */ + mali_executor_unlock(); + return _MALI_OSK_ERR_FAULT; + } + + MALI_DEBUG_ASSERT_EXECUTOR_LOCK_HELD(); + MALI_DEBUG_ASSERT(mali_group_is_working(group)); + + int_result = mali_group_get_interrupt_result_mmu(group); + if (MALI_INTERRUPT_RESULT_NONE == int_result) { + mali_executor_unlock(); + return _MALI_OSK_ERR_FAULT; + } + +#if defined(CONFIG_MALI_SHARED_INTERRUPTS) + if (MALI_INTERRUPT_RESULT_NONE == int_result) { + /* No interrupts signalled, so nothing to do */ + mali_executor_unlock(); + return _MALI_OSK_ERR_FAULT; + } +#else + MALI_DEBUG_ASSERT(MALI_INTERRUPT_RESULT_ERROR == int_result); +#endif + + /* We should now have a real interrupt to handle */ + + if (in_upper_half) { + /* Don't bother to do processing of errors in upper half */ + + struct mali_group *parent = group->parent_group; + + mali_mmu_mask_all_interrupts(group->mmu); + + mali_executor_unlock(); + + if (NULL == parent) { + mali_group_schedule_bottom_half_mmu(group); + } else { + mali_group_schedule_bottom_half_mmu(parent); + } + + } else { + struct mali_gp_job *gp_job = NULL; + struct mali_pp_job *pp_job = NULL; + +#ifdef DEBUG + + u32 fault_address = mali_mmu_get_page_fault_addr(group->mmu); + u32 status = mali_mmu_get_status(group->mmu); + MALI_DEBUG_PRINT(2, ("Executor: Mali page fault detected at 0x%x from bus id %d of type %s on %s\n", + (void *)(uintptr_t)fault_address, + (status >> 6) & 0x1F, + (status & 32) ? 
"write" : "read", + group->mmu->hw_core.description)); + MALI_DEBUG_PRINT(3, ("Executor: MMU rawstat = 0x%08X, MMU status = 0x%08X\n", + mali_mmu_get_rawstat(group->mmu), status)); + mali_mmu_pagedir_diag(mali_session_get_page_directory(group->session), fault_address); +#endif + + mali_executor_complete_group(group, MALI_FALSE, &gp_job, &pp_job); + + mali_executor_unlock(); + + if (NULL != gp_job) { + MALI_DEBUG_ASSERT(NULL == pp_job); + + /* Notify user space and close the job object */ + mali_scheduler_complete_gp_job(gp_job, MALI_FALSE, + MALI_TRUE, MALI_TRUE); + } else if (NULL != pp_job) { + MALI_DEBUG_ASSERT(NULL == gp_job); + + /* Notify user space and close the job object */ + mali_scheduler_complete_pp_job(pp_job, + num_physical_pp_cores_total, + MALI_TRUE, MALI_TRUE); + } + } + + return _MALI_OSK_ERR_OK; +} + +void mali_executor_group_oom(struct mali_group *group) +{ + struct mali_gp_job *job = NULL; + MALI_DEBUG_ASSERT_POINTER(group); + MALI_DEBUG_ASSERT_POINTER(group->gp_core); + MALI_DEBUG_ASSERT_POINTER(group->mmu); + + mali_executor_lock(); + + job = mali_group_get_running_gp_job(group); + + MALI_DEBUG_ASSERT_POINTER(job); + +#if defined(CONFIG_MALI400_PROFILING) + /* Give group a chance to generate a SUSPEND event */ + mali_group_oom(group); +#endif + + mali_gp_job_set_current_heap_addr(job, mali_gp_read_plbu_alloc_start_addr(group->gp_core)); + + mali_executor_unlock(); + + if (_MALI_OSK_ERR_OK == mali_mem_add_mem_size(job->session, job->heap_base_addr, job->heap_grow_size)) { + _mali_osk_notification_t *new_notification = NULL; + + new_notification = _mali_osk_notification_create( + _MALI_NOTIFICATION_GP_STALLED, + sizeof(_mali_uk_gp_job_suspended_s)); + + /* resume job with new heap, + * This will also re-enable interrupts + */ + mali_executor_lock(); + + mali_executor_send_gp_oom_to_user(job, job->heap_grow_size); + + if (NULL != new_notification) { + + mali_gp_job_set_oom_notification(job, new_notification); + + 
mali_group_resume_gp_with_new_heap(group, mali_gp_job_get_id(job), + job->heap_current_addr, + job->heap_current_addr + job->heap_grow_size); + } + mali_executor_unlock(); + } else { + mali_executor_lock(); + mali_executor_send_gp_oom_to_user(job, 0); + mali_executor_unlock(); + } + +} + +void mali_executor_group_power_up(struct mali_group *groups[], u32 num_groups) +{ + u32 i; + mali_bool child_groups_activated = MALI_FALSE; + mali_bool do_schedule = MALI_FALSE; +#if defined(DEBUG) + u32 num_activated = 0; +#endif + + MALI_DEBUG_ASSERT_POINTER(groups); + MALI_DEBUG_ASSERT(0 < num_groups); + + mali_executor_lock(); + + MALI_DEBUG_PRINT(3, ("Executor: powering up %u groups\n", num_groups)); + + for (i = 0; i < num_groups; i++) { + MALI_DEBUG_PRINT(3, ("Executor: powering up group %s\n", + mali_group_core_description(groups[i]))); + + mali_group_power_up(groups[i]); + + if ((MALI_GROUP_STATE_ACTIVATION_PENDING != mali_group_get_state(groups[i]) || + (MALI_TRUE != mali_executor_group_is_in_state(groups[i], EXEC_STATE_INACTIVE)))) { + /* nothing more to do for this group */ + continue; + } + + MALI_DEBUG_PRINT(3, ("Executor: activating group %s\n", + mali_group_core_description(groups[i]))); + +#if defined(DEBUG) + num_activated++; +#endif + + if (mali_group_is_in_virtual(groups[i])) { + /* + * At least one child group of virtual group is powered on. + */ + child_groups_activated = MALI_TRUE; + } else if (MALI_FALSE == mali_group_is_virtual(groups[i])) { + /* Set gp and pp not in virtual to active. 
*/ + mali_group_set_active(groups[i]); + } + + /* Move group from inactive to idle list */ + if (groups[i] == gp_group) { + MALI_DEBUG_ASSERT(EXEC_STATE_INACTIVE == + gp_group_state); + gp_group_state = EXEC_STATE_IDLE; + } else if (MALI_FALSE == mali_group_is_in_virtual(groups[i]) + && MALI_FALSE == mali_group_is_virtual(groups[i])) { + MALI_DEBUG_ASSERT(MALI_TRUE == mali_executor_group_is_in_state(groups[i], + EXEC_STATE_INACTIVE)); + + mali_executor_change_state_pp_physical(groups[i], + &group_list_inactive, + &group_list_inactive_count, + &group_list_idle, + &group_list_idle_count); + } + + do_schedule = MALI_TRUE; + } + + if (mali_executor_has_virtual_group() && + MALI_TRUE == child_groups_activated && + MALI_GROUP_STATE_ACTIVATION_PENDING == + mali_group_get_state(virtual_group)) { + /* + * Try to active virtual group while it may be not sucessful every time, + * because there is one situation that not all of child groups are powered on + * in one time and virtual group is in activation pending state. 
+ */ + if (mali_group_set_active(virtual_group)) { + /* Move group from inactive to idle */ + MALI_DEBUG_ASSERT(EXEC_STATE_INACTIVE == + virtual_group_state); + virtual_group_state = EXEC_STATE_IDLE; + + MALI_DEBUG_PRINT(3, ("Executor: powering up %u groups completed, %u physical activated, 1 virtual activated.\n", num_groups, num_activated)); + } else { + MALI_DEBUG_PRINT(3, ("Executor: powering up %u groups completed, %u physical activated\n", num_groups, num_activated)); + } + } else { + MALI_DEBUG_PRINT(3, ("Executor: powering up %u groups completed, %u physical activated\n", num_groups, num_activated)); + } + + if (MALI_TRUE == do_schedule) { + /* Trigger a schedule */ + mali_executor_schedule(); + } + + mali_executor_unlock(); +} + +void mali_executor_group_power_down(struct mali_group *groups[], + u32 num_groups) +{ + u32 i; + + MALI_DEBUG_ASSERT_POINTER(groups); + MALI_DEBUG_ASSERT(0 < num_groups); + + mali_executor_lock(); + + MALI_DEBUG_PRINT(3, ("Executor: powering down %u groups\n", num_groups)); + + for (i = 0; i < num_groups; i++) { + /* Groups must be either disabled or inactive. while for virtual group, + * it maybe in empty state, because when we meet pm_runtime_suspend, + * virtual group could be powered off, and before we acquire mali_executor_lock, + * we must release mali_pm_state_lock, if there is a new physical job was queued, + * all of physical groups in virtual group could be pulled out, so we only can + * powered down an empty virtual group. Those physical groups will be powered + * up in following pm_runtime_resume callback function. 
+ */ + MALI_DEBUG_ASSERT(mali_executor_group_is_in_state(groups[i], + EXEC_STATE_DISABLED) || + mali_executor_group_is_in_state(groups[i], + EXEC_STATE_INACTIVE) || + mali_executor_group_is_in_state(groups[i], + EXEC_STATE_EMPTY)); + + MALI_DEBUG_PRINT(3, ("Executor: powering down group %s\n", + mali_group_core_description(groups[i]))); + + mali_group_power_down(groups[i]); + } + + MALI_DEBUG_PRINT(3, ("Executor: powering down %u groups completed\n", num_groups)); + + mali_executor_unlock(); +} + +void mali_executor_abort_session(struct mali_session_data *session) +{ + struct mali_group *group; + struct mali_group *tmp_group; + + MALI_DEBUG_ASSERT_POINTER(session); + MALI_DEBUG_ASSERT(session->is_aborting); + + MALI_DEBUG_PRINT(3, + ("Executor: Aborting all jobs from session 0x%08X.\n", + session)); + + mali_executor_lock(); + + if (mali_group_get_session(gp_group) == session) { + if (EXEC_STATE_WORKING == gp_group_state) { + struct mali_gp_job *gp_job = NULL; + + mali_executor_complete_group(gp_group, MALI_FALSE, &gp_job, NULL); + + MALI_DEBUG_ASSERT_POINTER(gp_job); + + /* GP job completed, make sure it is freed */ + mali_scheduler_complete_gp_job(gp_job, MALI_FALSE, + MALI_FALSE, MALI_TRUE); + } else { + /* Same session, but not working, so just clear it */ + mali_group_clear_session(gp_group); + } + } + + if (mali_executor_has_virtual_group()) { + if (EXEC_STATE_WORKING == virtual_group_state + && mali_group_get_session(virtual_group) == session) { + struct mali_pp_job *pp_job = NULL; + + mali_executor_complete_group(virtual_group, MALI_FALSE, NULL, &pp_job); + + if (NULL != pp_job) { + /* PP job completed, make sure it is freed */ + mali_scheduler_complete_pp_job(pp_job, 0, + MALI_FALSE, MALI_TRUE); + } + } + } + + _MALI_OSK_LIST_FOREACHENTRY(group, tmp_group, &group_list_working, + struct mali_group, executor_list) { + if (mali_group_get_session(group) == session) { + struct mali_pp_job *pp_job = NULL; + + mali_executor_complete_group(group, MALI_FALSE, NULL, 
&pp_job); + + if (NULL != pp_job) { + /* PP job completed, make sure it is freed */ + mali_scheduler_complete_pp_job(pp_job, 0, + MALI_FALSE, MALI_TRUE); + } + } + } + + _MALI_OSK_LIST_FOREACHENTRY(group, tmp_group, &group_list_idle, struct mali_group, executor_list) { + mali_group_clear_session(group); + } + + _MALI_OSK_LIST_FOREACHENTRY(group, tmp_group, &group_list_inactive, struct mali_group, executor_list) { + mali_group_clear_session(group); + } + + _MALI_OSK_LIST_FOREACHENTRY(group, tmp_group, &group_list_disabled, struct mali_group, executor_list) { + mali_group_clear_session(group); + } + + mali_executor_unlock(); +} + + +void mali_executor_core_scaling_enable(void) +{ + /* PS: Core scaling is by default enabled */ + core_scaling_enabled = MALI_TRUE; +} + +void mali_executor_core_scaling_disable(void) +{ + core_scaling_enabled = MALI_FALSE; +} + +mali_bool mali_executor_core_scaling_is_enabled(void) +{ + return core_scaling_enabled; +} + +void mali_executor_group_enable(struct mali_group *group) +{ + MALI_DEBUG_ASSERT_POINTER(group); + + mali_executor_lock(); + + if ((NULL != mali_group_get_gp_core(group) || NULL != mali_group_get_pp_core(group)) + && (mali_executor_group_is_in_state(group, EXEC_STATE_DISABLED))) { + mali_executor_group_enable_internal(group); + } + + mali_executor_schedule(); + mali_executor_unlock(); + + _mali_osk_wq_schedule_work(executor_wq_notify_core_change); +} + +/* + * If a physical group is inactive or idle, we should disable it immediately, + * if group is in virtual, and virtual group is idle, disable given physical group in it. 
+ */
+void mali_executor_group_disable(struct mali_group *group)
+{
+ MALI_DEBUG_ASSERT_POINTER(group);
+
+ mali_executor_lock();
+
+ if ((NULL != mali_group_get_gp_core(group) || NULL != mali_group_get_pp_core(group))
+ && (!mali_executor_group_is_in_state(group, EXEC_STATE_DISABLED))) {
+ mali_executor_group_disable_internal(group);
+ }
+
+ mali_executor_schedule();
+ mali_executor_unlock();
+
+ /* Defer the user-space "number of cores changed" notification to the workqueue */
+ _mali_osk_wq_schedule_work(executor_wq_notify_core_change);
+}
+
+mali_bool mali_executor_group_is_disabled(struct mali_group *group)
+{
+ /* NB: This function is not optimized for time critical usage */
+
+ mali_bool ret;
+
+ MALI_DEBUG_ASSERT_POINTER(group);
+
+ mali_executor_lock();
+ ret = mali_executor_group_is_in_state(group, EXEC_STATE_DISABLED);
+ mali_executor_unlock();
+
+ return ret;
+}
+
+/*
+ * Request that exactly target_core_nr physical PP cores be enabled.
+ * Returns 0 on success (or if already at the requested count),
+ * -EPERM if core scaling is disabled and override is not set,
+ * -EINVAL for zero cores or more cores than physically present.
+ */
+int mali_executor_set_perf_level(unsigned int target_core_nr, mali_bool override)
+{
+ if (target_core_nr == num_physical_pp_cores_enabled) return 0;
+ if (MALI_FALSE == core_scaling_enabled && MALI_FALSE == override) return -EPERM;
+ if (target_core_nr > num_physical_pp_cores_total) return -EINVAL;
+ if (0 == target_core_nr) return -EINVAL;
+
+ mali_executor_core_scale(target_core_nr);
+
+ _mali_osk_wq_schedule_work(executor_wq_notify_core_change);
+
+ return 0;
+}
+
+#if MALI_STATE_TRACKING
+/*
+ * Append a human-readable dump of the executor state (GP group, physical
+ * PP group lists, virtual group) to buf, writing at most size characters;
+ * returns the number of characters appended.
+ */
+u32 mali_executor_dump_state(char *buf, u32 size)
+{
+ int n = 0;
+ struct mali_group *group;
+ struct mali_group *temp;
+
+ mali_executor_lock();
+
+ switch (gp_group_state) {
+ case EXEC_STATE_INACTIVE:
+ n += _mali_osk_snprintf(buf + n, size - n,
+ "GP group is in state INACTIVE\n");
+ break;
+ case EXEC_STATE_IDLE:
+ n += _mali_osk_snprintf(buf + n, size - n,
+ "GP group is in state IDLE\n");
+ break;
+ case EXEC_STATE_WORKING:
+ n += _mali_osk_snprintf(buf + n, size - n,
+ "GP group is in state WORKING\n");
+ break;
+ default:
+ n += _mali_osk_snprintf(buf + n, size - n,
+ "GP group is in unknown/illegal state %u\n",
+ gp_group_state);
+ break;
+ }
+
+ n +=
mali_group_dump_state(gp_group, buf + n, size - n); + + n += _mali_osk_snprintf(buf + n, size - n, + "Physical PP groups in WORKING state (count = %u):\n", + group_list_working_count); + + _MALI_OSK_LIST_FOREACHENTRY(group, temp, &group_list_working, struct mali_group, executor_list) { + n += mali_group_dump_state(group, buf + n, size - n); + } + + n += _mali_osk_snprintf(buf + n, size - n, + "Physical PP groups in IDLE state (count = %u):\n", + group_list_idle_count); + + _MALI_OSK_LIST_FOREACHENTRY(group, temp, &group_list_idle, struct mali_group, executor_list) { + n += mali_group_dump_state(group, buf + n, size - n); + } + + n += _mali_osk_snprintf(buf + n, size - n, + "Physical PP groups in INACTIVE state (count = %u):\n", + group_list_inactive_count); + + _MALI_OSK_LIST_FOREACHENTRY(group, temp, &group_list_inactive, struct mali_group, executor_list) { + n += mali_group_dump_state(group, buf + n, size - n); + } + + n += _mali_osk_snprintf(buf + n, size - n, + "Physical PP groups in DISABLED state (count = %u):\n", + group_list_disabled_count); + + _MALI_OSK_LIST_FOREACHENTRY(group, temp, &group_list_disabled, struct mali_group, executor_list) { + n += mali_group_dump_state(group, buf + n, size - n); + } + + if (mali_executor_has_virtual_group()) { + switch (virtual_group_state) { + case EXEC_STATE_EMPTY: + n += _mali_osk_snprintf(buf + n, size - n, + "Virtual PP group is in state EMPTY\n"); + break; + case EXEC_STATE_INACTIVE: + n += _mali_osk_snprintf(buf + n, size - n, + "Virtual PP group is in state INACTIVE\n"); + break; + case EXEC_STATE_IDLE: + n += _mali_osk_snprintf(buf + n, size - n, + "Virtual PP group is in state IDLE\n"); + break; + case EXEC_STATE_WORKING: + n += _mali_osk_snprintf(buf + n, size - n, + "Virtual PP group is in state WORKING\n"); + break; + default: + n += _mali_osk_snprintf(buf + n, size - n, + "Virtual PP group is in unknown/illegal state %u\n", + virtual_group_state); + break; + } + + n += mali_group_dump_state(virtual_group, 
buf + n, size - n); + } + + mali_executor_unlock(); + + n += _mali_osk_snprintf(buf + n, size - n, "\n"); + + return n; +} +#endif + +_mali_osk_errcode_t _mali_ukk_get_pp_number_of_cores(_mali_uk_get_pp_number_of_cores_s *args) +{ + MALI_DEBUG_ASSERT_POINTER(args); + MALI_DEBUG_ASSERT(NULL != (void *)(uintptr_t)args->ctx); + args->number_of_total_cores = num_physical_pp_cores_total; + args->number_of_enabled_cores = num_physical_pp_cores_enabled; + return _MALI_OSK_ERR_OK; +} + +_mali_osk_errcode_t _mali_ukk_get_pp_core_version(_mali_uk_get_pp_core_version_s *args) +{ + MALI_DEBUG_ASSERT_POINTER(args); + MALI_DEBUG_ASSERT(NULL != (void *)(uintptr_t)args->ctx); + args->version = pp_version; + return _MALI_OSK_ERR_OK; +} + +_mali_osk_errcode_t _mali_ukk_get_gp_number_of_cores(_mali_uk_get_gp_number_of_cores_s *args) +{ + MALI_DEBUG_ASSERT_POINTER(args); + MALI_DEBUG_ASSERT(NULL != (void *)(uintptr_t)args->ctx); + args->number_of_cores = 1; + return _MALI_OSK_ERR_OK; +} + +_mali_osk_errcode_t _mali_ukk_get_gp_core_version(_mali_uk_get_gp_core_version_s *args) +{ + MALI_DEBUG_ASSERT_POINTER(args); + MALI_DEBUG_ASSERT(NULL != (void *)(uintptr_t)args->ctx); + args->version = gp_version; + return _MALI_OSK_ERR_OK; +} + +_mali_osk_errcode_t _mali_ukk_gp_suspend_response(_mali_uk_gp_suspend_response_s *args) +{ + struct mali_session_data *session; + struct mali_gp_job *job; + + MALI_DEBUG_ASSERT_POINTER(args); + MALI_DEBUG_ASSERT(NULL != (void *)(uintptr_t)args->ctx); + + session = (struct mali_session_data *)(uintptr_t)args->ctx; + + if (_MALIGP_JOB_RESUME_WITH_NEW_HEAP == args->code) { + _mali_osk_notification_t *new_notification = NULL; + + new_notification = _mali_osk_notification_create( + _MALI_NOTIFICATION_GP_STALLED, + sizeof(_mali_uk_gp_job_suspended_s)); + + if (NULL != new_notification) { + MALI_DEBUG_PRINT(3, ("Executor: Resuming job %u with new heap; 0x%08X - 0x%08X\n", + args->cookie, args->arguments[0], args->arguments[1])); + + mali_executor_lock(); + + /* 
Resume the job in question if it is still running */ + job = mali_group_get_running_gp_job(gp_group); + if (NULL != job && + args->cookie == mali_gp_job_get_id(job) && + session == mali_gp_job_get_session(job)) { + /* + * Correct job is running, resume with new heap + */ + + mali_gp_job_set_oom_notification(job, + new_notification); + + /* This will also re-enable interrupts */ + mali_group_resume_gp_with_new_heap(gp_group, + args->cookie, + args->arguments[0], + args->arguments[1]); + + job->heap_base_addr = args->arguments[0]; + job->heap_current_addr = args->arguments[0]; + + mali_executor_unlock(); + return _MALI_OSK_ERR_OK; + } else { + MALI_PRINT_ERROR(("Executor: Unable to resume, GP job no longer running.\n")); + + _mali_osk_notification_delete(new_notification); + + mali_executor_unlock(); + return _MALI_OSK_ERR_FAULT; + } + } else { + MALI_PRINT_ERROR(("Executor: Failed to allocate notification object. Will abort GP job.\n")); + } + } else { + MALI_DEBUG_PRINT(2, ("Executor: Aborting job %u, no new heap provided\n", args->cookie)); + } + + mali_executor_lock(); + + /* Abort the job in question if it is still running */ + job = mali_group_get_running_gp_job(gp_group); + if (NULL != job && + args->cookie == mali_gp_job_get_id(job) && + session == mali_gp_job_get_session(job)) { + /* Correct job is still running */ + struct mali_gp_job *job_done = NULL; + + mali_executor_complete_group(gp_group, MALI_FALSE, &job_done, NULL); + + /* The same job should have completed */ + MALI_DEBUG_ASSERT(job_done == job); + + /* GP job completed, make sure it is freed */ + mali_scheduler_complete_gp_job(job_done, MALI_FALSE, + MALI_TRUE, MALI_TRUE); + } + + mali_executor_unlock(); + return _MALI_OSK_ERR_FAULT; +} + + +/* + * ---------- Implementation of static functions ---------- + */ + +void mali_executor_lock(void) +{ + _mali_osk_spinlock_irq_lock(mali_executor_lock_obj); + MALI_DEBUG_PRINT(5, ("Executor: lock taken\n")); +} + +void mali_executor_unlock(void) +{ + 
MALI_DEBUG_PRINT(5, ("Executor: Releasing lock\n")); + _mali_osk_spinlock_irq_unlock(mali_executor_lock_obj); +} + +static mali_bool mali_executor_is_suspended(void *data) +{ + mali_bool ret; + + /* This callback does not use the data pointer. */ + MALI_IGNORE(data); + + mali_executor_lock(); + + ret = pause_count > 0 && !mali_executor_is_working(); + + mali_executor_unlock(); + + return ret; +} + +static mali_bool mali_executor_is_working() +{ + MALI_DEBUG_ASSERT_EXECUTOR_LOCK_HELD(); + + return (0 != group_list_working_count || + EXEC_STATE_WORKING == gp_group_state || + EXEC_STATE_WORKING == virtual_group_state); +} + +static void mali_executor_disable_empty_virtual(void) +{ + MALI_DEBUG_ASSERT_EXECUTOR_LOCK_HELD(); + MALI_DEBUG_ASSERT(virtual_group_state != EXEC_STATE_EMPTY); + MALI_DEBUG_ASSERT(virtual_group_state != EXEC_STATE_WORKING); + + if (mali_group_is_empty(virtual_group)) { + virtual_group_state = EXEC_STATE_EMPTY; + } +} + +static mali_bool mali_executor_physical_rejoin_virtual(struct mali_group *group) +{ + mali_bool trigger_pm_update = MALI_FALSE; + + MALI_DEBUG_ASSERT_POINTER(group); + /* Only rejoining after job has completed (still active) */ + MALI_DEBUG_ASSERT(MALI_GROUP_STATE_ACTIVE == + mali_group_get_state(group)); + MALI_DEBUG_ASSERT_EXECUTOR_LOCK_HELD(); + MALI_DEBUG_ASSERT(MALI_TRUE == mali_executor_has_virtual_group()); + MALI_DEBUG_ASSERT(MALI_FALSE == mali_group_is_virtual(group)); + + /* Make sure group and virtual group have same status */ + + if (MALI_GROUP_STATE_INACTIVE == mali_group_get_state(virtual_group)) { + if (mali_group_deactivate(group)) { + trigger_pm_update = MALI_TRUE; + } + + if (virtual_group_state == EXEC_STATE_EMPTY) { + virtual_group_state = EXEC_STATE_INACTIVE; + } + } else if (MALI_GROUP_STATE_ACTIVATION_PENDING == + mali_group_get_state(virtual_group)) { + /* + * Activation is pending for virtual group, leave + * this child group as active. 
+ */
+ if (virtual_group_state == EXEC_STATE_EMPTY) {
+ virtual_group_state = EXEC_STATE_INACTIVE;
+ }
+ } else {
+ MALI_DEBUG_ASSERT(MALI_GROUP_STATE_ACTIVE ==
+ mali_group_get_state(virtual_group));
+
+ if (virtual_group_state == EXEC_STATE_EMPTY) {
+ virtual_group_state = EXEC_STATE_IDLE;
+ }
+ }
+
+ /* Remove group from idle list */
+ MALI_DEBUG_ASSERT(mali_executor_group_is_in_state(group,
+ EXEC_STATE_IDLE));
+ _mali_osk_list_delinit(&group->executor_list);
+ group_list_idle_count--;
+
+ /*
+ * And finally rejoin the virtual group
+ * group will start working on same job as virtual_group,
+ * if virtual_group is working on a job
+ */
+ mali_group_add_group(virtual_group, group);
+
+ return trigger_pm_update;
+}
+
+/* A virtual PP group only exists on Mali-450/470 builds. */
+static mali_bool mali_executor_has_virtual_group(void)
+{
+#if (defined(CONFIG_MALI450) || defined(CONFIG_MALI470))
+ return (NULL != virtual_group) ? MALI_TRUE : MALI_FALSE;
+#else
+ return MALI_FALSE;
+#endif /* (defined(CONFIG_MALI450) || defined(CONFIG_MALI470)) */
+}
+
+/*
+ * TRUE when the virtual group can accept work or have child groups
+ * stolen from it: it is INACTIVE or IDLE and not activation pending.
+ * Executor lock must be held.
+ */
+static mali_bool mali_executor_virtual_group_is_usable(void)
+{
+#if (defined(CONFIG_MALI450) || defined(CONFIG_MALI470))
+ MALI_DEBUG_ASSERT_EXECUTOR_LOCK_HELD();
+ return ((EXEC_STATE_INACTIVE == virtual_group_state ||
+ EXEC_STATE_IDLE == virtual_group_state) && (virtual_group->state != MALI_GROUP_STATE_ACTIVATION_PENDING)) ?
+ MALI_TRUE : MALI_FALSE;
+#else
+ return MALI_FALSE;
+#endif /* (defined(CONFIG_MALI450) || defined(CONFIG_MALI470)) */
+}
+
+/*
+ * GP-bound heuristic (Mali-400 only): TRUE when the next queued physical
+ * PP job is large and not yet started while other groups are still
+ * working, so starting it now should be deferred. Executor lock must
+ * be held.
+ */
+static mali_bool mali_executor_tackle_gp_bound(void)
+{
+ struct mali_pp_job *job;
+
+ MALI_DEBUG_ASSERT_EXECUTOR_LOCK_HELD();
+
+ job = mali_scheduler_job_pp_physical_peek();
+
+ if (NULL != job && MALI_TRUE == mali_is_mali400()) {
+ if (0 < group_list_working_count &&
+ mali_pp_job_is_large_and_unstarted(job)) {
+ return MALI_TRUE;
+ }
+ }
+
+ return MALI_FALSE;
+}
+
+/*
+ * This is where jobs are actually started.
+ */ +static void mali_executor_schedule(void) +{ + u32 i; + u32 num_physical_needed = 0; + u32 num_physical_to_process = 0; + mali_bool trigger_pm_update = MALI_FALSE; + mali_bool deactivate_idle_group = MALI_TRUE; + + /* Physical groups + jobs to start in this function */ + struct mali_group *groups_to_start[MALI_MAX_NUMBER_OF_PHYSICAL_PP_GROUPS]; + struct mali_pp_job *jobs_to_start[MALI_MAX_NUMBER_OF_PHYSICAL_PP_GROUPS]; + u32 sub_jobs_to_start[MALI_MAX_NUMBER_OF_PHYSICAL_PP_GROUPS]; + int num_jobs_to_start = 0; + + /* Virtual job to start in this function */ + struct mali_pp_job *virtual_job_to_start = NULL; + + /* GP job to start in this function */ + struct mali_gp_job *gp_job_to_start = NULL; + + MALI_DEBUG_ASSERT_EXECUTOR_LOCK_HELD(); + + if (pause_count > 0) { + /* Execution is suspended, don't schedule any jobs. */ + return; + } + + /* Lock needed in order to safely handle the job queues */ + mali_scheduler_lock(); + + /* 1. Activate gp firstly if have gp job queued. */ + if (EXEC_STATE_INACTIVE == gp_group_state && + 0 < mali_scheduler_job_gp_count()) { + + enum mali_group_state state = + mali_group_activate(gp_group); + if (MALI_GROUP_STATE_ACTIVE == state) { + /* Set GP group state to idle */ + gp_group_state = EXEC_STATE_IDLE; + } else { + trigger_pm_update = MALI_TRUE; + } + } + + /* 2. Prepare as many physical groups as needed/possible */ + + num_physical_needed = mali_scheduler_job_physical_head_count(); + + /* On mali-450 platform, we don't need to enter in this block frequently. */ + if (0 < num_physical_needed) { + + if (num_physical_needed <= group_list_idle_count) { + /* We have enough groups on idle list already */ + num_physical_to_process = num_physical_needed; + num_physical_needed = 0; + } else { + /* We need to get a hold of some more groups */ + num_physical_to_process = group_list_idle_count; + num_physical_needed -= group_list_idle_count; + } + + if (0 < num_physical_needed) { + + /* 2.1. 
Activate groups which are inactive */ + + struct mali_group *group; + struct mali_group *temp; + + _MALI_OSK_LIST_FOREACHENTRY(group, temp, &group_list_inactive, + struct mali_group, executor_list) { + enum mali_group_state state = + mali_group_activate(group); + if (MALI_GROUP_STATE_ACTIVE == state) { + /* Move from inactive to idle */ + mali_executor_change_state_pp_physical(group, + &group_list_inactive, + &group_list_inactive_count, + &group_list_idle, + &group_list_idle_count); + num_physical_to_process++; + } else { + trigger_pm_update = MALI_TRUE; + } + + num_physical_needed--; + if (0 == num_physical_needed) { + /* We have activated all the groups we need */ + break; + } + } + } + + if (mali_executor_virtual_group_is_usable()) { + + /* + * 2.2. And finally, steal and activate groups + * from virtual group if we need even more + */ + while (0 < num_physical_needed) { + struct mali_group *group; + + group = mali_group_acquire_group(virtual_group); + if (NULL != group) { + enum mali_group_state state; + + mali_executor_disable_empty_virtual(); + + state = mali_group_activate(group); + if (MALI_GROUP_STATE_ACTIVE == state) { + /* Group is ready, add to idle list */ + _mali_osk_list_add( + &group->executor_list, + &group_list_idle); + group_list_idle_count++; + num_physical_to_process++; + } else { + /* + * Group is not ready yet, + * add to inactive list + */ + _mali_osk_list_add( + &group->executor_list, + &group_list_inactive); + group_list_inactive_count++; + + trigger_pm_update = MALI_TRUE; + } + num_physical_needed--; + } else { + /* + * We could not get enough groups + * from the virtual group. + */ + break; + } + } + } + + /* 2.3. 
Assign physical jobs to groups */ + + if (0 < num_physical_to_process) { + struct mali_group *group; + struct mali_group *temp; + + _MALI_OSK_LIST_FOREACHENTRY(group, temp, &group_list_idle, + struct mali_group, executor_list) { + struct mali_pp_job *job = NULL; + u32 sub_job = MALI_MAX_NUMBER_OF_PHYSICAL_PP_GROUPS; + + MALI_DEBUG_ASSERT(num_jobs_to_start < + MALI_MAX_NUMBER_OF_PHYSICAL_PP_GROUPS); + + MALI_DEBUG_ASSERT(0 < + mali_scheduler_job_physical_head_count()); + + if (mali_executor_hint_is_enabled( + MALI_EXECUTOR_HINT_GP_BOUND)) { + if (MALI_TRUE == mali_executor_tackle_gp_bound()) { + /* + * We're gp bound, + * don't start this right now. + */ + deactivate_idle_group = MALI_FALSE; + num_physical_to_process = 0; + break; + } + } + + job = mali_scheduler_job_pp_physical_get( + &sub_job); + + MALI_DEBUG_ASSERT_POINTER(job); + MALI_DEBUG_ASSERT(sub_job <= MALI_MAX_NUMBER_OF_PHYSICAL_PP_GROUPS); + + /* Put job + group on list of jobs to start later on */ + + groups_to_start[num_jobs_to_start] = group; + jobs_to_start[num_jobs_to_start] = job; + sub_jobs_to_start[num_jobs_to_start] = sub_job; + num_jobs_to_start++; + + /* Move group from idle to working */ + mali_executor_change_state_pp_physical(group, + &group_list_idle, + &group_list_idle_count, + &group_list_working, + &group_list_working_count); + + num_physical_to_process--; + if (0 == num_physical_to_process) { + /* Got all we needed */ + break; + } + } + } + } + + + /* 3. Deactivate idle pp group , must put deactive here before active vitual group + * for cover case first only has physical job in normal queue but group inactive, + * so delay the job start go to active group, when group activated, + * call scheduler again, but now if we get high queue virtual job, + * we will do nothing in schedule cause executor schedule stop + */ + + if (MALI_TRUE == mali_executor_deactivate_list_idle(deactivate_idle_group + && (!mali_timeline_has_physical_pp_job()))) { + trigger_pm_update = MALI_TRUE; + } + + /* 4. 
Activate virtual group, if needed */ + + if (EXEC_STATE_INACTIVE == virtual_group_state && + 0 < mali_scheduler_job_next_is_virtual()) { + enum mali_group_state state = + mali_group_activate(virtual_group); + if (MALI_GROUP_STATE_ACTIVE == state) { + /* Set virtual group state to idle */ + virtual_group_state = EXEC_STATE_IDLE; + } else { + trigger_pm_update = MALI_TRUE; + } + } + + /* 5. To power up group asap, we trigger pm update here. */ + + if (MALI_TRUE == trigger_pm_update) { + trigger_pm_update = MALI_FALSE; + mali_pm_update_async(); + } + + /* 6. Assign jobs to idle virtual group (or deactivate if no job) */ + + if (EXEC_STATE_IDLE == virtual_group_state) { + if (0 < mali_scheduler_job_next_is_virtual()) { + virtual_job_to_start = + mali_scheduler_job_pp_virtual_get(); + virtual_group_state = EXEC_STATE_WORKING; + } else if (!mali_timeline_has_virtual_pp_job()) { + virtual_group_state = EXEC_STATE_INACTIVE; + + if (mali_group_deactivate(virtual_group)) { + trigger_pm_update = MALI_TRUE; + } + } + } + + /* 7. Assign job to idle GP group (or deactivate if no job) */ + + if (EXEC_STATE_IDLE == gp_group_state) { + if (0 < mali_scheduler_job_gp_count()) { + gp_job_to_start = mali_scheduler_job_gp_get(); + gp_group_state = EXEC_STATE_WORKING; + } else if (!mali_timeline_has_gp_job()) { + gp_group_state = EXEC_STATE_INACTIVE; + if (mali_group_deactivate(gp_group)) { + trigger_pm_update = MALI_TRUE; + } + } + } + + /* 8. We no longer need the schedule/queue lock */ + + mali_scheduler_unlock(); + + /* 9. 
start jobs */ + + if (NULL != virtual_job_to_start) { + MALI_DEBUG_ASSERT(!mali_group_pp_is_active(virtual_group)); + mali_group_start_pp_job(virtual_group, + virtual_job_to_start, 0); + } + + for (i = 0; i < num_jobs_to_start; i++) { + MALI_DEBUG_ASSERT(!mali_group_pp_is_active( + groups_to_start[i])); + mali_group_start_pp_job(groups_to_start[i], + jobs_to_start[i], + sub_jobs_to_start[i]); + } + + MALI_DEBUG_ASSERT_POINTER(gp_group); + + if (NULL != gp_job_to_start) { + MALI_DEBUG_ASSERT(!mali_group_gp_is_active(gp_group)); + mali_group_start_gp_job(gp_group, gp_job_to_start); + } + + /* 10. Trigger any pending PM updates */ + if (MALI_TRUE == trigger_pm_update) { + mali_pm_update_async(); + } +} + +/* Handler for deferred schedule requests */ +static void mali_executor_wq_schedule(void *arg) +{ + MALI_IGNORE(arg); + mali_executor_lock(); + mali_executor_schedule(); + mali_executor_unlock(); +} + +static void mali_executor_send_gp_oom_to_user(struct mali_gp_job *job, u32 added_size) +{ + _mali_uk_gp_job_suspended_s *jobres; + _mali_osk_notification_t *notification; + + notification = mali_gp_job_get_oom_notification(job); + + /* + * Remember the id we send to user space, so we have something to + * verify when we get a response + */ + gp_returned_cookie = mali_gp_job_get_id(job); + + jobres = (_mali_uk_gp_job_suspended_s *)notification->result_buffer; + jobres->user_job_ptr = mali_gp_job_get_user_id(job); + jobres->cookie = gp_returned_cookie; + jobres->heap_added_size = added_size; + mali_session_send_notification(mali_gp_job_get_session(job), + notification); +} +static struct mali_gp_job *mali_executor_complete_gp(struct mali_group *group, + mali_bool success) +{ + struct mali_gp_job *job; + + MALI_DEBUG_ASSERT_EXECUTOR_LOCK_HELD(); + + /* Extracts the needed HW status from core and reset */ + job = mali_group_complete_gp(group, success); + + MALI_DEBUG_ASSERT_POINTER(job); + + /* Core is now ready to go into idle list */ + gp_group_state = EXEC_STATE_IDLE; + 
+
+ /* This will potentially queue more GP and PP jobs */
+ mali_timeline_tracker_release(&job->tracker);
+
+ /* Signal PP job */
+ mali_gp_job_signal_pp_tracker(job, success);
+
+ return job;
+}
+
+/*
+ * Handle completion of one PP (sub) job on group: extract the HW status,
+ * reset the core and move the group back to the idle state; mark the
+ * sub job completed and release the timeline tracker once every sub job
+ * of the job is done. Executor lock must be held.
+ */
+static struct mali_pp_job *mali_executor_complete_pp(struct mali_group *group,
+ mali_bool success)
+{
+ struct mali_pp_job *job;
+ u32 sub_job;
+ mali_bool job_is_done;
+
+ MALI_DEBUG_ASSERT_EXECUTOR_LOCK_HELD();
+
+ /* Extracts the needed HW status from core and reset */
+ job = mali_group_complete_pp(group, success, &sub_job);
+
+ MALI_DEBUG_ASSERT_POINTER(job);
+
+ /* Core is now ready to go into idle list */
+ if (mali_group_is_virtual(group)) {
+ virtual_group_state = EXEC_STATE_IDLE;
+ } else {
+ /* Move from working to idle state */
+ mali_executor_change_state_pp_physical(group,
+ &group_list_working,
+ &group_list_working_count,
+ &group_list_idle,
+ &group_list_idle_count);
+ }
+
+ /* It is the executor module which owns the jobs themselves by now */
+ mali_pp_job_mark_sub_job_completed(job, success);
+ job_is_done = mali_pp_job_is_complete(job);
+
+ if (job_is_done) {
+ /* This will potentially queue more GP and PP jobs */
+ mali_timeline_tracker_release(&job->tracker);
+ }
+
+ return job;
+}
+
+/*
+ * Common completion entry point for a group: dispatch to the GP or PP
+ * completion path, then either wake up pause waiters, finish a pending
+ * core scale-in, or schedule new jobs. The completed job (if fully
+ * done) is returned through gp_job_done/pp_job_done.
+ */
+static void mali_executor_complete_group(struct mali_group *group,
+ mali_bool success,
+ struct mali_gp_job **gp_job_done,
+ struct mali_pp_job **pp_job_done)
+{
+ struct mali_gp_core *gp_core = mali_group_get_gp_core(group);
+ struct mali_pp_core *pp_core = mali_group_get_pp_core(group);
+ struct mali_gp_job *gp_job = NULL;
+ struct mali_pp_job *pp_job = NULL;
+ mali_bool pp_job_is_done = MALI_TRUE;
+
+ if (NULL != gp_core) {
+ gp_job = mali_executor_complete_gp(group, success);
+ } else {
+ MALI_DEBUG_ASSERT_POINTER(pp_core);
+ MALI_IGNORE(pp_core);
+ pp_job = mali_executor_complete_pp(group, success);
+
+ pp_job_is_done = mali_pp_job_is_complete(pp_job);
+ }
+
+ if (pause_count > 0) {
+ /* Execution has been suspended */
+
+ if (!mali_executor_is_working()) {
+ /*
Last job completed, wake up sleepers */ + _mali_osk_wait_queue_wake_up( + executor_working_wait_queue); + } + } else if (MALI_TRUE == mali_group_disable_requested(group)) { + mali_executor_core_scale_in_group_complete(group); + + mali_executor_schedule(); + } else { + /* try to schedule new jobs */ + mali_executor_schedule(); + } + + if (NULL != gp_job) { + MALI_DEBUG_ASSERT_POINTER(gp_job_done); + *gp_job_done = gp_job; + } else if (pp_job_is_done) { + MALI_DEBUG_ASSERT_POINTER(pp_job); + MALI_DEBUG_ASSERT_POINTER(pp_job_done); + *pp_job_done = pp_job; + } +} + +static void mali_executor_change_state_pp_physical(struct mali_group *group, + _mali_osk_list_t *old_list, + u32 *old_count, + _mali_osk_list_t *new_list, + u32 *new_count) +{ + /* + * It's a bit more complicated to change the state for the physical PP + * groups since their state is determined by the list they are on. + */ +#if defined(DEBUG) + mali_bool found = MALI_FALSE; + struct mali_group *group_iter; + struct mali_group *temp; + u32 old_counted = 0; + u32 new_counted = 0; + + MALI_DEBUG_ASSERT_POINTER(group); + MALI_DEBUG_ASSERT_POINTER(old_list); + MALI_DEBUG_ASSERT_POINTER(old_count); + MALI_DEBUG_ASSERT_POINTER(new_list); + MALI_DEBUG_ASSERT_POINTER(new_count); + + /* + * Verify that group is present on old list, + * and that the count is correct + */ + + _MALI_OSK_LIST_FOREACHENTRY(group_iter, temp, old_list, + struct mali_group, executor_list) { + old_counted++; + if (group == group_iter) { + found = MALI_TRUE; + } + } + + _MALI_OSK_LIST_FOREACHENTRY(group_iter, temp, new_list, + struct mali_group, executor_list) { + new_counted++; + } + + if (MALI_FALSE == found) { + if (old_list == &group_list_idle) { + MALI_DEBUG_PRINT(1, (" old Group list is idle,")); + } else if (old_list == &group_list_inactive) { + MALI_DEBUG_PRINT(1, (" old Group list is inactive,")); + } else if (old_list == &group_list_working) { + MALI_DEBUG_PRINT(1, (" old Group list is working,")); + } else if (old_list == 
&group_list_disabled) { + MALI_DEBUG_PRINT(1, (" old Group list is disable,")); + } + + if (MALI_TRUE == mali_executor_group_is_in_state(group, EXEC_STATE_WORKING)) { + MALI_DEBUG_PRINT(1, (" group in working \n")); + } else if (MALI_TRUE == mali_executor_group_is_in_state(group, EXEC_STATE_INACTIVE)) { + MALI_DEBUG_PRINT(1, (" group in inactive \n")); + } else if (MALI_TRUE == mali_executor_group_is_in_state(group, EXEC_STATE_IDLE)) { + MALI_DEBUG_PRINT(1, (" group in idle \n")); + } else if (MALI_TRUE == mali_executor_group_is_in_state(group, EXEC_STATE_DISABLED)) { + MALI_DEBUG_PRINT(1, (" but group in disabled \n")); + } + } + + MALI_DEBUG_ASSERT(MALI_TRUE == found); + MALI_DEBUG_ASSERT(0 < (*old_count)); + MALI_DEBUG_ASSERT((*old_count) == old_counted); + MALI_DEBUG_ASSERT((*new_count) == new_counted); +#endif + + _mali_osk_list_move(&group->executor_list, new_list); + (*old_count)--; + (*new_count)++; +} + +static void mali_executor_set_state_pp_physical(struct mali_group *group, + _mali_osk_list_t *new_list, + u32 *new_count) +{ + _mali_osk_list_add(&group->executor_list, new_list); + (*new_count)++; +} + +static mali_bool mali_executor_group_is_in_state(struct mali_group *group, + enum mali_executor_state_t state) +{ + MALI_DEBUG_ASSERT_POINTER(group); + MALI_DEBUG_ASSERT_EXECUTOR_LOCK_HELD(); + + if (gp_group == group) { + if (gp_group_state == state) { + return MALI_TRUE; + } + } else if (virtual_group == group || mali_group_is_in_virtual(group)) { + if (virtual_group_state == state) { + return MALI_TRUE; + } + } else { + /* Physical PP group */ + struct mali_group *group_iter; + struct mali_group *temp; + _mali_osk_list_t *list; + + if (EXEC_STATE_DISABLED == state) { + list = &group_list_disabled; + } else if (EXEC_STATE_INACTIVE == state) { + list = &group_list_inactive; + } else if (EXEC_STATE_IDLE == state) { + list = &group_list_idle; + } else { + MALI_DEBUG_ASSERT(EXEC_STATE_WORKING == state); + list = &group_list_working; + } + + 
_MALI_OSK_LIST_FOREACHENTRY(group_iter, temp, list, + struct mali_group, executor_list) { + if (group_iter == group) { + return MALI_TRUE; + } + } + } + + /* group not in correct state */ + return MALI_FALSE; +} + +static void mali_executor_group_enable_internal(struct mali_group *group) +{ + MALI_DEBUG_ASSERT(group); + MALI_DEBUG_ASSERT_EXECUTOR_LOCK_HELD(); + MALI_DEBUG_ASSERT(mali_executor_group_is_in_state(group, EXEC_STATE_DISABLED)); + + /* Put into inactive state (== "lowest" enabled state) */ + if (group == gp_group) { + MALI_DEBUG_ASSERT(EXEC_STATE_DISABLED == gp_group_state); + gp_group_state = EXEC_STATE_INACTIVE; + } else { + mali_executor_change_state_pp_physical(group, + &group_list_disabled, + &group_list_disabled_count, + &group_list_inactive, + &group_list_inactive_count); + + ++num_physical_pp_cores_enabled; + MALI_DEBUG_PRINT(4, ("Enabling group id %d \n", group->pp_core->core_id)); + } + + if (MALI_GROUP_STATE_ACTIVE == mali_group_activate(group)) { + MALI_DEBUG_ASSERT(MALI_TRUE == mali_group_power_is_on(group)); + + /* Move from inactive to idle */ + if (group == gp_group) { + gp_group_state = EXEC_STATE_IDLE; + } else { + mali_executor_change_state_pp_physical(group, + &group_list_inactive, + &group_list_inactive_count, + &group_list_idle, + &group_list_idle_count); + + if (mali_executor_has_virtual_group()) { + if (mali_executor_physical_rejoin_virtual(group)) { + mali_pm_update_async(); + } + } + } + } else { + mali_pm_update_async(); + } +} + +static void mali_executor_group_disable_internal(struct mali_group *group) +{ + mali_bool working; + + MALI_DEBUG_ASSERT_POINTER(group); + MALI_DEBUG_ASSERT_EXECUTOR_LOCK_HELD(); + MALI_DEBUG_ASSERT(!mali_executor_group_is_in_state(group, EXEC_STATE_DISABLED)); + + working = mali_executor_group_is_in_state(group, EXEC_STATE_WORKING); + if (MALI_TRUE == working) { + /** Group to be disabled once it completes current work, + * when virtual group completes, also check child groups for this flag */ + 
mali_group_set_disable_request(group, MALI_TRUE); + return; + } + + /* Put into disabled state */ + if (group == gp_group) { + /* GP group */ + MALI_DEBUG_ASSERT(EXEC_STATE_WORKING != gp_group_state); + gp_group_state = EXEC_STATE_DISABLED; + } else { + if (mali_group_is_in_virtual(group)) { + /* A child group of virtual group. move the specific group from virtual group */ + MALI_DEBUG_ASSERT(EXEC_STATE_WORKING != virtual_group_state); + + mali_executor_set_state_pp_physical(group, + &group_list_disabled, + &group_list_disabled_count); + + mali_group_remove_group(virtual_group, group); + mali_executor_disable_empty_virtual(); + } else { + mali_executor_change_group_status_disabled(group); + } + + --num_physical_pp_cores_enabled; + MALI_DEBUG_PRINT(4, ("Disabling group id %d \n", group->pp_core->core_id)); + } + + if (MALI_GROUP_STATE_INACTIVE != group->state) { + if (MALI_TRUE == mali_group_deactivate(group)) { + mali_pm_update_async(); + } + } +} + +static void mali_executor_notify_core_change(u32 num_cores) +{ + mali_bool done = MALI_FALSE; + + if (mali_is_mali450() || mali_is_mali470()) { + return; + } + + /* + * This function gets a bit complicated because we can't hold the session lock while + * allocating notification objects. 
+ */ + while (!done) { + u32 i; + u32 num_sessions_alloc; + u32 num_sessions_with_lock; + u32 used_notification_objects = 0; + _mali_osk_notification_t **notobjs; + + /* Pre allocate the number of notifications objects we need right now (might change after lock has been taken) */ + num_sessions_alloc = mali_session_get_count(); + if (0 == num_sessions_alloc) { + /* No sessions to report to */ + return; + } + + notobjs = (_mali_osk_notification_t **)_mali_osk_malloc(sizeof(_mali_osk_notification_t *) * num_sessions_alloc); + if (NULL == notobjs) { + MALI_PRINT_ERROR(("Failed to notify user space session about num PP core change (alloc failure)\n")); + /* there is probably no point in trying again, system must be really low on memory and probably unusable now anyway */ + return; + } + + for (i = 0; i < num_sessions_alloc; i++) { + notobjs[i] = _mali_osk_notification_create(_MALI_NOTIFICATION_PP_NUM_CORE_CHANGE, sizeof(_mali_uk_pp_num_cores_changed_s)); + if (NULL != notobjs[i]) { + _mali_uk_pp_num_cores_changed_s *data = notobjs[i]->result_buffer; + data->number_of_enabled_cores = num_cores; + } else { + MALI_PRINT_ERROR(("Failed to notify user space session about num PP core change (alloc failure %u)\n", i)); + } + } + + mali_session_lock(); + + /* number of sessions will not change while we hold the lock */ + num_sessions_with_lock = mali_session_get_count(); + + if (num_sessions_alloc >= num_sessions_with_lock) { + /* We have allocated enough notification objects for all the sessions atm */ + struct mali_session_data *session, *tmp; + MALI_SESSION_FOREACH(session, tmp, link) { + MALI_DEBUG_ASSERT(used_notification_objects < num_sessions_alloc); + if (NULL != notobjs[used_notification_objects]) { + mali_session_send_notification(session, notobjs[used_notification_objects]); + notobjs[used_notification_objects] = NULL; /* Don't track this notification object any more */ + } + used_notification_objects++; + } + done = MALI_TRUE; + } + + mali_session_unlock(); + + /* 
Delete any remaining/unused notification objects */
+	for (; used_notification_objects < num_sessions_alloc; used_notification_objects++) {
+		if (NULL != notobjs[used_notification_objects]) {
+			_mali_osk_notification_delete(notobjs[used_notification_objects]);
+		}
+	}
+
+	_mali_osk_free(notobjs);
+	}
+}
+
+/*
+ * Wait-queue predicate: returns MALI_TRUE once no group with a PP core
+ * still has a pending disable request, i.e. the last core scaling
+ * operation has fully taken effect.  Takes and releases the executor
+ * lock itself, so it must be called without the lock held.
+ */
+static mali_bool mali_executor_core_scaling_is_done(void *data)
+{
+	u32 i;
+	u32 num_groups;
+	mali_bool ret = MALI_TRUE;
+
+	MALI_IGNORE(data);
+
+	mali_executor_lock();
+
+	num_groups = mali_group_get_glob_num_groups();
+
+	for (i = 0; i < num_groups; i++) {
+		struct mali_group *group = mali_group_get_glob_group(i);
+
+		if (NULL != group) {
+			/* Only groups that own a PP core participate in core scaling */
+			if (MALI_TRUE == group->disable_requested && NULL != mali_group_get_pp_core(group)) {
+				ret = MALI_FALSE;
+				break;
+			}
+		}
+	}
+	mali_executor_unlock();
+
+	return ret;
+}
+
+/*
+ * Deferred-work handler: blocks until core scaling has settled, then
+ * notifies user space sessions about the new number of enabled PP cores.
+ * NOTE(review): the early return appears to skip the notification on
+ * Mali-450/470 — presumably those notify through another path; confirm.
+ */
+static void mali_executor_wq_notify_core_change(void *arg)
+{
+	MALI_IGNORE(arg);
+
+	if (mali_is_mali450() || mali_is_mali470()) {
+		return;
+	}
+
+	/* Sleep until all pending disable requests have been processed */
+	_mali_osk_wait_queue_wait_event(executor_notify_core_change_wait_queue,
+					mali_executor_core_scaling_is_done, NULL);
+
+	mali_executor_notify_core_change(num_physical_pp_cores_enabled);
+}
+
+/**
+ * Clear all disable request from the _last_ core scaling behavior.
+ */
+static void mali_executor_core_scaling_reset(void)
+{
+	u32 i;
+	u32 num_groups;
+
+	/* Caller must hold the executor lock; we mutate per-group state */
+	MALI_DEBUG_ASSERT_EXECUTOR_LOCK_HELD();
+
+	num_groups = mali_group_get_glob_num_groups();
+
+	/* Drop any disable request left over from the previous scaling round */
+	for (i = 0; i < num_groups; i++) {
+		struct mali_group *group = mali_group_get_glob_group(i);
+
+		if (NULL != group) {
+			group->disable_requested = MALI_FALSE;
+		}
+	}
+
+	/* Also forget any deferred "enable later" counts per PM domain */
+	for (i = 0; i < MALI_MAX_NUMBER_OF_DOMAINS; i++) {
+		core_scaling_delay_up_mask[i] = 0;
+	}
+}
+
+/*
+ * Scale the number of enabled physical PP cores to target_core_nr by
+ * disabling or enabling cores per power-domain cost masks.  Must be
+ * called without the executor lock held (it takes the lock itself).
+ */
+static void mali_executor_core_scale(unsigned int target_core_nr)
+{
+	int current_core_scaling_mask[MALI_MAX_NUMBER_OF_DOMAINS] = { 0 };
+	int target_core_scaling_mask[MALI_MAX_NUMBER_OF_DOMAINS] = { 0 };
+	int i;
+
+	MALI_DEBUG_ASSERT(0 < target_core_nr);
+	MALI_DEBUG_ASSERT(num_physical_pp_cores_total >= target_core_nr);
+
+	mali_executor_lock();
+
+	if (target_core_nr < num_physical_pp_cores_enabled) {
+		MALI_DEBUG_PRINT(2, ("Requesting %d cores: disabling %d cores\n", target_core_nr, num_physical_pp_cores_enabled - target_core_nr));
+	} else {
+		MALI_DEBUG_PRINT(2, ("Requesting %d cores: enabling %d cores\n", target_core_nr, target_core_nr - num_physical_pp_cores_enabled));
+	}
+
+	/* When a new core scaling request comes in, we should first undo the
+	 * pending part of the last core scaling request.  This is safe because
+	 * the single executor lock protects all of this state.
*/ + mali_executor_core_scaling_reset(); + + mali_pm_get_best_power_cost_mask(num_physical_pp_cores_enabled, current_core_scaling_mask); + mali_pm_get_best_power_cost_mask(target_core_nr, target_core_scaling_mask); + + for (i = 0; i < MALI_MAX_NUMBER_OF_DOMAINS; i++) { + target_core_scaling_mask[i] = target_core_scaling_mask[i] - current_core_scaling_mask[i]; + MALI_DEBUG_PRINT(5, ("target_core_scaling_mask[%d] = %d\n", i, target_core_scaling_mask[i])); + } + + for (i = 0; i < MALI_MAX_NUMBER_OF_DOMAINS; i++) { + if (0 > target_core_scaling_mask[i]) { + struct mali_pm_domain *domain; + + domain = mali_pm_domain_get_from_index(i); + + /* Domain is valid and has pp cores */ + if ((NULL != domain) && !(_mali_osk_list_empty(&domain->group_list))) { + struct mali_group *group; + struct mali_group *temp; + + _MALI_OSK_LIST_FOREACHENTRY(group, temp, &domain->group_list, struct mali_group, pm_domain_list) { + if (NULL != mali_group_get_pp_core(group) && (!mali_executor_group_is_in_state(group, EXEC_STATE_DISABLED)) + && (!mali_group_is_virtual(group))) { + mali_executor_group_disable_internal(group); + target_core_scaling_mask[i]++; + if ((0 == target_core_scaling_mask[i])) { + break; + } + + } + } + } + } + } + + for (i = 0; i < MALI_MAX_NUMBER_OF_DOMAINS; i++) { + /** + * Target_core_scaling_mask[i] is bigger than 0, + * means we need to enable some pp cores in + * this domain whose domain index is i. 
+ */ + if (0 < target_core_scaling_mask[i]) { + struct mali_pm_domain *domain; + + if (num_physical_pp_cores_enabled >= target_core_nr) { + break; + } + + domain = mali_pm_domain_get_from_index(i); + + /* Domain is valid and has pp cores */ + if ((NULL != domain) && !(_mali_osk_list_empty(&domain->group_list))) { + struct mali_group *group; + struct mali_group *temp; + + _MALI_OSK_LIST_FOREACHENTRY(group, temp, &domain->group_list, struct mali_group, pm_domain_list) { + if (NULL != mali_group_get_pp_core(group) && mali_executor_group_is_in_state(group, EXEC_STATE_DISABLED) + && (!mali_group_is_virtual(group))) { + mali_executor_group_enable_internal(group); + target_core_scaling_mask[i]--; + + if ((0 == target_core_scaling_mask[i]) || num_physical_pp_cores_enabled == target_core_nr) { + break; + } + } + } + } + } + } + + /** + * Here, we may still have some pp cores not been enabled because of some + * pp cores need to be disabled are still in working state. + */ + for (i = 0; i < MALI_MAX_NUMBER_OF_DOMAINS; i++) { + if (0 < target_core_scaling_mask[i]) { + core_scaling_delay_up_mask[i] = target_core_scaling_mask[i]; + } + } + + mali_executor_schedule(); + mali_executor_unlock(); +} + +static void mali_executor_core_scale_in_group_complete(struct mali_group *group) +{ + int num_pp_cores_disabled = 0; + int num_pp_cores_to_enable = 0; + int i; + + MALI_DEBUG_ASSERT_EXECUTOR_LOCK_HELD(); + MALI_DEBUG_ASSERT(MALI_TRUE == mali_group_disable_requested(group)); + + /* Disable child group of virtual group */ + if (mali_group_is_virtual(group)) { + struct mali_group *child; + struct mali_group *temp; + + _MALI_OSK_LIST_FOREACHENTRY(child, temp, &group->group_list, struct mali_group, group_list) { + if (MALI_TRUE == mali_group_disable_requested(child)) { + mali_group_set_disable_request(child, MALI_FALSE); + mali_executor_group_disable_internal(child); + num_pp_cores_disabled++; + } + } + mali_group_set_disable_request(group, MALI_FALSE); + } else { + 
mali_executor_group_disable_internal(group); + mali_group_set_disable_request(group, MALI_FALSE); + if (NULL != mali_group_get_pp_core(group)) { + num_pp_cores_disabled++; + } + } + + num_pp_cores_to_enable = num_pp_cores_disabled; + + for (i = 0; i < MALI_MAX_NUMBER_OF_DOMAINS; i++) { + if (0 < core_scaling_delay_up_mask[i]) { + struct mali_pm_domain *domain; + + if (0 == num_pp_cores_to_enable) { + break; + } + + domain = mali_pm_domain_get_from_index(i); + + /* Domain is valid and has pp cores */ + if ((NULL != domain) && !(_mali_osk_list_empty(&domain->group_list))) { + struct mali_group *disabled_group; + struct mali_group *temp; + + _MALI_OSK_LIST_FOREACHENTRY(disabled_group, temp, &domain->group_list, struct mali_group, pm_domain_list) { + if (NULL != mali_group_get_pp_core(disabled_group) && mali_executor_group_is_in_state(disabled_group, EXEC_STATE_DISABLED)) { + mali_executor_group_enable_internal(disabled_group); + core_scaling_delay_up_mask[i]--; + num_pp_cores_to_enable--; + + if ((0 == core_scaling_delay_up_mask[i]) || 0 == num_pp_cores_to_enable) { + break; + } + } + } + } + } + } + + _mali_osk_wait_queue_wake_up(executor_notify_core_change_wait_queue); +} + +static void mali_executor_change_group_status_disabled(struct mali_group *group) +{ + /* Physical PP group */ + mali_bool idle; + + MALI_DEBUG_ASSERT_EXECUTOR_LOCK_HELD(); + + idle = mali_executor_group_is_in_state(group, EXEC_STATE_IDLE); + if (MALI_TRUE == idle) { + mali_executor_change_state_pp_physical(group, + &group_list_idle, + &group_list_idle_count, + &group_list_disabled, + &group_list_disabled_count); + } else { + mali_executor_change_state_pp_physical(group, + &group_list_inactive, + &group_list_inactive_count, + &group_list_disabled, + &group_list_disabled_count); + } +} + +static mali_bool mali_executor_deactivate_list_idle(mali_bool deactivate_idle_group) +{ + mali_bool trigger_pm_update = MALI_FALSE; + + if (group_list_idle_count > 0) { + if (mali_executor_has_virtual_group()) { 
+ + /* Rejoin virtual group on Mali-450 */ + + struct mali_group *group; + struct mali_group *temp; + + _MALI_OSK_LIST_FOREACHENTRY(group, temp, + &group_list_idle, + struct mali_group, executor_list) { + if (mali_executor_physical_rejoin_virtual( + group)) { + trigger_pm_update = MALI_TRUE; + } + } + } else if (deactivate_idle_group) { + struct mali_group *group; + struct mali_group *temp; + + /* Deactivate group on Mali-300/400 */ + + _MALI_OSK_LIST_FOREACHENTRY(group, temp, + &group_list_idle, + struct mali_group, executor_list) { + if (mali_group_deactivate(group)) { + trigger_pm_update = MALI_TRUE; + } + + /* Move from idle to inactive */ + mali_executor_change_state_pp_physical(group, + &group_list_idle, + &group_list_idle_count, + &group_list_inactive, + &group_list_inactive_count); + } + } + } + + return trigger_pm_update; +} + +void mali_executor_running_status_print(void) +{ + struct mali_group *group = NULL; + struct mali_group *temp = NULL; + + MALI_PRINT(("GP running job: %p\n", gp_group->gp_running_job)); + if ((gp_group->gp_core) && (gp_group->is_working)) { + mali_group_dump_status(gp_group); + } + MALI_PRINT(("Physical PP groups in WORKING state (count = %u):\n", group_list_working_count)); + _MALI_OSK_LIST_FOREACHENTRY(group, temp, &group_list_working, struct mali_group, executor_list) { + MALI_PRINT(("PP running job: %p, subjob %d \n", group->pp_running_job, group->pp_running_sub_job)); + mali_group_dump_status(group); + } + MALI_PRINT(("Physical PP groups in INACTIVE state (count = %u):\n", group_list_inactive_count)); + _MALI_OSK_LIST_FOREACHENTRY(group, temp, &group_list_inactive, struct mali_group, executor_list) { + MALI_PRINT(("\tPP status %d, SW power: %s\n", group->state, group->power_is_on ? 
"On" : "Off")); + MALI_PRINT(("\tPP #%d: %s\n", group->pp_core->core_id, group->pp_core->hw_core.description)); + } + MALI_PRINT(("Physical PP groups in IDLE state (count = %u):\n", group_list_idle_count)); + _MALI_OSK_LIST_FOREACHENTRY(group, temp, &group_list_idle, struct mali_group, executor_list) { + MALI_PRINT(("\tPP status %d, SW power: %s\n", group->state, group->power_is_on ? "On" : "Off")); + MALI_PRINT(("\tPP #%d: %s\n", group->pp_core->core_id, group->pp_core->hw_core.description)); + } + MALI_PRINT(("Physical PP groups in DISABLED state (count = %u):\n", group_list_disabled_count)); + _MALI_OSK_LIST_FOREACHENTRY(group, temp, &group_list_disabled, struct mali_group, executor_list) { + MALI_PRINT(("\tPP status %d, SW power: %s\n", group->state, group->power_is_on ? "On" : "Off")); + MALI_PRINT(("\tPP #%d: %s\n", group->pp_core->core_id, group->pp_core->hw_core.description)); + } + + if (mali_executor_has_virtual_group()) { + MALI_PRINT(("Virtual group running job: %p\n", virtual_group->pp_running_job)); + MALI_PRINT(("Virtual group status: %d\n", virtual_group_state)); + MALI_PRINT(("Virtual group->status: %d\n", virtual_group->state)); + MALI_PRINT(("\tSW power: %s\n", virtual_group->power_is_on ? "On" : "Off")); + _MALI_OSK_LIST_FOREACHENTRY(group, temp, &virtual_group->group_list, + struct mali_group, group_list) { + int i = 0; + MALI_PRINT(("\tchild group(%s) running job: %p\n", group->pp_core->hw_core.description, group->pp_running_job)); + MALI_PRINT(("\tchild group(%s)->status: %d\n", group->pp_core->hw_core.description, group->state)); + MALI_PRINT(("\tchild group(%s) SW power: %s\n", group->pp_core->hw_core.description, group->power_is_on ? 
"On" : "Off")); + if (group->pm_domain) { + MALI_PRINT(("\tPower domain: id %u\n", mali_pm_domain_get_id(group->pm_domain))); + MALI_PRINT(("\tMask:0x%04x \n", mali_pm_domain_get_mask(group->pm_domain))); + MALI_PRINT(("\tUse-count:%u \n", mali_pm_domain_get_use_count(group->pm_domain))); + MALI_PRINT(("\tCurrent power status:%s \n", (mali_pm_domain_get_mask(group->pm_domain)& mali_pm_get_current_mask()) ? "On" : "Off")); + MALI_PRINT(("\tWanted power status:%s \n", (mali_pm_domain_get_mask(group->pm_domain)& mali_pm_get_wanted_mask()) ? "On" : "Off")); + } + + for (i = 0; i < 2; i++) { + if (NULL != group->l2_cache_core[i]) { + struct mali_pm_domain *domain; + domain = mali_l2_cache_get_pm_domain(group->l2_cache_core[i]); + MALI_PRINT(("\t L2(index %d) group SW power: %s\n", i, group->l2_cache_core[i]->power_is_on ? "On" : "Off")); + if (domain) { + MALI_PRINT(("\tL2 Power domain: id %u\n", mali_pm_domain_get_id(domain))); + MALI_PRINT(("\tL2 Mask:0x%04x \n", mali_pm_domain_get_mask(domain))); + MALI_PRINT(("\tL2 Use-count:%u \n", mali_pm_domain_get_use_count(domain))); + MALI_PRINT(("\tL2 Current power status:%s \n", (mali_pm_domain_get_mask(domain) & mali_pm_get_current_mask()) ? "On" : "Off")); + MALI_PRINT(("\tL2 Wanted power status:%s \n", (mali_pm_domain_get_mask(domain) & mali_pm_get_wanted_mask()) ? "On" : "Off")); + } + } + } + } + if (EXEC_STATE_WORKING == virtual_group_state) { + mali_group_dump_status(virtual_group); + } + } +} + +void mali_executor_status_dump(void) +{ + mali_executor_lock(); + mali_scheduler_lock(); + + /* print schedule queue status */ + mali_scheduler_gp_pp_job_queue_print(); + + mali_scheduler_unlock(); + mali_executor_unlock(); +} diff --git a/drivers/gpu/arm/utgard/common/mali_executor.h b/drivers/gpu/arm/utgard/common/mali_executor.h new file mode 100644 index 000000000000..a756d3f40c2a --- /dev/null +++ b/drivers/gpu/arm/utgard/common/mali_executor.h @@ -0,0 +1,104 @@ +/* + * Copyright (C) 2012, 2014-2015 ARM Limited. 
All rights reserved. + * + * This program is free software and is provided to you under the terms of the GNU General Public License version 2 + * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence. + * + * A copy of the licence is included with the program, and can also be obtained from Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. + */ + +#ifndef __MALI_EXECUTOR_H__ +#define __MALI_EXECUTOR_H__ + +#include "mali_osk.h" +#include "mali_scheduler_types.h" +#include "mali_kernel_common.h" + +typedef enum { + MALI_EXECUTOR_HINT_GP_BOUND = 0 +#define MALI_EXECUTOR_HINT_MAX 1 +} mali_executor_hint; + +extern mali_bool mali_executor_hints[MALI_EXECUTOR_HINT_MAX]; + +/* forward declare struct instead of using include */ +struct mali_session_data; +struct mali_group; +struct mali_pp_core; + +extern _mali_osk_spinlock_irq_t *mali_executor_lock_obj; + +#define MALI_DEBUG_ASSERT_EXECUTOR_LOCK_HELD() MALI_DEBUG_ASSERT_LOCK_HELD(mali_executor_lock_obj); + +_mali_osk_errcode_t mali_executor_initialize(void); +void mali_executor_terminate(void); + +void mali_executor_populate(void); +void mali_executor_depopulate(void); + +void mali_executor_suspend(void); +void mali_executor_resume(void); + +u32 mali_executor_get_num_cores_total(void); +u32 mali_executor_get_num_cores_enabled(void); +struct mali_pp_core *mali_executor_get_virtual_pp(void); +struct mali_group *mali_executor_get_virtual_group(void); + +void mali_executor_zap_all_active(struct mali_session_data *session); + +/** + * Schedule GP and PP according to bitmask. + * + * @param mask A scheduling bitmask. + * @param deferred_schedule MALI_TRUE if schedule should be deferred, MALI_FALSE if not. 
+ */ +void mali_executor_schedule_from_mask(mali_scheduler_mask mask, mali_bool deferred_schedule); + +_mali_osk_errcode_t mali_executor_interrupt_gp(struct mali_group *group, mali_bool in_upper_half); +_mali_osk_errcode_t mali_executor_interrupt_pp(struct mali_group *group, mali_bool in_upper_half); +_mali_osk_errcode_t mali_executor_interrupt_mmu(struct mali_group *group, mali_bool in_upper_half); + +void mali_executor_group_oom(struct mali_group *group); +void mali_executor_group_power_up(struct mali_group *groups[], u32 num_groups); +void mali_executor_group_power_down(struct mali_group *groups[], u32 num_groups); + +void mali_executor_abort_session(struct mali_session_data *session); + +void mali_executor_core_scaling_enable(void); +void mali_executor_core_scaling_disable(void); +mali_bool mali_executor_core_scaling_is_enabled(void); + +void mali_executor_group_enable(struct mali_group *group); +void mali_executor_group_disable(struct mali_group *group); +mali_bool mali_executor_group_is_disabled(struct mali_group *group); + +int mali_executor_set_perf_level(unsigned int target_core_nr, mali_bool override); + +#if MALI_STATE_TRACKING +u32 mali_executor_dump_state(char *buf, u32 size); +#endif + +MALI_STATIC_INLINE void mali_executor_hint_enable(mali_executor_hint hint) +{ + MALI_DEBUG_ASSERT(hint < MALI_EXECUTOR_HINT_MAX); + mali_executor_hints[hint] = MALI_TRUE; +} + +MALI_STATIC_INLINE void mali_executor_hint_disable(mali_executor_hint hint) +{ + MALI_DEBUG_ASSERT(hint < MALI_EXECUTOR_HINT_MAX); + mali_executor_hints[hint] = MALI_FALSE; +} + +MALI_STATIC_INLINE mali_bool mali_executor_hint_is_enabled(mali_executor_hint hint) +{ + MALI_DEBUG_ASSERT(hint < MALI_EXECUTOR_HINT_MAX); + return mali_executor_hints[hint]; +} + +void mali_executor_running_status_print(void); +void mali_executor_status_dump(void); +void mali_executor_lock(void); +void mali_executor_unlock(void); +#endif /* __MALI_EXECUTOR_H__ */ diff --git a/drivers/gpu/arm/utgard/common/mali_gp.c 
b/drivers/gpu/arm/utgard/common/mali_gp.c new file mode 100644 index 000000000000..a690781837e9 --- /dev/null +++ b/drivers/gpu/arm/utgard/common/mali_gp.c @@ -0,0 +1,357 @@ +/* + * Copyright (C) 2011-2015 ARM Limited. All rights reserved. + * + * This program is free software and is provided to you under the terms of the GNU General Public License version 2 + * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence. + * + * A copy of the licence is included with the program, and can also be obtained from Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. + */ + +#include "mali_gp.h" +#include "mali_hw_core.h" +#include "mali_group.h" +#include "mali_osk.h" +#include "regs/mali_gp_regs.h" +#include "mali_kernel_common.h" +#include "mali_kernel_core.h" +#if defined(CONFIG_MALI400_PROFILING) +#include "mali_osk_profiling.h" +#endif + +static struct mali_gp_core *mali_global_gp_core = NULL; + +/* Interrupt handlers */ +static void mali_gp_irq_probe_trigger(void *data); +static _mali_osk_errcode_t mali_gp_irq_probe_ack(void *data); + +struct mali_gp_core *mali_gp_create(const _mali_osk_resource_t *resource, struct mali_group *group) +{ + struct mali_gp_core *core = NULL; + + MALI_DEBUG_ASSERT(NULL == mali_global_gp_core); + MALI_DEBUG_PRINT(2, ("Mali GP: Creating Mali GP core: %s\n", resource->description)); + + core = _mali_osk_malloc(sizeof(struct mali_gp_core)); + if (NULL != core) { + if (_MALI_OSK_ERR_OK == mali_hw_core_create(&core->hw_core, resource, MALIGP2_REGISTER_ADDRESS_SPACE_SIZE)) { + _mali_osk_errcode_t ret; + + ret = mali_gp_reset(core); + + if (_MALI_OSK_ERR_OK == ret) { + ret = mali_group_add_gp_core(group, core); + if (_MALI_OSK_ERR_OK == ret) { + /* Setup IRQ handlers (which will do IRQ probing if needed) */ + core->irq = _mali_osk_irq_init(resource->irq, + mali_group_upper_half_gp, + group, + mali_gp_irq_probe_trigger, + 
mali_gp_irq_probe_ack, + core, + resource->description); + if (NULL != core->irq) { + MALI_DEBUG_PRINT(4, ("Mali GP: set global gp core from 0x%08X to 0x%08X\n", mali_global_gp_core, core)); + mali_global_gp_core = core; + + return core; + } else { + MALI_PRINT_ERROR(("Mali GP: Failed to setup interrupt handlers for GP core %s\n", core->hw_core.description)); + } + mali_group_remove_gp_core(group); + } else { + MALI_PRINT_ERROR(("Mali GP: Failed to add core %s to group\n", core->hw_core.description)); + } + } + mali_hw_core_delete(&core->hw_core); + } + + _mali_osk_free(core); + } else { + MALI_PRINT_ERROR(("Failed to allocate memory for GP core\n")); + } + + return NULL; +} + +void mali_gp_delete(struct mali_gp_core *core) +{ + MALI_DEBUG_ASSERT_POINTER(core); + + _mali_osk_irq_term(core->irq); + mali_hw_core_delete(&core->hw_core); + mali_global_gp_core = NULL; + _mali_osk_free(core); +} + +void mali_gp_stop_bus(struct mali_gp_core *core) +{ + MALI_DEBUG_ASSERT_POINTER(core); + + mali_hw_core_register_write(&core->hw_core, MALIGP2_REG_ADDR_MGMT_CMD, MALIGP2_REG_VAL_CMD_STOP_BUS); +} + +_mali_osk_errcode_t mali_gp_stop_bus_wait(struct mali_gp_core *core) +{ + int i; + + MALI_DEBUG_ASSERT_POINTER(core); + + /* Send the stop bus command. 
*/ + mali_gp_stop_bus(core); + + /* Wait for bus to be stopped */ + for (i = 0; i < MALI_REG_POLL_COUNT_SLOW; i++) { + if (mali_hw_core_register_read(&core->hw_core, MALIGP2_REG_ADDR_MGMT_STATUS) & MALIGP2_REG_VAL_STATUS_BUS_STOPPED) { + break; + } + } + + if (MALI_REG_POLL_COUNT_SLOW == i) { + MALI_PRINT_ERROR(("Mali GP: Failed to stop bus on %s\n", core->hw_core.description)); + return _MALI_OSK_ERR_FAULT; + } + return _MALI_OSK_ERR_OK; +} + +void mali_gp_hard_reset(struct mali_gp_core *core) +{ + const u32 reset_wait_target_register = MALIGP2_REG_ADDR_MGMT_PERF_CNT_0_LIMIT; + const u32 reset_invalid_value = 0xC0FFE000; + const u32 reset_check_value = 0xC01A0000; + const u32 reset_default_value = 0; + int i; + + MALI_DEBUG_ASSERT_POINTER(core); + MALI_DEBUG_PRINT(4, ("Mali GP: Hard reset of core %s\n", core->hw_core.description)); + + mali_hw_core_register_write(&core->hw_core, reset_wait_target_register, reset_invalid_value); + + mali_hw_core_register_write(&core->hw_core, MALIGP2_REG_ADDR_MGMT_CMD, MALIGP2_REG_VAL_CMD_RESET); + + for (i = 0; i < MALI_REG_POLL_COUNT_FAST; i++) { + mali_hw_core_register_write(&core->hw_core, reset_wait_target_register, reset_check_value); + if (reset_check_value == mali_hw_core_register_read(&core->hw_core, reset_wait_target_register)) { + break; + } + } + + if (MALI_REG_POLL_COUNT_FAST == i) { + MALI_PRINT_ERROR(("Mali GP: The hard reset loop didn't work, unable to recover\n")); + } + + mali_hw_core_register_write(&core->hw_core, reset_wait_target_register, reset_default_value); /* set it back to the default */ + /* Re-enable interrupts */ + mali_hw_core_register_write(&core->hw_core, MALIGP2_REG_ADDR_MGMT_INT_CLEAR, MALIGP2_REG_VAL_IRQ_MASK_ALL); + mali_hw_core_register_write(&core->hw_core, MALIGP2_REG_ADDR_MGMT_INT_MASK, MALIGP2_REG_VAL_IRQ_MASK_USED); + +} + +void mali_gp_reset_async(struct mali_gp_core *core) +{ + MALI_DEBUG_ASSERT_POINTER(core); + + MALI_DEBUG_PRINT(4, ("Mali GP: Reset of core %s\n", 
core->hw_core.description)); + + mali_hw_core_register_write(&core->hw_core, MALIGP2_REG_ADDR_MGMT_INT_MASK, 0); /* disable the IRQs */ + mali_hw_core_register_write(&core->hw_core, MALIGP2_REG_ADDR_MGMT_INT_CLEAR, MALI400GP_REG_VAL_IRQ_RESET_COMPLETED); + mali_hw_core_register_write(&core->hw_core, MALIGP2_REG_ADDR_MGMT_CMD, MALI400GP_REG_VAL_CMD_SOFT_RESET); + +} + +_mali_osk_errcode_t mali_gp_reset_wait(struct mali_gp_core *core) +{ + int i; + u32 rawstat = 0; + + MALI_DEBUG_ASSERT_POINTER(core); + + for (i = 0; i < MALI_REG_POLL_COUNT_FAST; i++) { + rawstat = mali_hw_core_register_read(&core->hw_core, MALIGP2_REG_ADDR_MGMT_INT_RAWSTAT); + if (rawstat & MALI400GP_REG_VAL_IRQ_RESET_COMPLETED) { + break; + } + } + + if (i == MALI_REG_POLL_COUNT_FAST) { + MALI_PRINT_ERROR(("Mali GP: Failed to reset core %s, rawstat: 0x%08x\n", + core->hw_core.description, rawstat)); + return _MALI_OSK_ERR_FAULT; + } + + /* Re-enable interrupts */ + mali_hw_core_register_write(&core->hw_core, MALIGP2_REG_ADDR_MGMT_INT_CLEAR, MALIGP2_REG_VAL_IRQ_MASK_ALL); + mali_hw_core_register_write(&core->hw_core, MALIGP2_REG_ADDR_MGMT_INT_MASK, MALIGP2_REG_VAL_IRQ_MASK_USED); + + return _MALI_OSK_ERR_OK; +} + +_mali_osk_errcode_t mali_gp_reset(struct mali_gp_core *core) +{ + mali_gp_reset_async(core); + return mali_gp_reset_wait(core); +} + +void mali_gp_job_start(struct mali_gp_core *core, struct mali_gp_job *job) +{ + u32 startcmd = 0; + u32 *frame_registers = mali_gp_job_get_frame_registers(job); + u32 counter_src0 = mali_gp_job_get_perf_counter_src0(job); + u32 counter_src1 = mali_gp_job_get_perf_counter_src1(job); + + MALI_DEBUG_ASSERT_POINTER(core); + + if (mali_gp_job_has_vs_job(job)) { + startcmd |= (u32) MALIGP2_REG_VAL_CMD_START_VS; + } + + if (mali_gp_job_has_plbu_job(job)) { + startcmd |= (u32) MALIGP2_REG_VAL_CMD_START_PLBU; + } + + MALI_DEBUG_ASSERT(0 != startcmd); + + mali_hw_core_register_write_array_relaxed(&core->hw_core, MALIGP2_REG_ADDR_MGMT_VSCL_START_ADDR, frame_registers, 
MALIGP2_NUM_REGS_FRAME); + + if (MALI_HW_CORE_NO_COUNTER != counter_src0) { + mali_hw_core_register_write(&core->hw_core, MALIGP2_REG_ADDR_MGMT_PERF_CNT_0_SRC, counter_src0); + mali_hw_core_register_write(&core->hw_core, MALIGP2_REG_ADDR_MGMT_PERF_CNT_0_ENABLE, MALIGP2_REG_VAL_PERF_CNT_ENABLE); + } + if (MALI_HW_CORE_NO_COUNTER != counter_src1) { + mali_hw_core_register_write(&core->hw_core, MALIGP2_REG_ADDR_MGMT_PERF_CNT_1_SRC, counter_src1); + mali_hw_core_register_write(&core->hw_core, MALIGP2_REG_ADDR_MGMT_PERF_CNT_1_ENABLE, MALIGP2_REG_VAL_PERF_CNT_ENABLE); + } + + MALI_DEBUG_PRINT(3, ("Mali GP: Starting job (0x%08x) on core %s with command 0x%08X\n", job, core->hw_core.description, startcmd)); + + mali_hw_core_register_write_relaxed(&core->hw_core, MALIGP2_REG_ADDR_MGMT_CMD, MALIGP2_REG_VAL_CMD_UPDATE_PLBU_ALLOC); + + /* Barrier to make sure the previous register write is finished */ + _mali_osk_write_mem_barrier(); + + /* This is the command that starts the core. + * + * Don't actually run the job if PROFILING_SKIP_PP_JOBS are set, just + * force core to assert the completion interrupt. 
+ */ +#if !defined(PROFILING_SKIP_GP_JOBS) + mali_hw_core_register_write_relaxed(&core->hw_core, MALIGP2_REG_ADDR_MGMT_CMD, startcmd); +#else + { + u32 bits = 0; + + if (mali_gp_job_has_vs_job(job)) + bits = MALIGP2_REG_VAL_IRQ_VS_END_CMD_LST; + if (mali_gp_job_has_plbu_job(job)) + bits |= MALIGP2_REG_VAL_IRQ_PLBU_END_CMD_LST; + + mali_hw_core_register_write_relaxed(&core->hw_core, + MALIGP2_REG_ADDR_MGMT_INT_RAWSTAT, bits); + } +#endif + + /* Barrier to make sure the previous register write is finished */ + _mali_osk_write_mem_barrier(); +} + +void mali_gp_resume_with_new_heap(struct mali_gp_core *core, u32 start_addr, u32 end_addr) +{ + u32 irq_readout; + + MALI_DEBUG_ASSERT_POINTER(core); + + irq_readout = mali_hw_core_register_read(&core->hw_core, MALIGP2_REG_ADDR_MGMT_INT_RAWSTAT); + + if (irq_readout & MALIGP2_REG_VAL_IRQ_PLBU_OUT_OF_MEM) { + mali_hw_core_register_write(&core->hw_core, MALIGP2_REG_ADDR_MGMT_INT_CLEAR, (MALIGP2_REG_VAL_IRQ_PLBU_OUT_OF_MEM | MALIGP2_REG_VAL_IRQ_HANG)); + mali_hw_core_register_write(&core->hw_core, MALIGP2_REG_ADDR_MGMT_INT_MASK, MALIGP2_REG_VAL_IRQ_MASK_USED); /* re-enable interrupts */ + mali_hw_core_register_write_relaxed(&core->hw_core, MALIGP2_REG_ADDR_MGMT_PLBU_ALLOC_START_ADDR, start_addr); + mali_hw_core_register_write_relaxed(&core->hw_core, MALIGP2_REG_ADDR_MGMT_PLBU_ALLOC_END_ADDR, end_addr); + + MALI_DEBUG_PRINT(3, ("Mali GP: Resuming job\n")); + + mali_hw_core_register_write(&core->hw_core, MALIGP2_REG_ADDR_MGMT_CMD, MALIGP2_REG_VAL_CMD_UPDATE_PLBU_ALLOC); + _mali_osk_write_mem_barrier(); + } + /* + * else: core has been reset between PLBU_OUT_OF_MEM interrupt and this new heap response. + * A timeout or a page fault on Mali-200 PP core can cause this behaviour. 
+ */
+}
+
+/* Read the GP core's hardware version register. */
+u32 mali_gp_core_get_version(struct mali_gp_core *core)
+{
+	MALI_DEBUG_ASSERT_POINTER(core);
+	return mali_hw_core_register_read(&core->hw_core, MALIGP2_REG_ADDR_MGMT_VERSION);
+}
+
+/* Return the single global GP core instance (NULL if none created). */
+struct mali_gp_core *mali_gp_get_global_gp_core(void)
+{
+	return mali_global_gp_core;
+}
+
+/* ------------- interrupt handling below ------------------ */
+
+/*
+ * IRQ-probe trigger: force the core to raise an interrupt by unmasking
+ * the used IRQs and setting the AXI bus error bit in RAWSTAT.
+ */
+static void mali_gp_irq_probe_trigger(void *data)
+{
+	struct mali_gp_core *core = (struct mali_gp_core *)data;
+
+	mali_hw_core_register_write(&core->hw_core, MALIGP2_REG_ADDR_MGMT_INT_MASK, MALIGP2_REG_VAL_IRQ_MASK_USED);
+	mali_hw_core_register_write(&core->hw_core, MALIGP2_REG_ADDR_MGMT_INT_RAWSTAT, MALIGP2_REG_VAL_IRQ_AXI_BUS_ERROR);
+	_mali_osk_mem_barrier();
+}
+
+/*
+ * IRQ-probe acknowledge: confirm the interrupt injected by the trigger
+ * above is visible in INT_STAT, then clear it.  Returns _MALI_OSK_ERR_OK
+ * when the expected bit was seen, _MALI_OSK_ERR_FAULT otherwise.
+ */
+static _mali_osk_errcode_t mali_gp_irq_probe_ack(void *data)
+{
+	struct mali_gp_core *core = (struct mali_gp_core *)data;
+	u32 irq_readout;
+
+	irq_readout = mali_hw_core_register_read(&core->hw_core, MALIGP2_REG_ADDR_MGMT_INT_STAT);
+	if (MALIGP2_REG_VAL_IRQ_AXI_BUS_ERROR & irq_readout) {
+		mali_hw_core_register_write(&core->hw_core, MALIGP2_REG_ADDR_MGMT_INT_CLEAR, MALIGP2_REG_VAL_IRQ_AXI_BUS_ERROR);
+		_mali_osk_mem_barrier();
+		return _MALI_OSK_ERR_OK;
+	}
+
+	return _MALI_OSK_ERR_FAULT;
+}
+
+/* ------ local helper functions below --------- */
+#if MALI_STATE_TRACKING
+/* Append a one-line GP core description to buf; returns bytes written. */
+u32 mali_gp_dump_state(struct mali_gp_core *core, char *buf, u32 size)
+{
+	int n = 0;
+
+	n += _mali_osk_snprintf(buf + n, size - n, "\tGP: %s\n", core->hw_core.description);
+
+	return n;
+}
+#endif
+
+/*
+ * Read the two hardware performance counters (if enabled for this job)
+ * and store the values on the job; also reported to the profiling layer
+ * when CONFIG_MALI400_PROFILING is set.
+ */
+void mali_gp_update_performance_counters(struct mali_gp_core *core, struct mali_gp_job *job)
+{
+	u32 val0 = 0;
+	u32 val1 = 0;
+	u32 counter_src0 = mali_gp_job_get_perf_counter_src0(job);
+	u32 counter_src1 = mali_gp_job_get_perf_counter_src1(job);
+
+	if (MALI_HW_CORE_NO_COUNTER != counter_src0) {
+		val0 = mali_hw_core_register_read(&core->hw_core, MALIGP2_REG_ADDR_MGMT_PERF_CNT_0_VALUE);
+		mali_gp_job_set_perf_counter_value0(job, val0);
+
+#if defined(CONFIG_MALI400_PROFILING)
+
_mali_osk_profiling_report_hw_counter(COUNTER_VP_0_C0, val0); + _mali_osk_profiling_record_global_counters(COUNTER_VP_0_C0, val0); +#endif + + } + + if (MALI_HW_CORE_NO_COUNTER != counter_src1) { + val1 = mali_hw_core_register_read(&core->hw_core, MALIGP2_REG_ADDR_MGMT_PERF_CNT_1_VALUE); + mali_gp_job_set_perf_counter_value1(job, val1); + +#if defined(CONFIG_MALI400_PROFILING) + _mali_osk_profiling_report_hw_counter(COUNTER_VP_0_C1, val1); + _mali_osk_profiling_record_global_counters(COUNTER_VP_0_C1, val1); +#endif + } +} diff --git a/drivers/gpu/arm/utgard/common/mali_gp.h b/drivers/gpu/arm/utgard/common/mali_gp.h new file mode 100644 index 000000000000..8d5f69c23229 --- /dev/null +++ b/drivers/gpu/arm/utgard/common/mali_gp.h @@ -0,0 +1,127 @@ +/* + * Copyright (C) 2011-2015 ARM Limited. All rights reserved. + * + * This program is free software and is provided to you under the terms of the GNU General Public License version 2 + * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence. + * + * A copy of the licence is included with the program, and can also be obtained from Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. + */ + +#ifndef __MALI_GP_H__ +#define __MALI_GP_H__ + +#include "mali_osk.h" +#include "mali_gp_job.h" +#include "mali_hw_core.h" +#include "regs/mali_gp_regs.h" + +struct mali_group; + +/** + * Definition of the GP core struct + * Used to track a GP core in the system. 
+ */ +struct mali_gp_core { + struct mali_hw_core hw_core; /**< Common for all HW cores */ + _mali_osk_irq_t *irq; /**< IRQ handler */ +}; + +_mali_osk_errcode_t mali_gp_initialize(void); +void mali_gp_terminate(void); + +struct mali_gp_core *mali_gp_create(const _mali_osk_resource_t *resource, struct mali_group *group); +void mali_gp_delete(struct mali_gp_core *core); + +void mali_gp_stop_bus(struct mali_gp_core *core); +_mali_osk_errcode_t mali_gp_stop_bus_wait(struct mali_gp_core *core); +void mali_gp_reset_async(struct mali_gp_core *core); +_mali_osk_errcode_t mali_gp_reset_wait(struct mali_gp_core *core); +void mali_gp_hard_reset(struct mali_gp_core *core); +_mali_osk_errcode_t mali_gp_reset(struct mali_gp_core *core); + +void mali_gp_job_start(struct mali_gp_core *core, struct mali_gp_job *job); +void mali_gp_resume_with_new_heap(struct mali_gp_core *core, u32 start_addr, u32 end_addr); + +u32 mali_gp_core_get_version(struct mali_gp_core *core); + +struct mali_gp_core *mali_gp_get_global_gp_core(void); + +#if MALI_STATE_TRACKING +u32 mali_gp_dump_state(struct mali_gp_core *core, char *buf, u32 size); +#endif + +void mali_gp_update_performance_counters(struct mali_gp_core *core, struct mali_gp_job *job); + +MALI_STATIC_INLINE const char *mali_gp_core_description(struct mali_gp_core *core) +{ + return core->hw_core.description; +} + +MALI_STATIC_INLINE enum mali_interrupt_result mali_gp_get_interrupt_result(struct mali_gp_core *core) +{ + u32 stat_used = mali_hw_core_register_read(&core->hw_core, MALIGP2_REG_ADDR_MGMT_INT_STAT) & + MALIGP2_REG_VAL_IRQ_MASK_USED; + + if (0 == stat_used) { + return MALI_INTERRUPT_RESULT_NONE; + } else if ((MALIGP2_REG_VAL_IRQ_VS_END_CMD_LST | + MALIGP2_REG_VAL_IRQ_PLBU_END_CMD_LST) == stat_used) { + return MALI_INTERRUPT_RESULT_SUCCESS; + } else if (MALIGP2_REG_VAL_IRQ_VS_END_CMD_LST == stat_used) { + return MALI_INTERRUPT_RESULT_SUCCESS_VS; + } else if (MALIGP2_REG_VAL_IRQ_PLBU_END_CMD_LST == stat_used) { + return 
MALI_INTERRUPT_RESULT_SUCCESS_PLBU; + } else if (MALIGP2_REG_VAL_IRQ_PLBU_OUT_OF_MEM & stat_used) { + return MALI_INTERRUPT_RESULT_OOM; + } + + return MALI_INTERRUPT_RESULT_ERROR; +} + +MALI_STATIC_INLINE u32 mali_gp_get_rawstat(struct mali_gp_core *core) +{ + MALI_DEBUG_ASSERT_POINTER(core); + return mali_hw_core_register_read(&core->hw_core, + MALIGP2_REG_ADDR_MGMT_INT_RAWSTAT); +} + +MALI_STATIC_INLINE u32 mali_gp_is_active(struct mali_gp_core *core) +{ + u32 status = mali_hw_core_register_read(&core->hw_core, MALIGP2_REG_ADDR_MGMT_STATUS); + return (status & MALIGP2_REG_VAL_STATUS_MASK_ACTIVE) ? MALI_TRUE : MALI_FALSE; +} + +MALI_STATIC_INLINE void mali_gp_mask_all_interrupts(struct mali_gp_core *core) +{ + mali_hw_core_register_write(&core->hw_core, MALIGP2_REG_ADDR_MGMT_INT_MASK, MALIGP2_REG_VAL_IRQ_MASK_NONE); +} + +MALI_STATIC_INLINE void mali_gp_enable_interrupts(struct mali_gp_core *core, enum mali_interrupt_result exceptions) +{ + /* Enable all interrupts, except those specified in exceptions */ + u32 value; + + if (MALI_INTERRUPT_RESULT_SUCCESS_VS == exceptions) { + /* Enable all used except VS complete */ + value = MALIGP2_REG_VAL_IRQ_MASK_USED & + ~MALIGP2_REG_VAL_IRQ_VS_END_CMD_LST; + } else { + MALI_DEBUG_ASSERT(MALI_INTERRUPT_RESULT_SUCCESS_PLBU == + exceptions); + /* Enable all used except PLBU complete */ + value = MALIGP2_REG_VAL_IRQ_MASK_USED & + ~MALIGP2_REG_VAL_IRQ_PLBU_END_CMD_LST; + } + + mali_hw_core_register_write(&core->hw_core, + MALIGP2_REG_ADDR_MGMT_INT_MASK, + value); +} + +MALI_STATIC_INLINE u32 mali_gp_read_plbu_alloc_start_addr(struct mali_gp_core *core) +{ + return mali_hw_core_register_read(&core->hw_core, MALIGP2_REG_ADDR_MGMT_PLBU_ALLOC_START_ADDR); +} + +#endif /* __MALI_GP_H__ */ diff --git a/drivers/gpu/arm/utgard/common/mali_gp_job.c b/drivers/gpu/arm/utgard/common/mali_gp_job.c new file mode 100644 index 000000000000..adc30a3408a8 --- /dev/null +++ b/drivers/gpu/arm/utgard/common/mali_gp_job.c @@ -0,0 +1,301 @@ +/* + * 
Copyright (C) 2011-2015 ARM Limited. All rights reserved. + * + * This program is free software and is provided to you under the terms of the GNU General Public License version 2 + * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence. + * + * A copy of the licence is included with the program, and can also be obtained from Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. + */ + +#include "mali_gp_job.h" +#include "mali_osk.h" +#include "mali_osk_list.h" +#include "mali_uk_types.h" +#include "mali_memory_virtual.h" +#include "mali_memory_defer_bind.h" + +static u32 gp_counter_src0 = MALI_HW_CORE_NO_COUNTER; /**< Performance counter 0, MALI_HW_CORE_NO_COUNTER for disabled */ +static u32 gp_counter_src1 = MALI_HW_CORE_NO_COUNTER; /**< Performance counter 1, MALI_HW_CORE_NO_COUNTER for disabled */ +static void _mali_gp_del_varying_allocations(struct mali_gp_job *job); + + +static int _mali_gp_add_varying_allocations(struct mali_session_data *session, + struct mali_gp_job *job, + u32 *alloc, + u32 num) +{ + int i = 0; + struct mali_gp_allocation_node *alloc_node; + mali_mem_allocation *mali_alloc = NULL; + struct mali_vma_node *mali_vma_node = NULL; + + for (i = 0 ; i < num ; i++) { + MALI_DEBUG_ASSERT(alloc[i]); + alloc_node = _mali_osk_calloc(1, sizeof(struct mali_gp_allocation_node)); + if (alloc_node) { + INIT_LIST_HEAD(&alloc_node->node); + /* find mali allocation structure by vaddress*/ + mali_vma_node = mali_vma_offset_search(&session->allocation_mgr, alloc[i], 0); + + if (likely(mali_vma_node)) { + mali_alloc = container_of(mali_vma_node, struct mali_mem_allocation, mali_vma_node); + MALI_DEBUG_ASSERT(alloc[i] == mali_vma_node->vm_node.start); + } else { + MALI_DEBUG_PRINT(1, ("ERROE!_mali_gp_add_varying_allocations,can't find allocation %d by address =0x%x, num=%d\n", i, alloc[i], num)); + MALI_DEBUG_ASSERT(0); + } + alloc_node->alloc = 
mali_alloc; + /* add to gp job varying alloc list*/ + list_move(&alloc_node->node, &job->varying_alloc); + } else + goto fail; + } + + return 0; +fail: + MALI_DEBUG_PRINT(1, ("ERROE!_mali_gp_add_varying_allocations,failed to alloc memory!\n")); + _mali_gp_del_varying_allocations(job); + return -1; +} + + +static void _mali_gp_del_varying_allocations(struct mali_gp_job *job) +{ + struct mali_gp_allocation_node *alloc_node, *tmp_node; + + list_for_each_entry_safe(alloc_node, tmp_node, &job->varying_alloc, node) { + list_del(&alloc_node->node); + kfree(alloc_node); + } + INIT_LIST_HEAD(&job->varying_alloc); +} + +struct mali_gp_job *mali_gp_job_create(struct mali_session_data *session, _mali_uk_gp_start_job_s *uargs, u32 id, struct mali_timeline_tracker *pp_tracker) +{ + struct mali_gp_job *job; + u32 perf_counter_flag; + u32 __user *memory_list = NULL; + struct mali_gp_allocation_node *alloc_node, *tmp_node; + + job = _mali_osk_calloc(1, sizeof(struct mali_gp_job)); + if (NULL != job) { + job->finished_notification = _mali_osk_notification_create(_MALI_NOTIFICATION_GP_FINISHED, sizeof(_mali_uk_gp_job_finished_s)); + if (NULL == job->finished_notification) { + goto fail3; + } + + job->oom_notification = _mali_osk_notification_create(_MALI_NOTIFICATION_GP_STALLED, sizeof(_mali_uk_gp_job_suspended_s)); + if (NULL == job->oom_notification) { + goto fail2; + } + + if (0 != _mali_osk_copy_from_user(&job->uargs, uargs, sizeof(_mali_uk_gp_start_job_s))) { + goto fail1; + } + + perf_counter_flag = mali_gp_job_get_perf_counter_flag(job); + + /* case when no counters came from user space + * so pass the debugfs / DS-5 provided global ones to the job object */ + if (!((perf_counter_flag & _MALI_PERFORMANCE_COUNTER_FLAG_SRC0_ENABLE) || + (perf_counter_flag & _MALI_PERFORMANCE_COUNTER_FLAG_SRC1_ENABLE))) { + mali_gp_job_set_perf_counter_src0(job, mali_gp_job_get_gp_counter_src0()); + mali_gp_job_set_perf_counter_src1(job, mali_gp_job_get_gp_counter_src1()); + } + + 
_mali_osk_list_init(&job->list); + job->session = session; + job->id = id; + job->heap_base_addr = job->uargs.frame_registers[4]; + job->heap_current_addr = job->uargs.frame_registers[4]; + job->heap_grow_size = job->uargs.heap_grow_size; + job->perf_counter_value0 = 0; + job->perf_counter_value1 = 0; + job->pid = _mali_osk_get_pid(); + job->tid = _mali_osk_get_tid(); + + + INIT_LIST_HEAD(&job->varying_alloc); + INIT_LIST_HEAD(&job->vary_todo); + job->dmem = NULL; + /* add varying allocation list*/ + if (uargs->varying_alloc_num) { + /* copy varying list from user space*/ + job->varying_list = _mali_osk_calloc(1, sizeof(u32) * uargs->varying_alloc_num); + if (!job->varying_list) { + MALI_PRINT_ERROR(("Mali GP job: allocate varying_list failed varying_alloc_num = %d !\n", uargs->varying_alloc_num)); + goto fail1; + } + + memory_list = (u32 __user *)(uintptr_t)uargs->varying_alloc_list; + + if (0 != _mali_osk_copy_from_user(job->varying_list, memory_list, sizeof(u32)*uargs->varying_alloc_num)) { + MALI_PRINT_ERROR(("Mali GP job: Failed to copy varying list from user space!\n")); + goto fail; + } + + if (unlikely(_mali_gp_add_varying_allocations(session, job, job->varying_list, + uargs->varying_alloc_num))) { + MALI_PRINT_ERROR(("Mali GP job: _mali_gp_add_varying_allocations failed!\n")); + goto fail; + } + + /* do preparetion for each allocation */ + list_for_each_entry_safe(alloc_node, tmp_node, &job->varying_alloc, node) { + if (unlikely(_MALI_OSK_ERR_OK != mali_mem_defer_bind_allocation_prepare(alloc_node->alloc, &job->vary_todo))) { + MALI_PRINT_ERROR(("Mali GP job: mali_mem_defer_bind_allocation_prepare failed!\n")); + goto fail; + } + } + + _mali_gp_del_varying_allocations(job); + + /* bind varying here, to avoid memory latency issue. 
*/ + { + struct mali_defer_mem_block dmem_block; + + INIT_LIST_HEAD(&dmem_block.free_pages); + atomic_set(&dmem_block.num_free_pages, 0); + + if (mali_mem_prepare_mem_for_job(job, &dmem_block)) { + MALI_PRINT_ERROR(("Mali GP job: mali_mem_prepare_mem_for_job failed!\n")); + goto fail; + } + if (_MALI_OSK_ERR_OK != mali_mem_defer_bind(job->uargs.varying_memsize / _MALI_OSK_MALI_PAGE_SIZE, job, &dmem_block)) { + MALI_PRINT_ERROR(("gp job create, mali_mem_defer_bind failed! GP %x fail!", job)); + goto fail; + } + } + + if (uargs->varying_memsize > MALI_UK_BIG_VARYING_SIZE) { + job->big_job = 1; + } + } + job->pp_tracker = pp_tracker; + if (NULL != job->pp_tracker) { + /* Take a reference on PP job's tracker that will be released when the GP + job is done. */ + mali_timeline_system_tracker_get(session->timeline_system, pp_tracker); + } + + mali_timeline_tracker_init(&job->tracker, MALI_TIMELINE_TRACKER_GP, NULL, job); + mali_timeline_fence_copy_uk_fence(&(job->tracker.fence), &(job->uargs.fence)); + + return job; + } else { + MALI_PRINT_ERROR(("Mali GP job: _mali_osk_calloc failed!\n")); + return NULL; + } + + +fail: + _mali_osk_free(job->varying_list); + /* Handle allocate fail here, free all varying node */ + { + struct mali_backend_bind_list *bkn, *bkn_tmp; + list_for_each_entry_safe(bkn, bkn_tmp , &job->vary_todo, node) { + list_del(&bkn->node); + _mali_osk_free(bkn); + } + } +fail1: + _mali_osk_notification_delete(job->oom_notification); +fail2: + _mali_osk_notification_delete(job->finished_notification); +fail3: + _mali_osk_free(job); + return NULL; +} + +void mali_gp_job_delete(struct mali_gp_job *job) +{ + struct mali_backend_bind_list *bkn, *bkn_tmp; + MALI_DEBUG_ASSERT_POINTER(job); + MALI_DEBUG_ASSERT(NULL == job->pp_tracker); + MALI_DEBUG_ASSERT(_mali_osk_list_empty(&job->list)); + _mali_osk_free(job->varying_list); + + /* Handle allocate fail here, free all varying node */ + list_for_each_entry_safe(bkn, bkn_tmp , &job->vary_todo, node) { + 
list_del(&bkn->node); + _mali_osk_free(bkn); + } + + if (!list_empty(&job->vary_todo)) { + MALI_DEBUG_ASSERT(0); + } + + mali_mem_defer_dmem_free(job); + + /* de-allocate the pre-allocated oom notifications */ + if (NULL != job->oom_notification) { + _mali_osk_notification_delete(job->oom_notification); + job->oom_notification = NULL; + } + if (NULL != job->finished_notification) { + _mali_osk_notification_delete(job->finished_notification); + job->finished_notification = NULL; + } + + _mali_osk_free(job); +} + +void mali_gp_job_list_add(struct mali_gp_job *job, _mali_osk_list_t *list) +{ + struct mali_gp_job *iter; + struct mali_gp_job *tmp; + + MALI_DEBUG_ASSERT_POINTER(job); + MALI_DEBUG_ASSERT_SCHEDULER_LOCK_HELD(); + + /* Find position in list/queue where job should be added. */ + _MALI_OSK_LIST_FOREACHENTRY_REVERSE(iter, tmp, list, + struct mali_gp_job, list) { + + /* A span is used to handle job ID wrapping. */ + bool job_is_after = (mali_gp_job_get_id(job) - + mali_gp_job_get_id(iter)) < + MALI_SCHEDULER_JOB_ID_SPAN; + + if (job_is_after) { + break; + } + } + + _mali_osk_list_add(&job->list, &iter->list); +} + +u32 mali_gp_job_get_gp_counter_src0(void) +{ + return gp_counter_src0; +} + +void mali_gp_job_set_gp_counter_src0(u32 counter) +{ + gp_counter_src0 = counter; +} + +u32 mali_gp_job_get_gp_counter_src1(void) +{ + return gp_counter_src1; +} + +void mali_gp_job_set_gp_counter_src1(u32 counter) +{ + gp_counter_src1 = counter; +} + +mali_scheduler_mask mali_gp_job_signal_pp_tracker(struct mali_gp_job *job, mali_bool success) +{ + mali_scheduler_mask schedule_mask = MALI_SCHEDULER_MASK_EMPTY; + + MALI_DEBUG_ASSERT_POINTER(job); + + if (NULL != job->pp_tracker) { + schedule_mask |= mali_timeline_system_tracker_put(job->session->timeline_system, job->pp_tracker, MALI_FALSE == success); + job->pp_tracker = NULL; + } + + return schedule_mask; +} diff --git a/drivers/gpu/arm/utgard/common/mali_gp_job.h b/drivers/gpu/arm/utgard/common/mali_gp_job.h new file mode 
100644 index 000000000000..f249439c7155 --- /dev/null +++ b/drivers/gpu/arm/utgard/common/mali_gp_job.h @@ -0,0 +1,325 @@ +/* + * Copyright (C) 2011-2015 ARM Limited. All rights reserved. + * + * This program is free software and is provided to you under the terms of the GNU General Public License version 2 + * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence. + * + * A copy of the licence is included with the program, and can also be obtained from Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. + */ + +#ifndef __MALI_GP_JOB_H__ +#define __MALI_GP_JOB_H__ + +#include "mali_osk.h" +#include "mali_osk_list.h" +#include "mali_uk_types.h" +#include "mali_session.h" +#include "mali_timeline.h" +#include "mali_scheduler_types.h" +#include "mali_scheduler.h" +#include "mali_executor.h" +#include "mali_timeline.h" + +struct mali_defer_mem; +/** + * This structure represents a GP job + * + * The GP job object itself is not protected by any single lock, + * but relies on other locks instead (scheduler, executor and timeline lock). + * Think of the job object as moving between these sub systems through-out + * its lifetime. Different part of the GP job struct is used by different + * subsystems. Accessor functions ensure that correct lock is taken. + * Do NOT access any data members directly from outside this module! + */ +struct mali_gp_job { + /* + * These members are typically only set at creation, + * and only read later on. + * They do not require any lock protection. 
+ */ + _mali_uk_gp_start_job_s uargs; /**< Arguments from user space */ + struct mali_session_data *session; /**< Session which submitted this job */ + u32 pid; /**< Process ID of submitting process */ + u32 tid; /**< Thread ID of submitting thread */ + u32 id; /**< Identifier for this job in kernel space (sequential numbering) */ + u32 cache_order; /**< Cache order used for L2 cache flushing (sequential numbering) */ + struct mali_timeline_tracker tracker; /**< Timeline tracker for this job */ + struct mali_timeline_tracker *pp_tracker; /**< Pointer to Timeline tracker for PP job that depends on this job. */ + _mali_osk_notification_t *finished_notification; /**< Notification sent back to userspace on job complete */ + + /* + * These members are used by the scheduler, + * protected by scheduler lock + */ + _mali_osk_list_t list; /**< Used to link jobs together in the scheduler queue */ + + /* + * These members are used by the executor and/or group, + * protected by executor lock + */ + _mali_osk_notification_t *oom_notification; /**< Notification sent back to userspace on OOM */ + + /* + * Set by executor/group on job completion, read by scheduler when + * returning job to user. 
Hold executor lock when setting, + * no lock needed when reading + */ + u32 heap_base_addr; /** < Holds the base mali addr of mem handle which is used for new heap*/ + u32 heap_current_addr; /**< Holds the current HEAP address when the job has completed */ + u32 heap_grow_size; /** < Holds the HEAP grow size when HEAP oom */ + u32 perf_counter_value0; /**< Value of performance counter 0 (to be returned to user space) */ + u32 perf_counter_value1; /**< Value of performance counter 1 (to be returned to user space) */ + struct mali_defer_mem *dmem; /** < used for defer bind to store dmem info */ + struct list_head varying_alloc; /**< hold the list of varying allocations */ + u32 bind_flag; /** < flag for deferbind*/ + u32 *varying_list; /**< varying memory list need to to defer bind*/ + struct list_head vary_todo; /**< list of backend list need to do defer bind*/ + u32 big_job; /** < if the gp job have large varying output and may take long time*/ +}; + +#define MALI_DEFER_BIND_MEMORY_PREPARED (0x1 << 0) +#define MALI_DEFER_BIND_MEMORY_BINDED (0x1 << 2) + +struct mali_gp_allocation_node { + struct list_head node; + mali_mem_allocation *alloc; +}; + +struct mali_gp_job *mali_gp_job_create(struct mali_session_data *session, _mali_uk_gp_start_job_s *uargs, u32 id, struct mali_timeline_tracker *pp_tracker); +void mali_gp_job_delete(struct mali_gp_job *job); + +u32 mali_gp_job_get_gp_counter_src0(void); +void mali_gp_job_set_gp_counter_src0(u32 counter); +u32 mali_gp_job_get_gp_counter_src1(void); +void mali_gp_job_set_gp_counter_src1(u32 counter); + +MALI_STATIC_INLINE u32 mali_gp_job_get_id(struct mali_gp_job *job) +{ + MALI_DEBUG_ASSERT_POINTER(job); + return (NULL == job) ? 
0 : job->id; +} + +MALI_STATIC_INLINE void mali_gp_job_set_cache_order(struct mali_gp_job *job, + u32 cache_order) +{ + MALI_DEBUG_ASSERT_POINTER(job); + MALI_DEBUG_ASSERT_SCHEDULER_LOCK_HELD(); + job->cache_order = cache_order; +} + +MALI_STATIC_INLINE u32 mali_gp_job_get_cache_order(struct mali_gp_job *job) +{ + MALI_DEBUG_ASSERT_POINTER(job); + return (NULL == job) ? 0 : job->cache_order; +} + +MALI_STATIC_INLINE u64 mali_gp_job_get_user_id(struct mali_gp_job *job) +{ + MALI_DEBUG_ASSERT_POINTER(job); + return job->uargs.user_job_ptr; +} + +MALI_STATIC_INLINE u32 mali_gp_job_get_frame_builder_id(struct mali_gp_job *job) +{ + MALI_DEBUG_ASSERT_POINTER(job); + return job->uargs.frame_builder_id; +} + +MALI_STATIC_INLINE u32 mali_gp_job_get_flush_id(struct mali_gp_job *job) +{ + MALI_DEBUG_ASSERT_POINTER(job); + return job->uargs.flush_id; +} + +MALI_STATIC_INLINE u32 mali_gp_job_get_pid(struct mali_gp_job *job) +{ + MALI_DEBUG_ASSERT_POINTER(job); + return job->pid; +} + +MALI_STATIC_INLINE u32 mali_gp_job_get_tid(struct mali_gp_job *job) +{ + MALI_DEBUG_ASSERT_POINTER(job); + return job->tid; +} + +MALI_STATIC_INLINE u32 *mali_gp_job_get_frame_registers(struct mali_gp_job *job) +{ + MALI_DEBUG_ASSERT_POINTER(job); + return job->uargs.frame_registers; +} + +MALI_STATIC_INLINE struct mali_session_data *mali_gp_job_get_session(struct mali_gp_job *job) +{ + MALI_DEBUG_ASSERT_POINTER(job); + return job->session; +} + +MALI_STATIC_INLINE mali_bool mali_gp_job_has_vs_job(struct mali_gp_job *job) +{ + MALI_DEBUG_ASSERT_POINTER(job); + return (job->uargs.frame_registers[0] != job->uargs.frame_registers[1]) ? MALI_TRUE : MALI_FALSE; +} + +MALI_STATIC_INLINE mali_bool mali_gp_job_has_plbu_job(struct mali_gp_job *job) +{ + MALI_DEBUG_ASSERT_POINTER(job); + return (job->uargs.frame_registers[2] != job->uargs.frame_registers[3]) ? 
MALI_TRUE : MALI_FALSE; +} + +MALI_STATIC_INLINE u32 mali_gp_job_get_current_heap_addr(struct mali_gp_job *job) +{ + MALI_DEBUG_ASSERT_POINTER(job); + return job->heap_current_addr; +} + +MALI_STATIC_INLINE void mali_gp_job_set_current_heap_addr(struct mali_gp_job *job, u32 heap_addr) +{ + MALI_DEBUG_ASSERT_POINTER(job); + MALI_DEBUG_ASSERT_EXECUTOR_LOCK_HELD(); + job->heap_current_addr = heap_addr; +} + +MALI_STATIC_INLINE u32 mali_gp_job_get_perf_counter_flag(struct mali_gp_job *job) +{ + MALI_DEBUG_ASSERT_POINTER(job); + return job->uargs.perf_counter_flag; +} + +MALI_STATIC_INLINE u32 mali_gp_job_get_perf_counter_src0(struct mali_gp_job *job) +{ + MALI_DEBUG_ASSERT_POINTER(job); + return job->uargs.perf_counter_src0; +} + +MALI_STATIC_INLINE u32 mali_gp_job_get_perf_counter_src1(struct mali_gp_job *job) +{ + MALI_DEBUG_ASSERT_POINTER(job); + return job->uargs.perf_counter_src1; +} + +MALI_STATIC_INLINE u32 mali_gp_job_get_perf_counter_value0(struct mali_gp_job *job) +{ + MALI_DEBUG_ASSERT_POINTER(job); + return job->perf_counter_value0; +} + +MALI_STATIC_INLINE u32 mali_gp_job_get_perf_counter_value1(struct mali_gp_job *job) +{ + MALI_DEBUG_ASSERT_POINTER(job); + return job->perf_counter_value1; +} + +MALI_STATIC_INLINE void mali_gp_job_set_perf_counter_src0(struct mali_gp_job *job, u32 src) +{ + MALI_DEBUG_ASSERT_POINTER(job); + job->uargs.perf_counter_src0 = src; +} + +MALI_STATIC_INLINE void mali_gp_job_set_perf_counter_src1(struct mali_gp_job *job, u32 src) +{ + MALI_DEBUG_ASSERT_POINTER(job); + job->uargs.perf_counter_src1 = src; +} + +MALI_STATIC_INLINE void mali_gp_job_set_perf_counter_value0(struct mali_gp_job *job, u32 value) +{ + MALI_DEBUG_ASSERT_POINTER(job); + MALI_DEBUG_ASSERT_EXECUTOR_LOCK_HELD(); + job->perf_counter_value0 = value; +} + +MALI_STATIC_INLINE void mali_gp_job_set_perf_counter_value1(struct mali_gp_job *job, u32 value) +{ + MALI_DEBUG_ASSERT_POINTER(job); + MALI_DEBUG_ASSERT_EXECUTOR_LOCK_HELD(); + job->perf_counter_value1 = value; 
+} + +void mali_gp_job_list_add(struct mali_gp_job *job, _mali_osk_list_t *list); + +MALI_STATIC_INLINE void mali_gp_job_list_move(struct mali_gp_job *job, + _mali_osk_list_t *list) +{ + MALI_DEBUG_ASSERT_POINTER(job); + MALI_DEBUG_ASSERT_SCHEDULER_LOCK_HELD(); + MALI_DEBUG_ASSERT(!_mali_osk_list_empty(&job->list)); + _mali_osk_list_move(&job->list, list); +} + +MALI_STATIC_INLINE void mali_gp_job_list_remove(struct mali_gp_job *job) +{ + MALI_DEBUG_ASSERT_POINTER(job); + MALI_DEBUG_ASSERT_SCHEDULER_LOCK_HELD(); + _mali_osk_list_delinit(&job->list); +} + +MALI_STATIC_INLINE _mali_osk_notification_t * +mali_gp_job_get_finished_notification(struct mali_gp_job *job) +{ + _mali_osk_notification_t *notification; + + MALI_DEBUG_ASSERT_POINTER(job); + MALI_DEBUG_ASSERT_POINTER(job->finished_notification); + + notification = job->finished_notification; + job->finished_notification = NULL; + + return notification; +} + +MALI_STATIC_INLINE _mali_osk_notification_t *mali_gp_job_get_oom_notification( + struct mali_gp_job *job) +{ + _mali_osk_notification_t *notification; + + MALI_DEBUG_ASSERT_POINTER(job); + MALI_DEBUG_ASSERT_EXECUTOR_LOCK_HELD(); + MALI_DEBUG_ASSERT_POINTER(job->oom_notification); + + notification = job->oom_notification; + job->oom_notification = NULL; + + return notification; +} + +MALI_STATIC_INLINE void mali_gp_job_set_oom_notification( + struct mali_gp_job *job, + _mali_osk_notification_t *notification) +{ + MALI_DEBUG_ASSERT_POINTER(job); + MALI_DEBUG_ASSERT_EXECUTOR_LOCK_HELD(); + MALI_DEBUG_ASSERT(NULL == job->oom_notification); + job->oom_notification = notification; +} + +MALI_STATIC_INLINE struct mali_timeline_tracker *mali_gp_job_get_tracker( + struct mali_gp_job *job) +{ + MALI_DEBUG_ASSERT_POINTER(job); + return &(job->tracker); +} + + +MALI_STATIC_INLINE u32 *mali_gp_job_get_timeline_point_ptr( + struct mali_gp_job *job) +{ + MALI_DEBUG_ASSERT_POINTER(job); + return (u32 __user *)(uintptr_t)job->uargs.timeline_point_ptr; +} + + +/** + * Release 
reference on tracker for PP job that depends on this GP job. + * + * @note If GP job has a reference on tracker, this function MUST be called before the GP job is + * deleted. + * + * @param job GP job that is done. + * @param success MALI_TRUE if job completed successfully, MALI_FALSE if not. + * @return A scheduling bitmask indicating whether scheduling needs to be done. + */ +mali_scheduler_mask mali_gp_job_signal_pp_tracker(struct mali_gp_job *job, mali_bool success); + +#endif /* __MALI_GP_JOB_H__ */ diff --git a/drivers/gpu/arm/utgard/common/mali_group.c b/drivers/gpu/arm/utgard/common/mali_group.c new file mode 100644 index 000000000000..b4cd3a1fcd8c --- /dev/null +++ b/drivers/gpu/arm/utgard/common/mali_group.c @@ -0,0 +1,1816 @@ +/* + * Copyright (C) 2011-2015 ARM Limited. All rights reserved. + * + * This program is free software and is provided to you under the terms of the GNU General Public License version 2 + * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence. + * + * A copy of the licence is included with the program, and can also be obtained from Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. 
+ */ +#include "mali_kernel_common.h" +#include "mali_group.h" +#include "mali_osk.h" +#include "mali_l2_cache.h" +#include "mali_gp.h" +#include "mali_pp.h" +#include "mali_mmu.h" +#include "mali_dlbu.h" +#include "mali_broadcast.h" +#include "mali_scheduler.h" +#include "mali_osk_profiling.h" +#include "mali_pm_domain.h" +#include "mali_pm.h" +#include "mali_executor.h" + +#if defined(CONFIG_GPU_TRACEPOINTS) && defined(CONFIG_TRACEPOINTS) +#include <linux/sched.h> +#include <trace/events/gpu.h> +#endif + +#define MALI_MAX_NUM_DOMAIN_REFS (MALI_MAX_NUMBER_OF_GROUPS * 2) + +#if defined(CONFIG_MALI400_PROFILING) +static void mali_group_report_l2_cache_counters_per_core(struct mali_group *group, u32 core_num); +#endif /* #if defined(CONFIG_MALI400_PROFILING) */ + +static struct mali_group *mali_global_groups[MALI_MAX_NUMBER_OF_GROUPS] = { NULL, }; +static u32 mali_global_num_groups = 0; + +/* SW timer for job execution */ +int mali_max_job_runtime = MALI_MAX_JOB_RUNTIME_DEFAULT; + +/* local helper functions */ +static void mali_group_bottom_half_mmu(void *data); +static void mali_group_bottom_half_gp(void *data); +static void mali_group_bottom_half_pp(void *data); +static void mali_group_timeout(void *data); +static void mali_group_out_of_memory(void *data); + +static void mali_group_reset_pp(struct mali_group *group); +static void mali_group_reset_mmu(struct mali_group *group); + +static void mali_group_activate_page_directory(struct mali_group *group, struct mali_session_data *session); +static void mali_group_recovery_reset(struct mali_group *group); + +struct mali_group *mali_group_create(struct mali_l2_cache_core *core, + struct mali_dlbu_core *dlbu, + struct mali_bcast_unit *bcast, + u32 domain_index) +{ + struct mali_group *group = NULL; + + if (mali_global_num_groups >= MALI_MAX_NUMBER_OF_GROUPS) { + MALI_PRINT_ERROR(("Mali group: Too many group objects created\n")); + return NULL; + } + + group = _mali_osk_calloc(1, sizeof(struct mali_group)); + if (NULL != 
group) { + group->timeout_timer = _mali_osk_timer_init(); + if (NULL != group->timeout_timer) { + _mali_osk_timer_setcallback(group->timeout_timer, mali_group_timeout, (void *)group); + + group->l2_cache_core[0] = core; + _mali_osk_list_init(&group->group_list); + _mali_osk_list_init(&group->executor_list); + _mali_osk_list_init(&group->pm_domain_list); + group->bcast_core = bcast; + group->dlbu_core = dlbu; + + /* register this object as a part of the correct power domain */ + if ((NULL != core) || (NULL != dlbu) || (NULL != bcast)) + group->pm_domain = mali_pm_register_group(domain_index, group); + + mali_global_groups[mali_global_num_groups] = group; + mali_global_num_groups++; + + return group; + } + _mali_osk_free(group); + } + + return NULL; +} + +void mali_group_delete(struct mali_group *group) +{ + u32 i; + + MALI_DEBUG_PRINT(4, ("Deleting group %s\n", + mali_group_core_description(group))); + + MALI_DEBUG_ASSERT(NULL == group->parent_group); + MALI_DEBUG_ASSERT((MALI_GROUP_STATE_INACTIVE == group->state) || ((MALI_GROUP_STATE_ACTIVATION_PENDING == group->state))); + + /* Delete the resources that this group owns */ + if (NULL != group->gp_core) { + mali_gp_delete(group->gp_core); + } + + if (NULL != group->pp_core) { + mali_pp_delete(group->pp_core); + } + + if (NULL != group->mmu) { + mali_mmu_delete(group->mmu); + } + + if (mali_group_is_virtual(group)) { + /* Remove all groups from virtual group */ + struct mali_group *child; + struct mali_group *temp; + + _MALI_OSK_LIST_FOREACHENTRY(child, temp, &group->group_list, struct mali_group, group_list) { + child->parent_group = NULL; + mali_group_delete(child); + } + + mali_dlbu_delete(group->dlbu_core); + + if (NULL != group->bcast_core) { + mali_bcast_unit_delete(group->bcast_core); + } + } + + for (i = 0; i < mali_global_num_groups; i++) { + if (mali_global_groups[i] == group) { + mali_global_groups[i] = NULL; + mali_global_num_groups--; + + if (i != mali_global_num_groups) { + /* We removed a group from 
the middle of the array -- move the last + * group to the current position to close the gap */ + mali_global_groups[i] = mali_global_groups[mali_global_num_groups]; + mali_global_groups[mali_global_num_groups] = NULL; + } + + break; + } + } + + if (NULL != group->timeout_timer) { + _mali_osk_timer_del(group->timeout_timer); + _mali_osk_timer_term(group->timeout_timer); + } + + if (NULL != group->bottom_half_work_mmu) { + _mali_osk_wq_delete_work(group->bottom_half_work_mmu); + } + + if (NULL != group->bottom_half_work_gp) { + _mali_osk_wq_delete_work(group->bottom_half_work_gp); + } + + if (NULL != group->bottom_half_work_pp) { + _mali_osk_wq_delete_work(group->bottom_half_work_pp); + } + + _mali_osk_free(group); +} + +_mali_osk_errcode_t mali_group_add_mmu_core(struct mali_group *group, struct mali_mmu_core *mmu_core) +{ + /* This group object now owns the MMU core object */ + group->mmu = mmu_core; + group->bottom_half_work_mmu = _mali_osk_wq_create_work(mali_group_bottom_half_mmu, group); + if (NULL == group->bottom_half_work_mmu) { + return _MALI_OSK_ERR_FAULT; + } + return _MALI_OSK_ERR_OK; +} + +void mali_group_remove_mmu_core(struct mali_group *group) +{ + /* This group object no longer owns the MMU core object */ + group->mmu = NULL; + if (NULL != group->bottom_half_work_mmu) { + _mali_osk_wq_delete_work(group->bottom_half_work_mmu); + } +} + +_mali_osk_errcode_t mali_group_add_gp_core(struct mali_group *group, struct mali_gp_core *gp_core) +{ + /* This group object now owns the GP core object */ + group->gp_core = gp_core; + group->bottom_half_work_gp = _mali_osk_wq_create_work(mali_group_bottom_half_gp, group); + if (NULL == group->bottom_half_work_gp) { + return _MALI_OSK_ERR_FAULT; + } + + group->oom_work_handler = _mali_osk_wq_create_work(mali_group_out_of_memory, group); + if (NULL == group->oom_work_handler) { + /* Unwind the GP bottom-half work and clear the pointer so a later + * mali_group_remove_gp_core() does not delete it a second time, + * and report the failure instead of silently returning success. */ + _mali_osk_wq_delete_work(group->bottom_half_work_gp); + group->bottom_half_work_gp = NULL; + return _MALI_OSK_ERR_FAULT; + } + return _MALI_OSK_ERR_OK; +} + +void mali_group_remove_gp_core(struct mali_group
*group) +{ + /* This group object no longer owns the GP core object */ + group->gp_core = NULL; + if (NULL != group->bottom_half_work_gp) { + _mali_osk_wq_delete_work(group->bottom_half_work_gp); + } + + if (NULL != group->oom_work_handler) { + _mali_osk_wq_delete_work(group->oom_work_handler); + } +} + +_mali_osk_errcode_t mali_group_add_pp_core(struct mali_group *group, struct mali_pp_core *pp_core) +{ + /* This group object now owns the PP core object */ + group->pp_core = pp_core; + group->bottom_half_work_pp = _mali_osk_wq_create_work(mali_group_bottom_half_pp, group); + if (NULL == group->bottom_half_work_pp) { + return _MALI_OSK_ERR_FAULT; + } + return _MALI_OSK_ERR_OK; +} + +void mali_group_remove_pp_core(struct mali_group *group) +{ + /* This group object no longer owns the PP core object */ + group->pp_core = NULL; + if (NULL != group->bottom_half_work_pp) { + _mali_osk_wq_delete_work(group->bottom_half_work_pp); + } +} + +enum mali_group_state mali_group_activate(struct mali_group *group) +{ + MALI_DEBUG_ASSERT_POINTER(group); + MALI_DEBUG_ASSERT_EXECUTOR_LOCK_HELD(); + + MALI_DEBUG_PRINT(4, ("Group: Activating group %s\n", + mali_group_core_description(group))); + + if (MALI_GROUP_STATE_INACTIVE == group->state) { + /* Group is inactive, get PM refs in order to power up */ + + /* + * We'll take a maximum of 2 power domain references pr group, + * one for the group itself, and one for it's L2 cache. + */ + struct mali_pm_domain *domains[MALI_MAX_NUM_DOMAIN_REFS]; + struct mali_group *groups[MALI_MAX_NUM_DOMAIN_REFS]; + u32 num_domains = 0; + mali_bool all_groups_on; + + /* Deal with child groups first */ + if (mali_group_is_virtual(group)) { + /* + * The virtual group might have 0, 1 or 2 L2s in + * its l2_cache_core array, but we ignore these and + * let the child groups take the needed L2 cache ref + * on behalf of the virtual group. + * In other words; The L2 refs are taken in pair with + * the physical group which the L2 is attached to. 
+ */ + struct mali_group *child; + struct mali_group *temp; + + /* + * Child group is inactive, get PM + * refs in order to power up. + */ + _MALI_OSK_LIST_FOREACHENTRY(child, temp, + &group->group_list, + struct mali_group, group_list) { + MALI_DEBUG_ASSERT(MALI_GROUP_STATE_INACTIVE + == child->state); + + child->state = MALI_GROUP_STATE_ACTIVATION_PENDING; + + MALI_DEBUG_ASSERT_POINTER( + child->pm_domain); + domains[num_domains] = child->pm_domain; + groups[num_domains] = child; + num_domains++; + + /* + * Take L2 domain ref for child group. + */ + MALI_DEBUG_ASSERT(MALI_MAX_NUM_DOMAIN_REFS + > num_domains); + domains[num_domains] = mali_l2_cache_get_pm_domain( + child->l2_cache_core[0]); + groups[num_domains] = NULL; + MALI_DEBUG_ASSERT(NULL == + child->l2_cache_core[1]); + num_domains++; + } + } else { + /* Take L2 domain ref for physical groups. */ + MALI_DEBUG_ASSERT(MALI_MAX_NUM_DOMAIN_REFS > + num_domains); + + domains[num_domains] = mali_l2_cache_get_pm_domain( + group->l2_cache_core[0]); + groups[num_domains] = NULL; + MALI_DEBUG_ASSERT(NULL == group->l2_cache_core[1]); + num_domains++; + } + + /* Do the group itself last (it's dependencies first) */ + + group->state = MALI_GROUP_STATE_ACTIVATION_PENDING; + + MALI_DEBUG_ASSERT_POINTER(group->pm_domain); + domains[num_domains] = group->pm_domain; + groups[num_domains] = group; + num_domains++; + + all_groups_on = mali_pm_get_domain_refs(domains, groups, + num_domains); + + /* + * Complete activation for group, include + * virtual group or physical group. + */ + if (MALI_TRUE == all_groups_on) { + + mali_group_set_active(group); + } + } else if (MALI_GROUP_STATE_ACTIVE == group->state) { + /* Already active */ + MALI_DEBUG_ASSERT(MALI_TRUE == group->power_is_on); + } else { + /* + * Activation already pending, group->power_is_on could + * be both true or false. We need to wait for power up + * notification anyway. 
+ */ + MALI_DEBUG_ASSERT(MALI_GROUP_STATE_ACTIVATION_PENDING + == group->state); + } + + MALI_DEBUG_PRINT(4, ("Group: group %s activation result: %s\n", + mali_group_core_description(group), + MALI_GROUP_STATE_ACTIVE == group->state ? + "ACTIVE" : "PENDING")); + + return group->state; +} + +mali_bool mali_group_set_active(struct mali_group *group) +{ + MALI_DEBUG_ASSERT_POINTER(group); + MALI_DEBUG_ASSERT_EXECUTOR_LOCK_HELD(); + MALI_DEBUG_ASSERT(MALI_GROUP_STATE_ACTIVATION_PENDING == group->state); + MALI_DEBUG_ASSERT(MALI_TRUE == group->power_is_on); + + MALI_DEBUG_PRINT(4, ("Group: Activation completed for %s\n", + mali_group_core_description(group))); + + if (mali_group_is_virtual(group)) { + struct mali_group *child; + struct mali_group *temp; + + _MALI_OSK_LIST_FOREACHENTRY(child, temp, &group->group_list, + struct mali_group, group_list) { + if (MALI_TRUE != child->power_is_on) { + return MALI_FALSE; + } + + child->state = MALI_GROUP_STATE_ACTIVE; + } + + mali_group_reset(group); + } + + /* Go to ACTIVE state */ + group->state = MALI_GROUP_STATE_ACTIVE; + + return MALI_TRUE; +} + +mali_bool mali_group_deactivate(struct mali_group *group) +{ + struct mali_pm_domain *domains[MALI_MAX_NUM_DOMAIN_REFS]; + u32 num_domains = 0; + mali_bool power_down = MALI_FALSE; + + MALI_DEBUG_ASSERT_POINTER(group); + MALI_DEBUG_ASSERT_EXECUTOR_LOCK_HELD(); + MALI_DEBUG_ASSERT(MALI_GROUP_STATE_INACTIVE != group->state); + + MALI_DEBUG_PRINT(3, ("Group: Deactivating group %s\n", + mali_group_core_description(group))); + + group->state = MALI_GROUP_STATE_INACTIVE; + + MALI_DEBUG_ASSERT_POINTER(group->pm_domain); + domains[num_domains] = group->pm_domain; + num_domains++; + + if (mali_group_is_virtual(group)) { + /* Release refs for all child groups */ + struct mali_group *child; + struct mali_group *temp; + + _MALI_OSK_LIST_FOREACHENTRY(child, temp, + &group->group_list, + struct mali_group, group_list) { + child->state = MALI_GROUP_STATE_INACTIVE; + + 
MALI_DEBUG_ASSERT_POINTER(child->pm_domain); + domains[num_domains] = child->pm_domain; + num_domains++; + + /* Release L2 cache domain for child groups */ + MALI_DEBUG_ASSERT(MALI_MAX_NUM_DOMAIN_REFS > + num_domains); + domains[num_domains] = mali_l2_cache_get_pm_domain( + child->l2_cache_core[0]); + MALI_DEBUG_ASSERT(NULL == child->l2_cache_core[1]); + num_domains++; + } + + /* + * Must do mali_group_power_down() steps right here for + * virtual group, because virtual group itself is likely to + * stay powered on, however child groups are now very likely + * to be powered off (and thus lose their state). + */ + + mali_group_clear_session(group); + /* + * Disable the broadcast unit (clear it's mask). + * This is needed in case the GPU isn't actually + * powered down at this point and groups are + * removed from an inactive virtual group. + * If not, then the broadcast unit will intercept + * their interrupts! + */ + mali_bcast_disable(group->bcast_core); + } else { + /* Release L2 cache domain for physical groups */ + MALI_DEBUG_ASSERT(MALI_MAX_NUM_DOMAIN_REFS > + num_domains); + domains[num_domains] = mali_l2_cache_get_pm_domain( + group->l2_cache_core[0]); + MALI_DEBUG_ASSERT(NULL == group->l2_cache_core[1]); + num_domains++; + } + + power_down = mali_pm_put_domain_refs(domains, num_domains); + + return power_down; +} + +void mali_group_power_up(struct mali_group *group) +{ + MALI_DEBUG_ASSERT_POINTER(group); + MALI_DEBUG_ASSERT_EXECUTOR_LOCK_HELD(); + + MALI_DEBUG_PRINT(3, ("Group: Power up for %s\n", + mali_group_core_description(group))); + + group->power_is_on = MALI_TRUE; + + if (MALI_FALSE == mali_group_is_virtual(group) + && MALI_FALSE == mali_group_is_in_virtual(group)) { + mali_group_reset(group); + } + + /* + * When we just acquire only one physical group form virt group, + * we should remove the bcast&dlbu mask from virt group and + * reset bcast and dlbu core, although part of pp cores in virt + * group maybe not be powered on. 
+ */ + if (MALI_TRUE == mali_group_is_virtual(group)) { + mali_bcast_reset(group->bcast_core); + mali_dlbu_update_mask(group->dlbu_core); + } +} + +void mali_group_power_down(struct mali_group *group) +{ + MALI_DEBUG_ASSERT_POINTER(group); + MALI_DEBUG_ASSERT(MALI_TRUE == group->power_is_on); + MALI_DEBUG_ASSERT_EXECUTOR_LOCK_HELD(); + + MALI_DEBUG_PRINT(3, ("Group: Power down for %s\n", + mali_group_core_description(group))); + + group->power_is_on = MALI_FALSE; + + if (mali_group_is_virtual(group)) { + /* + * What we do for physical jobs in this function should + * already have been done in mali_group_deactivate() + * for virtual group. + */ + MALI_DEBUG_ASSERT(NULL == group->session); + } else { + mali_group_clear_session(group); + } +} + +MALI_DEBUG_CODE(static void mali_group_print_virtual(struct mali_group *vgroup) +{ + u32 i; + struct mali_group *group; + struct mali_group *temp; + + MALI_DEBUG_PRINT(4, ("Virtual group %s (%p)\n", + mali_group_core_description(vgroup), + vgroup)); + MALI_DEBUG_PRINT(4, ("l2_cache_core[0] = %p, ref = %d\n", vgroup->l2_cache_core[0], vgroup->l2_cache_core_ref_count[0])); + MALI_DEBUG_PRINT(4, ("l2_cache_core[1] = %p, ref = %d\n", vgroup->l2_cache_core[1], vgroup->l2_cache_core_ref_count[1])); + + i = 0; + _MALI_OSK_LIST_FOREACHENTRY(group, temp, &vgroup->group_list, struct mali_group, group_list) { + MALI_DEBUG_PRINT(4, ("[%d] %s (%p), l2_cache_core[0] = %p\n", + i, mali_group_core_description(group), + group, group->l2_cache_core[0])); + i++; + } +}) + +static void mali_group_dump_core_status(struct mali_group *group) +{ + u32 i; + + MALI_DEBUG_ASSERT_POINTER(group); + MALI_DEBUG_ASSERT(NULL != group->gp_core || (NULL != group->pp_core && !mali_group_is_virtual(group))); + + if (NULL != group->gp_core) { + MALI_PRINT(("Dump Group %s\n", group->gp_core->hw_core.description)); + + for (i = 0; i < 0xA8; i += 0x10) { + MALI_PRINT(("0x%04x: 0x%08x 0x%08x 0x%08x 0x%08x\n", i, mali_hw_core_register_read(&group->gp_core->hw_core, i), 
+ mali_hw_core_register_read(&group->gp_core->hw_core, i + 4), + mali_hw_core_register_read(&group->gp_core->hw_core, i + 8), + mali_hw_core_register_read(&group->gp_core->hw_core, i + 12))); + } + + + } else { + MALI_PRINT(("Dump Group %s\n", group->pp_core->hw_core.description)); + + for (i = 0; i < 0x5c; i += 0x10) { + MALI_PRINT(("0x%04x: 0x%08x 0x%08x 0x%08x 0x%08x\n", i, mali_hw_core_register_read(&group->pp_core->hw_core, i), + mali_hw_core_register_read(&group->pp_core->hw_core, i + 4), + mali_hw_core_register_read(&group->pp_core->hw_core, i + 8), + mali_hw_core_register_read(&group->pp_core->hw_core, i + 12))); + } + + /* Ignore some minor registers */ + for (i = 0x1000; i < 0x1068; i += 0x10) { + MALI_PRINT(("0x%04x: 0x%08x 0x%08x 0x%08x 0x%08x\n", i, mali_hw_core_register_read(&group->pp_core->hw_core, i), + mali_hw_core_register_read(&group->pp_core->hw_core, i + 4), + mali_hw_core_register_read(&group->pp_core->hw_core, i + 8), + mali_hw_core_register_read(&group->pp_core->hw_core, i + 12))); + } + } + + MALI_PRINT(("Dump Group MMU\n")); + for (i = 0; i < 0x24; i += 0x10) { + MALI_PRINT(("0x%04x: 0x%08x 0x%08x 0x%08x 0x%08x\n", i, mali_hw_core_register_read(&group->mmu->hw_core, i), + mali_hw_core_register_read(&group->mmu->hw_core, i + 4), + mali_hw_core_register_read(&group->mmu->hw_core, i + 8), + mali_hw_core_register_read(&group->mmu->hw_core, i + 12))); + } +} + + +/** + * @Dump group status + */ +void mali_group_dump_status(struct mali_group *group) +{ + MALI_DEBUG_ASSERT_POINTER(group); + + if (mali_group_is_virtual(group)) { + struct mali_group *group_c; + struct mali_group *temp; + _MALI_OSK_LIST_FOREACHENTRY(group_c, temp, &group->group_list, struct mali_group, group_list) { + mali_group_dump_core_status(group_c); + } + } else { + mali_group_dump_core_status(group); + } +} + +/** + * @brief Add child group to virtual group parent + */ +void mali_group_add_group(struct mali_group *parent, struct mali_group *child) +{ + mali_bool found; + u32 
i; + + MALI_DEBUG_PRINT(3, ("Adding group %s to virtual group %s\n", + mali_group_core_description(child), + mali_group_core_description(parent))); + + MALI_DEBUG_ASSERT_EXECUTOR_LOCK_HELD(); + MALI_DEBUG_ASSERT(mali_group_is_virtual(parent)); + MALI_DEBUG_ASSERT(!mali_group_is_virtual(child)); + MALI_DEBUG_ASSERT(NULL == child->parent_group); + + _mali_osk_list_addtail(&child->group_list, &parent->group_list); + + child->parent_group = parent; + + MALI_DEBUG_ASSERT_POINTER(child->l2_cache_core[0]); + + MALI_DEBUG_PRINT(4, ("parent->l2_cache_core: [0] = %p, [1] = %p\n", parent->l2_cache_core[0], parent->l2_cache_core[1])); + MALI_DEBUG_PRINT(4, ("child->l2_cache_core: [0] = %p, [1] = %p\n", child->l2_cache_core[0], child->l2_cache_core[1])); + + /* Keep track of the L2 cache cores of child groups */ + found = MALI_FALSE; + for (i = 0; i < 2; i++) { + if (parent->l2_cache_core[i] == child->l2_cache_core[0]) { + MALI_DEBUG_ASSERT(parent->l2_cache_core_ref_count[i] > 0); + parent->l2_cache_core_ref_count[i]++; + found = MALI_TRUE; + } + } + + if (!found) { + /* First time we see this L2 cache, add it to our list */ + i = (NULL == parent->l2_cache_core[0]) ? 0 : 1; + + MALI_DEBUG_PRINT(4, ("First time we see l2_cache %p. Adding to [%d] = %p\n", child->l2_cache_core[0], i, parent->l2_cache_core[i])); + + MALI_DEBUG_ASSERT(NULL == parent->l2_cache_core[i]); + + parent->l2_cache_core[i] = child->l2_cache_core[0]; + parent->l2_cache_core_ref_count[i]++; + } + + /* Update Broadcast Unit and DLBU */ + mali_bcast_add_group(parent->bcast_core, child); + mali_dlbu_add_group(parent->dlbu_core, child); + + if (MALI_TRUE == parent->power_is_on) { + mali_bcast_reset(parent->bcast_core); + mali_dlbu_update_mask(parent->dlbu_core); + } + + if (MALI_TRUE == child->power_is_on) { + if (NULL == parent->session) { + if (NULL != child->session) { + /* + * Parent has no session, so clear + * child session as well. 
+ */ + mali_mmu_activate_empty_page_directory(child->mmu); + } + } else { + if (parent->session == child->session) { + /* We already have same session as parent, + * so a simple zap should be enough. + */ + mali_mmu_zap_tlb(child->mmu); + } else { + /* + * Parent has a different session, so we must + * switch to that sessions page table + */ + mali_mmu_activate_page_directory(child->mmu, mali_session_get_page_directory(parent->session)); + } + + /* It is the parent which keeps the session from now on */ + child->session = NULL; + } + } else { + /* should have been cleared when child was powered down */ + MALI_DEBUG_ASSERT(NULL == child->session); + } + + /* Start job on child when parent is active */ + if (NULL != parent->pp_running_job) { + struct mali_pp_job *job = parent->pp_running_job; + + MALI_DEBUG_PRINT(3, ("Group %x joining running job %d on virtual group %x\n", + child, mali_pp_job_get_id(job), parent)); + + /* Only allowed to add active child to an active parent */ + MALI_DEBUG_ASSERT(MALI_GROUP_STATE_ACTIVE == parent->state); + MALI_DEBUG_ASSERT(MALI_GROUP_STATE_ACTIVE == child->state); + + mali_pp_job_start(child->pp_core, job, mali_pp_core_get_id(child->pp_core), MALI_TRUE); + + _mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_SINGLE | + MALI_PROFILING_MAKE_EVENT_CHANNEL_PP(mali_pp_core_get_id(child->pp_core)) | + MALI_PROFILING_EVENT_REASON_SINGLE_HW_FLUSH, + mali_pp_job_get_frame_builder_id(job), mali_pp_job_get_flush_id(job), 0, 0, 0); + + _mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_START | + MALI_PROFILING_MAKE_EVENT_CHANNEL_PP(mali_pp_core_get_id(child->pp_core)) | + MALI_PROFILING_EVENT_REASON_START_STOP_HW_VIRTUAL, + mali_pp_job_get_pid(job), mali_pp_job_get_tid(job), 0, 0, 0); +#if defined(CONFIG_GPU_TRACEPOINTS) && defined(CONFIG_TRACEPOINTS) + trace_gpu_sched_switch( + mali_pp_core_description(group->pp_core), + sched_clock(), mali_pp_job_get_tid(job), + 0, mali_pp_job_get_id(job)); +#endif + +#if 
defined(CONFIG_MALI400_PROFILING) + trace_mali_core_active(mali_pp_job_get_pid(job), 1 /* active */, 0 /* PP */, mali_pp_core_get_id(child->pp_core), + mali_pp_job_get_frame_builder_id(job), mali_pp_job_get_flush_id(job)); +#endif + } + + MALI_DEBUG_CODE(mali_group_print_virtual(parent);) +} + +/** + * @brief Remove child group from virtual group parent + */ +void mali_group_remove_group(struct mali_group *parent, struct mali_group *child) +{ + u32 i; + + MALI_DEBUG_PRINT(3, ("Removing group %s from virtual group %s\n", + mali_group_core_description(child), + mali_group_core_description(parent))); + + MALI_DEBUG_ASSERT_EXECUTOR_LOCK_HELD(); + MALI_DEBUG_ASSERT(mali_group_is_virtual(parent)); + MALI_DEBUG_ASSERT(!mali_group_is_virtual(child)); + MALI_DEBUG_ASSERT(parent == child->parent_group); + + /* Update Broadcast Unit and DLBU */ + mali_bcast_remove_group(parent->bcast_core, child); + mali_dlbu_remove_group(parent->dlbu_core, child); + + if (MALI_TRUE == parent->power_is_on) { + mali_bcast_reset(parent->bcast_core); + mali_dlbu_update_mask(parent->dlbu_core); + } + + child->session = parent->session; + child->parent_group = NULL; + + _mali_osk_list_delinit(&child->group_list); + if (_mali_osk_list_empty(&parent->group_list)) { + parent->session = NULL; + } + + /* Keep track of the L2 cache cores of child groups */ + i = (child->l2_cache_core[0] == parent->l2_cache_core[0]) ? 
0 : 1; + + MALI_DEBUG_ASSERT(child->l2_cache_core[0] == parent->l2_cache_core[i]); + + parent->l2_cache_core_ref_count[i]--; + if (parent->l2_cache_core_ref_count[i] == 0) { + parent->l2_cache_core[i] = NULL; + } + + MALI_DEBUG_CODE(mali_group_print_virtual(parent)); +} + +struct mali_group *mali_group_acquire_group(struct mali_group *parent) +{ + struct mali_group *child = NULL; + + MALI_DEBUG_ASSERT_EXECUTOR_LOCK_HELD(); + MALI_DEBUG_ASSERT(mali_group_is_virtual(parent)); + + if (!_mali_osk_list_empty(&parent->group_list)) { + child = _MALI_OSK_LIST_ENTRY(parent->group_list.prev, struct mali_group, group_list); + mali_group_remove_group(parent, child); + } + + if (NULL != child) { + if (MALI_GROUP_STATE_ACTIVE != parent->state + && MALI_TRUE == child->power_is_on) { + mali_group_reset(child); + } + } + + return child; +} + +void mali_group_reset(struct mali_group *group) +{ + MALI_DEBUG_ASSERT_EXECUTOR_LOCK_HELD(); + MALI_DEBUG_ASSERT(NULL == group->gp_running_job); + MALI_DEBUG_ASSERT(NULL == group->pp_running_job); + MALI_DEBUG_ASSERT(NULL == group->session); + + MALI_DEBUG_PRINT(3, ("Group: reset of %s\n", + mali_group_core_description(group))); + + if (NULL != group->dlbu_core) { + mali_dlbu_reset(group->dlbu_core); + } + + if (NULL != group->bcast_core) { + mali_bcast_reset(group->bcast_core); + } + + MALI_DEBUG_ASSERT(NULL != group->mmu); + mali_group_reset_mmu(group); + + if (NULL != group->gp_core) { + MALI_DEBUG_ASSERT(NULL == group->pp_core); + mali_gp_reset(group->gp_core); + } else { + MALI_DEBUG_ASSERT(NULL != group->pp_core); + mali_group_reset_pp(group); + } +} + +void mali_group_start_gp_job(struct mali_group *group, struct mali_gp_job *job) +{ + struct mali_session_data *session; + + MALI_DEBUG_ASSERT_EXECUTOR_LOCK_HELD(); + + MALI_DEBUG_PRINT(3, ("Group: Starting GP job 0x%08X on group %s\n", + job, + mali_group_core_description(group))); + + session = mali_gp_job_get_session(job); + + MALI_DEBUG_ASSERT_POINTER(group->l2_cache_core[0]); + 
mali_l2_cache_invalidate_conditional(group->l2_cache_core[0], mali_gp_job_get_cache_order(job)); + + mali_group_activate_page_directory(group, session); + + mali_gp_job_start(group->gp_core, job); + + _mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_SINGLE | + MALI_PROFILING_MAKE_EVENT_CHANNEL_GP(0) | + MALI_PROFILING_EVENT_REASON_SINGLE_HW_FLUSH, + mali_gp_job_get_frame_builder_id(job), mali_gp_job_get_flush_id(job), 0, 0, 0); + _mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_START | + MALI_PROFILING_MAKE_EVENT_CHANNEL_GP(0), + mali_gp_job_get_pid(job), mali_gp_job_get_tid(job), 0, 0, 0); + +#if defined(CONFIG_MALI400_PROFILING) + trace_mali_core_active(mali_gp_job_get_pid(job), 1 /* active */, 1 /* GP */, 0 /* core */, + mali_gp_job_get_frame_builder_id(job), mali_gp_job_get_flush_id(job)); +#endif + +#if defined(CONFIG_MALI400_PROFILING) + if ((MALI_HW_CORE_NO_COUNTER != mali_l2_cache_core_get_counter_src0(group->l2_cache_core[0])) && + (MALI_HW_CORE_NO_COUNTER != mali_l2_cache_core_get_counter_src1(group->l2_cache_core[0]))) { + mali_group_report_l2_cache_counters_per_core(group, 0); + } +#endif /* #if defined(CONFIG_MALI400_PROFILING) */ + +#if defined(CONFIG_GPU_TRACEPOINTS) && defined(CONFIG_TRACEPOINTS) + trace_gpu_sched_switch(mali_gp_core_description(group->gp_core), + sched_clock(), mali_gp_job_get_tid(job), + 0, mali_gp_job_get_id(job)); +#endif + + group->gp_running_job = job; + group->is_working = MALI_TRUE; + + /* Setup SW timer and record start time */ + group->start_time = _mali_osk_time_tickcount(); + _mali_osk_timer_mod(group->timeout_timer, _mali_osk_time_mstoticks(mali_max_job_runtime)); + + MALI_DEBUG_PRINT(4, ("Group: Started GP job 0x%08X on group %s at %u\n", + job, + mali_group_core_description(group), + group->start_time)); +} + +/* Used to set all the registers except frame renderer list address and fragment shader stack address + * It means the caller must set these two registers properly before calling this function + */ 
+void mali_group_start_pp_job(struct mali_group *group, struct mali_pp_job *job, u32 sub_job) +{ + struct mali_session_data *session; + + MALI_DEBUG_ASSERT_EXECUTOR_LOCK_HELD(); + + MALI_DEBUG_PRINT(3, ("Group: Starting PP job 0x%08X part %u/%u on group %s\n", + job, sub_job + 1, + mali_pp_job_get_sub_job_count(job), + mali_group_core_description(group))); + + session = mali_pp_job_get_session(job); + + if (NULL != group->l2_cache_core[0]) { + mali_l2_cache_invalidate_conditional(group->l2_cache_core[0], mali_pp_job_get_cache_order(job)); + } + + if (NULL != group->l2_cache_core[1]) { + mali_l2_cache_invalidate_conditional(group->l2_cache_core[1], mali_pp_job_get_cache_order(job)); + } + + mali_group_activate_page_directory(group, session); + + if (mali_group_is_virtual(group)) { + struct mali_group *child; + struct mali_group *temp; + u32 core_num = 0; + + MALI_DEBUG_ASSERT(mali_pp_job_is_virtual(job)); + + /* Configure DLBU for the job */ + mali_dlbu_config_job(group->dlbu_core, job); + + /* Write stack address for each child group */ + _MALI_OSK_LIST_FOREACHENTRY(child, temp, &group->group_list, struct mali_group, group_list) { + mali_pp_write_addr_stack(child->pp_core, job); + core_num++; + } + + mali_pp_job_start(group->pp_core, job, sub_job, MALI_FALSE); + } else { + mali_pp_job_start(group->pp_core, job, sub_job, MALI_FALSE); + } + + /* if the group is virtual, loop through physical groups which belong to this group + * and call profiling events for its cores as virtual */ + if (MALI_TRUE == mali_group_is_virtual(group)) { + struct mali_group *child; + struct mali_group *temp; + + _MALI_OSK_LIST_FOREACHENTRY(child, temp, &group->group_list, struct mali_group, group_list) { + _mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_SINGLE | + MALI_PROFILING_MAKE_EVENT_CHANNEL_PP(mali_pp_core_get_id(child->pp_core)) | + MALI_PROFILING_EVENT_REASON_SINGLE_HW_FLUSH, + mali_pp_job_get_frame_builder_id(job), mali_pp_job_get_flush_id(job), 0, 0, 0); + + 
_mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_START | + MALI_PROFILING_MAKE_EVENT_CHANNEL_PP(mali_pp_core_get_id(child->pp_core)) | + MALI_PROFILING_EVENT_REASON_START_STOP_HW_VIRTUAL, + mali_pp_job_get_pid(job), mali_pp_job_get_tid(job), 0, 0, 0); + +#if defined(CONFIG_MALI400_PROFILING) + trace_mali_core_active(mali_pp_job_get_pid(job), 1 /* active */, 0 /* PP */, mali_pp_core_get_id(child->pp_core), + mali_pp_job_get_frame_builder_id(job), mali_pp_job_get_flush_id(job)); +#endif + } + +#if defined(CONFIG_MALI400_PROFILING) + if (0 != group->l2_cache_core_ref_count[0]) { + if ((MALI_HW_CORE_NO_COUNTER != mali_l2_cache_core_get_counter_src0(group->l2_cache_core[0])) && + (MALI_HW_CORE_NO_COUNTER != mali_l2_cache_core_get_counter_src1(group->l2_cache_core[0]))) { + mali_group_report_l2_cache_counters_per_core(group, mali_l2_cache_get_id(group->l2_cache_core[0])); + } + } + if (0 != group->l2_cache_core_ref_count[1]) { + if ((MALI_HW_CORE_NO_COUNTER != mali_l2_cache_core_get_counter_src0(group->l2_cache_core[1])) && + (MALI_HW_CORE_NO_COUNTER != mali_l2_cache_core_get_counter_src1(group->l2_cache_core[1]))) { + mali_group_report_l2_cache_counters_per_core(group, mali_l2_cache_get_id(group->l2_cache_core[1])); + } + } +#endif /* #if defined(CONFIG_MALI400_PROFILING) */ + + } else { /* group is physical - call profiling events for physical cores */ + _mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_SINGLE | + MALI_PROFILING_MAKE_EVENT_CHANNEL_PP(mali_pp_core_get_id(group->pp_core)) | + MALI_PROFILING_EVENT_REASON_SINGLE_HW_FLUSH, + mali_pp_job_get_frame_builder_id(job), mali_pp_job_get_flush_id(job), 0, 0, 0); + + _mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_START | + MALI_PROFILING_MAKE_EVENT_CHANNEL_PP(mali_pp_core_get_id(group->pp_core)) | + MALI_PROFILING_EVENT_REASON_START_STOP_HW_PHYSICAL, + mali_pp_job_get_pid(job), mali_pp_job_get_tid(job), 0, 0, 0); + +#if defined(CONFIG_MALI400_PROFILING) + 
trace_mali_core_active(mali_pp_job_get_pid(job), 1 /* active */, 0 /* PP */, mali_pp_core_get_id(group->pp_core), + mali_pp_job_get_frame_builder_id(job), mali_pp_job_get_flush_id(job)); +#endif + +#if defined(CONFIG_MALI400_PROFILING) + if ((MALI_HW_CORE_NO_COUNTER != mali_l2_cache_core_get_counter_src0(group->l2_cache_core[0])) && + (MALI_HW_CORE_NO_COUNTER != mali_l2_cache_core_get_counter_src1(group->l2_cache_core[0]))) { + mali_group_report_l2_cache_counters_per_core(group, mali_l2_cache_get_id(group->l2_cache_core[0])); + } +#endif /* #if defined(CONFIG_MALI400_PROFILING) */ + } + +#if defined(CONFIG_GPU_TRACEPOINTS) && defined(CONFIG_TRACEPOINTS) + trace_gpu_sched_switch(mali_pp_core_description(group->pp_core), + sched_clock(), mali_pp_job_get_tid(job), + 0, mali_pp_job_get_id(job)); +#endif + + group->pp_running_job = job; + group->pp_running_sub_job = sub_job; + group->is_working = MALI_TRUE; + + /* Setup SW timer and record start time */ + group->start_time = _mali_osk_time_tickcount(); + _mali_osk_timer_mod(group->timeout_timer, _mali_osk_time_mstoticks(mali_max_job_runtime)); + + MALI_DEBUG_PRINT(4, ("Group: Started PP job 0x%08X part %u/%u on group %s at %u\n", + job, sub_job + 1, + mali_pp_job_get_sub_job_count(job), + mali_group_core_description(group), + group->start_time)); + +} + +void mali_group_resume_gp_with_new_heap(struct mali_group *group, u32 job_id, u32 start_addr, u32 end_addr) +{ + MALI_DEBUG_ASSERT_EXECUTOR_LOCK_HELD(); + + MALI_DEBUG_ASSERT_POINTER(group->l2_cache_core[0]); + mali_l2_cache_invalidate(group->l2_cache_core[0]); + + mali_mmu_zap_tlb_without_stall(group->mmu); + + mali_gp_resume_with_new_heap(group->gp_core, start_addr, end_addr); + + _mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_RESUME | + MALI_PROFILING_MAKE_EVENT_CHANNEL_GP(0), + 0, 0, 0, 0, 0); + +#if defined(CONFIG_MALI400_PROFILING) + trace_mali_core_active(mali_gp_job_get_pid(group->gp_running_job), 1 /* active */, 1 /* GP */, 0 /* core */, + 
mali_gp_job_get_frame_builder_id(group->gp_running_job), mali_gp_job_get_flush_id(group->gp_running_job)); +#endif +} + +static void mali_group_reset_mmu(struct mali_group *group) +{ + struct mali_group *child; + struct mali_group *temp; + _mali_osk_errcode_t err; + + MALI_DEBUG_ASSERT_EXECUTOR_LOCK_HELD(); + + if (!mali_group_is_virtual(group)) { + /* This is a physical group or an idle virtual group -- simply wait for + * the reset to complete. */ + err = mali_mmu_reset(group->mmu); + MALI_DEBUG_ASSERT(_MALI_OSK_ERR_OK == err); + } else { /* virtual group */ + /* Loop through all members of this virtual group and wait + * until they are done resetting. + */ + _MALI_OSK_LIST_FOREACHENTRY(child, temp, &group->group_list, struct mali_group, group_list) { + err = mali_mmu_reset(child->mmu); + MALI_DEBUG_ASSERT(_MALI_OSK_ERR_OK == err); + } + } +} + +static void mali_group_reset_pp(struct mali_group *group) +{ + struct mali_group *child; + struct mali_group *temp; + + MALI_DEBUG_ASSERT_EXECUTOR_LOCK_HELD(); + + mali_pp_reset_async(group->pp_core); + + if (!mali_group_is_virtual(group) || NULL == group->pp_running_job) { + /* This is a physical group or an idle virtual group -- simply wait for + * the reset to complete. */ + mali_pp_reset_wait(group->pp_core); + } else { + /* Loop through all members of this virtual group and wait until they + * are done resetting. + */ + _MALI_OSK_LIST_FOREACHENTRY(child, temp, &group->group_list, struct mali_group, group_list) { + mali_pp_reset_wait(child->pp_core); + } + } +} + +struct mali_pp_job *mali_group_complete_pp(struct mali_group *group, mali_bool success, u32 *sub_job) +{ + struct mali_pp_job *pp_job_to_return; + + MALI_DEBUG_ASSERT_EXECUTOR_LOCK_HELD(); + MALI_DEBUG_ASSERT_POINTER(group); + MALI_DEBUG_ASSERT_POINTER(group->pp_core); + MALI_DEBUG_ASSERT_POINTER(group->pp_running_job); + MALI_DEBUG_ASSERT_POINTER(sub_job); + MALI_DEBUG_ASSERT(MALI_TRUE == group->is_working); + + /* Stop/clear the timeout timer. 
*/ + _mali_osk_timer_del_async(group->timeout_timer); + + if (NULL != group->pp_running_job) { + + /* Deal with HW counters and profiling */ + + if (MALI_TRUE == mali_group_is_virtual(group)) { + struct mali_group *child; + struct mali_group *temp; + + /* update performance counters from each physical pp core within this virtual group */ + _MALI_OSK_LIST_FOREACHENTRY(child, temp, &group->group_list, struct mali_group, group_list) { + mali_pp_update_performance_counters(group->pp_core, child->pp_core, group->pp_running_job, mali_pp_core_get_id(child->pp_core)); + } + +#if defined(CONFIG_MALI400_PROFILING) + /* send profiling data per physical core */ + _MALI_OSK_LIST_FOREACHENTRY(child, temp, &group->group_list, struct mali_group, group_list) { + _mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_STOP | + MALI_PROFILING_MAKE_EVENT_CHANNEL_PP(mali_pp_core_get_id(child->pp_core)) | + MALI_PROFILING_EVENT_REASON_START_STOP_HW_VIRTUAL, + mali_pp_job_get_perf_counter_value0(group->pp_running_job, mali_pp_core_get_id(child->pp_core)), + mali_pp_job_get_perf_counter_value1(group->pp_running_job, mali_pp_core_get_id(child->pp_core)), + mali_pp_job_get_perf_counter_src0(group->pp_running_job, group->pp_running_sub_job) | (mali_pp_job_get_perf_counter_src1(group->pp_running_job, group->pp_running_sub_job) << 8), + 0, 0); + + trace_mali_core_active(mali_pp_job_get_pid(group->pp_running_job), + 0 /* active */, 0 /* PP */, mali_pp_core_get_id(child->pp_core), + mali_pp_job_get_frame_builder_id(group->pp_running_job), + mali_pp_job_get_flush_id(group->pp_running_job)); + } + if (0 != group->l2_cache_core_ref_count[0]) { + if ((MALI_HW_CORE_NO_COUNTER != mali_l2_cache_core_get_counter_src0(group->l2_cache_core[0])) && + (MALI_HW_CORE_NO_COUNTER != mali_l2_cache_core_get_counter_src1(group->l2_cache_core[0]))) { + mali_group_report_l2_cache_counters_per_core(group, mali_l2_cache_get_id(group->l2_cache_core[0])); + } + } + if (0 != group->l2_cache_core_ref_count[1]) { + if 
((MALI_HW_CORE_NO_COUNTER != mali_l2_cache_core_get_counter_src0(group->l2_cache_core[1])) && + (MALI_HW_CORE_NO_COUNTER != mali_l2_cache_core_get_counter_src1(group->l2_cache_core[1]))) { + mali_group_report_l2_cache_counters_per_core(group, mali_l2_cache_get_id(group->l2_cache_core[1])); + } + } + +#endif + } else { + /* update performance counters for a physical group's pp core */ + mali_pp_update_performance_counters(group->pp_core, group->pp_core, group->pp_running_job, group->pp_running_sub_job); + +#if defined(CONFIG_MALI400_PROFILING) + _mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_STOP | + MALI_PROFILING_MAKE_EVENT_CHANNEL_PP(mali_pp_core_get_id(group->pp_core)) | + MALI_PROFILING_EVENT_REASON_START_STOP_HW_PHYSICAL, + mali_pp_job_get_perf_counter_value0(group->pp_running_job, group->pp_running_sub_job), + mali_pp_job_get_perf_counter_value1(group->pp_running_job, group->pp_running_sub_job), + mali_pp_job_get_perf_counter_src0(group->pp_running_job, group->pp_running_sub_job) | (mali_pp_job_get_perf_counter_src1(group->pp_running_job, group->pp_running_sub_job) << 8), + 0, 0); + + trace_mali_core_active(mali_pp_job_get_pid(group->pp_running_job), + 0 /* active */, 0 /* PP */, mali_pp_core_get_id(group->pp_core), + mali_pp_job_get_frame_builder_id(group->pp_running_job), + mali_pp_job_get_flush_id(group->pp_running_job)); + + if ((MALI_HW_CORE_NO_COUNTER != mali_l2_cache_core_get_counter_src0(group->l2_cache_core[0])) && + (MALI_HW_CORE_NO_COUNTER != mali_l2_cache_core_get_counter_src1(group->l2_cache_core[0]))) { + mali_group_report_l2_cache_counters_per_core(group, mali_l2_cache_get_id(group->l2_cache_core[0])); + } +#endif + } + +#if defined(CONFIG_GPU_TRACEPOINTS) && defined(CONFIG_TRACEPOINTS) + trace_gpu_sched_switch( + mali_gp_core_description(group->gp_core), + sched_clock(), 0, 0, 0); +#endif + + } + + if (success) { + /* Only do soft reset for successful jobs, a full recovery + * reset will be done for failed jobs. 
*/ + mali_pp_reset_async(group->pp_core); + } + + pp_job_to_return = group->pp_running_job; + group->pp_running_job = NULL; + group->is_working = MALI_FALSE; + *sub_job = group->pp_running_sub_job; + + if (!success) { + MALI_DEBUG_PRINT(2, ("Mali group: Executing recovery reset due to job failure\n")); + mali_group_recovery_reset(group); + } else if (_MALI_OSK_ERR_OK != mali_pp_reset_wait(group->pp_core)) { + MALI_PRINT_ERROR(("Mali group: Executing recovery reset due to reset failure\n")); + mali_group_recovery_reset(group); + } + + return pp_job_to_return; +} + +struct mali_gp_job *mali_group_complete_gp(struct mali_group *group, mali_bool success) +{ + struct mali_gp_job *gp_job_to_return; + + MALI_DEBUG_ASSERT_EXECUTOR_LOCK_HELD(); + MALI_DEBUG_ASSERT_POINTER(group); + MALI_DEBUG_ASSERT_POINTER(group->gp_core); + MALI_DEBUG_ASSERT_POINTER(group->gp_running_job); + MALI_DEBUG_ASSERT(MALI_TRUE == group->is_working); + + /* Stop/clear the timeout timer. */ + _mali_osk_timer_del_async(group->timeout_timer); + + if (NULL != group->gp_running_job) { + mali_gp_update_performance_counters(group->gp_core, group->gp_running_job); + +#if defined(CONFIG_MALI400_PROFILING) + _mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_STOP | MALI_PROFILING_MAKE_EVENT_CHANNEL_GP(0), + mali_gp_job_get_perf_counter_value0(group->gp_running_job), + mali_gp_job_get_perf_counter_value1(group->gp_running_job), + mali_gp_job_get_perf_counter_src0(group->gp_running_job) | (mali_gp_job_get_perf_counter_src1(group->gp_running_job) << 8), + 0, 0); + + if ((MALI_HW_CORE_NO_COUNTER != mali_l2_cache_core_get_counter_src0(group->l2_cache_core[0])) && + (MALI_HW_CORE_NO_COUNTER != mali_l2_cache_core_get_counter_src1(group->l2_cache_core[0]))) + mali_group_report_l2_cache_counters_per_core(group, 0); +#endif + +#if defined(CONFIG_GPU_TRACEPOINTS) && defined(CONFIG_TRACEPOINTS) + trace_gpu_sched_switch( + mali_pp_core_description(group->pp_core), + sched_clock(), 0, 0, 0); +#endif + +#if 
defined(CONFIG_MALI400_PROFILING) + trace_mali_core_active(mali_gp_job_get_pid(group->gp_running_job), 0 /* active */, 1 /* GP */, 0 /* core */, + mali_gp_job_get_frame_builder_id(group->gp_running_job), mali_gp_job_get_flush_id(group->gp_running_job)); +#endif + + mali_gp_job_set_current_heap_addr(group->gp_running_job, + mali_gp_read_plbu_alloc_start_addr(group->gp_core)); + } + + if (success) { + /* Only do soft reset for successful jobs, a full recovery + * reset will be done for failed jobs. */ + mali_gp_reset_async(group->gp_core); + } + + gp_job_to_return = group->gp_running_job; + group->gp_running_job = NULL; + group->is_working = MALI_FALSE; + + if (!success) { + MALI_DEBUG_PRINT(2, ("Mali group: Executing recovery reset due to job failure\n")); + mali_group_recovery_reset(group); + } else if (_MALI_OSK_ERR_OK != mali_gp_reset_wait(group->gp_core)) { + MALI_PRINT_ERROR(("Mali group: Executing recovery reset due to reset failure\n")); + mali_group_recovery_reset(group); + } + + return gp_job_to_return; +} + +struct mali_group *mali_group_get_glob_group(u32 index) +{ + if (mali_global_num_groups > index) { + return mali_global_groups[index]; + } + + return NULL; +} + +u32 mali_group_get_glob_num_groups(void) +{ + return mali_global_num_groups; +} + +static void mali_group_activate_page_directory(struct mali_group *group, struct mali_session_data *session) +{ + MALI_DEBUG_PRINT(5, ("Mali group: Activating page directory 0x%08X from session 0x%08X on group %s\n", + mali_session_get_page_directory(session), session, + mali_group_core_description(group))); + + MALI_DEBUG_ASSERT_EXECUTOR_LOCK_HELD(); + + if (group->session != session) { + /* Different session than last time, so we need to do some work */ + MALI_DEBUG_PRINT(5, ("Mali group: Activate session: %08x previous: %08x on group %s\n", + session, group->session, + mali_group_core_description(group))); + mali_mmu_activate_page_directory(group->mmu, mali_session_get_page_directory(session)); + 
group->session = session; + } else { + /* Same session as last time, so no work required */ + MALI_DEBUG_PRINT(4, ("Mali group: Activate existing session 0x%08X on group %s\n", + session->page_directory, + mali_group_core_description(group))); + mali_mmu_zap_tlb_without_stall(group->mmu); + } +} + +static void mali_group_recovery_reset(struct mali_group *group) +{ + _mali_osk_errcode_t err; + + MALI_DEBUG_ASSERT_EXECUTOR_LOCK_HELD(); + + /* Stop cores, bus stop */ + if (NULL != group->pp_core) { + mali_pp_stop_bus(group->pp_core); + } else { + mali_gp_stop_bus(group->gp_core); + } + + /* Flush MMU and clear page fault (if any) */ + mali_mmu_activate_fault_flush_page_directory(group->mmu); + mali_mmu_page_fault_done(group->mmu); + + /* Wait for cores to stop bus, then do a hard reset on them */ + if (NULL != group->pp_core) { + if (mali_group_is_virtual(group)) { + struct mali_group *child, *temp; + + /* Disable the broadcast unit while we do reset directly on the member cores. */ + mali_bcast_disable(group->bcast_core); + + _MALI_OSK_LIST_FOREACHENTRY(child, temp, &group->group_list, struct mali_group, group_list) { + mali_pp_stop_bus_wait(child->pp_core); + mali_pp_hard_reset(child->pp_core); + } + + mali_bcast_enable(group->bcast_core); + } else { + mali_pp_stop_bus_wait(group->pp_core); + mali_pp_hard_reset(group->pp_core); + } + } else { + mali_gp_stop_bus_wait(group->gp_core); + mali_gp_hard_reset(group->gp_core); + } + + /* Reset MMU */ + err = mali_mmu_reset(group->mmu); + MALI_DEBUG_ASSERT(_MALI_OSK_ERR_OK == err); + MALI_IGNORE(err); + + group->session = NULL; +} + +#if MALI_STATE_TRACKING +u32 mali_group_dump_state(struct mali_group *group, char *buf, u32 size) +{ + int n = 0; + int i; + struct mali_group *child; + struct mali_group *temp; + + if (mali_group_is_virtual(group)) { + n += _mali_osk_snprintf(buf + n, size - n, + "Virtual PP Group: %p\n", group); + } else if (mali_group_is_in_virtual(group)) { + n += _mali_osk_snprintf(buf + n, size - n, + 
"Child PP Group: %p\n", group); + } else if (NULL != group->pp_core) { + n += _mali_osk_snprintf(buf + n, size - n, + "Physical PP Group: %p\n", group); + } else { + MALI_DEBUG_ASSERT_POINTER(group->gp_core); + n += _mali_osk_snprintf(buf + n, size - n, + "GP Group: %p\n", group); + } + + switch (group->state) { + case MALI_GROUP_STATE_INACTIVE: + n += _mali_osk_snprintf(buf + n, size - n, + "\tstate: INACTIVE\n"); + break; + case MALI_GROUP_STATE_ACTIVATION_PENDING: + n += _mali_osk_snprintf(buf + n, size - n, + "\tstate: ACTIVATION_PENDING\n"); + break; + case MALI_GROUP_STATE_ACTIVE: + n += _mali_osk_snprintf(buf + n, size - n, + "\tstate: MALI_GROUP_STATE_ACTIVE\n"); + break; + default: + n += _mali_osk_snprintf(buf + n, size - n, + "\tstate: UNKNOWN (%d)\n", group->state); + MALI_DEBUG_ASSERT(0); + break; + } + + n += _mali_osk_snprintf(buf + n, size - n, + "\tSW power: %s\n", + group->power_is_on ? "On" : "Off"); + + n += mali_pm_dump_state_domain(group->pm_domain, buf + n, size - n); + + for (i = 0; i < 2; i++) { + if (NULL != group->l2_cache_core[i]) { + struct mali_pm_domain *domain; + domain = mali_l2_cache_get_pm_domain( + group->l2_cache_core[i]); + n += mali_pm_dump_state_domain(domain, + buf + n, size - n); + } + } + + if (group->gp_core) { + n += mali_gp_dump_state(group->gp_core, buf + n, size - n); + n += _mali_osk_snprintf(buf + n, size - n, + "\tGP running job: %p\n", group->gp_running_job); + } + + if (group->pp_core) { + n += mali_pp_dump_state(group->pp_core, buf + n, size - n); + n += _mali_osk_snprintf(buf + n, size - n, + "\tPP running job: %p, subjob %d \n", + group->pp_running_job, + group->pp_running_sub_job); + } + + _MALI_OSK_LIST_FOREACHENTRY(child, temp, &group->group_list, + struct mali_group, group_list) { + n += mali_group_dump_state(child, buf + n, size - n); + } + + return n; +} +#endif + +_mali_osk_errcode_t mali_group_upper_half_mmu(void *data) +{ + struct mali_group *group = (struct mali_group *)data; + _mali_osk_errcode_t 
ret; + + MALI_DEBUG_ASSERT_POINTER(group); + MALI_DEBUG_ASSERT_POINTER(group->mmu); + +#if defined(CONFIG_MALI400_PROFILING) && defined (CONFIG_TRACEPOINTS) +#if defined(CONFIG_MALI_SHARED_INTERRUPTS) + mali_executor_lock(); + if (!mali_group_is_working(group)) { + /* Not working, so nothing to do */ + mali_executor_unlock(); + return _MALI_OSK_ERR_FAULT; + } +#endif + if (NULL != group->gp_core) { + _mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_START | + MALI_PROFILING_EVENT_CHANNEL_SOFTWARE | + MALI_PROFILING_EVENT_REASON_START_STOP_SW_UPPER_HALF, + 0, 0, /* No pid and tid for interrupt handler */ + MALI_PROFILING_MAKE_EVENT_DATA_CORE_GP_MMU(0), + mali_mmu_get_rawstat(group->mmu), 0); + } else { + MALI_DEBUG_ASSERT_POINTER(group->pp_core); + _mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_START | + MALI_PROFILING_EVENT_CHANNEL_SOFTWARE | + MALI_PROFILING_EVENT_REASON_START_STOP_SW_UPPER_HALF, + 0, 0, /* No pid and tid for interrupt handler */ + MALI_PROFILING_MAKE_EVENT_DATA_CORE_PP_MMU( + mali_pp_core_get_id(group->pp_core)), + mali_mmu_get_rawstat(group->mmu), 0); + } +#if defined(CONFIG_MALI_SHARED_INTERRUPTS) + mali_executor_unlock(); +#endif +#endif + + ret = mali_executor_interrupt_mmu(group, MALI_TRUE); + +#if defined(CONFIG_MALI400_PROFILING) && defined (CONFIG_TRACEPOINTS) +#if defined(CONFIG_MALI_SHARED_INTERRUPTS) + mali_executor_lock(); + if (!mali_group_is_working(group)) { + /* Not working, so nothing to do */ + mali_executor_unlock(); + return _MALI_OSK_ERR_FAULT; + } +#endif + + if (NULL != group->gp_core) { + _mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_STOP | + MALI_PROFILING_EVENT_CHANNEL_SOFTWARE | + MALI_PROFILING_EVENT_REASON_START_STOP_SW_UPPER_HALF, + 0, 0, /* No pid and tid for interrupt handler */ + MALI_PROFILING_MAKE_EVENT_DATA_CORE_GP_MMU(0), + mali_mmu_get_rawstat(group->mmu), 0); + } else { + _mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_STOP | + MALI_PROFILING_EVENT_CHANNEL_SOFTWARE | + 
MALI_PROFILING_EVENT_REASON_START_STOP_SW_UPPER_HALF, + 0, 0, /* No pid and tid for interrupt handler */ + MALI_PROFILING_MAKE_EVENT_DATA_CORE_PP_MMU( + mali_pp_core_get_id(group->pp_core)), + mali_mmu_get_rawstat(group->mmu), 0); + } +#if defined(CONFIG_MALI_SHARED_INTERRUPTS) + mali_executor_unlock(); +#endif +#endif + + return ret; +} + +static void mali_group_bottom_half_mmu(void *data) +{ + struct mali_group *group = (struct mali_group *)data; + + MALI_DEBUG_ASSERT_POINTER(group); + MALI_DEBUG_ASSERT_POINTER(group->mmu); + + if (NULL != group->gp_core) { + _mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_START | + MALI_PROFILING_EVENT_CHANNEL_SOFTWARE | + MALI_PROFILING_EVENT_REASON_START_STOP_SW_BOTTOM_HALF, + 0, _mali_osk_get_tid(), /* pid and tid */ + MALI_PROFILING_MAKE_EVENT_DATA_CORE_GP_MMU(0), + mali_mmu_get_rawstat(group->mmu), 0); + } else { + MALI_DEBUG_ASSERT_POINTER(group->pp_core); + _mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_START | + MALI_PROFILING_EVENT_CHANNEL_SOFTWARE | + MALI_PROFILING_EVENT_REASON_START_STOP_SW_BOTTOM_HALF, + 0, _mali_osk_get_tid(), /* pid and tid */ + MALI_PROFILING_MAKE_EVENT_DATA_CORE_PP_MMU( + mali_pp_core_get_id(group->pp_core)), + mali_mmu_get_rawstat(group->mmu), 0); + } + + mali_executor_interrupt_mmu(group, MALI_FALSE); + + if (NULL != group->gp_core) { + _mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_STOP | + MALI_PROFILING_EVENT_CHANNEL_SOFTWARE | + MALI_PROFILING_EVENT_REASON_START_STOP_SW_BOTTOM_HALF, + 0, _mali_osk_get_tid(), /* pid and tid */ + MALI_PROFILING_MAKE_EVENT_DATA_CORE_GP_MMU(0), + mali_mmu_get_rawstat(group->mmu), 0); + } else { + _mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_STOP | + MALI_PROFILING_EVENT_CHANNEL_SOFTWARE | + MALI_PROFILING_EVENT_REASON_START_STOP_SW_BOTTOM_HALF, + 0, _mali_osk_get_tid(), /* pid and tid */ + MALI_PROFILING_MAKE_EVENT_DATA_CORE_PP_MMU( + mali_pp_core_get_id(group->pp_core)), + mali_mmu_get_rawstat(group->mmu), 0); + } +} + 
+_mali_osk_errcode_t mali_group_upper_half_gp(void *data) +{ + struct mali_group *group = (struct mali_group *)data; + _mali_osk_errcode_t ret; + + MALI_DEBUG_ASSERT_POINTER(group); + MALI_DEBUG_ASSERT_POINTER(group->gp_core); + MALI_DEBUG_ASSERT_POINTER(group->mmu); + +#if defined(CONFIG_MALI400_PROFILING) && defined (CONFIG_TRACEPOINTS) +#if defined(CONFIG_MALI_SHARED_INTERRUPTS) + mali_executor_lock(); + if (!mali_group_is_working(group)) { + /* Not working, so nothing to do */ + mali_executor_unlock(); + return _MALI_OSK_ERR_FAULT; + } +#endif + _mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_START | + MALI_PROFILING_EVENT_CHANNEL_SOFTWARE | + MALI_PROFILING_EVENT_REASON_START_STOP_SW_UPPER_HALF, + 0, 0, /* No pid and tid for interrupt handler */ + MALI_PROFILING_MAKE_EVENT_DATA_CORE_GP(0), + mali_gp_get_rawstat(group->gp_core), 0); + + MALI_DEBUG_PRINT(4, ("Group: Interrupt 0x%08X from %s\n", + mali_gp_get_rawstat(group->gp_core), + mali_group_core_description(group))); +#if defined(CONFIG_MALI_SHARED_INTERRUPTS) + mali_executor_unlock(); +#endif +#endif + ret = mali_executor_interrupt_gp(group, MALI_TRUE); + +#if defined(CONFIG_MALI400_PROFILING) && defined (CONFIG_TRACEPOINTS) +#if defined(CONFIG_MALI_SHARED_INTERRUPTS) + mali_executor_lock(); + if (!mali_group_is_working(group)) { + /* Not working, so nothing to do */ + mali_executor_unlock(); + return _MALI_OSK_ERR_FAULT; + } +#endif + _mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_STOP | + MALI_PROFILING_EVENT_CHANNEL_SOFTWARE | + MALI_PROFILING_EVENT_REASON_START_STOP_SW_UPPER_HALF, + 0, 0, /* No pid and tid for interrupt handler */ + MALI_PROFILING_MAKE_EVENT_DATA_CORE_GP(0), + mali_gp_get_rawstat(group->gp_core), 0); +#if defined(CONFIG_MALI_SHARED_INTERRUPTS) + mali_executor_unlock(); +#endif +#endif + return ret; +} + +static void mali_group_bottom_half_gp(void *data) +{ + struct mali_group *group = (struct mali_group *)data; + + MALI_DEBUG_ASSERT_POINTER(group); + 
MALI_DEBUG_ASSERT_POINTER(group->gp_core); + MALI_DEBUG_ASSERT_POINTER(group->mmu); + + _mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_START | + MALI_PROFILING_EVENT_CHANNEL_SOFTWARE | + MALI_PROFILING_EVENT_REASON_START_STOP_SW_BOTTOM_HALF, + 0, _mali_osk_get_tid(), /* pid and tid */ + MALI_PROFILING_MAKE_EVENT_DATA_CORE_GP(0), + mali_gp_get_rawstat(group->gp_core), 0); + + mali_executor_interrupt_gp(group, MALI_FALSE); + + _mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_STOP | + MALI_PROFILING_EVENT_CHANNEL_SOFTWARE | + MALI_PROFILING_EVENT_REASON_START_STOP_SW_BOTTOM_HALF, + 0, _mali_osk_get_tid(), /* pid and tid */ + MALI_PROFILING_MAKE_EVENT_DATA_CORE_GP(0), + mali_gp_get_rawstat(group->gp_core), 0); +} + +_mali_osk_errcode_t mali_group_upper_half_pp(void *data) +{ + struct mali_group *group = (struct mali_group *)data; + _mali_osk_errcode_t ret; + + MALI_DEBUG_ASSERT_POINTER(group); + MALI_DEBUG_ASSERT_POINTER(group->pp_core); + MALI_DEBUG_ASSERT_POINTER(group->mmu); + +#if defined(CONFIG_MALI400_PROFILING) && defined (CONFIG_TRACEPOINTS) +#if defined(CONFIG_MALI_SHARED_INTERRUPTS) + mali_executor_lock(); + if (!mali_group_is_working(group)) { + /* Not working, so nothing to do */ + mali_executor_unlock(); + return _MALI_OSK_ERR_FAULT; + } +#endif + + _mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_START | + MALI_PROFILING_EVENT_CHANNEL_SOFTWARE | + MALI_PROFILING_EVENT_REASON_START_STOP_SW_UPPER_HALF, + 0, 0, /* No pid and tid for interrupt handler */ + MALI_PROFILING_MAKE_EVENT_DATA_CORE_PP( + mali_pp_core_get_id(group->pp_core)), + mali_pp_get_rawstat(group->pp_core), 0); + + MALI_DEBUG_PRINT(4, ("Group: Interrupt 0x%08X from %s\n", + mali_pp_get_rawstat(group->pp_core), + mali_group_core_description(group))); +#if defined(CONFIG_MALI_SHARED_INTERRUPTS) + mali_executor_unlock(); +#endif +#endif + + ret = mali_executor_interrupt_pp(group, MALI_TRUE); + +#if defined(CONFIG_MALI400_PROFILING) && defined (CONFIG_TRACEPOINTS) +#if 
defined(CONFIG_MALI_SHARED_INTERRUPTS) + mali_executor_lock(); + if (!mali_group_is_working(group)) { + /* Not working, so nothing to do */ + mali_executor_unlock(); + return _MALI_OSK_ERR_FAULT; + } +#endif + _mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_STOP | + MALI_PROFILING_EVENT_CHANNEL_SOFTWARE | + MALI_PROFILING_EVENT_REASON_START_STOP_SW_UPPER_HALF, + 0, 0, /* No pid and tid for interrupt handler */ + MALI_PROFILING_MAKE_EVENT_DATA_CORE_PP( + mali_pp_core_get_id(group->pp_core)), + mali_pp_get_rawstat(group->pp_core), 0); +#if defined(CONFIG_MALI_SHARED_INTERRUPTS) + mali_executor_unlock(); +#endif +#endif + return ret; +} + +static void mali_group_bottom_half_pp(void *data) +{ + struct mali_group *group = (struct mali_group *)data; + + MALI_DEBUG_ASSERT_POINTER(group); + MALI_DEBUG_ASSERT_POINTER(group->pp_core); + MALI_DEBUG_ASSERT_POINTER(group->mmu); + + _mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_START | + MALI_PROFILING_EVENT_CHANNEL_SOFTWARE | + MALI_PROFILING_EVENT_REASON_START_STOP_SW_BOTTOM_HALF, + 0, _mali_osk_get_tid(), /* pid and tid */ + MALI_PROFILING_MAKE_EVENT_DATA_CORE_PP( + mali_pp_core_get_id(group->pp_core)), + mali_pp_get_rawstat(group->pp_core), 0); + + mali_executor_interrupt_pp(group, MALI_FALSE); + + _mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_STOP | + MALI_PROFILING_EVENT_CHANNEL_SOFTWARE | + MALI_PROFILING_EVENT_REASON_START_STOP_SW_BOTTOM_HALF, + 0, _mali_osk_get_tid(), /* pid and tid */ + MALI_PROFILING_MAKE_EVENT_DATA_CORE_PP( + mali_pp_core_get_id(group->pp_core)), + mali_pp_get_rawstat(group->pp_core), 0); +} + +static void mali_group_timeout(void *data) +{ + struct mali_group *group = (struct mali_group *)data; + MALI_DEBUG_ASSERT_POINTER(group); + + MALI_DEBUG_PRINT(2, ("Group: timeout handler for %s at %u\n", + mali_group_core_description(group), + _mali_osk_time_tickcount())); + + if (NULL != group->gp_core) { + mali_group_schedule_bottom_half_gp(group); + } else { + 
MALI_DEBUG_ASSERT_POINTER(group->pp_core); + mali_group_schedule_bottom_half_pp(group); + } +} + +static void mali_group_out_of_memory(void *data) +{ + struct mali_group *group = (struct mali_group *)data; + + MALI_DEBUG_ASSERT_POINTER(group); + MALI_DEBUG_ASSERT_POINTER(group->gp_core); + MALI_DEBUG_ASSERT_POINTER(group->mmu); + + mali_executor_group_oom(group); +} + +mali_bool mali_group_zap_session(struct mali_group *group, + struct mali_session_data *session) +{ + MALI_DEBUG_ASSERT_POINTER(group); + MALI_DEBUG_ASSERT_POINTER(session); + MALI_DEBUG_ASSERT_EXECUTOR_LOCK_HELD(); + + if (group->session != session) { + /* not running from this session */ + return MALI_TRUE; /* success */ + } + + if (group->is_working) { + /* The Zap also does the stall and disable_stall */ + mali_bool zap_success = mali_mmu_zap_tlb(group->mmu); + return zap_success; + } else { + /* Just remove the session instead of zapping */ + mali_group_clear_session(group); + return MALI_TRUE; /* success */ + } +} + +#if defined(CONFIG_MALI400_PROFILING) +static void mali_group_report_l2_cache_counters_per_core(struct mali_group *group, u32 core_num) +{ + u32 source0 = 0; + u32 value0 = 0; + u32 source1 = 0; + u32 value1 = 0; + u32 profiling_channel = 0; + + MALI_DEBUG_ASSERT_EXECUTOR_LOCK_HELD(); + + switch (core_num) { + case 0: + profiling_channel = MALI_PROFILING_EVENT_TYPE_SINGLE | + MALI_PROFILING_EVENT_CHANNEL_GPU | + MALI_PROFILING_EVENT_REASON_SINGLE_GPU_L20_COUNTERS; + break; + case 1: + profiling_channel = MALI_PROFILING_EVENT_TYPE_SINGLE | + MALI_PROFILING_EVENT_CHANNEL_GPU | + MALI_PROFILING_EVENT_REASON_SINGLE_GPU_L21_COUNTERS; + break; + case 2: + profiling_channel = MALI_PROFILING_EVENT_TYPE_SINGLE | + MALI_PROFILING_EVENT_CHANNEL_GPU | + MALI_PROFILING_EVENT_REASON_SINGLE_GPU_L22_COUNTERS; + break; + default: + profiling_channel = MALI_PROFILING_EVENT_TYPE_SINGLE | + MALI_PROFILING_EVENT_CHANNEL_GPU | + MALI_PROFILING_EVENT_REASON_SINGLE_GPU_L20_COUNTERS; + break; + } + + if (0 
== core_num) { + mali_l2_cache_core_get_counter_values(group->l2_cache_core[0], &source0, &value0, &source1, &value1); + } + if (1 == core_num) { + if (1 == mali_l2_cache_get_id(group->l2_cache_core[0])) { + mali_l2_cache_core_get_counter_values(group->l2_cache_core[0], &source0, &value0, &source1, &value1); + } else if (1 == mali_l2_cache_get_id(group->l2_cache_core[1])) { + mali_l2_cache_core_get_counter_values(group->l2_cache_core[1], &source0, &value0, &source1, &value1); + } + } + if (2 == core_num) { + if (2 == mali_l2_cache_get_id(group->l2_cache_core[0])) { + mali_l2_cache_core_get_counter_values(group->l2_cache_core[0], &source0, &value0, &source1, &value1); + } else if (2 == mali_l2_cache_get_id(group->l2_cache_core[1])) { + mali_l2_cache_core_get_counter_values(group->l2_cache_core[1], &source0, &value0, &source1, &value1); + } + } + + _mali_osk_profiling_add_event(profiling_channel, source1 << 8 | source0, value0, value1, 0, 0); +} +#endif /* #if defined(CONFIG_MALI400_PROFILING) */ diff --git a/drivers/gpu/arm/utgard/common/mali_group.h b/drivers/gpu/arm/utgard/common/mali_group.h new file mode 100644 index 000000000000..705605ec1d9f --- /dev/null +++ b/drivers/gpu/arm/utgard/common/mali_group.h @@ -0,0 +1,467 @@ +/* + * Copyright (C) 2011-2015 ARM Limited. All rights reserved. + * + * This program is free software and is provided to you under the terms of the GNU General Public License version 2 + * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence. + * + * A copy of the licence is included with the program, and can also be obtained from Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. 
+ */ + +#ifndef __MALI_GROUP_H__ +#define __MALI_GROUP_H__ + +#include "mali_osk.h" +#include "mali_l2_cache.h" +#include "mali_mmu.h" +#include "mali_gp.h" +#include "mali_pp.h" +#include "mali_session.h" +#include "mali_osk_profiling.h" + +/** + * @brief Default max runtime [ms] for a core job - used by timeout timers + */ +#define MALI_MAX_JOB_RUNTIME_DEFAULT 5000 + +extern int mali_max_job_runtime; + +#define MALI_MAX_NUMBER_OF_GROUPS 10 +#define MALI_MAX_NUMBER_OF_PHYSICAL_PP_GROUPS 8 + +enum mali_group_state { + MALI_GROUP_STATE_INACTIVE, + MALI_GROUP_STATE_ACTIVATION_PENDING, + MALI_GROUP_STATE_ACTIVE, +}; + +/** + * The structure represents a render group + * A render group is defined by all the cores that share the same Mali MMU + */ + +struct mali_group { + struct mali_mmu_core *mmu; + struct mali_session_data *session; + + enum mali_group_state state; + mali_bool power_is_on; + + mali_bool is_working; + unsigned long start_time; /* in ticks */ + + struct mali_gp_core *gp_core; + struct mali_gp_job *gp_running_job; + + struct mali_pp_core *pp_core; + struct mali_pp_job *pp_running_job; + u32 pp_running_sub_job; + + struct mali_pm_domain *pm_domain; + + struct mali_l2_cache_core *l2_cache_core[2]; + u32 l2_cache_core_ref_count[2]; + + /* Parent virtual group (if any) */ + struct mali_group *parent_group; + + struct mali_dlbu_core *dlbu_core; + struct mali_bcast_unit *bcast_core; + + /* Used for working groups which needs to be disabled */ + mali_bool disable_requested; + + /* Used by group to link child groups (for virtual group) */ + _mali_osk_list_t group_list; + + /* Used by executor module in order to link groups of same state */ + _mali_osk_list_t executor_list; + + /* Used by PM domains to link groups of same domain */ + _mali_osk_list_t pm_domain_list; + + _mali_osk_wq_work_t *bottom_half_work_mmu; + _mali_osk_wq_work_t *bottom_half_work_gp; + _mali_osk_wq_work_t *bottom_half_work_pp; + + _mali_osk_wq_work_t *oom_work_handler; + _mali_osk_timer_t 
*timeout_timer; +}; + +/** @brief Create a new Mali group object + * + * @return A pointer to a new group object + */ +struct mali_group *mali_group_create(struct mali_l2_cache_core *core, + struct mali_dlbu_core *dlbu, + struct mali_bcast_unit *bcast, + u32 domain_index); + +void mali_group_dump_status(struct mali_group *group); + +void mali_group_delete(struct mali_group *group); + +_mali_osk_errcode_t mali_group_add_mmu_core(struct mali_group *group, + struct mali_mmu_core *mmu_core); +void mali_group_remove_mmu_core(struct mali_group *group); + +_mali_osk_errcode_t mali_group_add_gp_core(struct mali_group *group, + struct mali_gp_core *gp_core); +void mali_group_remove_gp_core(struct mali_group *group); + +_mali_osk_errcode_t mali_group_add_pp_core(struct mali_group *group, + struct mali_pp_core *pp_core); +void mali_group_remove_pp_core(struct mali_group *group); + +MALI_STATIC_INLINE const char *mali_group_core_description( + struct mali_group *group) +{ + MALI_DEBUG_ASSERT_POINTER(group); + if (NULL != group->pp_core) { + return mali_pp_core_description(group->pp_core); + } else { + MALI_DEBUG_ASSERT_POINTER(group->gp_core); + return mali_gp_core_description(group->gp_core); + } +} + +MALI_STATIC_INLINE mali_bool mali_group_is_virtual(struct mali_group *group) +{ + MALI_DEBUG_ASSERT_POINTER(group); + +#if (defined(CONFIG_MALI450) || defined(CONFIG_MALI470)) + return (NULL != group->dlbu_core); +#else + return MALI_FALSE; +#endif +} + +/** @brief Check if a group is a part of a virtual group or not + */ +MALI_STATIC_INLINE mali_bool mali_group_is_in_virtual(struct mali_group *group) +{ + MALI_DEBUG_ASSERT_POINTER(group); + MALI_DEBUG_ASSERT_EXECUTOR_LOCK_HELD(); + +#if (defined(CONFIG_MALI450) || defined(CONFIG_MALI470)) + return (NULL != group->parent_group) ? MALI_TRUE : MALI_FALSE; +#else + return MALI_FALSE; +#endif +} + +/** @brief Reset group + * + * This function will reset the entire group, + * including all the cores present in the group. 
 + * + * @param group Pointer to the group to reset + */ +void mali_group_reset(struct mali_group *group); + +MALI_STATIC_INLINE struct mali_session_data *mali_group_get_session( + struct mali_group *group) +{ + MALI_DEBUG_ASSERT_POINTER(group); + MALI_DEBUG_ASSERT_EXECUTOR_LOCK_HELD(); + + return group->session; +} + +MALI_STATIC_INLINE void mali_group_clear_session(struct mali_group *group) +{ + MALI_DEBUG_ASSERT_POINTER(group); + MALI_DEBUG_ASSERT_EXECUTOR_LOCK_HELD(); + + if (NULL != group->session) { + mali_mmu_activate_empty_page_directory(group->mmu); + group->session = NULL; + } +} + +enum mali_group_state mali_group_activate(struct mali_group *group); + +/* + * Change state from ACTIVATION_PENDING to ACTIVE + * For virtual group, all children need to be ACTIVE first + */ +mali_bool mali_group_set_active(struct mali_group *group); + +/* + * @return MALI_TRUE means one or more domains can now be powered off, + * and caller should call either mali_pm_update_async() or + * mali_pm_update_sync() in order to do so. + */ +mali_bool mali_group_deactivate(struct mali_group *group); + +MALI_STATIC_INLINE enum mali_group_state mali_group_get_state(struct mali_group *group) +{ + MALI_DEBUG_ASSERT_POINTER(group); + MALI_DEBUG_ASSERT_EXECUTOR_LOCK_HELD(); + return group->state; +} + +MALI_STATIC_INLINE mali_bool mali_group_power_is_on(struct mali_group *group) +{ + MALI_DEBUG_ASSERT_POINTER(group); + return group->power_is_on; +} + +void mali_group_power_up(struct mali_group *group); +void mali_group_power_down(struct mali_group *group); + +MALI_STATIC_INLINE void mali_group_set_disable_request( + struct mali_group *group, mali_bool disable) +{ + MALI_DEBUG_ASSERT_POINTER(group); + MALI_DEBUG_ASSERT_EXECUTOR_LOCK_HELD(); + group->disable_requested = disable; + + /** + * When one of the child groups' disable_request is set to TRUE, then + * the disable_request of the parent group should also be set to TRUE. 
+ * While, the disable_request of parent group should only be set to FALSE + * only when all of its child group's disable_request are set to FALSE. + */ + if (NULL != group->parent_group && MALI_TRUE == disable) { + group->parent_group->disable_requested = disable; + } +} + +MALI_STATIC_INLINE mali_bool mali_group_disable_requested( + struct mali_group *group) +{ + MALI_DEBUG_ASSERT_POINTER(group); + MALI_DEBUG_ASSERT_EXECUTOR_LOCK_HELD(); + return group->disable_requested; +} + +/** @brief Virtual groups */ +void mali_group_add_group(struct mali_group *parent, struct mali_group *child); +struct mali_group *mali_group_acquire_group(struct mali_group *parent); +void mali_group_remove_group(struct mali_group *parent, struct mali_group *child); + +/** @brief Checks if the group is working. + */ +MALI_STATIC_INLINE mali_bool mali_group_is_working(struct mali_group *group) +{ + MALI_DEBUG_ASSERT_POINTER(group); + MALI_DEBUG_ASSERT_EXECUTOR_LOCK_HELD(); + if (mali_group_is_in_virtual(group)) { + struct mali_group *tmp_group = mali_executor_get_virtual_group(); + return tmp_group->is_working; + } + return group->is_working; +} + +MALI_STATIC_INLINE struct mali_gp_job *mali_group_get_running_gp_job(struct mali_group *group) +{ + MALI_DEBUG_ASSERT_POINTER(group); + MALI_DEBUG_ASSERT_EXECUTOR_LOCK_HELD(); + return group->gp_running_job; +} + +/** @brief Zap MMU TLB on all groups + * + * Zap TLB on group if \a session is active. 
 + */ +mali_bool mali_group_zap_session(struct mali_group *group, + struct mali_session_data *session); + +/** @brief Get pointer to GP core object + */ +MALI_STATIC_INLINE struct mali_gp_core *mali_group_get_gp_core(struct mali_group *group) +{ + MALI_DEBUG_ASSERT_POINTER(group); + return group->gp_core; +} + +/** @brief Get pointer to PP core object + */ +MALI_STATIC_INLINE struct mali_pp_core *mali_group_get_pp_core(struct mali_group *group) +{ + MALI_DEBUG_ASSERT_POINTER(group); + return group->pp_core; +} + +/** @brief Start GP job + */ +void mali_group_start_gp_job(struct mali_group *group, struct mali_gp_job *job); + +void mali_group_start_pp_job(struct mali_group *group, struct mali_pp_job *job, u32 sub_job); + +/** @brief Start virtual group Job on a virtual group +*/ +void mali_group_start_job_on_virtual(struct mali_group *group, struct mali_pp_job *job, u32 first_subjob, u32 last_subjob); + + +/** @brief Start a particular subjob of a PP job on a specific PP group +*/ +void mali_group_start_job_on_group(struct mali_group *group, struct mali_pp_job *job, u32 subjob); + + +/** @brief remove all the unused groups in tmp_unused group list, so that the group is in consistent status. 
+ */ +void mali_group_non_dlbu_job_done_virtual(struct mali_group *group); + + +/** @brief Resume GP job that suspended waiting for more heap memory + */ +void mali_group_resume_gp_with_new_heap(struct mali_group *group, u32 job_id, u32 start_addr, u32 end_addr); + +MALI_STATIC_INLINE enum mali_interrupt_result mali_group_get_interrupt_result_gp(struct mali_group *group) +{ + MALI_DEBUG_ASSERT_POINTER(group); + MALI_DEBUG_ASSERT_POINTER(group->gp_core); + MALI_DEBUG_ASSERT_EXECUTOR_LOCK_HELD(); + return mali_gp_get_interrupt_result(group->gp_core); +} + +MALI_STATIC_INLINE enum mali_interrupt_result mali_group_get_interrupt_result_pp(struct mali_group *group) +{ + MALI_DEBUG_ASSERT_POINTER(group); + MALI_DEBUG_ASSERT_POINTER(group->pp_core); + MALI_DEBUG_ASSERT_EXECUTOR_LOCK_HELD(); + return mali_pp_get_interrupt_result(group->pp_core); +} + +MALI_STATIC_INLINE enum mali_interrupt_result mali_group_get_interrupt_result_mmu(struct mali_group *group) +{ + MALI_DEBUG_ASSERT_POINTER(group); + MALI_DEBUG_ASSERT_POINTER(group->mmu); + MALI_DEBUG_ASSERT_EXECUTOR_LOCK_HELD(); + return mali_mmu_get_interrupt_result(group->mmu); +} + +MALI_STATIC_INLINE mali_bool mali_group_gp_is_active(struct mali_group *group) +{ + MALI_DEBUG_ASSERT_POINTER(group); + MALI_DEBUG_ASSERT_POINTER(group->gp_core); + MALI_DEBUG_ASSERT_EXECUTOR_LOCK_HELD(); + return mali_gp_is_active(group->gp_core); +} + +MALI_STATIC_INLINE mali_bool mali_group_pp_is_active(struct mali_group *group) +{ + MALI_DEBUG_ASSERT_POINTER(group); + MALI_DEBUG_ASSERT_POINTER(group->pp_core); + MALI_DEBUG_ASSERT_EXECUTOR_LOCK_HELD(); + return mali_pp_is_active(group->pp_core); +} + +MALI_STATIC_INLINE mali_bool mali_group_has_timed_out(struct mali_group *group) +{ + unsigned long time_cost; + struct mali_group *tmp_group = group; + + MALI_DEBUG_ASSERT_POINTER(group); + MALI_DEBUG_ASSERT_EXECUTOR_LOCK_HELD(); + + /* if the group is in virtual need to use virtual_group's start time */ + if (mali_group_is_in_virtual(group)) { 
+ tmp_group = mali_executor_get_virtual_group(); + } + + time_cost = _mali_osk_time_tickcount() - tmp_group->start_time; + if (_mali_osk_time_mstoticks(mali_max_job_runtime) <= time_cost) { + /* + * current tick is at or after timeout end time, + * so this is a valid timeout + */ + return MALI_TRUE; + } else { + /* + * Not a valid timeout. A HW interrupt probably beat + * us to it, and the timer wasn't properly deleted + * (async deletion used due to atomic context). + */ + return MALI_FALSE; + } +} + +MALI_STATIC_INLINE void mali_group_mask_all_interrupts_gp(struct mali_group *group) +{ + MALI_DEBUG_ASSERT_POINTER(group); + MALI_DEBUG_ASSERT_POINTER(group->gp_core); + MALI_DEBUG_ASSERT_EXECUTOR_LOCK_HELD(); + return mali_gp_mask_all_interrupts(group->gp_core); +} + +MALI_STATIC_INLINE void mali_group_mask_all_interrupts_pp(struct mali_group *group) +{ + MALI_DEBUG_ASSERT_POINTER(group); + MALI_DEBUG_ASSERT_POINTER(group->pp_core); + MALI_DEBUG_ASSERT_EXECUTOR_LOCK_HELD(); + return mali_pp_mask_all_interrupts(group->pp_core); +} + +MALI_STATIC_INLINE void mali_group_enable_interrupts_gp( + struct mali_group *group, + enum mali_interrupt_result exceptions) +{ + MALI_DEBUG_ASSERT_POINTER(group); + MALI_DEBUG_ASSERT_POINTER(group->gp_core); + MALI_DEBUG_ASSERT_EXECUTOR_LOCK_HELD(); + mali_gp_enable_interrupts(group->gp_core, exceptions); +} + +MALI_STATIC_INLINE void mali_group_schedule_bottom_half_gp(struct mali_group *group) +{ + MALI_DEBUG_ASSERT_POINTER(group); + MALI_DEBUG_ASSERT_POINTER(group->gp_core); + _mali_osk_wq_schedule_work(group->bottom_half_work_gp); +} + +MALI_STATIC_INLINE void mali_group_schedule_oom_work_handler(struct mali_group *group) +{ + MALI_DEBUG_ASSERT_POINTER(group); + MALI_DEBUG_ASSERT_POINTER(group->gp_core); + _mali_osk_wq_schedule_work(group->oom_work_handler); +} + +MALI_STATIC_INLINE void mali_group_schedule_bottom_half_pp(struct mali_group *group) +{ + MALI_DEBUG_ASSERT_POINTER(group); + MALI_DEBUG_ASSERT_POINTER(group->pp_core); + 
_mali_osk_wq_schedule_work(group->bottom_half_work_pp); +} + +MALI_STATIC_INLINE void mali_group_schedule_bottom_half_mmu(struct mali_group *group) +{ + MALI_DEBUG_ASSERT_POINTER(group); + MALI_DEBUG_ASSERT_POINTER(group->mmu); + _mali_osk_wq_schedule_work(group->bottom_half_work_mmu); +} + +struct mali_pp_job *mali_group_complete_pp(struct mali_group *group, mali_bool success, u32 *sub_job); + +struct mali_gp_job *mali_group_complete_gp(struct mali_group *group, mali_bool success); + +#if defined(CONFIG_MALI400_PROFILING) +MALI_STATIC_INLINE void mali_group_oom(struct mali_group *group) +{ + _mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_SUSPEND | + MALI_PROFILING_MAKE_EVENT_CHANNEL_GP(0), + 0, 0, 0, 0, 0); +} +#endif + +struct mali_group *mali_group_get_glob_group(u32 index); +u32 mali_group_get_glob_num_groups(void); + +u32 mali_group_dump_state(struct mali_group *group, char *buf, u32 size); + + +_mali_osk_errcode_t mali_group_upper_half_mmu(void *data); +_mali_osk_errcode_t mali_group_upper_half_gp(void *data); +_mali_osk_errcode_t mali_group_upper_half_pp(void *data); + +MALI_STATIC_INLINE mali_bool mali_group_is_empty(struct mali_group *group) +{ + MALI_DEBUG_ASSERT_POINTER(group); + MALI_DEBUG_ASSERT(mali_group_is_virtual(group)); + MALI_DEBUG_ASSERT_EXECUTOR_LOCK_HELD(); + return _mali_osk_list_empty(&group->group_list); +} + +#endif /* __MALI_GROUP_H__ */ diff --git a/drivers/gpu/arm/utgard/common/mali_hw_core.c b/drivers/gpu/arm/utgard/common/mali_hw_core.c new file mode 100644 index 000000000000..c90cf38d8516 --- /dev/null +++ b/drivers/gpu/arm/utgard/common/mali_hw_core.c @@ -0,0 +1,47 @@ +/* + * Copyright (C) 2011-2015 ARM Limited. All rights reserved. + * + * This program is free software and is provided to you under the terms of the GNU General Public License version 2 + * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence. 
+ * + * A copy of the licence is included with the program, and can also be obtained from Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. + */ + +#include "mali_hw_core.h" +#include "mali_osk.h" +#include "mali_kernel_common.h" +#include "mali_osk_mali.h" + +_mali_osk_errcode_t mali_hw_core_create(struct mali_hw_core *core, const _mali_osk_resource_t *resource, u32 reg_size) +{ + core->phys_addr = resource->base; + core->phys_offset = resource->base - _mali_osk_resource_base_address(); + core->description = resource->description; + core->size = reg_size; + + MALI_DEBUG_ASSERT(core->phys_offset < core->phys_addr); + + if (_MALI_OSK_ERR_OK == _mali_osk_mem_reqregion(core->phys_addr, core->size, core->description)) { + core->mapped_registers = _mali_osk_mem_mapioregion(core->phys_addr, core->size, core->description); + if (NULL != core->mapped_registers) { + return _MALI_OSK_ERR_OK; + } else { + MALI_PRINT_ERROR(("Failed to map memory region for core %s at phys_addr 0x%08X\n", core->description, core->phys_addr)); + } + _mali_osk_mem_unreqregion(core->phys_addr, core->size); + } else { + MALI_PRINT_ERROR(("Failed to request memory region for core %s at phys_addr 0x%08X\n", core->description, core->phys_addr)); + } + + return _MALI_OSK_ERR_FAULT; +} + +void mali_hw_core_delete(struct mali_hw_core *core) +{ + if (NULL != core->mapped_registers) { + _mali_osk_mem_unmapioregion(core->phys_addr, core->size, core->mapped_registers); + core->mapped_registers = NULL; + } + _mali_osk_mem_unreqregion(core->phys_addr, core->size); +} diff --git a/drivers/gpu/arm/utgard/common/mali_hw_core.h b/drivers/gpu/arm/utgard/common/mali_hw_core.h new file mode 100644 index 000000000000..ac2ffbedf308 --- /dev/null +++ b/drivers/gpu/arm/utgard/common/mali_hw_core.h @@ -0,0 +1,111 @@ +/* + * Copyright (C) 2011-2015 ARM Limited. All rights reserved. 
+ * + * This program is free software and is provided to you under the terms of the GNU General Public License version 2 + * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence. + * + * A copy of the licence is included with the program, and can also be obtained from Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. + */ + +#ifndef __MALI_HW_CORE_H__ +#define __MALI_HW_CORE_H__ + +#include "mali_osk.h" +#include "mali_kernel_common.h" + +/** + * The common parts for all Mali HW cores (GP, PP, MMU, L2 and PMU) + * This struct is embedded inside all core specific structs. + */ +struct mali_hw_core { + uintptr_t phys_addr; /**< Physical address of the registers */ + u32 phys_offset; /**< Offset from start of Mali to registers */ + u32 size; /**< Size of registers */ + mali_io_address mapped_registers; /**< Virtual mapping of the registers */ + const char *description; /**< Name of unit (as specified in device configuration) */ +}; + +#define MALI_REG_POLL_COUNT_FAST 1000000 +#define MALI_REG_POLL_COUNT_SLOW 1000000 + +/* + * GP and PP core translate their int_stat/rawstat into one of these + */ +enum mali_interrupt_result { + MALI_INTERRUPT_RESULT_NONE, + MALI_INTERRUPT_RESULT_SUCCESS, + MALI_INTERRUPT_RESULT_SUCCESS_VS, + MALI_INTERRUPT_RESULT_SUCCESS_PLBU, + MALI_INTERRUPT_RESULT_OOM, + MALI_INTERRUPT_RESULT_ERROR +}; + +_mali_osk_errcode_t mali_hw_core_create(struct mali_hw_core *core, const _mali_osk_resource_t *resource, u32 reg_size); +void mali_hw_core_delete(struct mali_hw_core *core); + +MALI_STATIC_INLINE u32 mali_hw_core_register_read(struct mali_hw_core *core, u32 relative_address) +{ + u32 read_val; + read_val = _mali_osk_mem_ioread32(core->mapped_registers, relative_address); + MALI_DEBUG_PRINT(6, ("register_read for core %s, relative addr=0x%04X, val=0x%08X\n", + core->description, relative_address, read_val)); + return read_val; +} + 
+MALI_STATIC_INLINE void mali_hw_core_register_write_relaxed(struct mali_hw_core *core, u32 relative_address, u32 new_val) +{ + MALI_DEBUG_PRINT(6, ("register_write_relaxed for core %s, relative addr=0x%04X, val=0x%08X\n", + core->description, relative_address, new_val)); + _mali_osk_mem_iowrite32_relaxed(core->mapped_registers, relative_address, new_val); +} + +/* Conditionally write a register. + * The register will only be written if the new value is different from the old_value. + * If the new value is different, the old value will also be updated */ +MALI_STATIC_INLINE void mali_hw_core_register_write_relaxed_conditional(struct mali_hw_core *core, u32 relative_address, u32 new_val, const u32 old_val) +{ + MALI_DEBUG_PRINT(6, ("register_write_relaxed for core %s, relative addr=0x%04X, val=0x%08X\n", + core->description, relative_address, new_val)); + if (old_val != new_val) { + _mali_osk_mem_iowrite32_relaxed(core->mapped_registers, relative_address, new_val); + } +} + +MALI_STATIC_INLINE void mali_hw_core_register_write(struct mali_hw_core *core, u32 relative_address, u32 new_val) +{ + MALI_DEBUG_PRINT(6, ("register_write for core %s, relative addr=0x%04X, val=0x%08X\n", + core->description, relative_address, new_val)); + _mali_osk_mem_iowrite32(core->mapped_registers, relative_address, new_val); +} + +MALI_STATIC_INLINE void mali_hw_core_register_write_array_relaxed(struct mali_hw_core *core, u32 relative_address, u32 *write_array, u32 nr_of_regs) +{ + u32 i; + MALI_DEBUG_PRINT(6, ("register_write_array: for core %s, relative addr=0x%04X, nr of regs=%u\n", + core->description, relative_address, nr_of_regs)); + + /* Do not use burst writes against the registers */ + for (i = 0; i < nr_of_regs; i++) { + mali_hw_core_register_write_relaxed(core, relative_address + i * 4, write_array[i]); + } +} + +/* Conditionally write a set of registers. + * The register will only be written if the new value is different from the old_value. 
+ * If the new value is different, the old value will also be updated */ +MALI_STATIC_INLINE void mali_hw_core_register_write_array_relaxed_conditional(struct mali_hw_core *core, u32 relative_address, u32 *write_array, u32 nr_of_regs, const u32 *old_array) +{ + u32 i; + MALI_DEBUG_PRINT(6, ("register_write_array: for core %s, relative addr=0x%04X, nr of regs=%u\n", + core->description, relative_address, nr_of_regs)); + + /* Do not use burst writes against the registers */ + for (i = 0; i < nr_of_regs; i++) { + if (old_array[i] != write_array[i]) { + mali_hw_core_register_write_relaxed(core, relative_address + i * 4, write_array[i]); + } + } +} + +#endif /* __MALI_HW_CORE_H__ */ diff --git a/drivers/gpu/arm/utgard/common/mali_kernel_common.h b/drivers/gpu/arm/utgard/common/mali_kernel_common.h new file mode 100644 index 000000000000..990cf3ac78e0 --- /dev/null +++ b/drivers/gpu/arm/utgard/common/mali_kernel_common.h @@ -0,0 +1,181 @@ +/* + * Copyright (C) 2010, 2012-2015 ARM Limited. All rights reserved. + * + * This program is free software and is provided to you under the terms of the GNU General Public License version 2 + * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence. + * + * A copy of the licence is included with the program, and can also be obtained from Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. + */ + +#ifndef __MALI_KERNEL_COMMON_H__ +#define __MALI_KERNEL_COMMON_H__ + +#include "mali_osk.h" + +/* Make sure debug is defined when it should be */ +#ifndef DEBUG +#if defined(_DEBUG) +#define DEBUG +#endif +#endif + +/* The file include several useful macros for error checking, debugging and printing. + * - MALI_PRINTF(...) Do not use this function: Will be included in Release builds. + * - MALI_DEBUG_PRINT(nr, (X) ) Prints the second argument if nr<=MALI_DEBUG_LEVEL. 
/*
 * (continuation of the macro overview above; entries corrected — the
 * original list documented a nonexistent MALI_DEBUG_ERROR and gave
 * MALI_DEBUG_ASSERT a two-argument signature it does not have)
 * - MALI_DEBUG_PRINT_ERROR((X))        Prints an error text, a source trace, and the given error message.
 * - MALI_DEBUG_PRINT_ASSERT(exp, (X))  If the asserted expression is false, prints the message and breaks.
 * - MALI_DEBUG_ASSERT(exp)             If the asserted expression is false, the program will break.
 * - MALI_DEBUG_ASSERT_POINTER(pointer) Triggers if the pointer is a NULL pointer.
 * - MALI_DEBUG_CODE(X)                 The code inside the macro is only compiled in Debug builds.
 *
 * The (X) means that you must add an extra parenthesis around the argument list.
 *
 * The printf function: MALI_PRINTF(...) is routed to _mali_osk_debugmsg
 *
 * Suggested range for the DEBUG-LEVEL is [1:6] where
 * [1:2] Is messages with highest priority, indicate possible errors.
 * [3:4] Is messages with medium priority, output important variables.
 * [5:6] Is messages with low priority, used during extensive debugging.
 */
#ifndef __MALI_KERNEL_COMMON_H__
#define __MALI_KERNEL_COMMON_H__

/**
 * Fundamental error macro. Reports an error code. This is abstracted to
 * allow us to easily switch to a different error reporting method if we
 * want, and also to allow us to search for error returns easily.
 *
 * Note no closing semicolon - this is supplied in typical usage:
 *
 * MALI_ERROR(MALI_ERROR_OUT_OF_MEMORY);
 */
#define MALI_ERROR(error_code) return (error_code)

/**
 * Basic error macro, to indicate success.
 * Note no closing semicolon - this is supplied in typical usage:
 *
 * MALI_SUCCESS;
 */
#define MALI_SUCCESS MALI_ERROR(_MALI_OSK_ERR_OK)

/**
 * Basic error macro. This checks whether the given condition is true, and
 * if not returns from this function with the supplied error code. This is
 * a macro so that we can override it for stress testing.
 *
 * Note that this uses the do-while-0 wrapping to ensure that we don't get
 * problems with dangling else clauses. Note also no closing semicolon -
 * this is supplied in typical usage:
 *
 * MALI_CHECK((p!=NULL), ERROR_NO_OBJECT);
 */
#define MALI_CHECK(condition, error_code) do { if (!(condition)) MALI_ERROR(error_code); } while (0)

/**
 * Error propagation macro. If the expression given is anything other than
 * _MALI_OSK_ERR_OK, then the value is returned from the enclosing function
 * as an error code. This effectively acts as a guard clause, and
 * propagates error values up the call stack. This uses a temporary value
 * to ensure that the error expression is not evaluated twice.
 */
#define MALI_CHECK_NO_ERROR(expression) \
	do { _mali_osk_errcode_t _check_no_error_result = (expression); \
		if (_check_no_error_result != _MALI_OSK_ERR_OK) \
			MALI_ERROR(_check_no_error_result); \
	} while (0)

/**
 * Pointer check macro. Checks non-null pointer.
 */
#define MALI_CHECK_NON_NULL(pointer, error_code) MALI_CHECK(((pointer) != NULL), (error_code))

/**
 * Error macro with goto. This checks whether the given condition is true,
 * and if not jumps to the specified label using a goto. The label must
 * therefore be local to the function in which this macro appears. This is
 * most usually used to execute some clean-up code before exiting with a
 * call to ERROR.
 *
 * Like the other macros, this is a macro to allow us to override the
 * condition if we wish, e.g. to force an error during stress testing.
 */
#define MALI_CHECK_GOTO(condition, label) do { if (!(condition)) goto label; } while (0)

/**
 * Explicitly ignore a parameter passed into a function, to suppress
 * compiler warnings. Should only be used with parameter names.
 */
#define MALI_IGNORE(x) x = x

#if defined(CONFIG_MALI_QUIET)
#define MALI_PRINTF(args)
#else
/* NOTE: the trailing semicolon is part of the established expansion and
 * is kept for compatibility with existing call sites. */
#define MALI_PRINTF(args) _mali_osk_dbgmsg args;
#endif

#define MALI_PRINT_ERROR(args) do { \
		MALI_PRINTF(("Mali: ERR: %s\n" ,__FILE__)); \
		MALI_PRINTF(("          %s()%4d\n          ", __FUNCTION__, __LINE__)) ; \
		MALI_PRINTF(args); \
		MALI_PRINTF(("\n")); \
	} while (0)

#define MALI_PRINT(args) do { \
		MALI_PRINTF(("Mali: ")); \
		MALI_PRINTF(args); \
	} while (0)

#ifdef DEBUG
#ifndef mali_debug_level
extern int mali_debug_level;
#endif

#define MALI_DEBUG_CODE(code) code
#define MALI_DEBUG_PRINT(level, args) do { \
		if ((level) <= mali_debug_level) \
		{ MALI_PRINTF(("Mali<" #level ">: ")); MALI_PRINTF(args); } \
	} while (0)

#define MALI_DEBUG_PRINT_ERROR(args) MALI_PRINT_ERROR(args)

#define MALI_DEBUG_PRINT_IF(level, condition, args) \
	if ((condition) && ((level) <= mali_debug_level)) \
	{ MALI_PRINTF(("Mali<" #level ">: ")); MALI_PRINTF(args); }

#define MALI_DEBUG_PRINT_ELSE(level, args) \
	else if ((level) <= mali_debug_level) \
	{ MALI_PRINTF(("Mali<" #level ">: ")); MALI_PRINTF(args); }

/**
 * @note these variants of DEBUG ASSERTS will cause a debugger breakpoint
 * to be entered (see _mali_osk_break() ). An alternative would be to call
 * _mali_osk_abort(), on OSs that support it.
 */
#define MALI_DEBUG_PRINT_ASSERT(condition, args) do { if (!(condition)) { MALI_PRINT_ERROR(args); _mali_osk_break(); } } while (0)
#define MALI_DEBUG_ASSERT_POINTER(pointer) do { if ((pointer) == NULL) { MALI_PRINT_ERROR(("NULL pointer " #pointer)); _mali_osk_break(); } } while (0)
#define MALI_DEBUG_ASSERT(condition) do { if (!(condition)) { MALI_PRINT_ERROR(("ASSERT failed: " #condition)); _mali_osk_break(); } } while (0)

#else /* DEBUG */

#define MALI_DEBUG_CODE(code)
/* Parameter renamed from "string" to "level" to match the DEBUG variant. */
#define MALI_DEBUG_PRINT(level, args) do {} while (0)
#define MALI_DEBUG_PRINT_ERROR(args) do {} while (0)
#define MALI_DEBUG_PRINT_IF(level, condition, args) do {} while (0)
/* BUG FIX: this release stub used to take three parameters
 * (level, condition, args) while the DEBUG version takes two
 * (level, args), so release builds using the documented signature
 * failed to compile. */
#define MALI_DEBUG_PRINT_ELSE(level, args) do {} while (0)
#define MALI_DEBUG_PRINT_ASSERT(condition, args) do {} while (0)
#define MALI_DEBUG_ASSERT_POINTER(pointer) do {} while (0)
#define MALI_DEBUG_ASSERT(condition) do {} while (0)

#endif /* DEBUG */

/**
 * variables from user space cannot be dereferenced from kernel space;
 * tagging them with __user allows the GCC compiler to generate a warning.
 * Other compilers may not support this so we define it here as an empty
 * macro if the compiler doesn't define it.
 */
#ifndef __user
#define __user
#endif

#endif /* __MALI_KERNEL_COMMON_H__ */
+ * + * A copy of the licence is included with the program, and can also be obtained from Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. + */ + +#include "mali_kernel_common.h" +#include "mali_session.h" +#include "mali_osk.h" +#include "mali_osk_mali.h" +#include "mali_ukk.h" +#include "mali_kernel_core.h" +#include "mali_memory.h" +#include "mali_mem_validation.h" +#include "mali_mmu.h" +#include "mali_mmu_page_directory.h" +#include "mali_dlbu.h" +#include "mali_broadcast.h" +#include "mali_gp.h" +#include "mali_pp.h" +#include "mali_executor.h" +#include "mali_pp_job.h" +#include "mali_group.h" +#include "mali_pm.h" +#include "mali_pmu.h" +#include "mali_scheduler.h" +#include "mali_kernel_utilization.h" +#include "mali_l2_cache.h" +#include "mali_timeline.h" +#include "mali_soft_job.h" +#include "mali_pm_domain.h" +#if defined(CONFIG_MALI400_PROFILING) +#include "mali_osk_profiling.h" +#endif +#if defined(CONFIG_MALI400_INTERNAL_PROFILING) +#include "mali_profiling_internal.h" +#endif +#include "mali_control_timer.h" +#include "mali_dvfs_policy.h" +#include <linux/sched.h> + +#define MALI_SHARED_MEMORY_DEFAULT_SIZE 0xffffffff + +/* Mali GPU memory. Real values come from module parameter or from device specific data */ +unsigned int mali_dedicated_mem_start = 0; +unsigned int mali_dedicated_mem_size = 0; + +/* Default shared memory size is set to 4G. */ +unsigned int mali_shared_mem_size = MALI_SHARED_MEMORY_DEFAULT_SIZE; + +/* Frame buffer memory to be accessible by Mali GPU */ +int mali_fb_start = 0; +int mali_fb_size = 0; + +/* Mali max job runtime */ +extern int mali_max_job_runtime; + +/** Start profiling from module load? */ +int mali_boot_profiling = 0; + +/** Limits for the number of PP cores behind each L2 cache. 
*/ +int mali_max_pp_cores_group_1 = 0xFF; +int mali_max_pp_cores_group_2 = 0xFF; + +int mali_inited_pp_cores_group_1 = 0; +int mali_inited_pp_cores_group_2 = 0; + +static _mali_product_id_t global_product_id = _MALI_PRODUCT_ID_UNKNOWN; +static uintptr_t global_gpu_base_address = 0; +static u32 global_gpu_major_version = 0; +static u32 global_gpu_minor_version = 0; + +mali_bool mali_gpu_class_is_mali450 = MALI_FALSE; +mali_bool mali_gpu_class_is_mali470 = MALI_FALSE; + +static _mali_osk_errcode_t mali_set_global_gpu_base_address(void) +{ + _mali_osk_errcode_t err = _MALI_OSK_ERR_OK; + + global_gpu_base_address = _mali_osk_resource_base_address(); + if (0 == global_gpu_base_address) { + err = _MALI_OSK_ERR_ITEM_NOT_FOUND; + } + + return err; +} + +static u32 mali_get_bcast_id(_mali_osk_resource_t *resource_pp) +{ + switch (resource_pp->base - global_gpu_base_address) { + case 0x08000: + case 0x20000: /* fall-through for aliased mapping */ + return 0x01; + case 0x0A000: + case 0x22000: /* fall-through for aliased mapping */ + return 0x02; + case 0x0C000: + case 0x24000: /* fall-through for aliased mapping */ + return 0x04; + case 0x0E000: + case 0x26000: /* fall-through for aliased mapping */ + return 0x08; + case 0x28000: + return 0x10; + case 0x2A000: + return 0x20; + case 0x2C000: + return 0x40; + case 0x2E000: + return 0x80; + default: + return 0; + } +} + +static _mali_osk_errcode_t mali_parse_product_info(void) +{ + _mali_osk_resource_t first_pp_resource; + + /* Find the first PP core resource (again) */ + if (_MALI_OSK_ERR_OK == _mali_osk_resource_find(MALI_OFFSET_PP0, &first_pp_resource)) { + /* Create a dummy PP object for this core so that we can read the version register */ + struct mali_group *group = mali_group_create(NULL, NULL, NULL, MALI_DOMAIN_INDEX_PP0); + if (NULL != group) { + struct mali_pp_core *pp_core = mali_pp_create(&first_pp_resource, group, MALI_FALSE, mali_get_bcast_id(&first_pp_resource)); + if (NULL != pp_core) { + u32 pp_version; + + 
pp_version = mali_pp_core_get_version(pp_core); + + mali_group_delete(group); + + global_gpu_major_version = (pp_version >> 8) & 0xFF; + global_gpu_minor_version = pp_version & 0xFF; + + switch (pp_version >> 16) { + case MALI200_PP_PRODUCT_ID: + global_product_id = _MALI_PRODUCT_ID_MALI200; + MALI_DEBUG_PRINT(2, ("Found Mali GPU Mali-200 r%up%u\n", global_gpu_major_version, global_gpu_minor_version)); + MALI_PRINT_ERROR(("Mali-200 is not supported by this driver.\n")); + _mali_osk_abort(); + break; + case MALI300_PP_PRODUCT_ID: + global_product_id = _MALI_PRODUCT_ID_MALI300; + MALI_DEBUG_PRINT(2, ("Found Mali GPU Mali-300 r%up%u\n", global_gpu_major_version, global_gpu_minor_version)); + break; + case MALI400_PP_PRODUCT_ID: + global_product_id = _MALI_PRODUCT_ID_MALI400; + MALI_DEBUG_PRINT(2, ("Found Mali GPU Mali-400 MP r%up%u\n", global_gpu_major_version, global_gpu_minor_version)); + break; + case MALI450_PP_PRODUCT_ID: + global_product_id = _MALI_PRODUCT_ID_MALI450; + MALI_DEBUG_PRINT(2, ("Found Mali GPU Mali-450 MP r%up%u\n", global_gpu_major_version, global_gpu_minor_version)); + break; + case MALI470_PP_PRODUCT_ID: + global_product_id = _MALI_PRODUCT_ID_MALI470; + MALI_DEBUG_PRINT(2, ("Found Mali GPU Mali-470 MP r%up%u\n", global_gpu_major_version, global_gpu_minor_version)); + break; + default: + MALI_DEBUG_PRINT(2, ("Found unknown Mali GPU (r%up%u)\n", global_gpu_major_version, global_gpu_minor_version)); + return _MALI_OSK_ERR_FAULT; + } + + return _MALI_OSK_ERR_OK; + } else { + MALI_PRINT_ERROR(("Failed to create initial PP object\n")); + } + } else { + MALI_PRINT_ERROR(("Failed to create initial group object\n")); + } + } else { + MALI_PRINT_ERROR(("First PP core not specified in config file\n")); + } + + return _MALI_OSK_ERR_FAULT; +} + +static void mali_delete_groups(void) +{ + struct mali_group *group; + + group = mali_group_get_glob_group(0); + while (NULL != group) { + mali_group_delete(group); + group = mali_group_get_glob_group(0); + } + + 
MALI_DEBUG_ASSERT(0 == mali_group_get_glob_num_groups()); +} + +static void mali_delete_l2_cache_cores(void) +{ + struct mali_l2_cache_core *l2; + + l2 = mali_l2_cache_core_get_glob_l2_core(0); + while (NULL != l2) { + mali_l2_cache_delete(l2); + l2 = mali_l2_cache_core_get_glob_l2_core(0); + } + + MALI_DEBUG_ASSERT(0 == mali_l2_cache_core_get_glob_num_l2_cores()); +} + +static struct mali_l2_cache_core *mali_create_l2_cache_core(_mali_osk_resource_t *resource, u32 domain_index) +{ + struct mali_l2_cache_core *l2_cache = NULL; + + if (NULL != resource) { + + MALI_DEBUG_PRINT(3, ("Found L2 cache %s\n", resource->description)); + + l2_cache = mali_l2_cache_create(resource, domain_index); + if (NULL == l2_cache) { + MALI_PRINT_ERROR(("Failed to create L2 cache object\n")); + return NULL; + } + } + MALI_DEBUG_PRINT(3, ("Created L2 cache core object\n")); + + return l2_cache; +} + +static _mali_osk_errcode_t mali_parse_config_l2_cache(void) +{ + struct mali_l2_cache_core *l2_cache = NULL; + + if (mali_is_mali400()) { + _mali_osk_resource_t l2_resource; + if (_MALI_OSK_ERR_OK != _mali_osk_resource_find(MALI400_OFFSET_L2_CACHE0, &l2_resource)) { + MALI_DEBUG_PRINT(3, ("Did not find required Mali L2 cache in config file\n")); + return _MALI_OSK_ERR_FAULT; + } + + l2_cache = mali_create_l2_cache_core(&l2_resource, MALI_DOMAIN_INDEX_L20); + if (NULL == l2_cache) { + return _MALI_OSK_ERR_FAULT; + } + } else if (mali_is_mali450()) { + /* + * L2 for GP at 0x10000 + * L2 for PP0-3 at 0x01000 + * L2 for PP4-7 at 0x11000 (optional) + */ + + _mali_osk_resource_t l2_gp_resource; + _mali_osk_resource_t l2_pp_grp0_resource; + _mali_osk_resource_t l2_pp_grp1_resource; + + /* Make cluster for GP's L2 */ + if (_MALI_OSK_ERR_OK == _mali_osk_resource_find(MALI450_OFFSET_L2_CACHE0, &l2_gp_resource)) { + MALI_DEBUG_PRINT(3, ("Creating Mali-450 L2 cache core for GP\n")); + l2_cache = mali_create_l2_cache_core(&l2_gp_resource, MALI_DOMAIN_INDEX_L20); + if (NULL == l2_cache) { + return 
_MALI_OSK_ERR_FAULT; + } + } else { + MALI_DEBUG_PRINT(3, ("Did not find required Mali L2 cache for GP in config file\n")); + return _MALI_OSK_ERR_FAULT; + } + + /* Find corresponding l2 domain */ + if (_MALI_OSK_ERR_OK == _mali_osk_resource_find(MALI450_OFFSET_L2_CACHE1, &l2_pp_grp0_resource)) { + MALI_DEBUG_PRINT(3, ("Creating Mali-450 L2 cache core for PP group 0\n")); + l2_cache = mali_create_l2_cache_core(&l2_pp_grp0_resource, MALI_DOMAIN_INDEX_L21); + if (NULL == l2_cache) { + return _MALI_OSK_ERR_FAULT; + } + } else { + MALI_DEBUG_PRINT(3, ("Did not find required Mali L2 cache for PP group 0 in config file\n")); + return _MALI_OSK_ERR_FAULT; + } + + /* Second PP core group is optional, don't fail if we don't find it */ + if (_MALI_OSK_ERR_OK == _mali_osk_resource_find(MALI450_OFFSET_L2_CACHE2, &l2_pp_grp1_resource)) { + MALI_DEBUG_PRINT(3, ("Creating Mali-450 L2 cache core for PP group 1\n")); + l2_cache = mali_create_l2_cache_core(&l2_pp_grp1_resource, MALI_DOMAIN_INDEX_L22); + if (NULL == l2_cache) { + return _MALI_OSK_ERR_FAULT; + } + } + } else if (mali_is_mali470()) { + _mali_osk_resource_t l2c1_resource; + + /* Make cluster for L2C1 */ + if (_MALI_OSK_ERR_OK == _mali_osk_resource_find(MALI470_OFFSET_L2_CACHE1, &l2c1_resource)) { + MALI_DEBUG_PRINT(3, ("Creating Mali-470 L2 cache 1\n")); + l2_cache = mali_create_l2_cache_core(&l2c1_resource, MALI_DOMAIN_INDEX_L21); + if (NULL == l2_cache) { + return _MALI_OSK_ERR_FAULT; + } + } else { + MALI_DEBUG_PRINT(3, ("Did not find required Mali L2 cache for L2C1\n")); + return _MALI_OSK_ERR_FAULT; + } + } + + return _MALI_OSK_ERR_OK; +} + +static struct mali_group *mali_create_group(struct mali_l2_cache_core *cache, + _mali_osk_resource_t *resource_mmu, + _mali_osk_resource_t *resource_gp, + _mali_osk_resource_t *resource_pp, + u32 domain_index) +{ + struct mali_mmu_core *mmu; + struct mali_group *group; + + MALI_DEBUG_PRINT(3, ("Starting new group for MMU %s\n", resource_mmu->description)); + + /* Create the 
group object */ + group = mali_group_create(cache, NULL, NULL, domain_index); + if (NULL == group) { + MALI_PRINT_ERROR(("Failed to create group object for MMU %s\n", resource_mmu->description)); + return NULL; + } + + /* Create the MMU object inside group */ + mmu = mali_mmu_create(resource_mmu, group, MALI_FALSE); + if (NULL == mmu) { + MALI_PRINT_ERROR(("Failed to create MMU object\n")); + mali_group_delete(group); + return NULL; + } + + if (NULL != resource_gp) { + /* Create the GP core object inside this group */ + struct mali_gp_core *gp_core = mali_gp_create(resource_gp, group); + if (NULL == gp_core) { + /* No need to clean up now, as we will clean up everything linked in from the cluster when we fail this function */ + MALI_PRINT_ERROR(("Failed to create GP object\n")); + mali_group_delete(group); + return NULL; + } + } + + if (NULL != resource_pp) { + struct mali_pp_core *pp_core; + + /* Create the PP core object inside this group */ + pp_core = mali_pp_create(resource_pp, group, MALI_FALSE, mali_get_bcast_id(resource_pp)); + if (NULL == pp_core) { + /* No need to clean up now, as we will clean up everything linked in from the cluster when we fail this function */ + MALI_PRINT_ERROR(("Failed to create PP object\n")); + mali_group_delete(group); + return NULL; + } + } + + return group; +} + +static _mali_osk_errcode_t mali_create_virtual_group(_mali_osk_resource_t *resource_mmu_pp_bcast, + _mali_osk_resource_t *resource_pp_bcast, + _mali_osk_resource_t *resource_dlbu, + _mali_osk_resource_t *resource_bcast) +{ + struct mali_mmu_core *mmu_pp_bcast_core; + struct mali_pp_core *pp_bcast_core; + struct mali_dlbu_core *dlbu_core; + struct mali_bcast_unit *bcast_core; + struct mali_group *group; + + MALI_DEBUG_PRINT(2, ("Starting new virtual group for MMU PP broadcast core %s\n", resource_mmu_pp_bcast->description)); + + /* Create the DLBU core object */ + dlbu_core = mali_dlbu_create(resource_dlbu); + if (NULL == dlbu_core) { + MALI_PRINT_ERROR(("Failed to 
create DLBU object \n")); + return _MALI_OSK_ERR_FAULT; + } + + /* Create the Broadcast unit core */ + bcast_core = mali_bcast_unit_create(resource_bcast); + if (NULL == bcast_core) { + MALI_PRINT_ERROR(("Failed to create Broadcast unit object!\n")); + mali_dlbu_delete(dlbu_core); + return _MALI_OSK_ERR_FAULT; + } + + /* Create the group object */ +#if defined(DEBUG) + /* Get a physical PP group to temporarily add to broadcast unit. IRQ + * verification needs a physical group in the broadcast unit to test + * the broadcast unit interrupt line. */ + { + struct mali_group *phys_group = NULL; + int i; + for (i = 0; i < mali_group_get_glob_num_groups(); i++) { + phys_group = mali_group_get_glob_group(i); + if (NULL != mali_group_get_pp_core(phys_group)) break; + } + MALI_DEBUG_ASSERT(NULL != mali_group_get_pp_core(phys_group)); + + /* Add the group temporarily to the broadcast, and update the + * broadcast HW. Since the HW is not updated when removing the + * group the IRQ check will work when the virtual PP is created + * later. + * + * When the virtual group gets populated, the actually used + * groups will be added to the broadcast unit and the HW will + * be updated. 
+ */ + mali_bcast_add_group(bcast_core, phys_group); + mali_bcast_reset(bcast_core); + mali_bcast_remove_group(bcast_core, phys_group); + } +#endif /* DEBUG */ + group = mali_group_create(NULL, dlbu_core, bcast_core, MALI_DOMAIN_INDEX_DUMMY); + if (NULL == group) { + MALI_PRINT_ERROR(("Failed to create group object for MMU PP broadcast core %s\n", resource_mmu_pp_bcast->description)); + mali_bcast_unit_delete(bcast_core); + mali_dlbu_delete(dlbu_core); + return _MALI_OSK_ERR_FAULT; + } + + /* Create the MMU object inside group */ + mmu_pp_bcast_core = mali_mmu_create(resource_mmu_pp_bcast, group, MALI_TRUE); + if (NULL == mmu_pp_bcast_core) { + MALI_PRINT_ERROR(("Failed to create MMU PP broadcast object\n")); + mali_group_delete(group); + return _MALI_OSK_ERR_FAULT; + } + + /* Create the PP core object inside this group */ + pp_bcast_core = mali_pp_create(resource_pp_bcast, group, MALI_TRUE, 0); + if (NULL == pp_bcast_core) { + /* No need to clean up now, as we will clean up everything linked in from the cluster when we fail this function */ + MALI_PRINT_ERROR(("Failed to create PP object\n")); + mali_group_delete(group); + return _MALI_OSK_ERR_FAULT; + } + + return _MALI_OSK_ERR_OK; +} + +static _mali_osk_errcode_t mali_parse_config_groups(void) +{ + struct mali_group *group; + int cluster_id_gp = 0; + int cluster_id_pp_grp0 = 0; + int cluster_id_pp_grp1 = 0; + int i; + + _mali_osk_resource_t resource_gp; + _mali_osk_resource_t resource_gp_mmu; + _mali_osk_resource_t resource_pp[8]; + _mali_osk_resource_t resource_pp_mmu[8]; + _mali_osk_resource_t resource_pp_mmu_bcast; + _mali_osk_resource_t resource_pp_bcast; + _mali_osk_resource_t resource_dlbu; + _mali_osk_resource_t resource_bcast; + _mali_osk_errcode_t resource_gp_found; + _mali_osk_errcode_t resource_gp_mmu_found; + _mali_osk_errcode_t resource_pp_found[8]; + _mali_osk_errcode_t resource_pp_mmu_found[8]; + _mali_osk_errcode_t resource_pp_mmu_bcast_found; + _mali_osk_errcode_t resource_pp_bcast_found; + 
_mali_osk_errcode_t resource_dlbu_found; + _mali_osk_errcode_t resource_bcast_found; + + if (!(mali_is_mali400() || mali_is_mali450() || mali_is_mali470())) { + /* No known HW core */ + return _MALI_OSK_ERR_FAULT; + } + + if (MALI_MAX_JOB_RUNTIME_DEFAULT == mali_max_job_runtime) { + /* Group settings are not overridden by module parameters, so use device settings */ + _mali_osk_device_data data = { 0, }; + + if (_MALI_OSK_ERR_OK == _mali_osk_device_data_get(&data)) { + /* Use device specific settings (if defined) */ + if (0 != data.max_job_runtime) { + mali_max_job_runtime = data.max_job_runtime; + } + } + } + + if (mali_is_mali450()) { + /* Mali-450 have separate L2s for GP, and PP core group(s) */ + cluster_id_pp_grp0 = 1; + cluster_id_pp_grp1 = 2; + } + + resource_gp_found = _mali_osk_resource_find(MALI_OFFSET_GP, &resource_gp); + resource_gp_mmu_found = _mali_osk_resource_find(MALI_OFFSET_GP_MMU, &resource_gp_mmu); + resource_pp_found[0] = _mali_osk_resource_find(MALI_OFFSET_PP0, &(resource_pp[0])); + resource_pp_found[1] = _mali_osk_resource_find(MALI_OFFSET_PP1, &(resource_pp[1])); + resource_pp_found[2] = _mali_osk_resource_find(MALI_OFFSET_PP2, &(resource_pp[2])); + resource_pp_found[3] = _mali_osk_resource_find(MALI_OFFSET_PP3, &(resource_pp[3])); + resource_pp_found[4] = _mali_osk_resource_find(MALI_OFFSET_PP4, &(resource_pp[4])); + resource_pp_found[5] = _mali_osk_resource_find(MALI_OFFSET_PP5, &(resource_pp[5])); + resource_pp_found[6] = _mali_osk_resource_find(MALI_OFFSET_PP6, &(resource_pp[6])); + resource_pp_found[7] = _mali_osk_resource_find(MALI_OFFSET_PP7, &(resource_pp[7])); + resource_pp_mmu_found[0] = _mali_osk_resource_find(MALI_OFFSET_PP0_MMU, &(resource_pp_mmu[0])); + resource_pp_mmu_found[1] = _mali_osk_resource_find(MALI_OFFSET_PP1_MMU, &(resource_pp_mmu[1])); + resource_pp_mmu_found[2] = _mali_osk_resource_find(MALI_OFFSET_PP2_MMU, &(resource_pp_mmu[2])); + resource_pp_mmu_found[3] = _mali_osk_resource_find(MALI_OFFSET_PP3_MMU, 
&(resource_pp_mmu[3])); + resource_pp_mmu_found[4] = _mali_osk_resource_find(MALI_OFFSET_PP4_MMU, &(resource_pp_mmu[4])); + resource_pp_mmu_found[5] = _mali_osk_resource_find(MALI_OFFSET_PP5_MMU, &(resource_pp_mmu[5])); + resource_pp_mmu_found[6] = _mali_osk_resource_find(MALI_OFFSET_PP6_MMU, &(resource_pp_mmu[6])); + resource_pp_mmu_found[7] = _mali_osk_resource_find(MALI_OFFSET_PP7_MMU, &(resource_pp_mmu[7])); + + + if (mali_is_mali450() || mali_is_mali470()) { + resource_bcast_found = _mali_osk_resource_find(MALI_OFFSET_BCAST, &resource_bcast); + resource_dlbu_found = _mali_osk_resource_find(MALI_OFFSET_DLBU, &resource_dlbu); + resource_pp_mmu_bcast_found = _mali_osk_resource_find(MALI_OFFSET_PP_BCAST_MMU, &resource_pp_mmu_bcast); + resource_pp_bcast_found = _mali_osk_resource_find(MALI_OFFSET_PP_BCAST, &resource_pp_bcast); + + if (_MALI_OSK_ERR_OK != resource_bcast_found || + _MALI_OSK_ERR_OK != resource_dlbu_found || + _MALI_OSK_ERR_OK != resource_pp_mmu_bcast_found || + _MALI_OSK_ERR_OK != resource_pp_bcast_found) { + /* Missing mandatory core(s) for Mali-450 or Mali-470 */ + MALI_DEBUG_PRINT(2, ("Missing mandatory resources, Mali-450 needs DLBU, Broadcast unit, virtual PP core and virtual MMU\n")); + return _MALI_OSK_ERR_FAULT; + } + } + + if (_MALI_OSK_ERR_OK != resource_gp_found || + _MALI_OSK_ERR_OK != resource_gp_mmu_found || + _MALI_OSK_ERR_OK != resource_pp_found[0] || + _MALI_OSK_ERR_OK != resource_pp_mmu_found[0]) { + /* Missing mandatory core(s) */ + MALI_DEBUG_PRINT(2, ("Missing mandatory resource, need at least one GP and one PP, both with a separate MMU\n")); + return _MALI_OSK_ERR_FAULT; + } + + MALI_DEBUG_ASSERT(1 <= mali_l2_cache_core_get_glob_num_l2_cores()); + group = mali_create_group(mali_l2_cache_core_get_glob_l2_core(cluster_id_gp), &resource_gp_mmu, &resource_gp, NULL, MALI_DOMAIN_INDEX_GP); + if (NULL == group) { + return _MALI_OSK_ERR_FAULT; + } + + /* Create group for first (and mandatory) PP core */ + 
MALI_DEBUG_ASSERT(mali_l2_cache_core_get_glob_num_l2_cores() >= (cluster_id_pp_grp0 + 1)); /* >= 1 on Mali-300 and Mali-400, >= 2 on Mali-450 */ + group = mali_create_group(mali_l2_cache_core_get_glob_l2_core(cluster_id_pp_grp0), &resource_pp_mmu[0], NULL, &resource_pp[0], MALI_DOMAIN_INDEX_PP0); + if (NULL == group) { + return _MALI_OSK_ERR_FAULT; + } + + mali_inited_pp_cores_group_1++; + + /* Create groups for rest of the cores in the first PP core group */ + for (i = 1; i < 4; i++) { /* First half of the PP cores belong to first core group */ + if (mali_inited_pp_cores_group_1 < mali_max_pp_cores_group_1) { + if (_MALI_OSK_ERR_OK == resource_pp_found[i] && _MALI_OSK_ERR_OK == resource_pp_mmu_found[i]) { + group = mali_create_group(mali_l2_cache_core_get_glob_l2_core(cluster_id_pp_grp0), &resource_pp_mmu[i], NULL, &resource_pp[i], MALI_DOMAIN_INDEX_PP0 + i); + if (NULL == group) { + return _MALI_OSK_ERR_FAULT; + } + + mali_inited_pp_cores_group_1++; + } + } + } + + /* Create groups for cores in the second PP core group */ + for (i = 4; i < 8; i++) { /* Second half of the PP cores belong to second core group */ + if (mali_inited_pp_cores_group_2 < mali_max_pp_cores_group_2) { + if (_MALI_OSK_ERR_OK == resource_pp_found[i] && _MALI_OSK_ERR_OK == resource_pp_mmu_found[i]) { + MALI_DEBUG_ASSERT(mali_l2_cache_core_get_glob_num_l2_cores() >= 2); /* Only Mali-450 have a second core group */ + group = mali_create_group(mali_l2_cache_core_get_glob_l2_core(cluster_id_pp_grp1), &resource_pp_mmu[i], NULL, &resource_pp[i], MALI_DOMAIN_INDEX_PP0 + i); + if (NULL == group) { + return _MALI_OSK_ERR_FAULT; + } + + mali_inited_pp_cores_group_2++; + } + } + } + + if (mali_is_mali450() || mali_is_mali470()) { + _mali_osk_errcode_t err = mali_create_virtual_group(&resource_pp_mmu_bcast, &resource_pp_bcast, &resource_dlbu, &resource_bcast); + if (_MALI_OSK_ERR_OK != err) { + return err; + } + } + + mali_max_pp_cores_group_1 = mali_inited_pp_cores_group_1; + mali_max_pp_cores_group_2 = 
mali_inited_pp_cores_group_2; + MALI_DEBUG_PRINT(2, ("%d+%d PP cores initialized\n", mali_inited_pp_cores_group_1, mali_inited_pp_cores_group_2)); + + return _MALI_OSK_ERR_OK; +} + +static _mali_osk_errcode_t mali_check_shared_interrupts(void) +{ +#if !defined(CONFIG_MALI_SHARED_INTERRUPTS) + if (MALI_TRUE == _mali_osk_shared_interrupts()) { + MALI_PRINT_ERROR(("Shared interrupts detected, but driver support is not enabled\n")); + return _MALI_OSK_ERR_FAULT; + } +#endif /* !defined(CONFIG_MALI_SHARED_INTERRUPTS) */ + + /* It is OK to compile support for shared interrupts even if Mali is not using it. */ + return _MALI_OSK_ERR_OK; +} + +static _mali_osk_errcode_t mali_parse_config_pmu(void) +{ + _mali_osk_resource_t resource_pmu; + + MALI_DEBUG_ASSERT(0 != global_gpu_base_address); + + if (_MALI_OSK_ERR_OK == _mali_osk_resource_find(MALI_OFFSET_PMU, &resource_pmu)) { + struct mali_pmu_core *pmu; + + pmu = mali_pmu_create(&resource_pmu); + if (NULL == pmu) { + MALI_PRINT_ERROR(("Failed to create PMU\n")); + return _MALI_OSK_ERR_FAULT; + } + } + + /* It's ok if the PMU doesn't exist */ + return _MALI_OSK_ERR_OK; +} + +static _mali_osk_errcode_t mali_parse_config_memory(void) +{ + _mali_osk_device_data data = { 0, }; + _mali_osk_errcode_t ret; + + /* The priority of setting the value of mali_shared_mem_size, + * mali_dedicated_mem_start and mali_dedicated_mem_size: + * 1. module parameter; + * 2. platform data; + * 3. 
default value; + **/ + if (_MALI_OSK_ERR_OK == _mali_osk_device_data_get(&data)) { + /* Memory settings are not overridden by module parameters, so use device settings */ + if (0 == mali_dedicated_mem_start && 0 == mali_dedicated_mem_size) { + /* Use device specific settings (if defined) */ + mali_dedicated_mem_start = data.dedicated_mem_start; + mali_dedicated_mem_size = data.dedicated_mem_size; + } + + if (MALI_SHARED_MEMORY_DEFAULT_SIZE == mali_shared_mem_size && + 0 != data.shared_mem_size) { + mali_shared_mem_size = data.shared_mem_size; + } + } + + if (0 < mali_dedicated_mem_size && 0 != mali_dedicated_mem_start) { + MALI_DEBUG_PRINT(2, ("Mali memory settings (dedicated: 0x%08X@0x%08X)\n", + mali_dedicated_mem_size, mali_dedicated_mem_start)); + + /* Dedicated memory */ + ret = mali_memory_core_resource_dedicated_memory(mali_dedicated_mem_start, mali_dedicated_mem_size); + if (_MALI_OSK_ERR_OK != ret) { + MALI_PRINT_ERROR(("Failed to register dedicated memory\n")); + mali_memory_terminate(); + return ret; + } + } + + if (0 < mali_shared_mem_size) { + MALI_DEBUG_PRINT(2, ("Mali memory settings (shared: 0x%08X)\n", mali_shared_mem_size)); + + /* Shared OS memory */ + ret = mali_memory_core_resource_os_memory(mali_shared_mem_size); + if (_MALI_OSK_ERR_OK != ret) { + MALI_PRINT_ERROR(("Failed to register shared OS memory\n")); + mali_memory_terminate(); + return ret; + } + } + + if (0 == mali_fb_start && 0 == mali_fb_size) { + /* Frame buffer settings are not overridden by module parameters, so use device settings */ + _mali_osk_device_data data = { 0, }; + + if (_MALI_OSK_ERR_OK == _mali_osk_device_data_get(&data)) { + /* Use device specific settings (if defined) */ + mali_fb_start = data.fb_start; + mali_fb_size = data.fb_size; + } + + MALI_DEBUG_PRINT(2, ("Using device defined frame buffer settings (0x%08X@0x%08X)\n", + mali_fb_size, mali_fb_start)); + } else { + MALI_DEBUG_PRINT(2, ("Using module defined frame buffer settings (0x%08X@0x%08X)\n", + 
mali_fb_size, mali_fb_start)); + } + + if (0 != mali_fb_size) { + /* Register frame buffer */ + ret = mali_mem_validation_add_range(mali_fb_start, mali_fb_size); + if (_MALI_OSK_ERR_OK != ret) { + MALI_PRINT_ERROR(("Failed to register frame buffer memory region\n")); + mali_memory_terminate(); + return ret; + } + } + + return _MALI_OSK_ERR_OK; +} + +static void mali_detect_gpu_class(void) +{ + if (_mali_osk_identify_gpu_resource() == 0x450) + mali_gpu_class_is_mali450 = MALI_TRUE; + + if (_mali_osk_identify_gpu_resource() == 0x470) + mali_gpu_class_is_mali470 = MALI_TRUE; +} + +static _mali_osk_errcode_t mali_init_hw_reset(void) +{ +#if (defined(CONFIG_MALI450) || defined(CONFIG_MALI470)) + _mali_osk_resource_t resource_bcast; + + /* Ensure broadcast unit is in a good state before we start creating + * groups and cores. + */ + if (_MALI_OSK_ERR_OK == _mali_osk_resource_find(MALI_OFFSET_BCAST, &resource_bcast)) { + struct mali_bcast_unit *bcast_core; + + bcast_core = mali_bcast_unit_create(&resource_bcast); + if (NULL == bcast_core) { + MALI_PRINT_ERROR(("Failed to create Broadcast unit object!\n")); + return _MALI_OSK_ERR_FAULT; + } + mali_bcast_unit_delete(bcast_core); + } +#endif /* (defined(CONFIG_MALI450) || defined(CONFIG_MALI470)) */ + + return _MALI_OSK_ERR_OK; +} + +_mali_osk_errcode_t mali_initialize_subsystems(void) +{ + _mali_osk_errcode_t err; + +#if defined(CONFIG_MALI_DT) && !defined(CONFIG_MALI_PLAT_SPECIFIC_DT) + err = _mali_osk_resource_initialize(); + if (_MALI_OSK_ERR_OK != err) { + mali_terminate_subsystems(); + return err; + } +#endif + + mali_pp_job_initialize(); + + mali_timeline_initialize(); + + err = mali_session_initialize(); + if (_MALI_OSK_ERR_OK != err) { + mali_terminate_subsystems(); + return err; + } + +#if defined(CONFIG_MALI400_PROFILING) + err = _mali_osk_profiling_init(mali_boot_profiling ? 
MALI_TRUE : MALI_FALSE); + if (_MALI_OSK_ERR_OK != err) { + /* No biggie if we weren't able to initialize the profiling */ + MALI_PRINT_ERROR(("Failed to initialize profiling, feature will be unavailable\n")); + } +#endif + + err = mali_memory_initialize(); + if (_MALI_OSK_ERR_OK != err) { + mali_terminate_subsystems(); + return err; + } + + err = mali_executor_initialize(); + if (_MALI_OSK_ERR_OK != err) { + mali_terminate_subsystems(); + return err; + } + + err = mali_scheduler_initialize(); + if (_MALI_OSK_ERR_OK != err) { + mali_terminate_subsystems(); + return err; + } + + /* Configure memory early, needed by mali_mmu_initialize. */ + err = mali_parse_config_memory(); + if (_MALI_OSK_ERR_OK != err) { + mali_terminate_subsystems(); + return err; + } + + err = mali_set_global_gpu_base_address(); + if (_MALI_OSK_ERR_OK != err) { + mali_terminate_subsystems(); + return err; + } + + /* Detect GPU class (uses L2 cache count) */ + mali_detect_gpu_class(); + + err = mali_check_shared_interrupts(); + if (_MALI_OSK_ERR_OK != err) { + mali_terminate_subsystems(); + return err; + } + + /* Initialize the MALI PMU (will not touch HW!) */ + err = mali_parse_config_pmu(); + if (_MALI_OSK_ERR_OK != err) { + mali_terminate_subsystems(); + return err; + } + + /* Initialize the power management module */ + err = mali_pm_initialize(); + if (_MALI_OSK_ERR_OK != err) { + mali_terminate_subsystems(); + return err; + } + + /* Make sure the entire GPU stays on for the rest of this function */ + mali_pm_init_begin(); + + /* Ensure HW is in a good state before starting to access cores. 
*/ + err = mali_init_hw_reset(); + if (_MALI_OSK_ERR_OK != err) { + mali_terminate_subsystems(); + return err; + } + + /* Detect which Mali GPU we are dealing with */ + err = mali_parse_product_info(); + if (_MALI_OSK_ERR_OK != err) { + mali_pm_init_end(); + mali_terminate_subsystems(); + return err; + } + + /* The global_product_id is now populated with the correct Mali GPU */ + + /* Start configuring the actual Mali hardware. */ + + err = mali_mmu_initialize(); + if (_MALI_OSK_ERR_OK != err) { + mali_pm_init_end(); + mali_terminate_subsystems(); + return err; + } + + if (mali_is_mali450() || mali_is_mali470()) { + err = mali_dlbu_initialize(); + if (_MALI_OSK_ERR_OK != err) { + mali_pm_init_end(); + mali_terminate_subsystems(); + return err; + } + } + + err = mali_parse_config_l2_cache(); + if (_MALI_OSK_ERR_OK != err) { + mali_pm_init_end(); + mali_terminate_subsystems(); + return err; + } + + err = mali_parse_config_groups(); + if (_MALI_OSK_ERR_OK != err) { + mali_pm_init_end(); + mali_terminate_subsystems(); + return err; + } + + /* Move groups into executor */ + mali_executor_populate(); + + /* Need call after all group has assigned a domain */ + mali_pm_power_cost_setup(); + + /* Initialize the GPU timer */ + err = mali_control_timer_init(); + if (_MALI_OSK_ERR_OK != err) { + mali_pm_init_end(); + mali_terminate_subsystems(); + return err; + } + + /* Initialize the GPU utilization tracking */ + err = mali_utilization_init(); + if (_MALI_OSK_ERR_OK != err) { + mali_pm_init_end(); + mali_terminate_subsystems(); + return err; + } + +#if defined(CONFIG_MALI_DVFS) + err = mali_dvfs_policy_init(); + if (_MALI_OSK_ERR_OK != err) { + mali_pm_init_end(); + mali_terminate_subsystems(); + return err; + } +#endif + + /* Allowing the system to be turned off */ + mali_pm_init_end(); + + return _MALI_OSK_ERR_OK; /* all ok */ +} + +void mali_terminate_subsystems(void) +{ + struct mali_pmu_core *pmu = mali_pmu_get_global_pmu_core(); + + MALI_DEBUG_PRINT(2, 
("terminate_subsystems() called\n")); + + mali_utilization_term(); + mali_control_timer_term(); + + mali_executor_depopulate(); + mali_delete_groups(); /* Delete groups not added to executor */ + mali_executor_terminate(); + + mali_scheduler_terminate(); + mali_pp_job_terminate(); + mali_delete_l2_cache_cores(); + mali_mmu_terminate(); + + if (mali_is_mali450() || mali_is_mali470()) { + mali_dlbu_terminate(); + } + + mali_pm_terminate(); + + if (NULL != pmu) { + mali_pmu_delete(pmu); + } + +#if defined(CONFIG_MALI400_PROFILING) + _mali_osk_profiling_term(); +#endif + + mali_memory_terminate(); + + mali_session_terminate(); + + mali_timeline_terminate(); + + global_gpu_base_address = 0; +} + +_mali_product_id_t mali_kernel_core_get_product_id(void) +{ + return global_product_id; +} + +u32 mali_kernel_core_get_gpu_major_version(void) +{ + return global_gpu_major_version; +} + +u32 mali_kernel_core_get_gpu_minor_version(void) +{ + return global_gpu_minor_version; +} + +_mali_osk_errcode_t _mali_ukk_get_api_version(_mali_uk_get_api_version_s *args) +{ + MALI_DEBUG_ASSERT_POINTER(args); + MALI_DEBUG_ASSERT(NULL != (void *)(uintptr_t)args->ctx); + + /* check compatability */ + if (args->version == _MALI_UK_API_VERSION) { + args->compatible = 1; + } else { + args->compatible = 0; + } + + args->version = _MALI_UK_API_VERSION; /* report our version */ + + /* success regardless of being compatible or not */ + MALI_SUCCESS; +} + +_mali_osk_errcode_t _mali_ukk_get_api_version_v2(_mali_uk_get_api_version_v2_s *args) +{ + MALI_DEBUG_ASSERT_POINTER(args); + MALI_DEBUG_ASSERT(NULL != (void *)(uintptr_t)args->ctx); + + /* check compatability */ + if (args->version == _MALI_UK_API_VERSION) { + args->compatible = 1; + } else { + args->compatible = 0; + } + + args->version = _MALI_UK_API_VERSION; /* report our version */ + + /* success regardless of being compatible or not */ + return _MALI_OSK_ERR_OK; +} + +_mali_osk_errcode_t 
_mali_ukk_wait_for_notification(_mali_uk_wait_for_notification_s *args) +{ + _mali_osk_errcode_t err; + _mali_osk_notification_t *notification; + _mali_osk_notification_queue_t *queue; + struct mali_session_data *session; + + /* check input */ + MALI_DEBUG_ASSERT_POINTER(args); + MALI_DEBUG_ASSERT(NULL != (void *)(uintptr_t)args->ctx); + + session = (struct mali_session_data *)(uintptr_t)args->ctx; + queue = session->ioctl_queue; + + /* if the queue does not exist we're currently shutting down */ + if (NULL == queue) { + MALI_DEBUG_PRINT(1, ("No notification queue registered with the session. Asking userspace to stop querying\n")); + args->type = _MALI_NOTIFICATION_CORE_SHUTDOWN_IN_PROGRESS; + return _MALI_OSK_ERR_OK; + } + + /* receive a notification, might sleep */ + err = _mali_osk_notification_queue_receive(queue, &notification); + if (_MALI_OSK_ERR_OK != err) { + MALI_ERROR(err); /* errcode returned, pass on to caller */ + } + + /* copy the buffer to the user */ + args->type = (_mali_uk_notification_type)notification->notification_type; + _mali_osk_memcpy(&args->data, notification->result_buffer, notification->result_buffer_size); + + /* finished with the notification */ + _mali_osk_notification_delete(notification); + + return _MALI_OSK_ERR_OK; /* all ok */ +} + +_mali_osk_errcode_t _mali_ukk_post_notification(_mali_uk_post_notification_s *args) +{ + _mali_osk_notification_t *notification; + _mali_osk_notification_queue_t *queue; + struct mali_session_data *session; + + /* check input */ + MALI_DEBUG_ASSERT_POINTER(args); + MALI_DEBUG_ASSERT(NULL != (void *)(uintptr_t)args->ctx); + + session = (struct mali_session_data *)(uintptr_t)args->ctx; + queue = session->ioctl_queue; + + /* if the queue does not exist we're currently shutting down */ + if (NULL == queue) { + MALI_DEBUG_PRINT(1, ("No notification queue registered with the session. 
Asking userspace to stop querying\n")); + return _MALI_OSK_ERR_OK; + } + + notification = _mali_osk_notification_create(args->type, 0); + if (NULL == notification) { + MALI_PRINT_ERROR(("Failed to create notification object\n")); + return _MALI_OSK_ERR_NOMEM; + } + + _mali_osk_notification_queue_send(queue, notification); + + return _MALI_OSK_ERR_OK; /* all ok */ +} + +_mali_osk_errcode_t _mali_ukk_pending_submit(_mali_uk_pending_submit_s *args) +{ + wait_queue_head_t *queue; + + /* check input */ + MALI_DEBUG_ASSERT_POINTER(args); + MALI_DEBUG_ASSERT(NULL != (void *)(uintptr_t)args->ctx); + + queue = mali_session_get_wait_queue(); + + /* check pending big job number, might sleep if larger than MAX allowed number */ + if (wait_event_interruptible(*queue, MALI_MAX_PENDING_BIG_JOB > mali_scheduler_job_gp_big_job_count())) { + return _MALI_OSK_ERR_RESTARTSYSCALL; + } + + return _MALI_OSK_ERR_OK; /* all ok */ +} + + +_mali_osk_errcode_t _mali_ukk_request_high_priority(_mali_uk_request_high_priority_s *args) +{ + struct mali_session_data *session; + + MALI_DEBUG_ASSERT_POINTER(args); + MALI_DEBUG_ASSERT(NULL != (void *)(uintptr_t)args->ctx); + + session = (struct mali_session_data *)(uintptr_t)args->ctx; + + if (!session->use_high_priority_job_queue) { + session->use_high_priority_job_queue = MALI_TRUE; + MALI_DEBUG_PRINT(2, ("Session 0x%08X with pid %d was granted higher priority.\n", session, _mali_osk_get_pid())); + } + + return _MALI_OSK_ERR_OK; +} + +_mali_osk_errcode_t _mali_ukk_open(void **context) +{ + u32 i; + struct mali_session_data *session; + + /* allocated struct to track this session */ + session = (struct mali_session_data *)_mali_osk_calloc(1, sizeof(struct mali_session_data)); + MALI_CHECK_NON_NULL(session, _MALI_OSK_ERR_NOMEM); + + MALI_DEBUG_PRINT(3, ("Session starting\n")); + + /* create a response queue for this session */ + session->ioctl_queue = _mali_osk_notification_queue_init(); + if (NULL == session->ioctl_queue) { + goto err; + } + + 
session->page_directory = mali_mmu_pagedir_alloc(); + if (NULL == session->page_directory) { + goto err_mmu; + } + + if (_MALI_OSK_ERR_OK != mali_mmu_pagedir_map(session->page_directory, MALI_DLBU_VIRT_ADDR, _MALI_OSK_MALI_PAGE_SIZE)) { + MALI_PRINT_ERROR(("Failed to map DLBU page into session\n")); + goto err_mmu; + } + + if (0 != mali_dlbu_phys_addr) { + mali_mmu_pagedir_update(session->page_directory, MALI_DLBU_VIRT_ADDR, mali_dlbu_phys_addr, + _MALI_OSK_MALI_PAGE_SIZE, MALI_MMU_FLAGS_DEFAULT); + } + + if (_MALI_OSK_ERR_OK != mali_memory_session_begin(session)) { + goto err_session; + } + + /* Create soft system. */ + session->soft_job_system = mali_soft_job_system_create(session); + if (NULL == session->soft_job_system) { + goto err_soft; + } + + /* Create timeline system. */ + session->timeline_system = mali_timeline_system_create(session); + if (NULL == session->timeline_system) { + goto err_time_line; + } + +#if defined(CONFIG_MALI_DVFS) + _mali_osk_atomic_init(&session->number_of_window_jobs, 0); +#endif + + session->use_high_priority_job_queue = MALI_FALSE; + + /* Initialize list of PP jobs on this session. */ + _MALI_OSK_INIT_LIST_HEAD(&session->pp_job_list); + + /* Initialize the pp_job_fb_lookup_list array used to quickly lookup jobs from a given frame builder */ + for (i = 0; i < MALI_PP_JOB_FB_LOOKUP_LIST_SIZE; ++i) { + _MALI_OSK_INIT_LIST_HEAD(&session->pp_job_fb_lookup_list[i]); + } + + session->pid = _mali_osk_get_pid(); + session->comm = _mali_osk_get_comm(); + session->max_mali_mem_allocated_size = 0; + for (i = 0; i < MALI_MEM_TYPE_MAX; i ++) { + atomic_set(&session->mali_mem_array[i], 0); + } + atomic_set(&session->mali_mem_allocated_pages, 0); + *context = (void *)session; + + /* Add session to the list of all sessions. 
*/ + mali_session_add(session); + + MALI_DEBUG_PRINT(3, ("Session started\n")); + return _MALI_OSK_ERR_OK; + +err_time_line: + mali_soft_job_system_destroy(session->soft_job_system); +err_soft: + mali_memory_session_end(session); +err_session: + mali_mmu_pagedir_free(session->page_directory); +err_mmu: + _mali_osk_notification_queue_term(session->ioctl_queue); +err: + _mali_osk_free(session); + MALI_ERROR(_MALI_OSK_ERR_NOMEM); + +} + +#if defined(DEBUG) +/* parameter used for debug */ +extern u32 num_pm_runtime_resume; +extern u32 num_pm_updates; +extern u32 num_pm_updates_up; +extern u32 num_pm_updates_down; +#endif + +_mali_osk_errcode_t _mali_ukk_close(void **context) +{ + struct mali_session_data *session; + MALI_CHECK_NON_NULL(context, _MALI_OSK_ERR_INVALID_ARGS); + session = (struct mali_session_data *)*context; + + MALI_DEBUG_PRINT(3, ("Session ending\n")); + + MALI_DEBUG_ASSERT_POINTER(session->soft_job_system); + MALI_DEBUG_ASSERT_POINTER(session->timeline_system); + + /* Remove session from list of all sessions. */ + mali_session_remove(session); + + /* This flag is used to prevent queueing of jobs due to activation. */ + session->is_aborting = MALI_TRUE; + + /* Stop the soft job timer. */ + mali_timeline_system_stop_timer(session->timeline_system); + + /* Abort queued jobs */ + mali_scheduler_abort_session(session); + + /* Abort executing jobs */ + mali_executor_abort_session(session); + + /* Abort the soft job system. */ + mali_soft_job_system_abort(session->soft_job_system); + + /* Force execution of all pending bottom half processing for GP and PP. */ + _mali_osk_wq_flush(); + + /* The session PP list should now be empty. */ + MALI_DEBUG_ASSERT(_mali_osk_list_empty(&session->pp_job_list)); + + /* At this point the GP and PP scheduler no longer has any jobs queued or running from this + * session, and all soft jobs in the soft job system has been destroyed. 
*/ + + /* Any trackers left in the timeline system are directly or indirectly waiting on external + * sync fences. Cancel all sync fence waiters to trigger activation of all remaining + * trackers. This call will sleep until all timelines are empty. */ + mali_timeline_system_abort(session->timeline_system); + + /* Flush pending work. + * Needed to make sure all bottom half processing related to this + * session has been completed, before we free internal data structures. + */ + _mali_osk_wq_flush(); + + /* Destroy timeline system. */ + mali_timeline_system_destroy(session->timeline_system); + session->timeline_system = NULL; + + /* Destroy soft system. */ + mali_soft_job_system_destroy(session->soft_job_system); + session->soft_job_system = NULL; + + MALI_DEBUG_CODE({ + /* Check that the pp_job_fb_lookup_list array is empty. */ + u32 i; + for (i = 0; i < MALI_PP_JOB_FB_LOOKUP_LIST_SIZE; ++i) + { + MALI_DEBUG_ASSERT(_mali_osk_list_empty(&session->pp_job_fb_lookup_list[i])); + } + }); + + /* Free remaining memory allocated to this session */ + mali_memory_session_end(session); + +#if defined(CONFIG_MALI_DVFS) + _mali_osk_atomic_term(&session->number_of_window_jobs); +#endif + +#if defined(CONFIG_MALI400_PROFILING) + _mali_osk_profiling_stop_sampling(session->pid); +#endif + + /* Free session data structures */ + mali_mmu_pagedir_unmap(session->page_directory, MALI_DLBU_VIRT_ADDR, _MALI_OSK_MALI_PAGE_SIZE); + mali_mmu_pagedir_free(session->page_directory); + _mali_osk_notification_queue_term(session->ioctl_queue); + _mali_osk_free(session); + + *context = NULL; + + MALI_DEBUG_PRINT(3, ("Session has ended\n")); + +#if defined(DEBUG) + MALI_DEBUG_PRINT(3, ("Stats: # runtime resumes: %u\n", num_pm_runtime_resume)); + MALI_DEBUG_PRINT(3, (" # PM updates: .... 
%u (up %u, down %u)\n", num_pm_updates, num_pm_updates_up, num_pm_updates_down)); + + num_pm_runtime_resume = 0; + num_pm_updates = 0; + num_pm_updates_up = 0; + num_pm_updates_down = 0; +#endif + + return _MALI_OSK_ERR_OK;; +} + +#if MALI_STATE_TRACKING +u32 _mali_kernel_core_dump_state(char *buf, u32 size) +{ + int n = 0; /* Number of bytes written to buf */ + + n += mali_scheduler_dump_state(buf + n, size - n); + n += mali_executor_dump_state(buf + n, size - n); + + return n; +} +#endif diff --git a/drivers/gpu/arm/utgard/common/mali_kernel_core.h b/drivers/gpu/arm/utgard/common/mali_kernel_core.h new file mode 100644 index 000000000000..8cdbc5af3205 --- /dev/null +++ b/drivers/gpu/arm/utgard/common/mali_kernel_core.h @@ -0,0 +1,57 @@ +/* + * Copyright (C) 2010-2015 ARM Limited. All rights reserved. + * + * This program is free software and is provided to you under the terms of the GNU General Public License version 2 + * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence. + * + * A copy of the licence is included with the program, and can also be obtained from Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. 
+ */ + +#ifndef __MALI_KERNEL_CORE_H__ +#define __MALI_KERNEL_CORE_H__ + +#include "mali_osk.h" + +typedef enum { + _MALI_PRODUCT_ID_UNKNOWN, + _MALI_PRODUCT_ID_MALI200, + _MALI_PRODUCT_ID_MALI300, + _MALI_PRODUCT_ID_MALI400, + _MALI_PRODUCT_ID_MALI450, + _MALI_PRODUCT_ID_MALI470, +} _mali_product_id_t; + +extern mali_bool mali_gpu_class_is_mali450; +extern mali_bool mali_gpu_class_is_mali470; + +_mali_osk_errcode_t mali_initialize_subsystems(void); + +void mali_terminate_subsystems(void); + +_mali_product_id_t mali_kernel_core_get_product_id(void); + +u32 mali_kernel_core_get_gpu_major_version(void); + +u32 mali_kernel_core_get_gpu_minor_version(void); + +u32 _mali_kernel_core_dump_state(char *buf, u32 size); + +MALI_STATIC_INLINE mali_bool mali_is_mali470(void) +{ + return mali_gpu_class_is_mali470; +} + +MALI_STATIC_INLINE mali_bool mali_is_mali450(void) +{ + return mali_gpu_class_is_mali450; +} + +MALI_STATIC_INLINE mali_bool mali_is_mali400(void) +{ + if (mali_gpu_class_is_mali450 || mali_gpu_class_is_mali470) + return MALI_FALSE; + + return MALI_TRUE; +} +#endif /* __MALI_KERNEL_CORE_H__ */ diff --git a/drivers/gpu/arm/utgard/common/mali_kernel_utilization.c b/drivers/gpu/arm/utgard/common/mali_kernel_utilization.c new file mode 100644 index 000000000000..63b941742249 --- /dev/null +++ b/drivers/gpu/arm/utgard/common/mali_kernel_utilization.c @@ -0,0 +1,440 @@ +/* + * Copyright (C) 2010-2015 ARM Limited. All rights reserved. + * + * This program is free software and is provided to you under the terms of the GNU General Public License version 2 + * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence. + * + * A copy of the licence is included with the program, and can also be obtained from Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. 
+ */ + +#include "mali_kernel_utilization.h" +#include "mali_osk.h" +#include "mali_osk_mali.h" +#include "mali_kernel_common.h" +#include "mali_session.h" +#include "mali_scheduler.h" + +#include "mali_executor.h" +#include "mali_dvfs_policy.h" +#include "mali_control_timer.h" + +/* Thresholds for GP bound detection. */ +#define MALI_GP_BOUND_GP_UTILIZATION_THRESHOLD 240 +#define MALI_GP_BOUND_PP_UTILIZATION_THRESHOLD 250 + +static _mali_osk_spinlock_irq_t *utilization_data_lock; + +static u32 num_running_gp_cores = 0; +static u32 num_running_pp_cores = 0; + +static u64 work_start_time_gpu = 0; +static u64 work_start_time_gp = 0; +static u64 work_start_time_pp = 0; +static u64 accumulated_work_time_gpu = 0; +static u64 accumulated_work_time_gp = 0; +static u64 accumulated_work_time_pp = 0; + +static u32 last_utilization_gpu = 0 ; +static u32 last_utilization_gp = 0 ; +static u32 last_utilization_pp = 0 ; + +void (*mali_utilization_callback)(struct mali_gpu_utilization_data *data) = NULL; + +/* Define the first timer control timer timeout in milliseconds */ +static u32 mali_control_first_timeout = 100; +static struct mali_gpu_utilization_data mali_util_data = {0, }; + +struct mali_gpu_utilization_data *mali_utilization_calculate(u64 *start_time, u64 *time_period, mali_bool *need_add_timer) +{ + u64 time_now; + u32 leading_zeroes; + u32 shift_val; + u32 work_normalized_gpu; + u32 work_normalized_gp; + u32 work_normalized_pp; + u32 period_normalized; + u32 utilization_gpu; + u32 utilization_gp; + u32 utilization_pp; + + mali_utilization_data_lock(); + + time_now = _mali_osk_time_get_ns(); + + *time_period = time_now - *start_time; + + if (accumulated_work_time_gpu == 0 && work_start_time_gpu == 0) { + mali_control_timer_pause(); + /* + * No work done for this period + * - No need to reschedule timer + * - Report zero usage + */ + last_utilization_gpu = 0; + last_utilization_gp = 0; + last_utilization_pp = 0; + + mali_util_data.utilization_gpu = last_utilization_gpu; 
+ mali_util_data.utilization_gp = last_utilization_gp; + mali_util_data.utilization_pp = last_utilization_pp; + + mali_utilization_data_unlock(); + + *need_add_timer = MALI_FALSE; + + mali_executor_hint_disable(MALI_EXECUTOR_HINT_GP_BOUND); + + MALI_DEBUG_PRINT(4, ("last_utilization_gpu = %d \n", last_utilization_gpu)); + MALI_DEBUG_PRINT(4, ("last_utilization_gp = %d \n", last_utilization_gp)); + MALI_DEBUG_PRINT(4, ("last_utilization_pp = %d \n", last_utilization_pp)); + + return &mali_util_data; + } + + /* If we are currently busy, update working period up to now */ + if (work_start_time_gpu != 0) { + accumulated_work_time_gpu += (time_now - work_start_time_gpu); + work_start_time_gpu = time_now; + + /* GP and/or PP will also be busy if the GPU is busy at this point */ + + if (work_start_time_gp != 0) { + accumulated_work_time_gp += (time_now - work_start_time_gp); + work_start_time_gp = time_now; + } + + if (work_start_time_pp != 0) { + accumulated_work_time_pp += (time_now - work_start_time_pp); + work_start_time_pp = time_now; + } + } + + /* + * We have two 64-bit values, a dividend and a divisor. + * To avoid dependencies to a 64-bit divider, we shift down the two values + * equally first. + * We shift the dividend up and possibly the divisor down, making the result X in 256. 
+ */ + + /* Shift the 64-bit values down so they fit inside a 32-bit integer */ + leading_zeroes = _mali_osk_clz((u32)(*time_period >> 32)); + shift_val = 32 - leading_zeroes; + work_normalized_gpu = (u32)(accumulated_work_time_gpu >> shift_val); + work_normalized_gp = (u32)(accumulated_work_time_gp >> shift_val); + work_normalized_pp = (u32)(accumulated_work_time_pp >> shift_val); + period_normalized = (u32)(*time_period >> shift_val); + + /* + * Now, we should report the usage in parts of 256 + * this means we must shift up the dividend or down the divisor by 8 + * (we could do a combination, but we just use one for simplicity, + * but the end result should be good enough anyway) + */ + if (period_normalized > 0x00FFFFFF) { + /* The divisor is so big that it is safe to shift it down */ + period_normalized >>= 8; + } else { + /* + * The divisor is so small that we can shift up the dividend, without loosing any data. + * (dividend is always smaller than the divisor) + */ + work_normalized_gpu <<= 8; + work_normalized_gp <<= 8; + work_normalized_pp <<= 8; + } + + utilization_gpu = work_normalized_gpu / period_normalized; + utilization_gp = work_normalized_gp / period_normalized; + utilization_pp = work_normalized_pp / period_normalized; + + last_utilization_gpu = utilization_gpu; + last_utilization_gp = utilization_gp; + last_utilization_pp = utilization_pp; + + if ((MALI_GP_BOUND_GP_UTILIZATION_THRESHOLD < last_utilization_gp) && + (MALI_GP_BOUND_PP_UTILIZATION_THRESHOLD > last_utilization_pp)) { + mali_executor_hint_enable(MALI_EXECUTOR_HINT_GP_BOUND); + } else { + mali_executor_hint_disable(MALI_EXECUTOR_HINT_GP_BOUND); + } + + /* starting a new period */ + accumulated_work_time_gpu = 0; + accumulated_work_time_gp = 0; + accumulated_work_time_pp = 0; + + *start_time = time_now; + + mali_util_data.utilization_gp = last_utilization_gp; + mali_util_data.utilization_gpu = last_utilization_gpu; + mali_util_data.utilization_pp = last_utilization_pp; + + 
mali_utilization_data_unlock(); + + *need_add_timer = MALI_TRUE; + + MALI_DEBUG_PRINT(4, ("last_utilization_gpu = %d \n", last_utilization_gpu)); + MALI_DEBUG_PRINT(4, ("last_utilization_gp = %d \n", last_utilization_gp)); + MALI_DEBUG_PRINT(4, ("last_utilization_pp = %d \n", last_utilization_pp)); + + return &mali_util_data; +} + +_mali_osk_errcode_t mali_utilization_init(void) +{ +#if USING_GPU_UTILIZATION + _mali_osk_device_data data; + + if (_MALI_OSK_ERR_OK == _mali_osk_device_data_get(&data)) { + if (NULL != data.utilization_callback) { + mali_utilization_callback = data.utilization_callback; + MALI_DEBUG_PRINT(2, ("Mali GPU Utilization: Utilization handler installed \n")); + } + } +#endif /* defined(USING_GPU_UTILIZATION) */ + + if (NULL == mali_utilization_callback) { + MALI_DEBUG_PRINT(2, ("Mali GPU Utilization: No platform utilization handler installed\n")); + } + + utilization_data_lock = _mali_osk_spinlock_irq_init(_MALI_OSK_LOCKFLAG_ORDERED, _MALI_OSK_LOCK_ORDER_UTILIZATION); + if (NULL == utilization_data_lock) { + return _MALI_OSK_ERR_FAULT; + } + + num_running_gp_cores = 0; + num_running_pp_cores = 0; + + return _MALI_OSK_ERR_OK; +} + +void mali_utilization_term(void) +{ + if (NULL != utilization_data_lock) { + _mali_osk_spinlock_irq_term(utilization_data_lock); + } +} + +void mali_utilization_gp_start(void) +{ + mali_utilization_data_lock(); + + ++num_running_gp_cores; + if (1 == num_running_gp_cores) { + u64 time_now = _mali_osk_time_get_ns(); + + /* First GP core started, consider GP busy from now and onwards */ + work_start_time_gp = time_now; + + if (0 == num_running_pp_cores) { + mali_bool is_resume = MALI_FALSE; + /* + * There are no PP cores running, so this is also the point + * at which we consider the GPU to be busy as well. 
+ */
+			work_start_time_gpu = time_now;
+
+			is_resume = mali_control_timer_resume(time_now);
+
+			mali_utilization_data_unlock();
+
+			if (is_resume) {
+				/* Do some policy in new period for performance consideration */
+#if defined(CONFIG_MALI_DVFS)
+				/* Clear session->number_of_window_jobs, prepare parameter for dvfs */
+				mali_session_max_window_num();
+				if (0 == last_utilization_gpu) {
+					/*
+					 * mali_dev_pause is called when setting the clock, so
+					 * every clock change would force the highest clock step
+					 * even when clocking down. That is not necessary, so we
+					 * only start a new DVFS period in the "previous
+					 * utilization was 0, timer stopped, GPU restarted" case.
+					 */
+					mali_dvfs_policy_new_period();
+				}
+#endif
+				/*
+				 * Use a short first timeout for power reasons: the new
+				 * period starts at full power, and if the job load is light
+				 * (finishes in ~10ms) staying at high frequency for the
+				 * whole interval would waste energy.
+				 */
+				mali_control_timer_add(mali_control_first_timeout);
+			}
+		} else {
+			mali_utilization_data_unlock();
+		}
+
+	} else {
+		/* Nothing to do */
+		mali_utilization_data_unlock();
+	}
+}
+
+/*
+ * Called when a PP job starts executing. Mirrors mali_utilization_gp_start():
+ * the first running PP core marks PP (and, when no GP cores run, the whole
+ * GPU) as busy, and may resume the utilization control timer.
+ */
+void mali_utilization_pp_start(void)
+{
+	mali_utilization_data_lock();
+
+	++num_running_pp_cores;
+	if (1 == num_running_pp_cores) {
+		u64 time_now = _mali_osk_time_get_ns();
+
+		/* First PP core started, consider PP busy from now and onwards */
+		work_start_time_pp = time_now;
+
+		if (0 == num_running_gp_cores) {
+			mali_bool is_resume = MALI_FALSE;
+			/*
+			 * There are no GP cores running, so this is also the point
+			 * at which we consider the GPU to be busy as well.
+ */
+			work_start_time_gpu = time_now;
+
+			/* Start a new period if stopped */
+			is_resume = mali_control_timer_resume(time_now);
+
+			mali_utilization_data_unlock();
+
+			if (is_resume) {
+#if defined(CONFIG_MALI_DVFS)
+				/* Clear session->number_of_window_jobs, prepare parameter for dvfs */
+				mali_session_max_window_num();
+				if (0 == last_utilization_gpu) {
+					/*
+					 * mali_dev_pause is called when setting the clock, so
+					 * every clock change would force the highest clock step
+					 * even when clocking down. That is not necessary, so we
+					 * only start a new DVFS period in the "previous
+					 * utilization was 0, timer stopped, GPU restarted" case.
+					 */
+					mali_dvfs_policy_new_period();
+				}
+#endif
+
+				/*
+				 * Use a short first timeout for power reasons: the new
+				 * period starts at full power, and if the job load is light
+				 * (finishes in ~10ms) staying at high frequency for the
+				 * whole interval would waste energy.
+				 */
+				mali_control_timer_add(mali_control_first_timeout);
+			}
+		} else {
+			mali_utilization_data_unlock();
+		}
+	} else {
+		/* Nothing to do */
+		mali_utilization_data_unlock();
+	}
+}
+
+/*
+ * Called when a GP job finishes. When the last running GP core stops,
+ * accumulate the GP busy time since work_start_time_gp (and the GPU busy
+ * time as well if no PP cores are running either).
+ */
+void mali_utilization_gp_end(void)
+{
+	mali_utilization_data_lock();
+
+	--num_running_gp_cores;
+	if (0 == num_running_gp_cores) {
+		u64 time_now = _mali_osk_time_get_ns();
+
+		/* Last GP core ended, consider GP idle from now and onwards */
+		accumulated_work_time_gp += (time_now - work_start_time_gp);
+		work_start_time_gp = 0;
+
+		if (0 == num_running_pp_cores) {
+			/*
+			 * There are no PP cores running, so this is also the point
+			 * at which we consider the GPU to be idle as well.
+ */
+			accumulated_work_time_gpu += (time_now - work_start_time_gpu);
+			work_start_time_gpu = 0;
+		}
+	}
+
+	mali_utilization_data_unlock();
+}
+
+/*
+ * PP counterpart of mali_utilization_gp_end(): when the last running PP
+ * core stops, accumulate PP busy time (and GPU busy time if no GP cores
+ * are running either).
+ */
+void mali_utilization_pp_end(void)
+{
+	mali_utilization_data_lock();
+
+	--num_running_pp_cores;
+	if (0 == num_running_pp_cores) {
+		u64 time_now = _mali_osk_time_get_ns();
+
+		/* Last PP core ended, consider PP idle from now and onwards */
+		accumulated_work_time_pp += (time_now - work_start_time_pp);
+		work_start_time_pp = 0;
+
+		if (0 == num_running_gp_cores) {
+			/*
+			 * There are no GP cores running, so this is also the point
+			 * at which we consider the GPU to be idle as well.
+			 */
+			accumulated_work_time_gpu += (time_now - work_start_time_gpu);
+			work_start_time_gpu = 0;
+		}
+	}
+
+	mali_utilization_data_unlock();
+}
+
+/*
+ * Utilization tracking is enabled when DVFS policy is active (with
+ * CONFIG_MALI_DVFS) or when a platform callback was installed at init.
+ */
+mali_bool mali_utilization_enabled(void)
+{
+#if defined(CONFIG_MALI_DVFS)
+	return mali_dvfs_policy_enabled();
+#else
+	return (NULL != mali_utilization_callback);
+#endif /* defined(CONFIG_MALI_DVFS) */
+}
+
+/* Forward computed utilization data to the platform callback (must be set). */
+void mali_utilization_platform_realize(struct mali_gpu_utilization_data *util_data)
+{
+	MALI_DEBUG_ASSERT_POINTER(mali_utilization_callback);
+
+	mali_utilization_callback(util_data);
+}
+
+/* Zero all accumulated work times and last reported utilization values. */
+void mali_utilization_reset(void)
+{
+	accumulated_work_time_gpu = 0;
+	accumulated_work_time_gp = 0;
+	accumulated_work_time_pp = 0;
+
+	last_utilization_gpu = 0;
+	last_utilization_gp = 0;
+	last_utilization_pp = 0;
+}
+
+void mali_utilization_data_lock(void)
+{
+	_mali_osk_spinlock_irq_lock(utilization_data_lock);
+}
+
+void mali_utilization_data_unlock(void)
+{
+	_mali_osk_spinlock_irq_unlock(utilization_data_lock);
+}
+
+void mali_utilization_data_assert_locked(void)
+{
+	MALI_DEBUG_ASSERT_LOCK_HELD(utilization_data_lock);
+}
+
+/* Last combined GPU utilization (scaled to parts of 256), for user space. */
+u32 _mali_ukk_utilization_gp_pp(void)
+{
+	return last_utilization_gpu;
+}
+
+u32 _mali_ukk_utilization_gp(void)
+{
+	return last_utilization_gp;
+}
+
+u32 _mali_ukk_utilization_pp(void)
+{
+	return last_utilization_pp;
+}
diff --git 
a/drivers/gpu/arm/utgard/common/mali_kernel_utilization.h b/drivers/gpu/arm/utgard/common/mali_kernel_utilization.h new file mode 100644 index 000000000000..3c20b1983762 --- /dev/null +++ b/drivers/gpu/arm/utgard/common/mali_kernel_utilization.h @@ -0,0 +1,72 @@ +/* + * Copyright (C) 2010-2015 ARM Limited. All rights reserved. + * + * This program is free software and is provided to you under the terms of the GNU General Public License version 2 + * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence. + * + * A copy of the licence is included with the program, and can also be obtained from Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. + */ + +#ifndef __MALI_KERNEL_UTILIZATION_H__ +#define __MALI_KERNEL_UTILIZATION_H__ + +#include <linux/mali/mali_utgard.h> +#include "mali_osk.h" + +/** + * Initialize/start the Mali GPU utilization metrics reporting. + * + * @return _MALI_OSK_ERR_OK on success, otherwise failure. 
+ */ +_mali_osk_errcode_t mali_utilization_init(void); + +/** + * Terminate the Mali GPU utilization metrics reporting + */ +void mali_utilization_term(void); + +/** + * Check if Mali utilization is enabled + */ +mali_bool mali_utilization_enabled(void); + +/** + * Should be called when a job is about to execute a GP job + */ +void mali_utilization_gp_start(void); + +/** + * Should be called when a job has completed executing a GP job + */ +void mali_utilization_gp_end(void); + +/** + * Should be called when a job is about to execute a PP job + */ +void mali_utilization_pp_start(void); + +/** + * Should be called when a job has completed executing a PP job + */ +void mali_utilization_pp_end(void); + +/** + * Should be called to calcution the GPU utilization + */ +struct mali_gpu_utilization_data *mali_utilization_calculate(u64 *start_time, u64 *time_period, mali_bool *need_add_timer); + +_mali_osk_spinlock_irq_t *mali_utilization_get_lock(void); + +void mali_utilization_platform_realize(struct mali_gpu_utilization_data *util_data); + +void mali_utilization_data_lock(void); + +void mali_utilization_data_unlock(void); + +void mali_utilization_data_assert_locked(void); + +void mali_utilization_reset(void); + + +#endif /* __MALI_KERNEL_UTILIZATION_H__ */ diff --git a/drivers/gpu/arm/utgard/common/mali_kernel_vsync.c b/drivers/gpu/arm/utgard/common/mali_kernel_vsync.c new file mode 100644 index 000000000000..2eed4c88cf56 --- /dev/null +++ b/drivers/gpu/arm/utgard/common/mali_kernel_vsync.c @@ -0,0 +1,45 @@ +/* + * Copyright (C) 2011-2015 ARM Limited. All rights reserved. + * + * This program is free software and is provided to you under the terms of the GNU General Public License version 2 + * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence. 
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#include "mali_kernel_common.h"
+#include "mali_osk.h"
+#include "mali_ukk.h"
+
+#include "mali_osk_profiling.h"
+
+/*
+ * Translate a user-space vsync begin/end-wait notification into kernel
+ * profiling suspend/resume events (pid/tid are filled in here, so user
+ * space does not need a second kernel call). Always succeeds.
+ */
+_mali_osk_errcode_t _mali_ukk_vsync_event_report(_mali_uk_vsync_event_report_s *args)
+{
+	_mali_uk_vsync_event event = (_mali_uk_vsync_event)args->event;
+	MALI_IGNORE(event); /* event is not used for release code, and that is OK */
+
+	/*
+	 * Manually generate user space events in kernel space.
+	 * This saves user space from calling kernel space twice in this case.
+	 * We just need to remember to add pid and tid manually.
+	 */
+	if (event == _MALI_UK_VSYNC_EVENT_BEGIN_WAIT) {
+		_mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_SUSPEND |
+					      MALI_PROFILING_EVENT_CHANNEL_SOFTWARE |
+					      MALI_PROFILING_EVENT_REASON_SUSPEND_RESUME_SW_VSYNC,
+					      _mali_osk_get_pid(), _mali_osk_get_tid(), 0, 0, 0);
+	}
+
+	if (event == _MALI_UK_VSYNC_EVENT_END_WAIT) {
+		_mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_RESUME |
+					      MALI_PROFILING_EVENT_CHANNEL_SOFTWARE |
+					      MALI_PROFILING_EVENT_REASON_SUSPEND_RESUME_SW_VSYNC,
+					      _mali_osk_get_pid(), _mali_osk_get_tid(), 0, 0, 0);
+	}
+
+
+	MALI_DEBUG_PRINT(4, ("Received VSYNC event: %d\n", event));
+	MALI_SUCCESS;
+}
+
diff --git a/drivers/gpu/arm/utgard/common/mali_l2_cache.c b/drivers/gpu/arm/utgard/common/mali_l2_cache.c new file mode 100644 index 000000000000..494ba789cd08 --- /dev/null +++ b/drivers/gpu/arm/utgard/common/mali_l2_cache.c @@ -0,0 +1,534 @@ +/*
+ * Copyright (C) 2010-2015 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ * + * A copy of the licence is included with the program, and can also be obtained from Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. + */ +#include "mali_kernel_common.h" +#include "mali_osk.h" +#include "mali_l2_cache.h" +#include "mali_hw_core.h" +#include "mali_scheduler.h" +#include "mali_pm.h" +#include "mali_pm_domain.h" + +/** + * Size of the Mali L2 cache registers in bytes + */ +#define MALI400_L2_CACHE_REGISTERS_SIZE 0x30 + +/** + * Mali L2 cache register numbers + * Used in the register read/write routines. + * See the hardware documentation for more information about each register + */ +typedef enum mali_l2_cache_register { + MALI400_L2_CACHE_REGISTER_SIZE = 0x0004, + MALI400_L2_CACHE_REGISTER_STATUS = 0x0008, + /*unused = 0x000C */ + MALI400_L2_CACHE_REGISTER_COMMAND = 0x0010, + MALI400_L2_CACHE_REGISTER_CLEAR_PAGE = 0x0014, + MALI400_L2_CACHE_REGISTER_MAX_READS = 0x0018, + MALI400_L2_CACHE_REGISTER_ENABLE = 0x001C, + MALI400_L2_CACHE_REGISTER_PERFCNT_SRC0 = 0x0020, + MALI400_L2_CACHE_REGISTER_PERFCNT_VAL0 = 0x0024, + MALI400_L2_CACHE_REGISTER_PERFCNT_SRC1 = 0x0028, + MALI400_L2_CACHE_REGISTER_PERFCNT_VAL1 = 0x002C, +} mali_l2_cache_register; + +/** + * Mali L2 cache commands + * These are the commands that can be sent to the Mali L2 cache unit + */ +typedef enum mali_l2_cache_command { + MALI400_L2_CACHE_COMMAND_CLEAR_ALL = 0x01, +} mali_l2_cache_command; + +/** + * Mali L2 cache commands + * These are the commands that can be sent to the Mali L2 cache unit + */ +typedef enum mali_l2_cache_enable { + MALI400_L2_CACHE_ENABLE_DEFAULT = 0x0, /* Default */ + MALI400_L2_CACHE_ENABLE_ACCESS = 0x01, + MALI400_L2_CACHE_ENABLE_READ_ALLOCATE = 0x02, +} mali_l2_cache_enable; + +/** + * Mali L2 cache status bits + */ +typedef enum mali_l2_cache_status { + MALI400_L2_CACHE_STATUS_COMMAND_BUSY = 0x01, + MALI400_L2_CACHE_STATUS_DATA_BUSY = 0x02, +} mali_l2_cache_status; + +#define MALI400_L2_MAX_READS_NOT_SET -1 
+ +static struct mali_l2_cache_core * + mali_global_l2s[MALI_MAX_NUMBER_OF_L2_CACHE_CORES] = { NULL, }; +static u32 mali_global_num_l2s = 0; + +int mali_l2_max_reads = MALI400_L2_MAX_READS_NOT_SET; + + +/* Local helper functions */ + +static void mali_l2_cache_reset(struct mali_l2_cache_core *cache); + +static _mali_osk_errcode_t mali_l2_cache_send_command( + struct mali_l2_cache_core *cache, u32 reg, u32 val); + +static void mali_l2_cache_lock(struct mali_l2_cache_core *cache) +{ + MALI_DEBUG_ASSERT_POINTER(cache); + _mali_osk_spinlock_irq_lock(cache->lock); +} + +static void mali_l2_cache_unlock(struct mali_l2_cache_core *cache) +{ + MALI_DEBUG_ASSERT_POINTER(cache); + _mali_osk_spinlock_irq_unlock(cache->lock); +} + +/* Implementation of the L2 cache interface */ + +struct mali_l2_cache_core *mali_l2_cache_create( + _mali_osk_resource_t *resource, u32 domain_index) +{ + struct mali_l2_cache_core *cache = NULL; +#if defined(DEBUG) + u32 cache_size; +#endif + + MALI_DEBUG_PRINT(4, ("Mali L2 cache: Creating Mali L2 cache: %s\n", + resource->description)); + + if (mali_global_num_l2s >= MALI_MAX_NUMBER_OF_L2_CACHE_CORES) { + MALI_PRINT_ERROR(("Mali L2 cache: Too many L2 caches\n")); + return NULL; + } + + cache = _mali_osk_malloc(sizeof(struct mali_l2_cache_core)); + if (NULL == cache) { + MALI_PRINT_ERROR(("Mali L2 cache: Failed to allocate memory for L2 cache core\n")); + return NULL; + } + + cache->core_id = mali_global_num_l2s; + cache->counter_src0 = MALI_HW_CORE_NO_COUNTER; + cache->counter_src1 = MALI_HW_CORE_NO_COUNTER; + cache->counter_value0_base = 0; + cache->counter_value1_base = 0; + cache->pm_domain = NULL; + cache->power_is_on = MALI_FALSE; + cache->last_invalidated_id = 0; + + if (_MALI_OSK_ERR_OK != mali_hw_core_create(&cache->hw_core, + resource, MALI400_L2_CACHE_REGISTERS_SIZE)) { + _mali_osk_free(cache); + return NULL; + } + +#if defined(DEBUG) + cache_size = mali_hw_core_register_read(&cache->hw_core, + MALI400_L2_CACHE_REGISTER_SIZE); + 
MALI_DEBUG_PRINT(2, ("Mali L2 cache: Created %s: % 3uK, %u-way, % 2ubyte cache line, % 3ubit external bus\n", + resource->description, + 1 << (((cache_size >> 16) & 0xff) - 10), + 1 << ((cache_size >> 8) & 0xff), + 1 << (cache_size & 0xff), + 1 << ((cache_size >> 24) & 0xff))); +#endif + + cache->lock = _mali_osk_spinlock_irq_init(_MALI_OSK_LOCKFLAG_ORDERED, + _MALI_OSK_LOCK_ORDER_L2); + if (NULL == cache->lock) { + MALI_PRINT_ERROR(("Mali L2 cache: Failed to create counter lock for L2 cache core %s\n", + cache->hw_core.description)); + mali_hw_core_delete(&cache->hw_core); + _mali_osk_free(cache); + return NULL; + } + + /* register with correct power domain */ + cache->pm_domain = mali_pm_register_l2_cache( + domain_index, cache); + + mali_global_l2s[mali_global_num_l2s] = cache; + mali_global_num_l2s++; + + return cache; +} + +void mali_l2_cache_delete(struct mali_l2_cache_core *cache) +{ + u32 i; + for (i = 0; i < mali_global_num_l2s; i++) { + if (mali_global_l2s[i] != cache) { + continue; + } + + mali_global_l2s[i] = NULL; + mali_global_num_l2s--; + + if (i == mali_global_num_l2s) { + /* Removed last element, nothing more to do */ + break; + } + + /* + * We removed a l2 cache from the middle of the array, + * so move the last l2 cache to current position + */ + mali_global_l2s[i] = mali_global_l2s[mali_global_num_l2s]; + mali_global_l2s[mali_global_num_l2s] = NULL; + + /* All good */ + break; + } + + _mali_osk_spinlock_irq_term(cache->lock); + mali_hw_core_delete(&cache->hw_core); + _mali_osk_free(cache); +} + +void mali_l2_cache_power_up(struct mali_l2_cache_core *cache) +{ + MALI_DEBUG_ASSERT_POINTER(cache); + + mali_l2_cache_lock(cache); + + mali_l2_cache_reset(cache); + + if ((1 << MALI_DOMAIN_INDEX_DUMMY) != cache->pm_domain->pmu_mask) + MALI_DEBUG_ASSERT(MALI_FALSE == cache->power_is_on); + cache->power_is_on = MALI_TRUE; + + mali_l2_cache_unlock(cache); +} + +void mali_l2_cache_power_down(struct mali_l2_cache_core *cache) +{ + 
MALI_DEBUG_ASSERT_POINTER(cache);
+
+	mali_l2_cache_lock(cache);
+
+	MALI_DEBUG_ASSERT(MALI_TRUE == cache->power_is_on);
+
+	/*
+	 * The HW counters will start from zero again when we resume,
+	 * but we should report counters as always increasing.
+	 * Take a copy of the HW values now in order to add this to
+	 * the values we report after being powered up.
+	 *
+	 * The physical power off of the L2 cache might be outside our
+	 * own control (e.g. runtime PM). That is why we must manually
+	 * set the counter value to zero as well.
+	 */
+
+	if (cache->counter_src0 != MALI_HW_CORE_NO_COUNTER) {
+		cache->counter_value0_base += mali_hw_core_register_read(
+						      &cache->hw_core,
+						      MALI400_L2_CACHE_REGISTER_PERFCNT_VAL0);
+		mali_hw_core_register_write(&cache->hw_core,
+					    MALI400_L2_CACHE_REGISTER_PERFCNT_VAL0, 0);
+	}
+
+	if (cache->counter_src1 != MALI_HW_CORE_NO_COUNTER) {
+		cache->counter_value1_base += mali_hw_core_register_read(
+						      &cache->hw_core,
+						      MALI400_L2_CACHE_REGISTER_PERFCNT_VAL1);
+		mali_hw_core_register_write(&cache->hw_core,
+					    MALI400_L2_CACHE_REGISTER_PERFCNT_VAL1, 0);
+	}
+
+
+	cache->power_is_on = MALI_FALSE;
+
+	mali_l2_cache_unlock(cache);
+}
+
+/*
+ * Select performance-counter event 'counter' for HW counter slot
+ * 'source_id' (0 or 1), resetting the accumulated base value. If the
+ * cache is powered on, the HW source and value registers are updated
+ * immediately; otherwise the reset path restores them on power-up.
+ */
+void mali_l2_cache_core_set_counter_src(
+	struct mali_l2_cache_core *cache, u32 source_id, u32 counter)
+{
+	u32 reg_offset_src;
+	u32 reg_offset_val;
+
+	MALI_DEBUG_ASSERT_POINTER(cache);
+	MALI_DEBUG_ASSERT(source_id >= 0 && source_id <= 1);
+
+	mali_l2_cache_lock(cache);
+
+	if (0 == source_id) {
+		/* start counting from 0 */
+		cache->counter_value0_base = 0;
+		cache->counter_src0 = counter;
+		reg_offset_src = MALI400_L2_CACHE_REGISTER_PERFCNT_SRC0;
+		reg_offset_val = MALI400_L2_CACHE_REGISTER_PERFCNT_VAL0;
+	} else {
+		/* start counting from 0 */
+		cache->counter_value1_base = 0;
+		cache->counter_src1 = counter;
+		reg_offset_src = MALI400_L2_CACHE_REGISTER_PERFCNT_SRC1;
+		reg_offset_val = MALI400_L2_CACHE_REGISTER_PERFCNT_VAL1;
+	}
+
+	if (cache->power_is_on) {
+		u32 hw_src;
+
+		if (MALI_HW_CORE_NO_COUNTER != 
counter) { + hw_src = counter; + } else { + hw_src = 0; /* disable value for HW */ + } + + /* Set counter src */ + mali_hw_core_register_write(&cache->hw_core, + reg_offset_src, hw_src); + + /* Make sure the HW starts counting from 0 again */ + mali_hw_core_register_write(&cache->hw_core, + reg_offset_val, 0); + } + + mali_l2_cache_unlock(cache); +} + +void mali_l2_cache_core_get_counter_values( + struct mali_l2_cache_core *cache, + u32 *src0, u32 *value0, u32 *src1, u32 *value1) +{ + MALI_DEBUG_ASSERT_POINTER(cache); + MALI_DEBUG_ASSERT(NULL != src0); + MALI_DEBUG_ASSERT(NULL != value0); + MALI_DEBUG_ASSERT(NULL != src1); + MALI_DEBUG_ASSERT(NULL != value1); + + mali_l2_cache_lock(cache); + + *src0 = cache->counter_src0; + *src1 = cache->counter_src1; + + if (cache->counter_src0 != MALI_HW_CORE_NO_COUNTER) { + if (MALI_TRUE == cache->power_is_on) { + *value0 = mali_hw_core_register_read(&cache->hw_core, + MALI400_L2_CACHE_REGISTER_PERFCNT_VAL0); + } else { + *value0 = 0; + } + + /* Add base offset value (in case we have been power off) */ + *value0 += cache->counter_value0_base; + } + + if (cache->counter_src1 != MALI_HW_CORE_NO_COUNTER) { + if (MALI_TRUE == cache->power_is_on) { + *value1 = mali_hw_core_register_read(&cache->hw_core, + MALI400_L2_CACHE_REGISTER_PERFCNT_VAL1); + } else { + *value1 = 0; + } + + /* Add base offset value (in case we have been power off) */ + *value1 += cache->counter_value1_base; + } + + mali_l2_cache_unlock(cache); +} + +struct mali_l2_cache_core *mali_l2_cache_core_get_glob_l2_core(u32 index) +{ + if (mali_global_num_l2s > index) { + return mali_global_l2s[index]; + } + + return NULL; +} + +u32 mali_l2_cache_core_get_glob_num_l2_cores(void) +{ + return mali_global_num_l2s; +} + +void mali_l2_cache_invalidate(struct mali_l2_cache_core *cache) +{ + MALI_DEBUG_ASSERT_POINTER(cache); + + if (NULL == cache) { + return; + } + + mali_l2_cache_lock(cache); + + cache->last_invalidated_id = mali_scheduler_get_new_cache_order(); + 
mali_l2_cache_send_command(cache, MALI400_L2_CACHE_REGISTER_COMMAND, + MALI400_L2_CACHE_COMMAND_CLEAR_ALL); + + mali_l2_cache_unlock(cache); +} + +void mali_l2_cache_invalidate_conditional( + struct mali_l2_cache_core *cache, u32 id) +{ + MALI_DEBUG_ASSERT_POINTER(cache); + + if (NULL == cache) { + return; + } + + /* + * If the last cache invalidation was done by a job with a higher id we + * don't have to flush. Since user space will store jobs w/ their + * corresponding memory in sequence (first job #0, then job #1, ...), + * we don't have to flush for job n-1 if job n has already invalidated + * the cache since we know for sure that job n-1's memory was already + * written when job n was started. + */ + + mali_l2_cache_lock(cache); + + if (((s32)id) > ((s32)cache->last_invalidated_id)) { + /* Set latest invalidated id to current "point in time" */ + cache->last_invalidated_id = + mali_scheduler_get_new_cache_order(); + mali_l2_cache_send_command(cache, + MALI400_L2_CACHE_REGISTER_COMMAND, + MALI400_L2_CACHE_COMMAND_CLEAR_ALL); + } + + mali_l2_cache_unlock(cache); +} + +void mali_l2_cache_invalidate_all(void) +{ + u32 i; + for (i = 0; i < mali_global_num_l2s; i++) { + struct mali_l2_cache_core *cache = mali_global_l2s[i]; + _mali_osk_errcode_t ret; + + MALI_DEBUG_ASSERT_POINTER(cache); + + mali_l2_cache_lock(cache); + + if (MALI_TRUE != cache->power_is_on) { + mali_l2_cache_unlock(cache); + continue; + } + + cache->last_invalidated_id = + mali_scheduler_get_new_cache_order(); + + ret = mali_l2_cache_send_command(cache, + MALI400_L2_CACHE_REGISTER_COMMAND, + MALI400_L2_CACHE_COMMAND_CLEAR_ALL); + if (_MALI_OSK_ERR_OK != ret) { + MALI_PRINT_ERROR(("Failed to invalidate cache\n")); + } + + mali_l2_cache_unlock(cache); + } +} + +void mali_l2_cache_invalidate_all_pages(u32 *pages, u32 num_pages) +{ + u32 i; + for (i = 0; i < mali_global_num_l2s; i++) { + struct mali_l2_cache_core *cache = mali_global_l2s[i]; + u32 j; + + MALI_DEBUG_ASSERT_POINTER(cache); + + 
mali_l2_cache_lock(cache); + + if (MALI_TRUE != cache->power_is_on) { + mali_l2_cache_unlock(cache); + continue; + } + + for (j = 0; j < num_pages; j++) { + _mali_osk_errcode_t ret; + + ret = mali_l2_cache_send_command(cache, + MALI400_L2_CACHE_REGISTER_CLEAR_PAGE, + pages[j]); + if (_MALI_OSK_ERR_OK != ret) { + MALI_PRINT_ERROR(("Failed to invalidate cache (page)\n")); + } + } + + mali_l2_cache_unlock(cache); + } +} + +/* -------- local helper functions below -------- */ + +static void mali_l2_cache_reset(struct mali_l2_cache_core *cache) +{ + MALI_DEBUG_ASSERT_POINTER(cache); + MALI_DEBUG_ASSERT_LOCK_HELD(cache->lock); + + /* Invalidate cache (just to keep it in a known state at startup) */ + mali_l2_cache_send_command(cache, MALI400_L2_CACHE_REGISTER_COMMAND, + MALI400_L2_CACHE_COMMAND_CLEAR_ALL); + + /* Enable cache */ + mali_hw_core_register_write(&cache->hw_core, + MALI400_L2_CACHE_REGISTER_ENABLE, + (u32)MALI400_L2_CACHE_ENABLE_ACCESS | + (u32)MALI400_L2_CACHE_ENABLE_READ_ALLOCATE); + + if (MALI400_L2_MAX_READS_NOT_SET != mali_l2_max_reads) { + mali_hw_core_register_write(&cache->hw_core, + MALI400_L2_CACHE_REGISTER_MAX_READS, + (u32)mali_l2_max_reads); + } + + /* Restart any performance counters (if enabled) */ + if (cache->counter_src0 != MALI_HW_CORE_NO_COUNTER) { + + mali_hw_core_register_write(&cache->hw_core, + MALI400_L2_CACHE_REGISTER_PERFCNT_SRC0, + cache->counter_src0); + } + + if (cache->counter_src1 != MALI_HW_CORE_NO_COUNTER) { + mali_hw_core_register_write(&cache->hw_core, + MALI400_L2_CACHE_REGISTER_PERFCNT_SRC1, + cache->counter_src1); + } +} + +static _mali_osk_errcode_t mali_l2_cache_send_command( + struct mali_l2_cache_core *cache, u32 reg, u32 val) +{ + int i = 0; + const int loop_count = 100000; + + MALI_DEBUG_ASSERT_POINTER(cache); + MALI_DEBUG_ASSERT_LOCK_HELD(cache->lock); + + /* + * First, wait for L2 cache command handler to go idle. 
+ * (Commands received while processing another command will be ignored) + */ + for (i = 0; i < loop_count; i++) { + if (!(mali_hw_core_register_read(&cache->hw_core, + MALI400_L2_CACHE_REGISTER_STATUS) & + (u32)MALI400_L2_CACHE_STATUS_COMMAND_BUSY)) { + break; + } + } + + if (i == loop_count) { + MALI_DEBUG_PRINT(1, ("Mali L2 cache: aborting wait for command interface to go idle\n")); + return _MALI_OSK_ERR_FAULT; + } + + /* then issue the command */ + mali_hw_core_register_write(&cache->hw_core, reg, val); + + return _MALI_OSK_ERR_OK; +} diff --git a/drivers/gpu/arm/utgard/common/mali_l2_cache.h b/drivers/gpu/arm/utgard/common/mali_l2_cache.h new file mode 100644 index 000000000000..6dc8ec22d6de --- /dev/null +++ b/drivers/gpu/arm/utgard/common/mali_l2_cache.h @@ -0,0 +1,124 @@ +/* + * Copyright (C) 2010-2015 ARM Limited. All rights reserved. + * + * This program is free software and is provided to you under the terms of the GNU General Public License version 2 + * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence. + * + * A copy of the licence is included with the program, and can also be obtained from Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. + */ + +#ifndef __MALI_KERNEL_L2_CACHE_H__ +#define __MALI_KERNEL_L2_CACHE_H__ + +#include "mali_osk.h" +#include "mali_hw_core.h" + +#define MALI_MAX_NUMBER_OF_L2_CACHE_CORES 3 +/* Maximum 1 GP and 4 PP for an L2 cache core (Mali-400 MP4) */ +#define MALI_MAX_NUMBER_OF_GROUPS_PER_L2_CACHE 5 + +/** + * Definition of the L2 cache core struct + * Used to track a L2 cache unit in the system. 
+ * Contains information about the mapping of the registers + */ +struct mali_l2_cache_core { + /* Common HW core functionality */ + struct mali_hw_core hw_core; + + /* Synchronize L2 cache access */ + _mali_osk_spinlock_irq_t *lock; + + /* Unique core ID */ + u32 core_id; + + /* The power domain this L2 cache belongs to */ + struct mali_pm_domain *pm_domain; + + /* MALI_TRUE if power is on for this L2 cache */ + mali_bool power_is_on; + + /* A "timestamp" to avoid unnecessary flushes */ + u32 last_invalidated_id; + + /* Performance counter 0, MALI_HW_CORE_NO_COUNTER for disabled */ + u32 counter_src0; + + /* Performance counter 1, MALI_HW_CORE_NO_COUNTER for disabled */ + u32 counter_src1; + + /* + * Performance counter 0 value base/offset + * (allows accumulative reporting even after power off) + */ + u32 counter_value0_base; + + /* + * Performance counter 0 value base/offset + * (allows accumulative reporting even after power off) + */ + u32 counter_value1_base; + + /* Used by PM domains to link L2 caches of same domain */ + _mali_osk_list_t pm_domain_list; +}; + +_mali_osk_errcode_t mali_l2_cache_initialize(void); +void mali_l2_cache_terminate(void); + +struct mali_l2_cache_core *mali_l2_cache_create( + _mali_osk_resource_t *resource, u32 domain_index); +void mali_l2_cache_delete(struct mali_l2_cache_core *cache); + +MALI_STATIC_INLINE u32 mali_l2_cache_get_id(struct mali_l2_cache_core *cache) +{ + MALI_DEBUG_ASSERT_POINTER(cache); + return cache->core_id; +} + +MALI_STATIC_INLINE struct mali_pm_domain *mali_l2_cache_get_pm_domain( + struct mali_l2_cache_core *cache) +{ + MALI_DEBUG_ASSERT_POINTER(cache); + return cache->pm_domain; +} + +void mali_l2_cache_power_up(struct mali_l2_cache_core *cache); +void mali_l2_cache_power_down(struct mali_l2_cache_core *cache); + +void mali_l2_cache_core_set_counter_src( + struct mali_l2_cache_core *cache, u32 source_id, u32 counter); + +MALI_STATIC_INLINE u32 mali_l2_cache_core_get_counter_src0( + struct mali_l2_cache_core 
*cache) +{ + MALI_DEBUG_ASSERT_POINTER(cache); + return cache->counter_src0; +} + +MALI_STATIC_INLINE u32 mali_l2_cache_core_get_counter_src1( + struct mali_l2_cache_core *cache) +{ + MALI_DEBUG_ASSERT_POINTER(cache); + return cache->counter_src1; +} + +void mali_l2_cache_core_get_counter_values( + struct mali_l2_cache_core *cache, + u32 *src0, u32 *value0, u32 *src1, u32 *value1); + +struct mali_l2_cache_core *mali_l2_cache_core_get_glob_l2_core(u32 index); +u32 mali_l2_cache_core_get_glob_num_l2_cores(void); + +struct mali_group *mali_l2_cache_get_group( + struct mali_l2_cache_core *cache, u32 index); + +void mali_l2_cache_invalidate(struct mali_l2_cache_core *cache); +void mali_l2_cache_invalidate_conditional( + struct mali_l2_cache_core *cache, u32 id); + +void mali_l2_cache_invalidate_all(void); +void mali_l2_cache_invalidate_all_pages(u32 *pages, u32 num_pages); + +#endif /* __MALI_KERNEL_L2_CACHE_H__ */ diff --git a/drivers/gpu/arm/utgard/common/mali_mem_validation.c b/drivers/gpu/arm/utgard/common/mali_mem_validation.c new file mode 100644 index 000000000000..e2b5b2a7f739 --- /dev/null +++ b/drivers/gpu/arm/utgard/common/mali_mem_validation.c @@ -0,0 +1,65 @@ +/* + * Copyright (C) 2011-2015 ARM Limited. All rights reserved. + * + * This program is free software and is provided to you under the terms of the GNU General Public License version 2 + * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence. + * + * A copy of the licence is included with the program, and can also be obtained from Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. 
+ */
+
+#include "mali_mem_validation.h"
+#include "mali_osk.h"
+#include "mali_kernel_common.h"
+
+#define MALI_INVALID_MEM_ADDR 0xFFFFFFFF
+
+typedef struct {
+	u32 phys_base; /**< Mali physical base of the memory, page aligned */
+	u32 size; /**< size in bytes of the memory, multiple of page size */
+} _mali_mem_validation_t;
+
+/* Single allowed external range; fields stay invalid until one is registered */
+static _mali_mem_validation_t mali_mem_validator = { MALI_INVALID_MEM_ADDR, MALI_INVALID_MEM_ADDR };
+
+/*
+ * Register the single external (frame buffer) memory range that may later
+ * be validated by mali_mem_validation_check(). 'start' and 'size' must be
+ * CPU-page aligned, and only one range may ever be registered.
+ * Returns _MALI_OSK_ERR_FAULT on duplicate registration or bad alignment.
+ */
+_mali_osk_errcode_t mali_mem_validation_add_range(u32 start, u32 size)
+{
+	/* Check that no other MEM_VALIDATION resources exist */
+	if (MALI_INVALID_MEM_ADDR != mali_mem_validator.phys_base) {
+		MALI_PRINT_ERROR(("Failed to add frame buffer memory; another range is already specified\n"));
+		return _MALI_OSK_ERR_FAULT;
+	}
+
+	/* Check restrictions on page alignment */
+	if ((0 != (start & (~_MALI_OSK_CPU_PAGE_MASK))) ||
+	    (0 != (size & (~_MALI_OSK_CPU_PAGE_MASK)))) {
+		MALI_PRINT_ERROR(("Failed to add frame buffer memory; incorrect alignment\n"));
+		return _MALI_OSK_ERR_FAULT;
+	}
+
+	mali_mem_validator.phys_base = start;
+	mali_mem_validator.size = size;
+	MALI_DEBUG_PRINT(2, ("Memory Validator installed for Mali physical address base=0x%08X, size=0x%08X\n",
+			     mali_mem_validator.phys_base, mali_mem_validator.size));
+
+	return _MALI_OSK_ERR_OK;
+}
+
+/*
+ * Accept [phys_addr, phys_addr + size) only if it is page aligned, does
+ * not wrap around (which also rejects size == 0), and lies entirely
+ * inside the registered range.
+ */
+_mali_osk_errcode_t mali_mem_validation_check(u32 phys_addr, u32 size)
+{
+	if (phys_addr < (phys_addr + size)) { /* Don't allow overflow (or zero size) */
+		if ((0 == (phys_addr & (~_MALI_OSK_CPU_PAGE_MASK))) &&
+		    (0 == (size & (~_MALI_OSK_CPU_PAGE_MASK)))) {
+			if ((phys_addr >= mali_mem_validator.phys_base) &&
+			    ((phys_addr + (size - 1)) >= mali_mem_validator.phys_base) &&
+			    (phys_addr <= (mali_mem_validator.phys_base + (mali_mem_validator.size - 1))) &&
+			    ((phys_addr + (size - 1)) <= (mali_mem_validator.phys_base + (mali_mem_validator.size - 1)))) {
+				MALI_DEBUG_PRINT(3, ("Accepted range 0x%08X + size 0x%08X (= 0x%08X)\n", phys_addr, size, (phys_addr + size - 1)));
+				return 
_MALI_OSK_ERR_OK; + } + } + } + + MALI_PRINT_ERROR(("MALI PHYSICAL RANGE VALIDATION ERROR: The range supplied was: phys_base=0x%08X, size=0x%08X\n", phys_addr, size)); + + return _MALI_OSK_ERR_FAULT; +} diff --git a/drivers/gpu/arm/utgard/common/mali_mem_validation.h b/drivers/gpu/arm/utgard/common/mali_mem_validation.h new file mode 100644 index 000000000000..267720625d87 --- /dev/null +++ b/drivers/gpu/arm/utgard/common/mali_mem_validation.h @@ -0,0 +1,19 @@ +/* + * Copyright (C) 2011-2013, 2015 ARM Limited. All rights reserved. + * + * This program is free software and is provided to you under the terms of the GNU General Public License version 2 + * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence. + * + * A copy of the licence is included with the program, and can also be obtained from Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. + */ + +#ifndef __MALI_MEM_VALIDATION_H__ +#define __MALI_MEM_VALIDATION_H__ + +#include "mali_osk.h" + +_mali_osk_errcode_t mali_mem_validation_add_range(u32 start, u32 size); +_mali_osk_errcode_t mali_mem_validation_check(u32 phys_addr, u32 size); + +#endif /* __MALI_MEM_VALIDATION_H__ */ diff --git a/drivers/gpu/arm/utgard/common/mali_mmu.c b/drivers/gpu/arm/utgard/common/mali_mmu.c new file mode 100644 index 000000000000..b975c1468d67 --- /dev/null +++ b/drivers/gpu/arm/utgard/common/mali_mmu.c @@ -0,0 +1,433 @@ +/* + * Copyright (C) 2010-2015 ARM Limited. All rights reserved. + * + * This program is free software and is provided to you under the terms of the GNU General Public License version 2 + * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence. 
 *
 * A copy of the licence is included with the program, and can also be obtained from Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
 */

#include "mali_kernel_common.h"
#include "mali_osk.h"
#include "mali_osk_list.h"
#include "mali_ukk.h"

#include "mali_mmu.h"
#include "mali_hw_core.h"
#include "mali_group.h"
#include "mali_mmu_page_directory.h"

/**
 * Size of the MMU registers in bytes
 */
#define MALI_MMU_REGISTERS_SIZE 0x24

/**
 * MMU commands
 * These are the commands that can be sent
 * to the MMU unit.
 */
typedef enum mali_mmu_command {
	MALI_MMU_COMMAND_ENABLE_PAGING = 0x00, /**< Enable paging (memory translation) */
	MALI_MMU_COMMAND_DISABLE_PAGING = 0x01, /**< Disable paging (memory translation) */
	MALI_MMU_COMMAND_ENABLE_STALL = 0x02, /**< Enable stall on page fault */
	MALI_MMU_COMMAND_DISABLE_STALL = 0x03, /**< Disable stall on page fault */
	MALI_MMU_COMMAND_ZAP_CACHE = 0x04, /**< Zap the entire page table cache */
	MALI_MMU_COMMAND_PAGE_FAULT_DONE = 0x05, /**< Page fault processed */
	MALI_MMU_COMMAND_HARD_RESET = 0x06 /**< Reset the MMU back to power-on settings */
} mali_mmu_command;

static void mali_mmu_probe_trigger(void *data);
static _mali_osk_errcode_t mali_mmu_probe_ack(void *data);

MALI_STATIC_INLINE _mali_osk_errcode_t mali_mmu_raw_reset(struct mali_mmu_core *mmu);

/* page fault queue flush helper pages
 * note that the mapping pointers are currently unused outside of the initialization functions */
static mali_dma_addr mali_page_fault_flush_page_directory = MALI_INVALID_PAGE;
static mali_io_address mali_page_fault_flush_page_directory_mapping = NULL;
static mali_dma_addr mali_page_fault_flush_page_table = MALI_INVALID_PAGE;
static mali_io_address mali_page_fault_flush_page_table_mapping = NULL;
static mali_dma_addr mali_page_fault_flush_data_page = MALI_INVALID_PAGE;
static mali_io_address mali_page_fault_flush_data_page_mapping = NULL;

/* an empty page directory (no address valid) which is active on any MMU not currently marked as in use */
static mali_dma_addr mali_empty_page_directory_phys = MALI_INVALID_PAGE;
static mali_io_address mali_empty_page_directory_virt = NULL;


/* Allocate the global helper pages (empty page directory and the page
 * fault flush page directory/table/data page).
 * Returns _MALI_OSK_ERR_NOMEM on allocation failure. */
_mali_osk_errcode_t mali_mmu_initialize(void)
{
	/* allocate the helper pages */
	mali_empty_page_directory_phys = mali_allocate_empty_page(&mali_empty_page_directory_virt);
	if (0 == mali_empty_page_directory_phys) {
		MALI_DEBUG_PRINT_ERROR(("Mali MMU: Could not allocate empty page directory.\n"));
		mali_empty_page_directory_phys = MALI_INVALID_PAGE;
		return _MALI_OSK_ERR_NOMEM;
	}

	if (_MALI_OSK_ERR_OK != mali_create_fault_flush_pages(&mali_page_fault_flush_page_directory,
			&mali_page_fault_flush_page_directory_mapping,
			&mali_page_fault_flush_page_table,
			&mali_page_fault_flush_page_table_mapping,
			&mali_page_fault_flush_data_page,
			&mali_page_fault_flush_data_page_mapping)) {
		MALI_DEBUG_PRINT_ERROR(("Mali MMU: Could not allocate fault flush pages\n"));
		/* Roll back the empty page directory allocation. */
		mali_free_empty_page(mali_empty_page_directory_phys, mali_empty_page_directory_virt);
		mali_empty_page_directory_phys = MALI_INVALID_PAGE;
		mali_empty_page_directory_virt = NULL;
		return _MALI_OSK_ERR_NOMEM;
	}

	return _MALI_OSK_ERR_OK;
}

/* Release the global helper pages allocated by mali_mmu_initialize(). */
void mali_mmu_terminate(void)
{
	MALI_DEBUG_PRINT(3, ("Mali MMU: terminating\n"));

	/* Free global helper pages */
	mali_free_empty_page(mali_empty_page_directory_phys, mali_empty_page_directory_virt);
	mali_empty_page_directory_phys = MALI_INVALID_PAGE;
	mali_empty_page_directory_virt = NULL;

	/* Free the page fault flush pages */
	mali_destroy_fault_flush_pages(&mali_page_fault_flush_page_directory,
				       &mali_page_fault_flush_page_directory_mapping,
				       &mali_page_fault_flush_page_table,
				       &mali_page_fault_flush_page_table_mapping,
				       &mali_page_fault_flush_data_page,
				       &mali_page_fault_flush_data_page_mapping);
}

/* Map the MMU registers, attach the core to its group and, for a physical
 * (non-virtual) MMU, reset the HW and install the IRQ handlers.
 * Returns the new core, or NULL on failure (all partial work undone). */
struct mali_mmu_core *mali_mmu_create(_mali_osk_resource_t *resource, struct mali_group *group, mali_bool is_virtual)
{
	struct mali_mmu_core *mmu = NULL;

	MALI_DEBUG_ASSERT_POINTER(resource);

	MALI_DEBUG_PRINT(2, ("Mali MMU: Creating Mali MMU: %s\n", resource->description));

	mmu = _mali_osk_calloc(1, sizeof(struct mali_mmu_core));
	if (NULL != mmu) {
		if (_MALI_OSK_ERR_OK == mali_hw_core_create(&mmu->hw_core, resource, MALI_MMU_REGISTERS_SIZE)) {
			if (_MALI_OSK_ERR_OK == mali_group_add_mmu_core(group, mmu)) {
				if (is_virtual) {
					/* Skip reset and IRQ setup for virtual MMU */
					return mmu;
				}

				if (_MALI_OSK_ERR_OK == mali_mmu_reset(mmu)) {
					/* Setup IRQ handlers (which will do IRQ probing if needed) */
					mmu->irq = _mali_osk_irq_init(resource->irq,
								      mali_group_upper_half_mmu,
								      group,
								      mali_mmu_probe_trigger,
								      mali_mmu_probe_ack,
								      mmu,
								      resource->description);
					if (NULL != mmu->irq) {
						return mmu;
					} else {
						MALI_PRINT_ERROR(("Mali MMU: Failed to setup interrupt handlers for MMU %s\n", mmu->hw_core.description));
					}
				}
				mali_group_remove_mmu_core(group);
			} else {
				MALI_PRINT_ERROR(("Mali MMU: Failed to add core %s to group\n", mmu->hw_core.description));
			}
			mali_hw_core_delete(&mmu->hw_core);
		}

		_mali_osk_free(mmu);
	} else {
		MALI_PRINT_ERROR(("Failed to allocate memory for MMU\n"));
	}

	return NULL;
}

/* Tear down a core created by mali_mmu_create(). A virtual MMU has no IRQ
 * object, hence the NULL check. */
void mali_mmu_delete(struct mali_mmu_core *mmu)
{
	if (NULL != mmu->irq) {
		_mali_osk_irq_term(mmu->irq);
	}

	mali_hw_core_delete(&mmu->hw_core);
	_mali_osk_free(mmu);
}

/* Issue ENABLE_PAGING and poll until the HW reports the paging bit set. */
static void mali_mmu_enable_paging(struct mali_mmu_core *mmu)
{
	int i;

	mali_hw_core_register_write(&mmu->hw_core, MALI_MMU_REGISTER_COMMAND, MALI_MMU_COMMAND_ENABLE_PAGING);

	for (i = 0; i < MALI_REG_POLL_COUNT_FAST; ++i) {
		if (mali_hw_core_register_read(&mmu->hw_core, MALI_MMU_REGISTER_STATUS) & MALI_MMU_STATUS_BIT_PAGING_ENABLED) {
			break;
		}
	}
	if (MALI_REG_POLL_COUNT_FAST == i) {
		MALI_PRINT_ERROR(("Enable paging request failed, MMU status is 0x%08X\n",
mali_hw_core_register_read(&mmu->hw_core, MALI_MMU_REGISTER_STATUS))); + } +} + +/** + * Issues the enable stall command to the MMU and waits for HW to complete the request + * @param mmu The MMU to enable paging for + * @return MALI_TRUE if HW stall was successfully engaged, otherwise MALI_FALSE (req timed out) + */ +static mali_bool mali_mmu_enable_stall(struct mali_mmu_core *mmu) +{ + int i; + u32 mmu_status = mali_hw_core_register_read(&mmu->hw_core, MALI_MMU_REGISTER_STATUS); + + if (0 == (mmu_status & MALI_MMU_STATUS_BIT_PAGING_ENABLED)) { + MALI_DEBUG_PRINT(4, ("MMU stall is implicit when Paging is not enabled.\n")); + return MALI_TRUE; + } + + if (mmu_status & MALI_MMU_STATUS_BIT_PAGE_FAULT_ACTIVE) { + MALI_DEBUG_PRINT(3, ("Aborting MMU stall request since it is in pagefault state.\n")); + return MALI_FALSE; + } + + mali_hw_core_register_write(&mmu->hw_core, MALI_MMU_REGISTER_COMMAND, MALI_MMU_COMMAND_ENABLE_STALL); + + for (i = 0; i < MALI_REG_POLL_COUNT_FAST; ++i) { + mmu_status = mali_hw_core_register_read(&mmu->hw_core, MALI_MMU_REGISTER_STATUS); + if (mmu_status & MALI_MMU_STATUS_BIT_PAGE_FAULT_ACTIVE) { + break; + } + if ((mmu_status & MALI_MMU_STATUS_BIT_STALL_ACTIVE) && (0 == (mmu_status & MALI_MMU_STATUS_BIT_STALL_NOT_ACTIVE))) { + break; + } + if (0 == (mmu_status & (MALI_MMU_STATUS_BIT_PAGING_ENABLED))) { + break; + } + } + if (MALI_REG_POLL_COUNT_FAST == i) { + MALI_DEBUG_PRINT(2, ("Enable stall request failed, MMU status is 0x%08X\n", mali_hw_core_register_read(&mmu->hw_core, MALI_MMU_REGISTER_STATUS))); + return MALI_FALSE; + } + + if (mmu_status & MALI_MMU_STATUS_BIT_PAGE_FAULT_ACTIVE) { + MALI_DEBUG_PRINT(2, ("Aborting MMU stall request since it has a pagefault.\n")); + return MALI_FALSE; + } + + return MALI_TRUE; +} + +/** + * Issues the disable stall command to the MMU and waits for HW to complete the request + * @param mmu The MMU to enable paging for + */ +static void mali_mmu_disable_stall(struct mali_mmu_core *mmu) +{ + int i; + u32 
mmu_status = mali_hw_core_register_read(&mmu->hw_core, MALI_MMU_REGISTER_STATUS); + + if (0 == (mmu_status & MALI_MMU_STATUS_BIT_PAGING_ENABLED)) { + MALI_DEBUG_PRINT(3, ("MMU disable skipped since it was not enabled.\n")); + return; + } + if (mmu_status & MALI_MMU_STATUS_BIT_PAGE_FAULT_ACTIVE) { + MALI_DEBUG_PRINT(2, ("Aborting MMU disable stall request since it is in pagefault state.\n")); + return; + } + + mali_hw_core_register_write(&mmu->hw_core, MALI_MMU_REGISTER_COMMAND, MALI_MMU_COMMAND_DISABLE_STALL); + + for (i = 0; i < MALI_REG_POLL_COUNT_FAST; ++i) { + u32 status = mali_hw_core_register_read(&mmu->hw_core, MALI_MMU_REGISTER_STATUS); + if (0 == (status & MALI_MMU_STATUS_BIT_STALL_ACTIVE)) { + break; + } + if (status & MALI_MMU_STATUS_BIT_PAGE_FAULT_ACTIVE) { + break; + } + if (0 == (mmu_status & MALI_MMU_STATUS_BIT_PAGING_ENABLED)) { + break; + } + } + if (MALI_REG_POLL_COUNT_FAST == i) MALI_DEBUG_PRINT(1, ("Disable stall request failed, MMU status is 0x%08X\n", mali_hw_core_register_read(&mmu->hw_core, MALI_MMU_REGISTER_STATUS))); +} + +void mali_mmu_page_fault_done(struct mali_mmu_core *mmu) +{ + MALI_DEBUG_PRINT(4, ("Mali MMU: %s: Leaving page fault mode\n", mmu->hw_core.description)); + mali_hw_core_register_write(&mmu->hw_core, MALI_MMU_REGISTER_COMMAND, MALI_MMU_COMMAND_PAGE_FAULT_DONE); +} + +MALI_STATIC_INLINE _mali_osk_errcode_t mali_mmu_raw_reset(struct mali_mmu_core *mmu) +{ + int i; + + mali_hw_core_register_write(&mmu->hw_core, MALI_MMU_REGISTER_DTE_ADDR, 0xCAFEBABE); + MALI_DEBUG_ASSERT(0xCAFEB000 == mali_hw_core_register_read(&mmu->hw_core, MALI_MMU_REGISTER_DTE_ADDR)); + mali_hw_core_register_write(&mmu->hw_core, MALI_MMU_REGISTER_COMMAND, MALI_MMU_COMMAND_HARD_RESET); + + for (i = 0; i < MALI_REG_POLL_COUNT_FAST; ++i) { + if (mali_hw_core_register_read(&mmu->hw_core, MALI_MMU_REGISTER_DTE_ADDR) == 0) { + break; + } + } + if (MALI_REG_POLL_COUNT_FAST == i) { + MALI_PRINT_ERROR(("Reset request failed, MMU status is 0x%08X\n", 
		mali_hw_core_register_read(&mmu->hw_core, MALI_MMU_REGISTER_STATUS)));
		return _MALI_OSK_ERR_FAULT;
	}

	return _MALI_OSK_ERR_OK;
}

/* Stall the MMU, hard-reset it, unmask page fault / bus error interrupts,
 * activate the empty page directory and re-enable paging.
 * Returns _MALI_OSK_ERR_BUSY when the MMU could not be stalled first. */
_mali_osk_errcode_t mali_mmu_reset(struct mali_mmu_core *mmu)
{
	_mali_osk_errcode_t err = _MALI_OSK_ERR_FAULT;
	mali_bool stall_success;
	MALI_DEBUG_ASSERT_POINTER(mmu);

	stall_success = mali_mmu_enable_stall(mmu);
	if (!stall_success) {
		err = _MALI_OSK_ERR_BUSY;
	}

	MALI_DEBUG_PRINT(3, ("Mali MMU: mali_kernel_mmu_reset: %s\n", mmu->hw_core.description));

	if (_MALI_OSK_ERR_OK == mali_mmu_raw_reset(mmu)) {
		mali_hw_core_register_write(&mmu->hw_core, MALI_MMU_REGISTER_INT_MASK, MALI_MMU_INTERRUPT_PAGE_FAULT | MALI_MMU_INTERRUPT_READ_BUS_ERROR);
		/* no session is active, so just activate the empty page directory */
		mali_hw_core_register_write(&mmu->hw_core, MALI_MMU_REGISTER_DTE_ADDR, mali_empty_page_directory_phys);
		mali_mmu_enable_paging(mmu);
		err = _MALI_OSK_ERR_OK;
	}
	mali_mmu_disable_stall(mmu);

	return err;
}

/* Zap the page table cache. Returns MALI_FALSE if the MMU was in page
 * fault state (the zap is still issued, but stall cannot be disabled). */
mali_bool mali_mmu_zap_tlb(struct mali_mmu_core *mmu)
{
	mali_bool stall_success = mali_mmu_enable_stall(mmu);

	mali_hw_core_register_write(&mmu->hw_core, MALI_MMU_REGISTER_COMMAND, MALI_MMU_COMMAND_ZAP_CACHE);

	if (MALI_FALSE == stall_success) {
		/* False means that it is in Pagefault state. Not possible to disable_stall then */
		return MALI_FALSE;
	}

	mali_mmu_disable_stall(mmu);
	return MALI_TRUE;
}

/* Zap the page table cache without the stall/unstall handshake. */
void mali_mmu_zap_tlb_without_stall(struct mali_mmu_core *mmu)
{
	mali_hw_core_register_write(&mmu->hw_core, MALI_MMU_REGISTER_COMMAND, MALI_MMU_COMMAND_ZAP_CACHE);
}


/* Invalidate the TLB entry covering a single Mali virtual address. */
void mali_mmu_invalidate_page(struct mali_mmu_core *mmu, u32 mali_address)
{
	mali_hw_core_register_write(&mmu->hw_core, MALI_MMU_REGISTER_ZAP_ONE_LINE, MALI_MMU_PDE_ENTRY(mali_address));
}

/* Point the MMU at a new page directory and zap the stale cached entries. */
static void mali_mmu_activate_address_space(struct mali_mmu_core *mmu, u32 page_directory)
{
	/* The MMU must be in stalled or page fault mode, for this writing to work */
	MALI_DEBUG_ASSERT(0 != (mali_hw_core_register_read(&mmu->hw_core, MALI_MMU_REGISTER_STATUS)
				& (MALI_MMU_STATUS_BIT_STALL_ACTIVE | MALI_MMU_STATUS_BIT_PAGE_FAULT_ACTIVE)));
	mali_hw_core_register_write(&mmu->hw_core, MALI_MMU_REGISTER_DTE_ADDR, page_directory);
	mali_hw_core_register_write(&mmu->hw_core, MALI_MMU_REGISTER_COMMAND, MALI_MMU_COMMAND_ZAP_CACHE);

}

/* Switch the MMU to a session's page directory (stall - switch - unstall). */
void mali_mmu_activate_page_directory(struct mali_mmu_core *mmu, struct mali_page_directory *pagedir)
{
	mali_bool stall_success;
	MALI_DEBUG_ASSERT_POINTER(mmu);

	MALI_DEBUG_PRINT(5, ("Asked to activate page directory 0x%x on MMU %s\n", pagedir, mmu->hw_core.description));

	stall_success = mali_mmu_enable_stall(mmu);
	MALI_DEBUG_ASSERT(stall_success);
	MALI_IGNORE(stall_success);
	mali_mmu_activate_address_space(mmu, pagedir->page_directory);
	mali_mmu_disable_stall(mmu);
}

/* Switch the MMU to the global empty page directory (no valid mappings). */
void mali_mmu_activate_empty_page_directory(struct mali_mmu_core *mmu)
{
	mali_bool stall_success;

	MALI_DEBUG_ASSERT_POINTER(mmu);
	MALI_DEBUG_PRINT(3, ("Activating the empty page directory on MMU %s\n", mmu->hw_core.description));

	stall_success = mali_mmu_enable_stall(mmu);

	/* This function can only be called when the core is idle, so it could not fail. */
	MALI_DEBUG_ASSERT(stall_success);
	MALI_IGNORE(stall_success);

	mali_mmu_activate_address_space(mmu, mali_empty_page_directory_phys);
	mali_mmu_disable_stall(mmu);
}

/* Switch the MMU to the fault-flush page directory, used to drain the
 * page fault queue; may be invoked while already in page fault mode. */
void mali_mmu_activate_fault_flush_page_directory(struct mali_mmu_core *mmu)
{
	mali_bool stall_success;
	MALI_DEBUG_ASSERT_POINTER(mmu);

	MALI_DEBUG_PRINT(3, ("Activating the page fault flush page directory on MMU %s\n", mmu->hw_core.description));
	stall_success = mali_mmu_enable_stall(mmu);
	/* This function is expect to fail the stalling, since it might be in PageFault mode when it is called */
	mali_mmu_activate_address_space(mmu, mali_page_fault_flush_page_directory);
	if (MALI_TRUE == stall_success) mali_mmu_disable_stall(mmu);
}

/* Is called when we want the mmu to give an interrupt */
static void mali_mmu_probe_trigger(void *data)
{
	struct mali_mmu_core *mmu = (struct mali_mmu_core *)data;
	mali_hw_core_register_write(&mmu->hw_core, MALI_MMU_REGISTER_INT_RAWSTAT, MALI_MMU_INTERRUPT_PAGE_FAULT | MALI_MMU_INTERRUPT_READ_BUS_ERROR);
}

/* Is called when the irq probe wants the mmu to acknowledge an interrupt from the hw */
static _mali_osk_errcode_t mali_mmu_probe_ack(void *data)
{
	struct mali_mmu_core *mmu = (struct mali_mmu_core *)data;
	u32 int_stat;

	int_stat = mali_hw_core_register_read(&mmu->hw_core, MALI_MMU_REGISTER_INT_STATUS);

	MALI_DEBUG_PRINT(2, ("mali_mmu_probe_irq_acknowledge: intstat 0x%x\n", int_stat));
	if (int_stat & MALI_MMU_INTERRUPT_PAGE_FAULT) {
		MALI_DEBUG_PRINT(2, ("Probe: Page fault detect: PASSED\n"));
		mali_hw_core_register_write(&mmu->hw_core, MALI_MMU_REGISTER_INT_CLEAR, MALI_MMU_INTERRUPT_PAGE_FAULT);
	} else {
		MALI_DEBUG_PRINT(1, ("Probe: Page fault detect: FAILED\n"));
	}

	if (int_stat & MALI_MMU_INTERRUPT_READ_BUS_ERROR) {
		MALI_DEBUG_PRINT(2, ("Probe: Bus read error detect: PASSED\n"));
		mali_hw_core_register_write(&mmu->hw_core, MALI_MMU_REGISTER_INT_CLEAR, MALI_MMU_INTERRUPT_READ_BUS_ERROR);
	} else {
		MALI_DEBUG_PRINT(1, ("Probe: Bus read error detect: FAILED\n"));
	}

	/* The probe succeeds only if both interrupt sources fired. */
	if ((int_stat & (MALI_MMU_INTERRUPT_PAGE_FAULT | MALI_MMU_INTERRUPT_READ_BUS_ERROR)) ==
	    (MALI_MMU_INTERRUPT_PAGE_FAULT | MALI_MMU_INTERRUPT_READ_BUS_ERROR)) {
		return _MALI_OSK_ERR_OK;
	}

	return _MALI_OSK_ERR_FAULT;
}

#if 0
void mali_mmu_print_state(struct mali_mmu_core *mmu)
{
	MALI_DEBUG_PRINT(2, ("MMU: State of %s is 0x%08x\n", mmu->hw_core.description, mali_hw_core_register_read(&mmu->hw_core, MALI_MMU_REGISTER_STATUS)));
}
#endif
diff --git a/drivers/gpu/arm/utgard/common/mali_mmu.h b/drivers/gpu/arm/utgard/common/mali_mmu.h
new file mode 100644
index 000000000000..101c968bd45d
--- /dev/null
+++ b/drivers/gpu/arm/utgard/common/mali_mmu.h
@@ -0,0 +1,123 @@
/*
 * Copyright (C) 2010-2015 ARM Limited. All rights reserved.
 *
 * This program is free software and is provided to you under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
 *
 * A copy of the licence is included with the program, and can also be obtained from Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
 */

#ifndef __MALI_MMU_H__
#define __MALI_MMU_H__

#include "mali_osk.h"
#include "mali_mmu_page_directory.h"
#include "mali_hw_core.h"

/* Forward declaration from mali_group.h */
struct mali_group;

/**
 * MMU register numbers
 * Used in the register read/write routines.
 * See the hardware documentation for more information about each register
 */
typedef enum mali_mmu_register {
	MALI_MMU_REGISTER_DTE_ADDR = 0x0000, /**< Current Page Directory Pointer */
	MALI_MMU_REGISTER_STATUS = 0x0004, /**< Status of the MMU */
	MALI_MMU_REGISTER_COMMAND = 0x0008, /**< Command register, used to control the MMU */
	MALI_MMU_REGISTER_PAGE_FAULT_ADDR = 0x000C, /**< Logical address of the last page fault */
	MALI_MMU_REGISTER_ZAP_ONE_LINE = 0x010, /**< Used to invalidate the mapping of a single page from the MMU */
	MALI_MMU_REGISTER_INT_RAWSTAT = 0x0014, /**< Raw interrupt status, all interrupts visible */
	MALI_MMU_REGISTER_INT_CLEAR = 0x0018, /**< Indicate to the MMU that the interrupt has been received */
	MALI_MMU_REGISTER_INT_MASK = 0x001C, /**< Enable/disable types of interrupts */
	MALI_MMU_REGISTER_INT_STATUS = 0x0020 /**< Interrupt status based on the mask */
} mali_mmu_register;

/**
 * MMU interrupt register bits
 * Each cause of the interrupt is reported
 * through the (raw) interrupt status registers.
 * Multiple interrupts can be pending, so multiple bits
 * can be set at once.
 */
typedef enum mali_mmu_interrupt {
	MALI_MMU_INTERRUPT_PAGE_FAULT = 0x01, /**< A page fault occured */
	MALI_MMU_INTERRUPT_READ_BUS_ERROR = 0x02 /**< A bus read error occured */
} mali_mmu_interrupt;

/* Bit meanings of the MMU STATUS register. */
typedef enum mali_mmu_status_bits {
	MALI_MMU_STATUS_BIT_PAGING_ENABLED = 1 << 0,
	MALI_MMU_STATUS_BIT_PAGE_FAULT_ACTIVE = 1 << 1,
	MALI_MMU_STATUS_BIT_STALL_ACTIVE = 1 << 2,
	MALI_MMU_STATUS_BIT_IDLE = 1 << 3,
	MALI_MMU_STATUS_BIT_REPLAY_BUFFER_EMPTY = 1 << 4,
	MALI_MMU_STATUS_BIT_PAGE_FAULT_IS_WRITE = 1 << 5,
	MALI_MMU_STATUS_BIT_STALL_NOT_ACTIVE = 1 << 31,
} mali_mmu_status_bits;

/**
 * Definition of the MMU struct
 * Used to track a MMU unit in the system.
 * Contains information about the mapping of the registers
 */
struct mali_mmu_core {
	struct mali_hw_core hw_core; /**< Common for all HW cores */
	_mali_osk_irq_t *irq; /**< IRQ handler */
};

_mali_osk_errcode_t mali_mmu_initialize(void);

void mali_mmu_terminate(void);

struct mali_mmu_core *mali_mmu_create(_mali_osk_resource_t *resource, struct mali_group *group, mali_bool is_virtual);
void mali_mmu_delete(struct mali_mmu_core *mmu);

_mali_osk_errcode_t mali_mmu_reset(struct mali_mmu_core *mmu);
mali_bool mali_mmu_zap_tlb(struct mali_mmu_core *mmu);
void mali_mmu_zap_tlb_without_stall(struct mali_mmu_core *mmu);
void mali_mmu_invalidate_page(struct mali_mmu_core *mmu, u32 mali_address);

void mali_mmu_activate_page_directory(struct mali_mmu_core *mmu, struct mali_page_directory *pagedir);
void mali_mmu_activate_empty_page_directory(struct mali_mmu_core *mmu);
void mali_mmu_activate_fault_flush_page_directory(struct mali_mmu_core *mmu);

void mali_mmu_page_fault_done(struct mali_mmu_core *mmu);

/* Map the raw interrupt status onto the driver's generic interrupt result:
 * any pending raw interrupt on an MMU is treated as an error condition. */
MALI_STATIC_INLINE enum mali_interrupt_result mali_mmu_get_interrupt_result(struct mali_mmu_core *mmu)
{
	u32 rawstat_used = mali_hw_core_register_read(&mmu->hw_core, MALI_MMU_REGISTER_INT_RAWSTAT);
	if (0 == rawstat_used) {
		return MALI_INTERRUPT_RESULT_NONE;
	}
	return MALI_INTERRUPT_RESULT_ERROR;
}


MALI_STATIC_INLINE u32 mali_mmu_get_int_status(struct mali_mmu_core *mmu)
{
	return mali_hw_core_register_read(&mmu->hw_core, MALI_MMU_REGISTER_INT_STATUS);
}

MALI_STATIC_INLINE u32 mali_mmu_get_rawstat(struct mali_mmu_core *mmu)
{
	return mali_hw_core_register_read(&mmu->hw_core, MALI_MMU_REGISTER_INT_RAWSTAT);
}

MALI_STATIC_INLINE void mali_mmu_mask_all_interrupts(struct mali_mmu_core *mmu)
{
	mali_hw_core_register_write(&mmu->hw_core, MALI_MMU_REGISTER_INT_MASK, 0);
}

MALI_STATIC_INLINE u32 mali_mmu_get_status(struct mali_mmu_core *mmu)
{
	return mali_hw_core_register_read(&mmu->hw_core, MALI_MMU_REGISTER_STATUS);
}

MALI_STATIC_INLINE u32 mali_mmu_get_page_fault_addr(struct mali_mmu_core *mmu)
{
	return mali_hw_core_register_read(&mmu->hw_core, MALI_MMU_REGISTER_PAGE_FAULT_ADDR);
}

#endif /* __MALI_MMU_H__ */
diff --git a/drivers/gpu/arm/utgard/common/mali_mmu_page_directory.c b/drivers/gpu/arm/utgard/common/mali_mmu_page_directory.c
new file mode 100644
index 000000000000..126fd77ec9c9
--- /dev/null
+++ b/drivers/gpu/arm/utgard/common/mali_mmu_page_directory.c
@@ -0,0 +1,495 @@
/*
 * Copyright (C) 2011-2015 ARM Limited. All rights reserved.
 *
 * This program is free software and is provided to you under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
 *
 * A copy of the licence is included with the program, and can also be obtained from Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
 */

#include "mali_kernel_common.h"
#include "mali_osk.h"
#include "mali_ukk.h"
#include "mali_uk_types.h"
#include "mali_mmu_page_directory.h"
#include "mali_memory.h"
#include "mali_l2_cache.h"

static _mali_osk_errcode_t fill_page(mali_io_address mapping, u32 data);

/* Allocate one zero-filled table page. Returns its DMA address (0 on
 * failure) and hands back the kernel mapping through virt_addr. */
u32 mali_allocate_empty_page(mali_io_address *virt_addr)
{
	_mali_osk_errcode_t err;
	mali_io_address mapping;
	mali_dma_addr address;

	if (_MALI_OSK_ERR_OK != mali_mmu_get_table_page(&address, &mapping)) {
		/* Allocation failed */
		MALI_DEBUG_PRINT(2, ("Mali MMU: Failed to get table page for empty pgdir\n"));
		return 0;
	}

	MALI_DEBUG_ASSERT_POINTER(mapping);

	err = fill_page(mapping, 0);
	if (_MALI_OSK_ERR_OK != err) {
		mali_mmu_release_table_page(address, mapping);
		MALI_DEBUG_PRINT(2, ("Mali MMU: Failed to zero page\n"));
		return 0;
	}

	*virt_addr = mapping;
	return address;
}

/* Release a page from mali_allocate_empty_page(); no-op for MALI_INVALID_PAGE. */
void mali_free_empty_page(mali_dma_addr address, mali_io_address virt_addr)
{
	if (MALI_INVALID_PAGE != address) {
		mali_mmu_release_table_page(address, virt_addr);
	}
}

/* Build the three-page fault-flush hierarchy: a data page, a page table
 * with every PTE pointing at the data page, and a page directory with
 * every PDE pointing at the page table. Cleans up on partial failure. */
_mali_osk_errcode_t mali_create_fault_flush_pages(mali_dma_addr *page_directory,
		mali_io_address *page_directory_mapping,
		mali_dma_addr *page_table, mali_io_address *page_table_mapping,
		mali_dma_addr *data_page, mali_io_address *data_page_mapping)
{
	_mali_osk_errcode_t err;

	err = mali_mmu_get_table_page(data_page, data_page_mapping);
	if (_MALI_OSK_ERR_OK == err) {
		err = mali_mmu_get_table_page(page_table, page_table_mapping);
		if (_MALI_OSK_ERR_OK == err) {
			err = mali_mmu_get_table_page(page_directory, page_directory_mapping);
			if (_MALI_OSK_ERR_OK == err) {
				fill_page(*data_page_mapping, 0);
				fill_page(*page_table_mapping, *data_page | MALI_MMU_FLAGS_DEFAULT);
				fill_page(*page_directory_mapping, *page_table | MALI_MMU_FLAGS_PRESENT);
				MALI_SUCCESS;
			}
			mali_mmu_release_table_page(*page_table, *page_table_mapping);
			*page_table = MALI_INVALID_PAGE;
		}
		mali_mmu_release_table_page(*data_page, *data_page_mapping);
		*data_page = MALI_INVALID_PAGE;
	}
	return err;
}

/* Release the pages created by mali_create_fault_flush_pages(). */
void mali_destroy_fault_flush_pages(
	mali_dma_addr *page_directory, mali_io_address *page_directory_mapping,
	mali_dma_addr *page_table, mali_io_address *page_table_mapping,
	mali_dma_addr *data_page, mali_io_address *data_page_mapping)
{
	if (MALI_INVALID_PAGE != *page_directory) {
		mali_mmu_release_table_page(*page_directory, *page_directory_mapping);
		*page_directory = MALI_INVALID_PAGE;
		*page_directory_mapping = NULL;
	}

	if (MALI_INVALID_PAGE != *page_table) {
		mali_mmu_release_table_page(*page_table, *page_table_mapping);
		*page_table = MALI_INVALID_PAGE;
		*page_table_mapping = NULL;
	}

	if (MALI_INVALID_PAGE != *data_page) {
		mali_mmu_release_table_page(*data_page, *data_page_mapping);
		*data_page = MALI_INVALID_PAGE;
		*data_page_mapping = NULL;
	}
}

/* Fill an entire table page with the same 32-bit value (relaxed writes,
 * one memory barrier at the end). */
static _mali_osk_errcode_t fill_page(mali_io_address mapping, u32 data)
{
	int i;
	MALI_DEBUG_ASSERT_POINTER(mapping);

	for (i = 0; i < MALI_MMU_PAGE_SIZE / 4; i++) {
		_mali_osk_mem_iowrite32_relaxed(mapping, i * sizeof(u32), data);
	}
	_mali_osk_mem_barrier();
	MALI_SUCCESS;
}

/* Ensure page tables exist for [mali_address, mali_address + size) and
 * bump the per-PDE usage counters for the pages the range covers. */
_mali_osk_errcode_t mali_mmu_pagedir_map(struct mali_page_directory *pagedir, u32 mali_address, u32 size)
{
	const int first_pde = MALI_MMU_PDE_ENTRY(mali_address);
	const int last_pde = MALI_MMU_PDE_ENTRY(mali_address + size - 1);
	_mali_osk_errcode_t err;
	mali_io_address pde_mapping;
	mali_dma_addr pde_phys;
	int i, page_count;
	u32 start_address;
	if (last_pde < first_pde)
		return _MALI_OSK_ERR_INVALID_ARGS;

	for (i = first_pde; i <= last_pde; i++) {
		if (0 == (_mali_osk_mem_ioread32(pagedir->page_directory_mapped,
						 i * sizeof(u32)) & MALI_MMU_FLAGS_PRESENT)) {
			/* Page table not present */
			MALI_DEBUG_ASSERT(0 == pagedir->page_entries_usage_count[i]);
			MALI_DEBUG_ASSERT(NULL == pagedir->page_entries_mapped[i]);

			err = mali_mmu_get_table_page(&pde_phys, &pde_mapping);
			if (_MALI_OSK_ERR_OK != err) {
				MALI_PRINT_ERROR(("Failed to allocate page table page.\n"));
				return err;
			}
			pagedir->page_entries_mapped[i] = pde_mapping;

			/* Update PDE, mark as present */
			_mali_osk_mem_iowrite32_relaxed(pagedir->page_directory_mapped, i * sizeof(u32),
							pde_phys | MALI_MMU_FLAGS_PRESENT);

			MALI_DEBUG_ASSERT(0 == pagedir->page_entries_usage_count[i]);
		}

		/* Account for how many 4KiB pages of this 4MiB PDE the range uses. */
		if (first_pde == last_pde) {
			pagedir->page_entries_usage_count[i] += size / MALI_MMU_PAGE_SIZE;
		} else if (i == first_pde) {
			start_address = i * MALI_MMU_VIRTUAL_PAGE_SIZE;
			page_count = (start_address + MALI_MMU_VIRTUAL_PAGE_SIZE - mali_address) / MALI_MMU_PAGE_SIZE;
			pagedir->page_entries_usage_count[i] += page_count;
		} else if (i == last_pde) {
			start_address = i * MALI_MMU_VIRTUAL_PAGE_SIZE;
			page_count = (mali_address + size - start_address) / MALI_MMU_PAGE_SIZE;
			pagedir->page_entries_usage_count[i] += page_count;
		} else {
			/* Fully covered middle PDE: all 1024 pages in use. */
			pagedir->page_entries_usage_count[i] = 1024;
		}
	}
	_mali_osk_write_mem_barrier();

	return _MALI_OSK_ERR_OK;
}

/* Clear the PTEs covering [mali_address, mali_address + size) in one page table. */
MALI_STATIC_INLINE void mali_mmu_zero_pte(mali_io_address page_table, u32 mali_address, u32 size)
{
	int i;
	const int first_pte = MALI_MMU_PTE_ENTRY(mali_address);
	const int last_pte = MALI_MMU_PTE_ENTRY(mali_address + size - 1);

	for (i = first_pte; i <= last_pte; i++) {
		_mali_osk_mem_iowrite32_relaxed(page_table, i * sizeof(u32), 0);
	}
}

/* Read the physical page table address out of one PDE (flag bits stripped). */
static u32 mali_page_directory_get_phys_address(struct mali_page_directory *pagedir, u32 index)
{
	return (_mali_osk_mem_ioread32(pagedir->page_directory_mapped,
				       index * sizeof(u32)) & ~MALI_MMU_FLAGS_MASK);
}


/* Unmap [mali_address, mali_address + size): drop usage counts, free page
 * tables that become empty and invalidate the affected pages in L2. */
_mali_osk_errcode_t mali_mmu_pagedir_unmap(struct mali_page_directory *pagedir, u32 mali_address, u32 size)
{
	const int first_pde = MALI_MMU_PDE_ENTRY(mali_address);
	const int last_pde = MALI_MMU_PDE_ENTRY(mali_address + size - 1);
	u32 left = size;
	int i;
	mali_bool pd_changed = MALI_FALSE;
	u32 pages_to_invalidate[3]; /*
hard-coded to 3: max two pages from the PT level plus max one page from PD level */
	u32 num_pages_inv = 0;
	mali_bool invalidate_all = MALI_FALSE; /* safety mechanism in case page_entries_usage_count is unreliable */

	/* For all page directory entries in range. */
	for (i = first_pde; i <= last_pde; i++) {
		u32 size_in_pde, offset;

		MALI_DEBUG_ASSERT_POINTER(pagedir->page_entries_mapped[i]);
		MALI_DEBUG_ASSERT(0 != pagedir->page_entries_usage_count[i]);

		/* Offset into page table, 0 if mali_address is 4MiB aligned */
		offset = (mali_address & (MALI_MMU_VIRTUAL_PAGE_SIZE - 1));
		if (left < MALI_MMU_VIRTUAL_PAGE_SIZE - offset) {
			size_in_pde = left;
		} else {
			size_in_pde = MALI_MMU_VIRTUAL_PAGE_SIZE - offset;
		}

		pagedir->page_entries_usage_count[i] -= size_in_pde / MALI_MMU_PAGE_SIZE;

		/* If entire page table is unused, free it */
		if (0 == pagedir->page_entries_usage_count[i]) {
			u32 page_phys;
			void *page_virt;
			MALI_DEBUG_PRINT(4, ("Releasing page table as this is the last reference\n"));
			/* last reference removed, no need to zero out each PTE */

			page_phys = MALI_MMU_ENTRY_ADDRESS(_mali_osk_mem_ioread32(pagedir->page_directory_mapped, i * sizeof(u32)));
			page_virt = pagedir->page_entries_mapped[i];
			pagedir->page_entries_mapped[i] = NULL;
			_mali_osk_mem_iowrite32_relaxed(pagedir->page_directory_mapped, i * sizeof(u32), 0);

			mali_mmu_release_table_page(page_phys, page_virt);
			pd_changed = MALI_TRUE;
		} else {
			MALI_DEBUG_ASSERT(num_pages_inv < 2);
			if (num_pages_inv < 2) {
				pages_to_invalidate[num_pages_inv] = mali_page_directory_get_phys_address(pagedir, i);
				num_pages_inv++;
			} else {
				invalidate_all = MALI_TRUE;
			}

			/* If part of the page table is still in use, zero the relevant PTEs */
			mali_mmu_zero_pte(pagedir->page_entries_mapped[i], mali_address, size_in_pde);
		}

		left -= size_in_pde;
		mali_address += size_in_pde;
	}
	_mali_osk_write_mem_barrier();

	/* L2 pages invalidation */
	if (MALI_TRUE == pd_changed) {
		MALI_DEBUG_ASSERT(num_pages_inv < 3);
		if (num_pages_inv < 3) {
			pages_to_invalidate[num_pages_inv] = pagedir->page_directory;
			num_pages_inv++;
		} else {
			invalidate_all = MALI_TRUE;
		}
	}

	if (invalidate_all) {
		mali_l2_cache_invalidate_all();
	} else {
		mali_l2_cache_invalidate_all_pages(pages_to_invalidate, num_pages_inv);
	}

	MALI_SUCCESS;
}

/* Allocate and zero a new top-level page directory. Returns NULL on failure. */
struct mali_page_directory *mali_mmu_pagedir_alloc(void)
{
	struct mali_page_directory *pagedir;
	_mali_osk_errcode_t err;
	mali_dma_addr phys;

	pagedir = _mali_osk_calloc(1, sizeof(struct mali_page_directory));
	if (NULL == pagedir) {
		return NULL;
	}

	err = mali_mmu_get_table_page(&phys, &pagedir->page_directory_mapped);
	if (_MALI_OSK_ERR_OK != err) {
		_mali_osk_free(pagedir);
		return NULL;
	}

	pagedir->page_directory = (u32)phys;

	/* Zero page directory */
	fill_page(pagedir->page_directory_mapped, 0);

	return pagedir;
}

/* Free every page table still referenced by the directory, then the
 * directory page itself and the struct. */
void mali_mmu_pagedir_free(struct mali_page_directory *pagedir)
{
	const int num_page_table_entries = sizeof(pagedir->page_entries_mapped) / sizeof(pagedir->page_entries_mapped[0]);
	int i;

	/* Free referenced page tables and zero PDEs. */
	for (i = 0; i < num_page_table_entries; i++) {
		if (pagedir->page_directory_mapped && (_mali_osk_mem_ioread32(
				pagedir->page_directory_mapped,
				sizeof(u32)*i) & MALI_MMU_FLAGS_PRESENT)) {
			mali_dma_addr phys = _mali_osk_mem_ioread32(pagedir->page_directory_mapped,
					     i * sizeof(u32)) & ~MALI_MMU_FLAGS_MASK;
			_mali_osk_mem_iowrite32_relaxed(pagedir->page_directory_mapped, i * sizeof(u32), 0);
			mali_mmu_release_table_page(phys, pagedir->page_entries_mapped[i]);
		}
	}
	_mali_osk_write_mem_barrier();

	/* Free the page directory page. */
	mali_mmu_release_table_page(pagedir->page_directory, pagedir->page_directory_mapped);

	_mali_osk_free(pagedir);
}


/* Write PTEs mapping [mali_address, mali_address + size) to consecutive
 * physical pages starting at phys_address, with the given permission bits.
 * The covering page tables must already exist (see mali_mmu_pagedir_map). */
void mali_mmu_pagedir_update(struct mali_page_directory *pagedir, u32 mali_address,
			     mali_dma_addr phys_address, u32 size, u32 permission_bits)
{
	u32 end_address = mali_address + size;
	u32 mali_phys = (u32)phys_address;

	/* Map physical pages into MMU page tables */
	for (; mali_address < end_address; mali_address += MALI_MMU_PAGE_SIZE, mali_phys += MALI_MMU_PAGE_SIZE) {
		MALI_DEBUG_ASSERT_POINTER(pagedir->page_entries_mapped[MALI_MMU_PDE_ENTRY(mali_address)]);
		_mali_osk_mem_iowrite32_relaxed(pagedir->page_entries_mapped[MALI_MMU_PDE_ENTRY(mali_address)],
						MALI_MMU_PTE_ENTRY(mali_address) * sizeof(u32),
						mali_phys | permission_bits);
	}
}

/* Debug helper: print the PDE/PTE state for a faulting address. */
void mali_mmu_pagedir_diag(struct mali_page_directory *pagedir, u32 fault_addr)
{
#if defined(DEBUG)
	u32 pde_index, pte_index;
	u32 pde, pte;

	pde_index = MALI_MMU_PDE_ENTRY(fault_addr);
	pte_index = MALI_MMU_PTE_ENTRY(fault_addr);


	pde = _mali_osk_mem_ioread32(pagedir->page_directory_mapped,
				     pde_index * sizeof(u32));


	if (pde & MALI_MMU_FLAGS_PRESENT) {
		u32 pte_addr = MALI_MMU_ENTRY_ADDRESS(pde);

		pte = _mali_osk_mem_ioread32(pagedir->page_entries_mapped[pde_index],
					     pte_index * sizeof(u32));

		MALI_DEBUG_PRINT(2, ("\tMMU: %08x: Page table present: %08x\n"
				     "\t\tPTE: %08x, page %08x is %s\n",
				     fault_addr, pte_addr, pte,
				     MALI_MMU_ENTRY_ADDRESS(pte),
				     pte & MALI_MMU_FLAGS_DEFAULT ?
"rw" : "not present")); + } else { + MALI_DEBUG_PRINT(2, ("\tMMU: %08x: Page table not present: %08x\n", + fault_addr, pde)); + } +#else + MALI_IGNORE(pagedir); + MALI_IGNORE(fault_addr); +#endif +} + +/* For instrumented */ +struct dump_info { + u32 buffer_left; + u32 register_writes_size; + u32 page_table_dump_size; + u32 *buffer; +}; + +static _mali_osk_errcode_t writereg(u32 where, u32 what, const char *comment, struct dump_info *info) +{ + if (NULL != info) { + info->register_writes_size += sizeof(u32) * 2; /* two 32-bit words */ + + if (NULL != info->buffer) { + /* check that we have enough space */ + if (info->buffer_left < sizeof(u32) * 2) MALI_ERROR(_MALI_OSK_ERR_NOMEM); + + *info->buffer = where; + info->buffer++; + + *info->buffer = what; + info->buffer++; + + info->buffer_left -= sizeof(u32) * 2; + } + } + + MALI_SUCCESS; +} + +static _mali_osk_errcode_t mali_mmu_dump_page(mali_io_address page, u32 phys_addr, struct dump_info *info) +{ + if (NULL != info) { + /* 4096 for the page and 4 bytes for the address */ + const u32 page_size_in_elements = MALI_MMU_PAGE_SIZE / 4; + const u32 page_size_in_bytes = MALI_MMU_PAGE_SIZE; + const u32 dump_size_in_bytes = MALI_MMU_PAGE_SIZE + 4; + + info->page_table_dump_size += dump_size_in_bytes; + + if (NULL != info->buffer) { + if (info->buffer_left < dump_size_in_bytes) MALI_ERROR(_MALI_OSK_ERR_NOMEM); + + *info->buffer = phys_addr; + info->buffer++; + + _mali_osk_memcpy(info->buffer, page, page_size_in_bytes); + info->buffer += page_size_in_elements; + + info->buffer_left -= dump_size_in_bytes; + } + } + + MALI_SUCCESS; +} + +static _mali_osk_errcode_t dump_mmu_page_table(struct mali_page_directory *pagedir, struct dump_info *info) +{ + MALI_DEBUG_ASSERT_POINTER(pagedir); + MALI_DEBUG_ASSERT_POINTER(info); + + if (NULL != pagedir->page_directory_mapped) { + int i; + + MALI_CHECK_NO_ERROR( + mali_mmu_dump_page(pagedir->page_directory_mapped, pagedir->page_directory, info) + ); + + for (i = 0; i < 1024; i++) { + if 
(NULL != pagedir->page_entries_mapped[i]) { + MALI_CHECK_NO_ERROR( + mali_mmu_dump_page(pagedir->page_entries_mapped[i], + _mali_osk_mem_ioread32(pagedir->page_directory_mapped, + i * sizeof(u32)) & ~MALI_MMU_FLAGS_MASK, info) + ); + } + } + } + + MALI_SUCCESS; +} + +static _mali_osk_errcode_t dump_mmu_registers(struct mali_page_directory *pagedir, struct dump_info *info) +{ + MALI_CHECK_NO_ERROR(writereg(0x00000000, pagedir->page_directory, + "set the page directory address", info)); + MALI_CHECK_NO_ERROR(writereg(0x00000008, 4, "zap???", info)); + MALI_CHECK_NO_ERROR(writereg(0x00000008, 0, "enable paging", info)); + MALI_SUCCESS; +} + +_mali_osk_errcode_t _mali_ukk_query_mmu_page_table_dump_size(_mali_uk_query_mmu_page_table_dump_size_s *args) +{ + struct dump_info info = { 0, 0, 0, NULL }; + struct mali_session_data *session_data; + + session_data = (struct mali_session_data *)(uintptr_t)(args->ctx); + MALI_DEBUG_ASSERT_POINTER(session_data); + MALI_DEBUG_ASSERT_POINTER(args); + + MALI_CHECK_NO_ERROR(dump_mmu_registers(session_data->page_directory, &info)); + MALI_CHECK_NO_ERROR(dump_mmu_page_table(session_data->page_directory, &info)); + args->size = info.register_writes_size + info.page_table_dump_size; + MALI_SUCCESS; +} + +_mali_osk_errcode_t _mali_ukk_dump_mmu_page_table(_mali_uk_dump_mmu_page_table_s *args) +{ + struct dump_info info = { 0, 0, 0, NULL }; + struct mali_session_data *session_data; + + MALI_DEBUG_ASSERT_POINTER(args); + + session_data = (struct mali_session_data *)(uintptr_t)(args->ctx); + MALI_DEBUG_ASSERT_POINTER(session_data); + + info.buffer_left = args->size; + info.buffer = (u32 *)(uintptr_t)args->buffer; + + args->register_writes = (uintptr_t)info.buffer; + MALI_CHECK_NO_ERROR(dump_mmu_registers(session_data->page_directory, &info)); + + args->page_table_dump = (uintptr_t)info.buffer; + MALI_CHECK_NO_ERROR(dump_mmu_page_table(session_data->page_directory, &info)); + + args->register_writes_size = info.register_writes_size; + 
args->page_table_dump_size = info.page_table_dump_size; + + MALI_SUCCESS; +} diff --git a/drivers/gpu/arm/utgard/common/mali_mmu_page_directory.h b/drivers/gpu/arm/utgard/common/mali_mmu_page_directory.h new file mode 100644 index 000000000000..561fb60b9803 --- /dev/null +++ b/drivers/gpu/arm/utgard/common/mali_mmu_page_directory.h @@ -0,0 +1,110 @@ +/* + * Copyright (C) 2011-2015 ARM Limited. All rights reserved. + * + * This program is free software and is provided to you under the terms of the GNU General Public License version 2 + * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence. + * + * A copy of the licence is included with the program, and can also be obtained from Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. + */ + +#ifndef __MALI_MMU_PAGE_DIRECTORY_H__ +#define __MALI_MMU_PAGE_DIRECTORY_H__ + +#include "mali_osk.h" + +/** + * Size of an MMU page in bytes + */ +#define MALI_MMU_PAGE_SIZE 0x1000 + +/* + * Size of the address space referenced by a page table page + */ +#define MALI_MMU_VIRTUAL_PAGE_SIZE 0x400000 /* 4 MiB */ + +/** + * Page directory index from address + * Calculates the page directory index from the given address + */ +#define MALI_MMU_PDE_ENTRY(address) (((address)>>22) & 0x03FF) + +/** + * Page table index from address + * Calculates the page table index from the given address + */ +#define MALI_MMU_PTE_ENTRY(address) (((address)>>12) & 0x03FF) + +/** + * Extract the memory address from an PDE/PTE entry + */ +#define MALI_MMU_ENTRY_ADDRESS(value) ((value) & 0xFFFFFC00) + +#define MALI_INVALID_PAGE ((u32)(~0)) + +/** + * + */ +typedef enum mali_mmu_entry_flags { + MALI_MMU_FLAGS_PRESENT = 0x01, + MALI_MMU_FLAGS_READ_PERMISSION = 0x02, + MALI_MMU_FLAGS_WRITE_PERMISSION = 0x04, + MALI_MMU_FLAGS_OVERRIDE_CACHE = 0x8, + MALI_MMU_FLAGS_WRITE_CACHEABLE = 0x10, + MALI_MMU_FLAGS_WRITE_ALLOCATE = 0x20, + 
MALI_MMU_FLAGS_WRITE_BUFFERABLE = 0x40, + MALI_MMU_FLAGS_READ_CACHEABLE = 0x80, + MALI_MMU_FLAGS_READ_ALLOCATE = 0x100, + MALI_MMU_FLAGS_MASK = 0x1FF, +} mali_mmu_entry_flags; + + +#define MALI_MMU_FLAGS_FORCE_GP_READ_ALLOCATE ( \ + MALI_MMU_FLAGS_PRESENT | \ + MALI_MMU_FLAGS_READ_PERMISSION | \ + MALI_MMU_FLAGS_WRITE_PERMISSION | \ + MALI_MMU_FLAGS_OVERRIDE_CACHE | \ + MALI_MMU_FLAGS_WRITE_CACHEABLE | \ + MALI_MMU_FLAGS_WRITE_BUFFERABLE | \ + MALI_MMU_FLAGS_READ_CACHEABLE | \ + MALI_MMU_FLAGS_READ_ALLOCATE ) + +#define MALI_MMU_FLAGS_DEFAULT ( \ + MALI_MMU_FLAGS_PRESENT | \ + MALI_MMU_FLAGS_READ_PERMISSION | \ + MALI_MMU_FLAGS_WRITE_PERMISSION ) + + +struct mali_page_directory { + u32 page_directory; /**< Physical address of the memory session's page directory */ + mali_io_address page_directory_mapped; /**< Pointer to the mapped version of the page directory into the kernel's address space */ + + mali_io_address page_entries_mapped[1024]; /**< Pointers to the page tables which exists in the page directory mapped into the kernel's address space */ + u32 page_entries_usage_count[1024]; /**< Tracks usage count of the page table pages, so they can be releases on the last reference */ +}; + +/* Map Mali virtual address space (i.e. ensure page tables exist for the virtual range) */ +_mali_osk_errcode_t mali_mmu_pagedir_map(struct mali_page_directory *pagedir, u32 mali_address, u32 size); +_mali_osk_errcode_t mali_mmu_pagedir_unmap(struct mali_page_directory *pagedir, u32 mali_address, u32 size); + +/* Back virtual address space with actual pages. Assumes input is contiguous and 4k aligned. 
*/ +void mali_mmu_pagedir_update(struct mali_page_directory *pagedir, u32 mali_address, + mali_dma_addr phys_address, u32 size, u32 permission_bits); + +u32 mali_allocate_empty_page(mali_io_address *virtual); +void mali_free_empty_page(mali_dma_addr address, mali_io_address virt_addr); +_mali_osk_errcode_t mali_create_fault_flush_pages(mali_dma_addr *page_directory, + mali_io_address *page_directory_mapping, + mali_dma_addr *page_table, mali_io_address *page_table_mapping, + mali_dma_addr *data_page, mali_io_address *data_page_mapping); +void mali_destroy_fault_flush_pages( + mali_dma_addr *page_directory, mali_io_address *page_directory_mapping, + mali_dma_addr *page_table, mali_io_address *page_table_mapping, + mali_dma_addr *data_page, mali_io_address *data_page_mapping); + +struct mali_page_directory *mali_mmu_pagedir_alloc(void); +void mali_mmu_pagedir_free(struct mali_page_directory *pagedir); + +void mali_mmu_pagedir_diag(struct mali_page_directory *pagedir, u32 fault_addr); + +#endif /* __MALI_MMU_PAGE_DIRECTORY_H__ */ diff --git a/drivers/gpu/arm/utgard/common/mali_osk.h b/drivers/gpu/arm/utgard/common/mali_osk.h new file mode 100644 index 000000000000..4c9e57cba18f --- /dev/null +++ b/drivers/gpu/arm/utgard/common/mali_osk.h @@ -0,0 +1,1397 @@ +/* + * Copyright (C) 2010-2015 ARM Limited. All rights reserved. + * + * This program is free software and is provided to you under the terms of the GNU General Public License version 2 + * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence. + * + * A copy of the licence is included with the program, and can also be obtained from Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. 
+ */ + +/** + * @file mali_osk.h + * Defines the OS abstraction layer for the kernel device driver (OSK) + */ + +#ifndef __MALI_OSK_H__ +#define __MALI_OSK_H__ + +#include "mali_osk_types.h" +#include "mali_osk_specific.h" /* include any per-os specifics */ +#include "mali_osk_locks.h" + +#ifdef __cplusplus +extern "C" { +#endif + +/** + * @addtogroup uddapi Unified Device Driver (UDD) APIs + * + * @{ + */ + +/** + * @addtogroup oskapi UDD OS Abstraction for Kernel-side (OSK) APIs + * + * @{ + */ + +/** @addtogroup _mali_osk_lock OSK Mutual Exclusion Locks + * @{ */ + +#ifdef DEBUG +/** @brief Macro for asserting that the current thread holds a given lock + */ +#define MALI_DEBUG_ASSERT_LOCK_HELD(l) MALI_DEBUG_ASSERT(_mali_osk_lock_get_owner((_mali_osk_lock_debug_t *)l) == _mali_osk_get_tid()); + +/** @brief returns a lock's owner (thread id) if debugging is enabled + */ +#else +#define MALI_DEBUG_ASSERT_LOCK_HELD(l) do {} while(0) +#endif + +/** @} */ /* end group _mali_osk_lock */ + +/** @addtogroup _mali_osk_miscellaneous + * @{ */ + +/** @brief Find the containing structure of another structure + * + * This is the reverse of the operation 'offsetof'. This means that the + * following condition is satisfied: + * + * ptr == _MALI_OSK_CONTAINER_OF( &ptr->member, type, member ) + * + * When ptr is of type 'type'. + * + * Its purpose it to recover a larger structure that has wrapped a smaller one. + * + * @note no type or memory checking occurs to ensure that a wrapper structure + * does in fact exist, and that it is being recovered with respect to the + * correct member. + * + * @param ptr the pointer to the member that is contained within the larger + * structure + * @param type the type of the structure that contains the member + * @param member the name of the member in the structure that ptr points to. + * @return a pointer to a \a type object which contains \a member, as pointed + * to by \a ptr. 
+ */ +#define _MALI_OSK_CONTAINER_OF(ptr, type, member) \ + ((type *)( ((char *)ptr) - offsetof(type,member) )) + +/** @addtogroup _mali_osk_wq + * @{ */ + +/** @brief Initialize work queues (for deferred work) + * + * @return _MALI_OSK_ERR_OK on success, otherwise failure. + */ +_mali_osk_errcode_t _mali_osk_wq_init(void); + +/** @brief Terminate work queues (for deferred work) + */ +void _mali_osk_wq_term(void); + +/** @brief Create work in the work queue + * + * Creates a work object which can be scheduled in the work queue. When + * scheduled, \a handler will be called with \a data as the argument. + * + * Refer to \ref _mali_osk_wq_schedule_work() for details on how work + * is scheduled in the queue. + * + * The returned pointer must be freed with \ref _mali_osk_wq_delete_work() + * when no longer needed. + */ +_mali_osk_wq_work_t *_mali_osk_wq_create_work(_mali_osk_wq_work_handler_t handler, void *data); + +/** @brief A high priority version of \a _mali_osk_wq_create_work() + * + * Creates a work object which can be scheduled in the high priority work queue. + * + * This is unfortunately needed to get low latency scheduling of the Mali cores. Normally we would + * schedule the next job in hw_irq or tasklet, but often we can't since we need to synchronously map + * and unmap shared memory when a job is connected to external fences (timelines). And this requires + * taking a mutex. + * + * We do signal a lot of other (low priority) work also as part of the job being finished, and if we + * don't set this Mali scheduling thread as high priority, we see that the CPU scheduler often runs + * random things instead of starting the next GPU job when the GPU is idle. So setting the gpu + * scheduler to high priority does give a visually more responsive system. 
+ * + * Start the high priority work with: \a _mali_osk_wq_schedule_work_high_pri() + */ +_mali_osk_wq_work_t *_mali_osk_wq_create_work_high_pri(_mali_osk_wq_work_handler_t handler, void *data); + +/** @brief Delete a work object + * + * This will flush the work queue to ensure that the work handler will not + * be called after deletion. + */ +void _mali_osk_wq_delete_work(_mali_osk_wq_work_t *work); + +/** @brief Delete a work object + * + * This will NOT flush the work queue, so only call this if you are sure that the work handler will + * not be called after deletion. + */ +void _mali_osk_wq_delete_work_nonflush(_mali_osk_wq_work_t *work); + +/** @brief Cause a queued, deferred call of the work handler + * + * _mali_osk_wq_schedule_work provides a mechanism for enqueuing deferred calls + * to the work handler. After calling \ref _mali_osk_wq_schedule_work(), the + * work handler will be scheduled to run at some point in the future. + * + * Typically this is called by the IRQ upper-half to defer further processing of + * IRQ-related work to the IRQ bottom-half handler. This is necessary for work + * that cannot be done in an IRQ context by the IRQ upper-half handler. Timer + * callbacks also use this mechanism, because they are treated as though they + * operate in an IRQ context. Refer to \ref _mali_osk_timer_t for more + * information. + * + * Code that operates in a kernel-process context (with no IRQ context + * restrictions) may also enqueue deferred calls to the IRQ bottom-half. The + * advantage over direct calling is that deferred calling allows the caller and + * IRQ bottom half to hold the same mutex, with a guarantee that they will not + * deadlock just by using this mechanism. + * + * _mali_osk_wq_schedule_work() places deferred call requests on a queue, to + * allow for more than one thread to make a deferred call. Therfore, if it is + * called 'K' times, then the IRQ bottom-half will be scheduled 'K' times too. 
+ * 'K' is a number that is implementation-specific. + * + * _mali_osk_wq_schedule_work() is guaranteed to not block on: + * - enqueuing a deferred call request. + * - the completion of the work handler. + * + * This is to prevent deadlock. For example, if _mali_osk_wq_schedule_work() + * blocked, then it would cause a deadlock when the following two conditions + * hold: + * - The work handler callback (of type _mali_osk_wq_work_handler_t) locks + * a mutex + * - And, at the same time, the caller of _mali_osk_wq_schedule_work() also + * holds the same mutex + * + * @note care must be taken to not overflow the queue that + * _mali_osk_wq_schedule_work() operates on. Code must be structured to + * ensure that the number of requests made to the queue is bounded. Otherwise, + * work will be lost. + * + * The queue that _mali_osk_wq_schedule_work implements is a FIFO of N-writer, + * 1-reader type. The writers are the callers of _mali_osk_wq_schedule_work + * (all OSK-registered IRQ upper-half handlers in the system, watchdog timers, + * callers from a Kernel-process context). The reader is a single thread that + * handles all OSK-registered work. + * + * @param work a pointer to the _mali_osk_wq_work_t object corresponding to the + * work to begin processing. + */ +void _mali_osk_wq_schedule_work(_mali_osk_wq_work_t *work); + +/** @brief Cause a queued, deferred call of the high priority work handler + * + * Function is the same as \a _mali_osk_wq_schedule_work() with the only + * difference that it runs in a high (real time) priority on the system. + * + * Should only be used as a substitue for doing the same work in interrupts. + * + * This is allowed to sleep, but the work should be small since it will block + * all other applications. +*/ +void _mali_osk_wq_schedule_work_high_pri(_mali_osk_wq_work_t *work); + +/** @brief Flush the work queue + * + * This will flush the OSK work queue, ensuring all work in the queue has + * completed before returning. 
+ * + * Since this blocks on the completion of work in the work-queue, the + * caller of this function \b must \b not hold any mutexes that are taken by + * any registered work handler. To do so may cause a deadlock. + * + */ +void _mali_osk_wq_flush(void); + +/** @brief Create work in the delayed work queue + * + * Creates a work object which can be scheduled in the work queue. When + * scheduled, a timer will be start and the \a handler will be called with + * \a data as the argument when timer out + * + * Refer to \ref _mali_osk_wq_delayed_schedule_work() for details on how work + * is scheduled in the queue. + * + * The returned pointer must be freed with \ref _mali_osk_wq_delayed_delete_work_nonflush() + * when no longer needed. + */ +_mali_osk_wq_delayed_work_t *_mali_osk_wq_delayed_create_work(_mali_osk_wq_work_handler_t handler, void *data); + +/** @brief Delete a work object + * + * This will NOT flush the work queue, so only call this if you are sure that the work handler will + * not be called after deletion. + */ +void _mali_osk_wq_delayed_delete_work_nonflush(_mali_osk_wq_delayed_work_t *work); + +/** @brief Cancel a delayed work without waiting for it to finish + * + * Note that the \a work callback function may still be running on return from + * _mali_osk_wq_delayed_cancel_work_async(). + * + * @param work The delayed work to be cancelled + */ +void _mali_osk_wq_delayed_cancel_work_async(_mali_osk_wq_delayed_work_t *work); + +/** @brief Cancel a delayed work and wait for it to finish + * + * When this function returns, the \a work was either cancelled or it finished running. + * + * @param work The delayed work to be cancelled + */ +void _mali_osk_wq_delayed_cancel_work_sync(_mali_osk_wq_delayed_work_t *work); + +/** @brief Put \a work task in global workqueue after delay + * + * After waiting for a given time this puts a job in the kernel-global + * workqueue. 
+ * + * If \a work was already on a queue, this function will return without doing anything + * + * @param work job to be done + * @param delay number of jiffies to wait or 0 for immediate execution + */ +void _mali_osk_wq_delayed_schedule_work(_mali_osk_wq_delayed_work_t *work, u32 delay); + +/** @} */ /* end group _mali_osk_wq */ + + +/** @addtogroup _mali_osk_irq + * @{ */ + +/** @brief Initialize IRQ handling for a resource + * + * Registers an interrupt handler \a uhandler for the given IRQ number \a irqnum. + * \a data will be passed as argument to the handler when an interrupt occurs. + * + * If \a irqnum is -1, _mali_osk_irq_init will probe for the IRQ number using + * the supplied \a trigger_func and \a ack_func. These functions will also + * receive \a data as their argument. + * + * @param irqnum The IRQ number that the resource uses, as seen by the CPU. + * The value -1 has a special meaning which indicates the use of probing, and + * trigger_func and ack_func must be non-NULL. + * @param uhandler The interrupt handler, corresponding to a ISR handler for + * the resource + * @param int_data resource specific data, which will be passed to uhandler + * @param trigger_func Optional: a function to trigger the resource's irq, to + * probe for the interrupt. Use NULL if irqnum != -1. + * @param ack_func Optional: a function to acknowledge the resource's irq, to + * probe for the interrupt. Use NULL if irqnum != -1. + * @param probe_data resource-specific data, which will be passed to + * (if present) trigger_func and ack_func + * @param description textual description of the IRQ resource. + * @return on success, a pointer to a _mali_osk_irq_t object, which represents + * the IRQ handling on this resource. NULL on failure. 
+ */ +_mali_osk_irq_t *_mali_osk_irq_init(u32 irqnum, _mali_osk_irq_uhandler_t uhandler, void *int_data, _mali_osk_irq_trigger_t trigger_func, _mali_osk_irq_ack_t ack_func, void *probe_data, const char *description); + +/** @brief Terminate IRQ handling on a resource. + * + * This will disable the interrupt from the device, and then waits for any + * currently executing IRQ handlers to complete. + * + * @note If work is deferred to an IRQ bottom-half handler through + * \ref _mali_osk_wq_schedule_work(), be sure to flush any remaining work + * with \ref _mali_osk_wq_flush() or (implicitly) with \ref _mali_osk_wq_delete_work() + * + * @param irq a pointer to the _mali_osk_irq_t object corresponding to the + * resource whose IRQ handling is to be terminated. + */ +void _mali_osk_irq_term(_mali_osk_irq_t *irq); + +/** @} */ /* end group _mali_osk_irq */ + + +/** @addtogroup _mali_osk_atomic + * @{ */ + +/** @brief Decrement an atomic counter + * + * @note It is an error to decrement the counter beyond -(1<<23) + * + * @param atom pointer to an atomic counter */ +void _mali_osk_atomic_dec(_mali_osk_atomic_t *atom); + +/** @brief Decrement an atomic counter, return new value + * + * @param atom pointer to an atomic counter + * @return The new value, after decrement */ +u32 _mali_osk_atomic_dec_return(_mali_osk_atomic_t *atom); + +/** @brief Increment an atomic counter + * + * @note It is an error to increment the counter beyond (1<<23)-1 + * + * @param atom pointer to an atomic counter */ +void _mali_osk_atomic_inc(_mali_osk_atomic_t *atom); + +/** @brief Increment an atomic counter, return new value + * + * @param atom pointer to an atomic counter */ +u32 _mali_osk_atomic_inc_return(_mali_osk_atomic_t *atom); + +/** @brief Initialize an atomic counter + * + * @note the parameter required is a u32, and so signed integers should be + * cast to u32. + * + * @param atom pointer to an atomic counter + * @param val the value to initialize the atomic counter. 
+ */ +void _mali_osk_atomic_init(_mali_osk_atomic_t *atom, u32 val); + +/** @brief Read a value from an atomic counter + * + * This can only be safely used to determine the value of the counter when it + * is guaranteed that other threads will not be modifying the counter. This + * makes its usefulness limited. + * + * @param atom pointer to an atomic counter + */ +u32 _mali_osk_atomic_read(_mali_osk_atomic_t *atom); + +/** @brief Terminate an atomic counter + * + * @param atom pointer to an atomic counter + */ +void _mali_osk_atomic_term(_mali_osk_atomic_t *atom); + +/** @brief Assign a new val to atomic counter, and return the old atomic counter + * + * @param atom pointer to an atomic counter + * @param val the new value assign to the atomic counter + * @return the old value of the atomic counter + */ +u32 _mali_osk_atomic_xchg(_mali_osk_atomic_t *atom, u32 val); +/** @} */ /* end group _mali_osk_atomic */ + + +/** @defgroup _mali_osk_memory OSK Memory Allocation + * @{ */ + +/** @brief Allocate zero-initialized memory. + * + * Returns a buffer capable of containing at least \a n elements of \a size + * bytes each. The buffer is initialized to zero. + * + * If there is a need for a bigger block of memory (16KB or bigger), then + * consider to use _mali_osk_vmalloc() instead, as this function might + * map down to a OS function with size limitations. + * + * The buffer is suitably aligned for storage and subsequent access of every + * type that the compiler supports. Therefore, the pointer to the start of the + * buffer may be cast into any pointer type, and be subsequently accessed from + * such a pointer, without loss of information. + * + * When the buffer is no longer in use, it must be freed with _mali_osk_free(). + * Failure to do so will cause a memory leak. + * + * @note Most toolchains supply memory allocation functions that meet the + * compiler's alignment requirements. 
+ * + * @param n Number of elements to allocate + * @param size Size of each element + * @return On success, the zero-initialized buffer allocated. NULL on failure + */ +void *_mali_osk_calloc(u32 n, u32 size); + +/** @brief Allocate memory. + * + * Returns a buffer capable of containing at least \a size bytes. The + * contents of the buffer are undefined. + * + * If there is a need for a bigger block of memory (16KB or bigger), then + * consider to use _mali_osk_vmalloc() instead, as this function might + * map down to a OS function with size limitations. + * + * The buffer is suitably aligned for storage and subsequent access of every + * type that the compiler supports. Therefore, the pointer to the start of the + * buffer may be cast into any pointer type, and be subsequently accessed from + * such a pointer, without loss of information. + * + * When the buffer is no longer in use, it must be freed with _mali_osk_free(). + * Failure to do so will cause a memory leak. + * + * @note Most toolchains supply memory allocation functions that meet the + * compiler's alignment requirements. + * + * Remember to free memory using _mali_osk_free(). + * @param size Number of bytes to allocate + * @return On success, the buffer allocated. NULL on failure. + */ +void *_mali_osk_malloc(u32 size); + +/** @brief Free memory. + * + * Reclaims the buffer pointed to by the parameter \a ptr for the system. + * All memory returned from _mali_osk_malloc() and _mali_osk_calloc() + * must be freed before the application exits. Otherwise, + * a memory leak will occur. + * + * Memory must be freed once. It is an error to free the same non-NULL pointer + * more than once. + * + * It is legal to free the NULL pointer. + * + * @param ptr Pointer to buffer to free + */ +void _mali_osk_free(void *ptr); + +/** @brief Allocate memory. + * + * Returns a buffer capable of containing at least \a size bytes. The + * contents of the buffer are undefined. 
+ * + * This function is potentially slower than _mali_osk_malloc() and _mali_osk_calloc(), + * but do support bigger sizes. + * + * The buffer is suitably aligned for storage and subsequent access of every + * type that the compiler supports. Therefore, the pointer to the start of the + * buffer may be cast into any pointer type, and be subsequently accessed from + * such a pointer, without loss of information. + * + * When the buffer is no longer in use, it must be freed with _mali_osk_free(). + * Failure to do so will cause a memory leak. + * + * @note Most toolchains supply memory allocation functions that meet the + * compiler's alignment requirements. + * + * Remember to free memory using _mali_osk_free(). + * @param size Number of bytes to allocate + * @return On success, the buffer allocated. NULL on failure. + */ +void *_mali_osk_valloc(u32 size); + +/** @brief Free memory. + * + * Reclaims the buffer pointed to by the parameter \a ptr for the system. + * All memory returned from _mali_osk_valloc() must be freed before the + * application exits. Otherwise a memory leak will occur. + * + * Memory must be freed once. It is an error to free the same non-NULL pointer + * more than once. + * + * It is legal to free the NULL pointer. + * + * @param ptr Pointer to buffer to free + */ +void _mali_osk_vfree(void *ptr); + +/** @brief Copies memory. + * + * Copies the \a len bytes from the buffer pointed by the parameter \a src + * directly to the buffer pointed by \a dst. + * + * It is an error for \a src to overlap \a dst anywhere in \a len bytes. + * + * @param dst Pointer to the destination array where the content is to be + * copied. + * @param src Pointer to the source of data to be copied. + * @param len Number of bytes to copy. + * @return \a dst is always passed through unmodified. + */ +void *_mali_osk_memcpy(void *dst, const void *src, u32 len); + +/** @brief Fills memory. 
+ * + * Sets the first \a n bytes of the block of memory pointed to by \a s to + * the specified value + * @param s Pointer to the block of memory to fill. + * @param c Value to be set, passed as u32. Only the 8 Least Significant Bits (LSB) + * are used. + * @param n Number of bytes to be set to the value. + * @return \a s is always passed through unmodified + */ +void *_mali_osk_memset(void *s, u32 c, u32 n); +/** @} */ /* end group _mali_osk_memory */ + + +/** @brief Checks the amount of memory allocated + * + * Checks that not more than \a max_allocated bytes are allocated. + * + * Some OS bring up an interactive out of memory dialogue when the + * system runs out of memory. This can stall non-interactive + * apps (e.g. automated test runs). This function can be used to + * not trigger the OOM dialogue by keeping allocations + * within a certain limit. + * + * @return MALI_TRUE when \a max_allocated bytes are not in use yet. MALI_FALSE + * when at least \a max_allocated bytes are in use. + */ +mali_bool _mali_osk_mem_check_allocated(u32 max_allocated); + + +/** @addtogroup _mali_osk_low_level_memory + * @{ */ + +/** @brief Issue a memory barrier + * + * This defines an arbitrary memory barrier operation, which forces an ordering constraint + * on memory read and write operations. + */ +void _mali_osk_mem_barrier(void); + +/** @brief Issue a write memory barrier + * + * This defines an write memory barrier operation which forces an ordering constraint + * on memory write operations. + */ +void _mali_osk_write_mem_barrier(void); + +/** @brief Map a physically contiguous region into kernel space + * + * This is primarily used for mapping in registers from resources, and Mali-MMU + * page tables. The mapping is only visable from kernel-space. + * + * Access has to go through _mali_osk_mem_ioread32 and _mali_osk_mem_iowrite32 + * + * @param phys CPU-physical base address of the memory to map in. 
This must + * be aligned to the system's page size, which is assumed to be 4K. + * @param size the number of bytes of physically contiguous address space to + * map in + * @param description A textual description of the memory being mapped in. + * @return On success, a Mali IO address through which the mapped-in + * memory/registers can be accessed. NULL on failure. + */ +mali_io_address _mali_osk_mem_mapioregion(uintptr_t phys, u32 size, const char *description); + +/** @brief Unmap a physically contiguous address range from kernel space. + * + * The address range should be one previously mapped in through + * _mali_osk_mem_mapioregion. + * + * It is a programming error to do (but not limited to) the following: + * - attempt an unmap twice + * - unmap only part of a range obtained through _mali_osk_mem_mapioregion + * - unmap more than the range obtained through _mali_osk_mem_mapioregion + * - unmap an address range that was not successfully mapped using + * _mali_osk_mem_mapioregion + * - provide a mapping that does not map to phys. + * + * @param phys CPU-physical base address of the memory that was originally + * mapped in. This must be aligned to the system's page size, which is assumed + * to be 4K + * @param size The number of bytes that were originally mapped in. + * @param mapping The Mali IO address through which the mapping is + * accessed. + */ +void _mali_osk_mem_unmapioregion(uintptr_t phys, u32 size, mali_io_address mapping); + +/** @brief Allocate and Map a physically contiguous region into kernel space + * + * This is used for allocating physically contiguous regions (such as Mali-MMU + * page tables) and mapping them into kernel space. The mapping is only + * visible from kernel-space. + * + * The alignment of the returned memory is guaranteed to be at least + * _MALI_OSK_CPU_PAGE_SIZE. 
+ * + * Access must go through _mali_osk_mem_ioread32 and _mali_osk_mem_iowrite32 + * + * @note This function is primarily to provide support for OSs that are + * incapable of separating the tasks 'allocate physically contiguous memory' + * and 'map it into kernel space' + * + * @param[out] phys CPU-physical base address of memory that was allocated. + * (*phys) will be guaranteed to be aligned to at least + * _MALI_OSK_CPU_PAGE_SIZE on success. + * + * @param[in] size the number of bytes of physically contiguous memory to + * allocate. This must be a multiple of _MALI_OSK_CPU_PAGE_SIZE. + * + * @return On success, a Mali IO address through which the mapped-in + * memory/registers can be accessed. NULL on failure, and (*phys) is unmodified. + */ +mali_io_address _mali_osk_mem_allocioregion(u32 *phys, u32 size); + +/** @brief Free a physically contiguous address range from kernel space. + * + * The address range should be one previously mapped in through + * _mali_osk_mem_allocioregion. + * + * It is a programming error to do (but not limited to) the following: + * - attempt a free twice on the same ioregion + * - free only part of a range obtained through _mali_osk_mem_allocioregion + * - free more than the range obtained through _mali_osk_mem_allocioregion + * - free an address range that was not successfully mapped using + * _mali_osk_mem_allocioregion + * - provide a mapping that does not map to phys. + * + * @param phys CPU-physical base address of the memory that was originally + * mapped in, which was aligned to _MALI_OSK_CPU_PAGE_SIZE. + * @param size The number of bytes that were originally mapped in, which was + * a multiple of _MALI_OSK_CPU_PAGE_SIZE. + * @param mapping The Mali IO address through which the mapping is + * accessed. 
+ */
+void _mali_osk_mem_freeioregion(u32 phys, u32 size, mali_io_address mapping);
+
+/** @brief Request a region of physically contiguous memory
+ *
+ * This is used to ensure exclusive access to a region of physically contiguous
+ * memory.
+ *
+ * It is acceptable to implement this as a stub. However, it is then the job
+ * of the System Integrator to ensure that no other device driver will be using
+ * the physical address ranges used by Mali, while the Mali device driver is
+ * loaded.
+ *
+ * @param phys CPU-physical base address of the memory to request. This must
+ * be aligned to the system's page size, which is assumed to be 4K.
+ * @param size the number of bytes of physically contiguous address space to
+ * request.
+ * @param description A textual description of the memory being requested.
+ * @return _MALI_OSK_ERR_OK on success. Otherwise, a suitable
+ * _mali_osk_errcode_t on failure.
+ */
+_mali_osk_errcode_t _mali_osk_mem_reqregion(uintptr_t phys, u32 size, const char *description);
+
+/** @brief Un-request a region of physically contiguous memory
+ *
+ * This is used to release a region of physically contiguous memory previously
+ * requested through _mali_osk_mem_reqregion, so that other device drivers may
+ * use it. This will be called at the time of Mali device driver termination.
+ *
+ * It is a programming error to attempt to:
+ * - unrequest a region twice
+ * - unrequest only part of a range obtained through _mali_osk_mem_reqregion
+ * - unrequest more than the range obtained through _mali_osk_mem_reqregion
+ * - unrequest an address range that was not successfully requested using
+ * _mali_osk_mem_reqregion
+ *
+ * @param phys CPU-physical base address of the memory to un-request. This must
+ * be aligned to the system's page size, which is assumed to be 4K
+ * @param size the number of bytes of physically contiguous address space to
+ * un-request.
+ */ +void _mali_osk_mem_unreqregion(uintptr_t phys, u32 size); + +/** @brief Read from a location currently mapped in through + * _mali_osk_mem_mapioregion + * + * This reads a 32-bit word from a 32-bit aligned location. It is a programming + * error to provide unaligned locations, or to read from memory that is not + * mapped in, or not mapped through either _mali_osk_mem_mapioregion() or + * _mali_osk_mem_allocioregion(). + * + * @param mapping Mali IO address to read from + * @param offset Byte offset from the given IO address to operate on, must be a multiple of 4 + * @return the 32-bit word from the specified location. + */ +u32 _mali_osk_mem_ioread32(volatile mali_io_address mapping, u32 offset); + +/** @brief Write to a location currently mapped in through + * _mali_osk_mem_mapioregion without memory barriers + * + * This write a 32-bit word to a 32-bit aligned location without using memory barrier. + * It is a programming error to provide unaligned locations, or to write to memory that is not + * mapped in, or not mapped through either _mali_osk_mem_mapioregion() or + * _mali_osk_mem_allocioregion(). + * + * @param mapping Mali IO address to write to + * @param offset Byte offset from the given IO address to operate on, must be a multiple of 4 + * @param val the 32-bit word to write. + */ +void _mali_osk_mem_iowrite32_relaxed(volatile mali_io_address addr, u32 offset, u32 val); + +/** @brief Write to a location currently mapped in through + * _mali_osk_mem_mapioregion with write memory barrier + * + * This write a 32-bit word to a 32-bit aligned location. It is a programming + * error to provide unaligned locations, or to write to memory that is not + * mapped in, or not mapped through either _mali_osk_mem_mapioregion() or + * _mali_osk_mem_allocioregion(). + * + * @param mapping Mali IO address to write to + * @param offset Byte offset from the given IO address to operate on, must be a multiple of 4 + * @param val the 32-bit word to write. 
+ */ +void _mali_osk_mem_iowrite32(volatile mali_io_address mapping, u32 offset, u32 val); + +/** @brief Flush all CPU caches + * + * This should only be implemented if flushing of the cache is required for + * memory mapped in through _mali_osk_mem_mapregion. + */ +void _mali_osk_cache_flushall(void); + +/** @brief Flush any caches necessary for the CPU and MALI to have the same view of a range of uncached mapped memory + * + * This should only be implemented if your OS doesn't do a full cache flush (inner & outer) + * after allocating uncached mapped memory. + * + * Some OS do not perform a full cache flush (including all outer caches) for uncached mapped memory. + * They zero the memory through a cached mapping, then flush the inner caches but not the outer caches. + * This is required for MALI to have the correct view of the memory. + */ +void _mali_osk_cache_ensure_uncached_range_flushed(void *uncached_mapping, u32 offset, u32 size); + +/** @brief Safely copy as much data as possible from src to dest + * + * Do not crash if src or dest isn't available. + * + * @param dest Destination buffer (limited to user space mapped Mali memory) + * @param src Source buffer + * @param size Number of bytes to copy + * @return Number of bytes actually copied + */ +u32 _mali_osk_mem_write_safe(void *dest, const void *src, u32 size); + +/** @} */ /* end group _mali_osk_low_level_memory */ + + +/** @addtogroup _mali_osk_notification + * + * User space notification framework + * + * Communication with user space of asynchronous events is performed through a + * synchronous call to the \ref u_k_api. + * + * Since the events are asynchronous, the events have to be queued until a + * synchronous U/K API call can be made by user-space. A U/K API call might also + * be received before any event has happened. 
Therefore the notifications the + * different subsystems wants to send to user space has to be queued for later + * reception, or a U/K API call has to be blocked until an event has occured. + * + * Typical uses of notifications are after running of jobs on the hardware or + * when changes to the system is detected that needs to be relayed to user + * space. + * + * After an event has occured user space has to be notified using some kind of + * message. The notification framework supports sending messages to waiting + * threads or queueing of messages until a U/K API call is made. + * + * The notification queue is a FIFO. There are no restrictions on the numbers + * of readers or writers in the queue. + * + * A message contains what user space needs to identifiy how to handle an + * event. This includes a type field and a possible type specific payload. + * + * A notification to user space is represented by a + * \ref _mali_osk_notification_t object. A sender gets hold of such an object + * using _mali_osk_notification_create(). The buffer given by the + * _mali_osk_notification_t::result_buffer field in the object is used to store + * any type specific data. The other fields are internal to the queue system + * and should not be touched. + * + * @{ */ + +/** @brief Create a notification object + * + * Returns a notification object which can be added to the queue of + * notifications pending for user space transfer. + * + * The implementation will initialize all members of the + * \ref _mali_osk_notification_t object. In particular, the + * _mali_osk_notification_t::result_buffer member will be initialized to point + * to \a size bytes of storage, and that storage will be suitably aligned for + * storage of any structure. That is, the created buffer meets the same + * requirements as _mali_osk_malloc(). + * + * The notification object must be deleted when not in use. Use + * _mali_osk_notification_delete() for deleting it. 
+ * + * @note You \b must \b not call _mali_osk_free() on a \ref _mali_osk_notification_t, + * object, or on a _mali_osk_notification_t::result_buffer. You must only use + * _mali_osk_notification_delete() to free the resources assocaited with a + * \ref _mali_osk_notification_t object. + * + * @param type The notification type + * @param size The size of the type specific buffer to send + * @return Pointer to a notification object with a suitable buffer, or NULL on error. + */ +_mali_osk_notification_t *_mali_osk_notification_create(u32 type, u32 size); + +/** @brief Delete a notification object + * + * This must be called to reclaim the resources of a notification object. This + * includes: + * - The _mali_osk_notification_t::result_buffer + * - The \ref _mali_osk_notification_t itself. + * + * A notification object \b must \b not be used after it has been deleted by + * _mali_osk_notification_delete(). + * + * In addition, the notification object may not be deleted while it is in a + * queue. That is, if it has been placed on a queue with + * _mali_osk_notification_queue_send(), then it must not be deleted until + * it has been received by a call to _mali_osk_notification_queue_receive(). + * Otherwise, the queue may be corrupted. + * + * @param object the notification object to delete. + */ +void _mali_osk_notification_delete(_mali_osk_notification_t *object); + +/** @brief Create a notification queue + * + * Creates a notification queue which can be used to queue messages for user + * delivery and get queued messages from + * + * The queue is a FIFO, and has no restrictions on the numbers of readers or + * writers. + * + * When the queue is no longer in use, it must be terminated with + * \ref _mali_osk_notification_queue_term(). Failure to do so will result in a + * memory leak. + * + * @return Pointer to a new notification queue or NULL on error. 
+ */ +_mali_osk_notification_queue_t *_mali_osk_notification_queue_init(void); + +/** @brief Destroy a notification queue + * + * Destroys a notification queue and frees associated resources from the queue. + * + * A notification queue \b must \b not be destroyed in the following cases: + * - while there are \ref _mali_osk_notification_t objects in the queue. + * - while there are writers currently acting upon the queue. That is, while + * a thread is currently calling \ref _mali_osk_notification_queue_send() on + * the queue, or while a thread may call + * \ref _mali_osk_notification_queue_send() on the queue in the future. + * - while there are readers currently waiting upon the queue. That is, while + * a thread is currently calling \ref _mali_osk_notification_queue_receive() on + * the queue, or while a thread may call + * \ref _mali_osk_notification_queue_receive() on the queue in the future. + * + * Therefore, all \ref _mali_osk_notification_t objects must be flushed and + * deleted by the code that makes use of the notification queues, since only + * they know the structure of the _mali_osk_notification_t::result_buffer + * (even if it may only be a flat sturcture). + * + * @note Since the queue is a FIFO, the code using notification queues may + * create its own 'flush' type of notification, to assist in flushing the + * queue. + * + * Once the queue has been destroyed, it must not be used again. + * + * @param queue The queue to destroy + */ +void _mali_osk_notification_queue_term(_mali_osk_notification_queue_t *queue); + +/** @brief Schedule notification for delivery + * + * When a \ref _mali_osk_notification_t object has been created successfully + * and set up, it may be added to the queue of objects waiting for user space + * transfer. + * + * The sending will not block if the queue is full. 
+ *
+ * A \ref _mali_osk_notification_t object \b must \b not be put on two different
+ * queues at the same time, or enqueued twice onto a single queue before
+ * reception. However, it is acceptable for it to be requeued \em after reception
+ * from a call to _mali_osk_notification_queue_receive(), even onto the same queue.
+ *
+ * Again, requeuing must also not enqueue onto two different queues at the same
+ * time, or enqueue onto the same queue twice before reception.
+ *
+ * @param queue The notification queue to add this notification to
+ * @param object The entry to add
+ */
+void _mali_osk_notification_queue_send(_mali_osk_notification_queue_t *queue, _mali_osk_notification_t *object);
+
+/** @brief Receive a notification from a queue
+ *
+ * Receives a single notification from the given queue.
+ *
+ * If no notifications are ready the thread will sleep until one becomes ready.
+ * Therefore, notifications may not be received into an
+ * IRQ or 'atomic' context (that is, a context where sleeping is disallowed).
+ *
+ * @param queue The queue to receive from
+ * @param result Pointer to storage of a pointer of type
+ * \ref _mali_osk_notification_t*. \a result will be written to such that the
+ * expression \a (*result) will evaluate to a pointer to a valid
+ * \ref _mali_osk_notification_t object, or NULL if none were received.
+ * @return _MALI_OSK_ERR_OK on success. _MALI_OSK_ERR_RESTARTSYSCALL if the sleep was interrupted.
+ */
+_mali_osk_errcode_t _mali_osk_notification_queue_receive(_mali_osk_notification_queue_t *queue, _mali_osk_notification_t **result);
+
+/** @brief Dequeues a notification from a queue
+ *
+ * Receives a single notification from the given queue.
+ *
+ * If no notifications are ready the function call will return an error code.
+ *
+ * @param queue The queue to receive from
+ * @param result Pointer to storage of a pointer of type
+ * \ref _mali_osk_notification_t*. 
\a result will be written to such that the + * expression \a (*result) will evaluate to a pointer to a valid + * \ref _mali_osk_notification_t object, or NULL if none were received. + * @return _MALI_OSK_ERR_OK on success, _MALI_OSK_ERR_ITEM_NOT_FOUND if queue was empty. + */ +_mali_osk_errcode_t _mali_osk_notification_queue_dequeue(_mali_osk_notification_queue_t *queue, _mali_osk_notification_t **result); + +/** @} */ /* end group _mali_osk_notification */ + + +/** @addtogroup _mali_osk_timer + * + * Timers use the OS's representation of time, which are 'ticks'. This is to + * prevent aliasing problems between the internal timer time, and the time + * asked for. + * + * @{ */ + +/** @brief Initialize a timer + * + * Allocates resources for a new timer, and initializes them. This does not + * start the timer. + * + * @return a pointer to the allocated timer object, or NULL on failure. + */ +_mali_osk_timer_t *_mali_osk_timer_init(void); + +/** @brief Start a timer + * + * It is an error to start a timer without setting the callback via + * _mali_osk_timer_setcallback(). + * + * It is an error to use this to start an already started timer. + * + * The timer will expire in \a ticks_to_expire ticks, at which point, the + * callback function will be invoked with the callback-specific data, + * as registered by _mali_osk_timer_setcallback(). + * + * @param tim the timer to start + * @param ticks_to_expire the amount of time in ticks for the timer to run + * before triggering. + */ +void _mali_osk_timer_add(_mali_osk_timer_t *tim, unsigned long ticks_to_expire); + +/** @brief Modify a timer + * + * Set the relative time at which a timer will expire, and start it if it is + * stopped. If \a ticks_to_expire 0 the timer fires immediately. + * + * It is an error to modify a timer without setting the callback via + * _mali_osk_timer_setcallback(). 
+ * + * The timer will expire at \a ticks_to_expire from the time of the call, at + * which point, the callback function will be invoked with the + * callback-specific data, as set by _mali_osk_timer_setcallback(). + * + * @param tim the timer to modify, and start if necessary + * @param ticks_to_expire the \em absolute time in ticks at which this timer + * should trigger. + * + */ +void _mali_osk_timer_mod(_mali_osk_timer_t *tim, unsigned long ticks_to_expire); + +/** @brief Stop a timer, and block on its completion. + * + * Stop the timer. When the function returns, it is guaranteed that the timer's + * callback will not be running on any CPU core. + * + * Since stoping the timer blocks on compeletion of the callback, the callback + * may not obtain any mutexes that the caller holds. Otherwise, a deadlock will + * occur. + * + * @note While the callback itself is guaranteed to not be running, work + * enqueued on the work-queue by the timer (with + * \ref _mali_osk_wq_schedule_work()) may still run. The timer callback and + * work handler must take this into account. + * + * It is legal to stop an already stopped timer. + * + * @param tim the timer to stop. + * + */ +void _mali_osk_timer_del(_mali_osk_timer_t *tim); + +/** @brief Stop a timer. + * + * Stop the timer. When the function returns, the timer's callback may still be + * running on any CPU core. + * + * It is legal to stop an already stopped timer. + * + * @param tim the timer to stop. + */ +void _mali_osk_timer_del_async(_mali_osk_timer_t *tim); + +/** @brief Check if timer is pending. + * + * Check if timer is active. + * + * @param tim the timer to check + * @return MALI_TRUE if time is active, MALI_FALSE if it is not active + */ +mali_bool _mali_osk_timer_pending(_mali_osk_timer_t *tim); + +/** @brief Set a timer's callback parameters. + * + * This must be called at least once before a timer is started/modified. + * + * After a timer has been stopped or expires, the callback remains set. 
This + * means that restarting the timer will call the same function with the same + * parameters on expiry. + * + * @param tim the timer to set callback on. + * @param callback Function to call when timer expires + * @param data Function-specific data to supply to the function on expiry. + */ +void _mali_osk_timer_setcallback(_mali_osk_timer_t *tim, _mali_osk_timer_callback_t callback, void *data); + +/** @brief Terminate a timer, and deallocate resources. + * + * The timer must first be stopped by calling _mali_osk_timer_del(). + * + * It is a programming error for _mali_osk_timer_term() to be called on: + * - timer that is currently running + * - a timer that is currently executing its callback. + * + * @param tim the timer to deallocate. + */ +void _mali_osk_timer_term(_mali_osk_timer_t *tim); +/** @} */ /* end group _mali_osk_timer */ + + +/** @defgroup _mali_osk_time OSK Time functions + * + * \ref _mali_osk_time use the OS's representation of time, which are + * 'ticks'. This is to prevent aliasing problems between the internal timer + * time, and the time asked for. + * + * OS tick time is measured as a u32. The time stored in a u32 may either be + * an absolute time, or a time delta between two events. Whilst it is valid to + * use math opeartors to \em change the tick value represented as a u32, it + * is often only meaningful to do such operations on time deltas, rather than + * on absolute time. However, it is meaningful to add/subtract time deltas to + * absolute times. + * + * Conversion between tick time and milliseconds (ms) may not be loss-less, + * and are \em implementation \em depenedant. + * + * Code use OS time must take this into account, since: + * - a small OS time may (or may not) be rounded + * - a large time may (or may not) overflow + * + * @{ */ + +/** @brief Return whether ticka occurs after or at the same time as tickb + * + * Systems where ticks can wrap must handle that. 
+ * + * @param ticka ticka + * @param tickb tickb + * @return MALI_TRUE if ticka represents a time that occurs at or after tickb. + */ +mali_bool _mali_osk_time_after_eq(unsigned long ticka, unsigned long tickb); + +/** @brief Convert milliseconds to OS 'ticks' + * + * @param ms time interval in milliseconds + * @return the corresponding time interval in OS ticks. + */ +unsigned long _mali_osk_time_mstoticks(u32 ms); + +/** @brief Convert OS 'ticks' to milliseconds + * + * @param ticks time interval in OS ticks. + * @return the corresponding time interval in milliseconds + */ +u32 _mali_osk_time_tickstoms(unsigned long ticks); + + +/** @brief Get the current time in OS 'ticks'. + * @return the current time in OS 'ticks'. + */ +unsigned long _mali_osk_time_tickcount(void); + +/** @brief Cause a microsecond delay + * + * The delay will have microsecond resolution, and is necessary for correct + * operation of the driver. At worst, the delay will be \b at least \a usecs + * microseconds, and so may be (significantly) more. + * + * This function may be implemented as a busy-wait, which is the most sensible + * implementation. On OSs where there are situations in which a thread must not + * sleep, this is definitely implemented as a busy-wait. + * + * @param usecs the number of microseconds to wait for. + */ +void _mali_osk_time_ubusydelay(u32 usecs); + +/** @brief Return time in nano seconds, since any given reference. + * + * @return Time in nano seconds + */ +u64 _mali_osk_time_get_ns(void); + +/** @brief Return time in nano seconds, since boot time. + * + * @return Time in nano seconds + */ +u64 _mali_osk_boot_time_get_ns(void); + +/** @} */ /* end group _mali_osk_time */ + +/** @defgroup _mali_osk_math OSK Math + * @{ */ + +/** @brief Count Leading Zeros (Little-endian) + * + * @note This function must be implemented to support the reference + * implementation of _mali_osk_find_first_zero_bit, as defined in + * mali_osk_bitops.h. 
+ * + * @param val 32-bit words to count leading zeros on + * @return the number of leading zeros. + */ +u32 _mali_osk_clz(u32 val); + +/** @brief find last (most-significant) bit set + * + * @param val 32-bit words to count last bit set on + * @return last bit set. + */ +u32 _mali_osk_fls(u32 val); + +/** @} */ /* end group _mali_osk_math */ + +/** @addtogroup _mali_osk_wait_queue OSK Wait Queue functionality + * @{ */ + +/** @brief Initialize an empty Wait Queue */ +_mali_osk_wait_queue_t *_mali_osk_wait_queue_init(void); + +/** @brief Sleep if condition is false + * + * @param queue the queue to use + * @param condition function pointer to a boolean function + * @param data data parameter for condition function + * + * Put thread to sleep if the given \a condition function returns false. When + * being asked to wake up again, the condition will be re-checked and the + * thread only woken up if the condition is now true. + */ +void _mali_osk_wait_queue_wait_event(_mali_osk_wait_queue_t *queue, mali_bool(*condition)(void *), void *data); + +/** @brief Sleep if condition is false + * + * @param queue the queue to use + * @param condition function pointer to a boolean function + * @param data data parameter for condition function + * @param timeout timeout in ms + * + * Put thread to sleep if the given \a condition function returns false. When + * being asked to wake up again, the condition will be re-checked and the + * thread only woken up if the condition is now true. Will return if time + * exceeds timeout. + */ +void _mali_osk_wait_queue_wait_event_timeout(_mali_osk_wait_queue_t *queue, mali_bool(*condition)(void *), void *data, u32 timeout); + +/** @brief Wake up all threads in wait queue if their respective conditions are + * true + * + * @param queue the queue whose threads should be woken up + * + * Wake up all threads in wait queue \a queue whose condition is now true. 
+ */ +void _mali_osk_wait_queue_wake_up(_mali_osk_wait_queue_t *queue); + +/** @brief terminate a wait queue + * + * @param queue the queue to terminate. + */ +void _mali_osk_wait_queue_term(_mali_osk_wait_queue_t *queue); +/** @} */ /* end group _mali_osk_wait_queue */ + + +/** @addtogroup _mali_osk_miscellaneous + * @{ */ + +/** @brief Output a device driver debug message. + * + * The interpretation of \a fmt is the same as the \c format parameter in + * _mali_osu_vsnprintf(). + * + * @param fmt a _mali_osu_vsnprintf() style format string + * @param ... a variable-number of parameters suitable for \a fmt + */ +void _mali_osk_dbgmsg(const char *fmt, ...); + +/** @brief Print fmt into buf. + * + * The interpretation of \a fmt is the same as the \c format parameter in + * _mali_osu_vsnprintf(). + * + * @param buf a pointer to the result buffer + * @param size the total number of bytes allowed to write to \a buf + * @param fmt a _mali_osu_vsnprintf() style format string + * @param ... a variable-number of parameters suitable for \a fmt + * @return The number of bytes written to \a buf + */ +u32 _mali_osk_snprintf(char *buf, u32 size, const char *fmt, ...); + +/** @brief Print fmt into print_ctx. + * + * The interpretation of \a fmt is the same as the \c format parameter in + * _mali_osu_vsnprintf(). + * + * @param print_ctx a pointer to the result file buffer + * @param fmt a _mali_osu_vsnprintf() style format string + * @param ... a variable-number of parameters suitable for \a fmt + */ +void _mali_osk_ctxprintf(_mali_osk_print_ctx *print_ctx, const char *fmt, ...); + +/** @brief Abnormal process abort. + * + * Terminates the caller-process if this function is called. + * + * This function will be called from Debug assert-macros in mali_kernel_common.h. + * + * This function will never return - because to continue from a Debug assert + * could cause even more problems, and hinder debugging of the initial problem. 
+ *
+ * This function is only used in Debug builds, and is not used in Release builds.
+ */
+void _mali_osk_abort(void);
+
+/** @brief Sets breakpoint at point where function is called.
+ *
+ * This function will be called from Debug assert-macros in mali_kernel_common.h,
+ * to assist in debugging. If debugging at this level is not required, then this
+ * function may be implemented as a stub.
+ *
+ * This function is only used in Debug builds, and is not used in Release builds.
+ */
+void _mali_osk_break(void);
+
+/** @brief Return an identifier for the calling process.
+ *
+ * @return Identifier for the calling process.
+ */
+u32 _mali_osk_get_pid(void);
+
+/** @brief Return a name for the calling process.
+ *
+ * @return name for the calling process.
+ */
+char *_mali_osk_get_comm(void);
+
+/** @brief Return an identifier for the calling thread.
+ *
+ * @return Identifier for the calling thread.
+ */
+u32 _mali_osk_get_tid(void);
+
+
+/** @brief Take a reference to the power manager system for the Mali device (synchronously).
+ *
+ * When function returns successfully, Mali is ON.
+ *
+ * @note Call \a _mali_osk_pm_dev_ref_put() to release this reference.
+ */
+_mali_osk_errcode_t _mali_osk_pm_dev_ref_get_sync(void);
+
+/** @brief Take a reference to the external power manager system for the Mali device (asynchronously).
+ *
+ * Mali might not yet be on after this function has returned.
+ * Please use \a _mali_osk_pm_dev_barrier() or \a _mali_osk_pm_dev_ref_get_sync()
+ * to wait for Mali to be powered on.
+ *
+ * @note Call \a _mali_osk_pm_dev_ref_put() to release this reference.
+ */
+_mali_osk_errcode_t _mali_osk_pm_dev_ref_get_async(void);
+
+/** @brief Release the reference to the external power manager system for the Mali device.
+ *
+ * When the reference count reaches zero, the cores can be off.
+ *
+ * @note This must be used to release references taken with
+ * \a _mali_osk_pm_dev_ref_get_sync() or \a _mali_osk_pm_dev_ref_get_async().
+ */
+void _mali_osk_pm_dev_ref_put(void);
+
+/** @brief Block until pending PM operations are done
+ */
+void _mali_osk_pm_dev_barrier(void);
+
+/** @} */ /* end group _mali_osk_miscellaneous */
+
+/** @defgroup _mali_osk_bitmap OSK Bitmap
+ * @{ */
+
+/** @brief Allocate a unique number from the bitmap object.
+ *
+ * @param bitmap Initialized bitmap object.
+ * @return A unique number from the bitmap object.
+ */
+u32 _mali_osk_bitmap_alloc(struct _mali_osk_bitmap *bitmap);
+
+/** @brief Free an integer to the bitmap object.
+ *
+ * @param bitmap Initialized bitmap object.
+ * @param obj A number allocated from the bitmap object.
+ */
+void _mali_osk_bitmap_free(struct _mali_osk_bitmap *bitmap, u32 obj);
+
+/** @brief Allocate a continuous number block from the bitmap object.
+ *
+ * @param bitmap Initialized bitmap object.
+ * @return start number of the continuous number block.
+ */
+u32 _mali_osk_bitmap_alloc_range(struct _mali_osk_bitmap *bitmap, int cnt);
+
+/** @brief Free a block of continuous numbers to the bitmap object.
+ *
+ * @param bitmap Initialized bitmap object.
+ * @param obj Start number.
+ * @param cnt The size of the continuous number block.
+ */
+void _mali_osk_bitmap_free_range(struct _mali_osk_bitmap *bitmap, u32 obj, int cnt);
+
+/** @brief Count of numbers still available for allocation in the given bitmap object.
+ *
+ */
+u32 _mali_osk_bitmap_avail(struct _mali_osk_bitmap *bitmap);
+
+/** @brief Initialize a bitmap object.
+ *
+ * @param bitmap A pointer to an uninitialized bitmap object.
+ * @param num Size of the bitmap object; decides the memory size allocated.
+ * @param reserve start number used to allocate.
+ */
+int _mali_osk_bitmap_init(struct _mali_osk_bitmap *bitmap, u32 num, u32 reserve);
+
+/** @brief Free the given bitmap object.
+ *
+ * @param bitmap Initialized bitmap object.
+ */ +void _mali_osk_bitmap_term(struct _mali_osk_bitmap *bitmap); +/** @} */ /* end group _mali_osk_bitmap */ + +/** @} */ /* end group osuapi */ + +/** @} */ /* end group uddapi */ + + + +#ifdef __cplusplus +} +#endif + +/* Check standard inlines */ +#ifndef MALI_STATIC_INLINE +#error MALI_STATIC_INLINE not defined on your OS +#endif + +#ifndef MALI_NON_STATIC_INLINE +#error MALI_NON_STATIC_INLINE not defined on your OS +#endif + +#endif /* __MALI_OSK_H__ */ diff --git a/drivers/gpu/arm/utgard/common/mali_osk_bitops.h b/drivers/gpu/arm/utgard/common/mali_osk_bitops.h new file mode 100644 index 000000000000..c1709f94c883 --- /dev/null +++ b/drivers/gpu/arm/utgard/common/mali_osk_bitops.h @@ -0,0 +1,162 @@ +/* + * Copyright (C) 2010, 2013-2015 ARM Limited. All rights reserved. + * + * This program is free software and is provided to you under the terms of the GNU General Public License version 2 + * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence. + * + * A copy of the licence is included with the program, and can also be obtained from Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. 
+ */ + +/** + * @file mali_osk_bitops.h + * Implementation of the OS abstraction layer for the kernel device driver + */ + +#ifndef __MALI_OSK_BITOPS_H__ +#define __MALI_OSK_BITOPS_H__ + +#ifdef __cplusplus +extern "C" { +#endif + +MALI_STATIC_INLINE void _mali_internal_clear_bit(u32 bit, u32 *addr) +{ + MALI_DEBUG_ASSERT(bit < 32); + MALI_DEBUG_ASSERT(NULL != addr); + + (*addr) &= ~(1 << bit); +} + +MALI_STATIC_INLINE void _mali_internal_set_bit(u32 bit, u32 *addr) +{ + MALI_DEBUG_ASSERT(bit < 32); + MALI_DEBUG_ASSERT(NULL != addr); + + (*addr) |= (1 << bit); +} + +MALI_STATIC_INLINE u32 _mali_internal_test_bit(u32 bit, u32 value) +{ + MALI_DEBUG_ASSERT(bit < 32); + return value & (1 << bit); +} + +MALI_STATIC_INLINE int _mali_internal_find_first_zero_bit(u32 value) +{ + u32 inverted; + u32 negated; + u32 isolated; + u32 leading_zeros; + + /* Begin with xxx...x0yyy...y, where ys are 1, number of ys is in range 0..31 */ + inverted = ~value; /* zzz...z1000...0 */ + /* Using count_trailing_zeros on inverted value - + * See ARM System Developers Guide for details of count_trailing_zeros */ + + /* Isolate the zero: it is preceeded by a run of 1s, so add 1 to it */ + negated = (u32) - inverted ; /* -a == ~a + 1 (mod 2^n) for n-bit numbers */ + /* negated = xxx...x1000...0 */ + + isolated = negated & inverted ; /* xxx...x1000...0 & zzz...z1000...0, zs are ~xs */ + /* And so the first zero bit is in the same position as the 1 == number of 1s that preceeded it + * Note that the output is zero if value was all 1s */ + + leading_zeros = _mali_osk_clz(isolated); + + return 31 - leading_zeros; +} + + +/** @defgroup _mali_osk_bitops OSK Non-atomic Bit-operations + * @{ */ + +/** + * These bit-operations do not work atomically, and so locks must be used if + * atomicity is required. + * + * Reference implementations for Little Endian are provided, and so it should + * not normally be necessary to re-implement these. 
Efficient bit-twiddling + * techniques are used where possible, implemented in portable C. + * + * Note that these reference implementations rely on _mali_osk_clz() being + * implemented. + */ + +/** @brief Clear a bit in a sequence of 32-bit words + * @param nr bit number to clear, starting from the (Little-endian) least + * significant bit + * @param addr starting point for counting. + */ +MALI_STATIC_INLINE void _mali_osk_clear_nonatomic_bit(u32 nr, u32 *addr) +{ + addr += nr >> 5; /* find the correct word */ + nr = nr & ((1 << 5) - 1); /* The bit number within the word */ + + _mali_internal_clear_bit(nr, addr); +} + +/** @brief Set a bit in a sequence of 32-bit words + * @param nr bit number to set, starting from the (Little-endian) least + * significant bit + * @param addr starting point for counting. + */ +MALI_STATIC_INLINE void _mali_osk_set_nonatomic_bit(u32 nr, u32 *addr) +{ + addr += nr >> 5; /* find the correct word */ + nr = nr & ((1 << 5) - 1); /* The bit number within the word */ + + _mali_internal_set_bit(nr, addr); +} + +/** @brief Test a bit in a sequence of 32-bit words + * @param nr bit number to test, starting from the (Little-endian) least + * significant bit + * @param addr starting point for counting. + * @return zero if bit was clear, non-zero if set. Do not rely on the return + * value being related to the actual word under test. + */ +MALI_STATIC_INLINE u32 _mali_osk_test_bit(u32 nr, u32 *addr) +{ + addr += nr >> 5; /* find the correct word */ + nr = nr & ((1 << 5) - 1); /* The bit number within the word */ + + return _mali_internal_test_bit(nr, *addr); +} + +/* Return maxbit if not found */ +/** @brief Find the first zero bit in a sequence of 32-bit words + * @param addr starting point for search. + * @param maxbit the maximum number of bits to search + * @return the number of the first zero bit found, or maxbit if none were found + * in the specified range. 
+ */ +MALI_STATIC_INLINE u32 _mali_osk_find_first_zero_bit(const u32 *addr, u32 maxbit) +{ + u32 total; + + for (total = 0; total < maxbit; total += 32, ++addr) { + int result; + result = _mali_internal_find_first_zero_bit(*addr); + + /* non-negative signifies the bit was found */ + if (result >= 0) { + total += (u32)result; + break; + } + } + + /* Now check if we reached maxbit or above */ + if (total >= maxbit) { + total = maxbit; + } + + return total; /* either the found bit nr, or maxbit if not found */ +} +/** @} */ /* end group _mali_osk_bitops */ + +#ifdef __cplusplus +} +#endif + +#endif /* __MALI_OSK_BITOPS_H__ */ diff --git a/drivers/gpu/arm/utgard/common/mali_osk_list.h b/drivers/gpu/arm/utgard/common/mali_osk_list.h new file mode 100644 index 000000000000..22d22446d7a0 --- /dev/null +++ b/drivers/gpu/arm/utgard/common/mali_osk_list.h @@ -0,0 +1,273 @@ +/* + * Copyright (C) 2010-2015 ARM Limited. All rights reserved. + * + * This program is free software and is provided to you under the terms of the GNU General Public License version 2 + * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence. + * + * A copy of the licence is included with the program, and can also be obtained from Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. 
+ */ + +/** + * @file mali_osk_list.h + * Implementation of the OS abstraction layer for the kernel device driver + */ + +#ifndef __MALI_OSK_LIST_H__ +#define __MALI_OSK_LIST_H__ + +#include "mali_osk.h" +#include "mali_kernel_common.h" + +#ifdef __cplusplus +extern "C" { +#endif + +MALI_STATIC_INLINE void __mali_osk_list_add(_mali_osk_list_t *new_entry, _mali_osk_list_t *prev, _mali_osk_list_t *next) +{ + next->prev = new_entry; + new_entry->next = next; + new_entry->prev = prev; + prev->next = new_entry; +} + +MALI_STATIC_INLINE void __mali_osk_list_del(_mali_osk_list_t *prev, _mali_osk_list_t *next) +{ + next->prev = prev; + prev->next = next; +} + +/** @addtogroup _mali_osk_list OSK Doubly-Linked Circular Lists + * @{ */ + +/** Reference implementations of Doubly-linked Circular Lists are provided. + * There is often no need to re-implement these. + * + * @note The implementation may differ subtly from any lists the OS provides. + * For this reason, these lists should not be mixed with OS-specific lists + * inside the OSK/UKK implementation. */ + +/** @brief Initialize a list to be a head of an empty list + * @param exp the list to initialize. */ +#define _MALI_OSK_INIT_LIST_HEAD(exp) _mali_osk_list_init(exp) + +/** @brief Define a list variable, which is uninitialized. + * @param exp the name of the variable that the list will be defined as. */ +#define _MALI_OSK_LIST_HEAD(exp) _mali_osk_list_t exp + +/** @brief Define a list variable, which is initialized. + * @param exp the name of the variable that the list will be defined as. */ +#define _MALI_OSK_LIST_HEAD_STATIC_INIT(exp) _mali_osk_list_t exp = { &exp, &exp } + +/** @brief Initialize a list element. + * + * All list elements must be initialized before use. + * + * Do not use on any list element that is present in a list without using + * _mali_osk_list_del first, otherwise this will break the list. 
+ * + * @param list the list element to initialize + */ +MALI_STATIC_INLINE void _mali_osk_list_init(_mali_osk_list_t *list) +{ + list->next = list; + list->prev = list; +} + +/** @brief Insert a single list element after an entry in a list + * + * As an example, if this is inserted to the head of a list, then this becomes + * the first element of the list. + * + * Do not use to move list elements from one list to another, as it will break + * the originating list. + * + * + * @param newlist the list element to insert + * @param list the list in which to insert. The new element will be the next + * entry in this list + */ +MALI_STATIC_INLINE void _mali_osk_list_add(_mali_osk_list_t *new_entry, _mali_osk_list_t *list) +{ + __mali_osk_list_add(new_entry, list, list->next); +} + +/** @brief Insert a single list element before an entry in a list + * + * As an example, if this is inserted to the head of a list, then this becomes + * the last element of the list. + * + * Do not use to move list elements from one list to another, as it will break + * the originating list. + * + * @param newlist the list element to insert + * @param list the list in which to insert. The new element will be the previous + * entry in this list + */ +MALI_STATIC_INLINE void _mali_osk_list_addtail(_mali_osk_list_t *new_entry, _mali_osk_list_t *list) +{ + __mali_osk_list_add(new_entry, list->prev, list); +} + +/** @brief Remove a single element from a list + * + * The element will no longer be present in the list. The removed list element + * will be uninitialized, and so should not be traversed. It must be + * initialized before further use. + * + * @param list the list element to remove. + */ +MALI_STATIC_INLINE void _mali_osk_list_del(_mali_osk_list_t *list) +{ + __mali_osk_list_del(list->prev, list->next); +} + +/** @brief Remove a single element from a list, and re-initialize it + * + * The element will no longer be present in the list. 
The removed list element
+ * will be initialized, and so can be used as normal.
+ *
+ * @param list the list element to remove and initialize.
+ */
+MALI_STATIC_INLINE void _mali_osk_list_delinit(_mali_osk_list_t *list)
+{
+	__mali_osk_list_del(list->prev, list->next);
+	_mali_osk_list_init(list);
+}
+
+/** @brief Determine whether a list is empty.
+ *
+ * An empty list is one that contains a single element that points to itself.
+ *
+ * @param list the list to check.
+ * @return non-zero if the list is empty, and zero otherwise.
+ */
+MALI_STATIC_INLINE mali_bool _mali_osk_list_empty(_mali_osk_list_t *list)
+{
+	return list->next == list;
+}
+
+/** @brief Move a list element from one list to another.
+ *
+ * The list element must be initialized.
+ *
+ * As an example, moving a list item to the head of a new list causes this item
+ * to be the first element in the new list.
+ *
+ * @param move the list element to move
+ * @param list the new list into which the element will be inserted, as the next
+ * element in the list.
+ */
+MALI_STATIC_INLINE void _mali_osk_list_move(_mali_osk_list_t *move_entry, _mali_osk_list_t *list)
+{
+	__mali_osk_list_del(move_entry->prev, move_entry->next);
+	_mali_osk_list_add(move_entry, list);
+}
+
+/** @brief Move an entire list
+ *
+ * The list element must be initialized.
+ *
+ * Allows you to move a list from one list head to another list head
+ *
+ * @param old_list The existing list head
+ * @param new_list The new list head (must be an empty list)
+ */
+MALI_STATIC_INLINE void _mali_osk_list_move_list(_mali_osk_list_t *old_list, _mali_osk_list_t *new_list)
+{
+	MALI_DEBUG_ASSERT(_mali_osk_list_empty(new_list));
+	if (!_mali_osk_list_empty(old_list)) {
+		new_list->next = old_list->next;
+		new_list->prev = old_list->prev;
+		new_list->next->prev = new_list;
+		new_list->prev->next = new_list;
+		old_list->next = old_list;
+		old_list->prev = old_list;
+	}
+}
+
+/** @brief Find the containing structure of a list
+ *
+ * When traversing a list, this is used to recover the containing structure,
+ * given that it contains a _mali_osk_list_t member.
+ *
+ * Each list must be of structures of one type, and must link the same members
+ * together, otherwise it will not be possible to correctly recover the
+ * structures that the lists link.
+ *
+ * @note no type or memory checking occurs to ensure that a structure does in
+ * fact exist for the list entry, and that it is being recovered with respect
+ * to the correct list member.
+ *
+ * @param ptr the pointer to the _mali_osk_list_t member in this structure
+ * @param type the type of the structure that contains the member
+ * @param member the member of the structure that ptr points to.
+ * @return a pointer to a \a type object which contains the _mali_osk_list_t
+ * \a member, as pointed to by the _mali_osk_list_t \a *ptr.
+ */
+#define _MALI_OSK_LIST_ENTRY(ptr, type, member) \
+	_MALI_OSK_CONTAINER_OF(ptr, type, member)
+
+/** @brief Enumerate a list safely
+ *
+ * With this macro, lists can be enumerated in a 'safe' manner. That is,
+ * entries can be deleted from the list without causing an error during
+ * enumeration. To achieve this, a 'temporary' pointer is required, which must
+ * be provided to the macro.
+ * + * Use it like a 'for()', 'while()' or 'do()' construct, and so it must be + * followed by a statement or compound-statement which will be executed for + * each list entry. + * + * Upon loop completion, providing that an early out was not taken in the + * loop body, then it is guaranteed that ptr->member == list, even if the loop + * body never executed. + * + * @param ptr a pointer to an object of type 'type', which points to the + * structure that contains the currently enumerated list entry. + * @param tmp a pointer to an object of type 'type', which must not be used + * inside the list-execution statement. + * @param list a pointer to a _mali_osk_list_t, from which enumeration will + * begin + * @param type the type of the structure that contains the _mali_osk_list_t + * member that is part of the list to be enumerated. + * @param member the _mali_osk_list_t member of the structure that is part of + * the list to be enumerated. + */ +#define _MALI_OSK_LIST_FOREACHENTRY(ptr, tmp, list, type, member) \ + for (ptr = _MALI_OSK_LIST_ENTRY((list)->next, type, member), \ + tmp = _MALI_OSK_LIST_ENTRY(ptr->member.next, type, member); \ + &ptr->member != (list); \ + ptr = tmp, \ + tmp = _MALI_OSK_LIST_ENTRY(tmp->member.next, type, member)) + +/** @brief Enumerate a list in reverse order safely + * + * This macro is identical to @ref _MALI_OSK_LIST_FOREACHENTRY, except that + * entries are enumerated in reverse order. + * + * @param ptr a pointer to an object of type 'type', which points to the + * structure that contains the currently enumerated list entry. + * @param tmp a pointer to an object of type 'type', which must not be used + * inside the list-execution statement. + * @param list a pointer to a _mali_osk_list_t, from which enumeration will + * begin + * @param type the type of the structure that contains the _mali_osk_list_t + * member that is part of the list to be enumerated. 
+ * @param member the _mali_osk_list_t member of the structure that is part of + * the list to be enumerated. + */ +#define _MALI_OSK_LIST_FOREACHENTRY_REVERSE(ptr, tmp, list, type, member) \ + for (ptr = _MALI_OSK_LIST_ENTRY((list)->prev, type, member), \ + tmp = _MALI_OSK_LIST_ENTRY(ptr->member.prev, type, member); \ + &ptr->member != (list); \ + ptr = tmp, \ + tmp = _MALI_OSK_LIST_ENTRY(tmp->member.prev, type, member)) + +/** @} */ /* end group _mali_osk_list */ + +#ifdef __cplusplus +} +#endif + +#endif /* __MALI_OSK_LIST_H__ */ diff --git a/drivers/gpu/arm/utgard/common/mali_osk_mali.h b/drivers/gpu/arm/utgard/common/mali_osk_mali.h new file mode 100644 index 000000000000..b27fb7dd36ed --- /dev/null +++ b/drivers/gpu/arm/utgard/common/mali_osk_mali.h @@ -0,0 +1,97 @@ +/* + * Copyright (C) 2010-2015 ARM Limited. All rights reserved. + * + * This program is free software and is provided to you under the terms of the GNU General Public License version 2 + * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence. + * + * A copy of the licence is included with the program, and can also be obtained from Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. + */ + +/** + * @file mali_osk_mali.h + * Defines the OS abstraction layer which is specific for the Mali kernel device driver (OSK) + */ + +#ifndef __MALI_OSK_MALI_H__ +#define __MALI_OSK_MALI_H__ + +#include <linux/mali/mali_utgard.h> +#include <mali_osk.h> + +#ifdef __cplusplus +extern "C" { +#endif + +/** @addtogroup _mali_osk_miscellaneous + * @{ */ + +/** @brief Struct with device specific configuration data + */ +typedef struct mali_gpu_device_data _mali_osk_device_data; + +#if defined(CONFIG_MALI_DT) && !defined(CONFIG_MALI_PLAT_SPECIFIC_DT) +/** @brief Initialize those device resources when we use device tree + * + * @return _MALI_OSK_ERR_OK on success, otherwise failure. 
+ */ +_mali_osk_errcode_t _mali_osk_resource_initialize(void); +#endif + +/** @brief Find Mali GPU HW resource + * + * @param addr Address of Mali GPU resource to find + * @param res Storage for resource information if resource is found. + * @return _MALI_OSK_ERR_OK on success, _MALI_OSK_ERR_ITEM_NOT_FOUND if resource is not found + */ +_mali_osk_errcode_t _mali_osk_resource_find(u32 addr, _mali_osk_resource_t *res); + + +/** @brief Find Mali GPU HW base address + * + * @return 0 if resources are found, otherwise the Mali GPU component with lowest address. + */ +uintptr_t _mali_osk_resource_base_address(void); + +/** @brief Find the specific GPU resource. + * + * @return value + * 0x400 if Mali 400 specific GPU resource identified + * 0x450 if Mali 450 specific GPU resource identified + * 0x470 if Mali 470 specific GPU resource identified + * + */ +u32 _mali_osk_identify_gpu_resource(void); + +/** @brief Retrieve the Mali GPU specific data + * + * @return _MALI_OSK_ERR_OK on success, otherwise failure. + */ +_mali_osk_errcode_t _mali_osk_device_data_get(_mali_osk_device_data *data); + +/** @brief Find the pmu domain config from device data. + * + * @param domain_config_array used to store pmu domain config found in device data. + * @param array_size is the size of array domain_config_array. + */ +void _mali_osk_device_data_pmu_config_get(u16 *domain_config_array, int array_size); + +/** @brief Get Mali PMU switch delay + * + *@return pmu switch delay if it is configured + */ +u32 _mali_osk_get_pmu_switch_delay(void); + +/** @brief Determines if Mali GPU has been configured with shared interrupts. + * + * @return MALI_TRUE if shared interrupts, MALI_FALSE if not. 
+ */ +mali_bool _mali_osk_shared_interrupts(void); + +/** @} */ /* end group _mali_osk_miscellaneous */ + +#ifdef __cplusplus +} +#endif + +#endif /* __MALI_OSK_MALI_H__ */ diff --git a/drivers/gpu/arm/utgard/common/mali_osk_profiling.h b/drivers/gpu/arm/utgard/common/mali_osk_profiling.h new file mode 100644 index 000000000000..10f4dc552b03 --- /dev/null +++ b/drivers/gpu/arm/utgard/common/mali_osk_profiling.h @@ -0,0 +1,146 @@ +/* + * Copyright (C) 2010-2015 ARM Limited. All rights reserved. + * + * This program is free software and is provided to you under the terms of the GNU General Public License version 2 + * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence. + * + * A copy of the licence is included with the program, and can also be obtained from Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. + */ + +#ifndef __MALI_OSK_PROFILING_H__ +#define __MALI_OSK_PROFILING_H__ + +#if defined(CONFIG_MALI400_PROFILING) && defined (CONFIG_TRACEPOINTS) + +#include "mali_linux_trace.h" +#include "mali_profiling_events.h" +#include "mali_profiling_gator_api.h" + +#define MALI_PROFILING_MAX_BUFFER_ENTRIES 1048576 + +#define MALI_PROFILING_NO_HW_COUNTER = ((u32)-1) + +/** @defgroup _mali_osk_profiling External profiling connectivity + * @{ */ + +/** + * Initialize the profiling module. + * @return _MALI_OSK_ERR_OK on success, otherwise failure. + */ +_mali_osk_errcode_t _mali_osk_profiling_init(mali_bool auto_start); + +/* + * Terminate the profiling module. + */ +void _mali_osk_profiling_term(void); + +/** + * Stop the profile sampling operation. + */ +void _mali_osk_profiling_stop_sampling(u32 pid); + +/** + * Start recording profiling data + * + * The specified limit will determine how large the capture buffer is. + * MALI_PROFILING_MAX_BUFFER_ENTRIES determines the maximum size allowed by the device driver. 
+ *
+ * @param limit The desired maximum number of events to record on input, the actual maximum on output.
+ * @return _MALI_OSK_ERR_OK on success, otherwise failure.
+ */
+_mali_osk_errcode_t _mali_osk_profiling_start(u32 *limit);
+
+/**
+ * Add a profiling event
+ *
+ * @param event_id The event identifier.
+ * @param data0 First data parameter, depending on event_id specified.
+ * @param data1 Second data parameter, depending on event_id specified.
+ * @param data2 Third data parameter, depending on event_id specified.
+ * @param data3 Fourth data parameter, depending on event_id specified.
+ * @param data4 Fifth data parameter, depending on event_id specified.
+ */
+void _mali_osk_profiling_add_event(u32 event_id, u32 data0, u32 data1, u32 data2, u32 data3, u32 data4);
+
+/**
+ * Report a hardware counter event.
+ *
+ * @param counter_id The ID of the counter.
+ * @param value The value of the counter.
+ */
+
+/* Call Linux tracepoint directly */
+#define _mali_osk_profiling_report_hw_counter(counter_id, value) trace_mali_hw_counter(counter_id, value)
+
+/**
+ * Report SW counters
+ *
+ * @param counters array of counter values
+ */
+void _mali_osk_profiling_report_sw_counters(u32 *counters);
+
+void _mali_osk_profiling_record_global_counters(int counter_id, u32 value);
+
+/**
+ * Stop recording profiling data
+ *
+ * @param count Returns the number of recorded events.
+ * @return _MALI_OSK_ERR_OK on success, otherwise failure.
+ */
+_mali_osk_errcode_t _mali_osk_profiling_stop(u32 *count);
+
+/**
+ * Retrieves the number of events that can be retrieved
+ *
+ * @return The number of recorded events that can be retrieved.
+ */
+u32 _mali_osk_profiling_get_count(void);
+
+/**
+ * Retrieve an event
+ *
+ * @param index Event index (start with 0 and continue until this function fails to retrieve all events)
+ * @param timestamp The timestamp for the retrieved event will be stored here.
+ * @param event_id The event ID for the retrieved event will be stored here.
+ * @param data The 5 data values for the retrieved event will be stored here.
+ * @return _MALI_OSK_ERR_OK on success, otherwise failure.
+ */
+_mali_osk_errcode_t _mali_osk_profiling_get_event(u32 index, u64 *timestamp, u32 *event_id, u32 data[5]);
+
+/**
+ * Clear the recorded buffer.
+ *
+ * This is needed in order to start another recording.
+ *
+ * @return _MALI_OSK_ERR_OK on success, otherwise failure.
+ */
+_mali_osk_errcode_t _mali_osk_profiling_clear(void);
+
+/**
+ * Checks if a recording of profiling data is in progress
+ *
+ * @return MALI_TRUE if recording of profiling data is in progress, MALI_FALSE if not
+ */
+mali_bool _mali_osk_profiling_is_recording(void);
+
+/**
+ * Checks if profiling data is available for retrieval
+ *
+ * @return MALI_TRUE if profiling data is available, MALI_FALSE if not
+ */
+mali_bool _mali_osk_profiling_have_recording(void);
+
+/** @} */ /* end group _mali_osk_profiling */
+
+#else /* defined(CONFIG_MALI400_PROFILING) && defined(CONFIG_TRACEPOINTS) */
+
+/* Dummy add_event, for when profiling is disabled. */
+
+#define _mali_osk_profiling_add_event(event_id, data0, data1, data2, data3, data4)
+
+#endif /* defined(CONFIG_MALI400_PROFILING) && defined(CONFIG_TRACEPOINTS) */
+
+#endif /* __MALI_OSK_PROFILING_H__ */
+
+
diff --git a/drivers/gpu/arm/utgard/common/mali_osk_types.h b/drivers/gpu/arm/utgard/common/mali_osk_types.h
new file mode 100644
index 000000000000..b65ad29e16c0
--- /dev/null
+++ b/drivers/gpu/arm/utgard/common/mali_osk_types.h
@@ -0,0 +1,471 @@
+/*
+ * Copyright (C) 2010-2015 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ * + * A copy of the licence is included with the program, and can also be obtained from Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. + */ + +/** + * @file mali_osk_types.h + * Defines types of the OS abstraction layer for the kernel device driver (OSK) + */ + +#ifndef __MALI_OSK_TYPES_H__ +#define __MALI_OSK_TYPES_H__ + +#ifdef __cplusplus +extern "C" { +#endif + +/** + * @addtogroup uddapi Unified Device Driver (UDD) APIs + * + * @{ + */ + +/** + * @addtogroup oskapi UDD OS Abstraction for Kernel-side (OSK) APIs + * + * @{ + */ + +/** @defgroup _mali_osk_miscellaneous OSK Miscellaneous functions, constants and types + * @{ */ + +/* Define integer types used by OSK. Note: these currently clash with Linux so we only define them if not defined already */ +#ifndef __KERNEL__ +typedef unsigned char u8; +typedef signed char s8; +typedef unsigned short u16; +typedef signed short s16; +typedef unsigned int u32; +typedef signed int s32; +typedef unsigned long long u64; +#define BITS_PER_LONG (sizeof(long)*8) +#else +/* Ensure Linux types u32, etc. are defined */ +#include <linux/types.h> +#endif + +/** @brief Mali Boolean type which uses MALI_TRUE and MALI_FALSE + */ +typedef unsigned long mali_bool; + +#ifndef MALI_TRUE +#define MALI_TRUE ((mali_bool)1) +#endif + +#ifndef MALI_FALSE +#define MALI_FALSE ((mali_bool)0) +#endif + +#define MALI_HW_CORE_NO_COUNTER ((u32)-1) + + +#define MALI_S32_MAX 0x7fffffff + +/** + * @brief OSK Error codes + * + * Each OS may use its own set of error codes, and may require that the + * User/Kernel interface take certain error code. This means that the common + * error codes need to be sufficiently rich to pass the correct error code + * thorugh from the OSK to U/K layer, across all OSs. + * + * The result is that some error codes will appear redundant on some OSs. + * Under all OSs, the OSK layer must translate native OS error codes to + * _mali_osk_errcode_t codes. 
Similarly, the U/K layer must translate from + * _mali_osk_errcode_t codes to native OS error codes. + */ +typedef enum { + _MALI_OSK_ERR_OK = 0, /**< Success. */ + _MALI_OSK_ERR_FAULT = -1, /**< General non-success */ + _MALI_OSK_ERR_INVALID_FUNC = -2, /**< Invalid function requested through User/Kernel interface (e.g. bad IOCTL number) */ + _MALI_OSK_ERR_INVALID_ARGS = -3, /**< Invalid arguments passed through User/Kernel interface */ + _MALI_OSK_ERR_NOMEM = -4, /**< Insufficient memory */ + _MALI_OSK_ERR_TIMEOUT = -5, /**< Timeout occurred */ + _MALI_OSK_ERR_RESTARTSYSCALL = -6, /**< Special: On certain OSs, must report when an interruptable mutex is interrupted. Ignore otherwise. */ + _MALI_OSK_ERR_ITEM_NOT_FOUND = -7, /**< Table Lookup failed */ + _MALI_OSK_ERR_BUSY = -8, /**< Device/operation is busy. Try again later */ + _MALI_OSK_ERR_UNSUPPORTED = -9, /**< Optional part of the interface used, and is unsupported */ +} _mali_osk_errcode_t; + +/** @} */ /* end group _mali_osk_miscellaneous */ + +/** @defgroup _mali_osk_wq OSK work queues + * @{ */ + +/** @brief Private type for work objects */ +typedef struct _mali_osk_wq_work_s _mali_osk_wq_work_t; +typedef struct _mali_osk_wq_delayed_work_s _mali_osk_wq_delayed_work_t; + +/** @brief Work queue handler function + * + * This function type is called when the work is scheduled by the work queue, + * e.g. as an IRQ bottom-half handler. + * + * Refer to \ref _mali_osk_wq_schedule_work() for more information on the + * work-queue and work handlers. + * + * @param arg resource-specific data + */ +typedef void (*_mali_osk_wq_work_handler_t)(void *arg); + +/* @} */ /* end group _mali_osk_wq */ + +/** @defgroup _mali_osk_irq OSK IRQ handling + * @{ */ + +/** @brief Private type for IRQ handling objects */ +typedef struct _mali_osk_irq_t_struct _mali_osk_irq_t; + +/** @brief Optional function to trigger an irq from a resource + * + * This function is implemented by the common layer to allow probing of a resource's IRQ. 
+ * @param arg resource-specific data */
+typedef void (*_mali_osk_irq_trigger_t)(void *arg);
+
+/** @brief Optional function to acknowledge an irq from a resource
+ *
+ * This function is implemented by the common layer to allow probing of a resource's IRQ.
+ * @param arg resource-specific data
+ * @return _MALI_OSK_ERR_OK if the IRQ was successful, or a suitable _mali_osk_errcode_t on failure. */
+typedef _mali_osk_errcode_t (*_mali_osk_irq_ack_t)(void *arg);
+
+/** @brief IRQ 'upper-half' handler callback.
+ *
+ * This function is implemented by the common layer to do the initial handling of a
+ * resource's IRQ. This maps on to the concept of an ISR that does the minimum
+ * work necessary before handing off to an IST.
+ *
+ * The communication of the resource-specific data from the ISR to the IST is
+ * handled by the OSK implementation.
+ *
+ * On most systems, the IRQ upper-half handler executes in IRQ context.
+ * Therefore, the system may have restrictions about what can be done in this
+ * context
+ *
+ * If an IRQ upper-half handler requires more work to be done than can be
+ * achieved in an IRQ context, then it may defer the work with
+ * _mali_osk_wq_schedule_work(). Refer to \ref _mali_osk_wq_create_work() for
+ * more information.
+ *
+ * @param arg resource-specific data
+ * @return _MALI_OSK_ERR_OK if the IRQ was correctly handled, or a suitable
+ * _mali_osk_errcode_t otherwise.
+ */
+typedef _mali_osk_errcode_t (*_mali_osk_irq_uhandler_t)(void *arg);
+
+
+/** @} */ /* end group _mali_osk_irq */
+
+
+/** @defgroup _mali_osk_atomic OSK Atomic counters
+ * @{ */
+
+/** @brief Public type of atomic counters
+ *
+ * This is public for allocation on stack. On systems that support it, this is just a single 32-bit value.
+ * On others, it could be encapsulating an object stored elsewhere.
+ * + * Regardless of implementation, the \ref _mali_osk_atomic functions \b must be used + * for all accesses to the variable's value, even if atomicity is not required. + * Do not access u.val or u.obj directly. + */ +typedef struct { + union { + u32 val; + void *obj; + } u; +} _mali_osk_atomic_t; +/** @} */ /* end group _mali_osk_atomic */ + + +/** @defgroup _mali_osk_lock OSK Mutual Exclusion Locks + * @{ */ + + +/** @brief OSK Mutual Exclusion Lock ordered list + * + * This lists the various types of locks in the system and is used to check + * that locks are taken in the correct order. + * + * - Holding more than one lock of the same order at the same time is not + * allowed. + * - Taking a lock of a lower order than the highest-order lock currently held + * is not allowed. + * + */ +typedef enum { + /* || Locks || */ + /* || must be || */ + /* _||_ taken in _||_ */ + /* \ / this \ / */ + /* \/ order! \/ */ + + _MALI_OSK_LOCK_ORDER_FIRST = 0, + + _MALI_OSK_LOCK_ORDER_SESSIONS, + _MALI_OSK_LOCK_ORDER_MEM_SESSION, + _MALI_OSK_LOCK_ORDER_MEM_INFO, + _MALI_OSK_LOCK_ORDER_MEM_PT_CACHE, + _MALI_OSK_LOCK_ORDER_DESCRIPTOR_MAP, + _MALI_OSK_LOCK_ORDER_PM_EXECUTION, + _MALI_OSK_LOCK_ORDER_EXECUTOR, + _MALI_OSK_LOCK_ORDER_TIMELINE_SYSTEM, + _MALI_OSK_LOCK_ORDER_SCHEDULER, + _MALI_OSK_LOCK_ORDER_SCHEDULER_DEFERRED, + _MALI_OSK_LOCK_ORDER_PROFILING, + _MALI_OSK_LOCK_ORDER_L2, + _MALI_OSK_LOCK_ORDER_L2_COMMAND, + _MALI_OSK_LOCK_ORDER_UTILIZATION, + _MALI_OSK_LOCK_ORDER_SESSION_PENDING_JOBS, + _MALI_OSK_LOCK_ORDER_PM_STATE, + + _MALI_OSK_LOCK_ORDER_LAST, +} _mali_osk_lock_order_t; + + +/** @brief OSK Mutual Exclusion Lock flags type + * + * - Any lock can use the order parameter. 
 */
typedef enum {
	_MALI_OSK_LOCKFLAG_UNORDERED = 0x1, /**< Indicate that the order of this lock should not be checked */
	_MALI_OSK_LOCKFLAG_ORDERED = 0x2,
	/** @enum _mali_osk_lock_flags_t
	 *
	 * Flags from 0x10000--0x80000000 are RESERVED for User-mode */

} _mali_osk_lock_flags_t;

/** @brief Mutual Exclusion Lock Mode Optimization hint
 *
 * The lock mode is used to implement the read/write locking of locks when we call
 * functions _mali_osk_mutex_rw_init/wait/signal/term/. In this case, the RO mode can
 * be used to allow multiple concurrent readers, but no writers. The RW mode is used for
 * writers, and so will wait for all readers to release the lock (if any present).
 * Further readers and writers will wait until the writer releases the lock.
 *
 * The mode is purely an optimization hint: for example, it is permissible for
 * all locks to behave in RW mode, regardless of that supplied.
 *
 * It is an error to attempt to use locks in anything other than RW mode when
 * calling functions _mali_osk_mutex_rw_wait/signal().
 *
 */
typedef enum {
	_MALI_OSK_LOCKMODE_UNDEF = -1, /**< Undefined lock mode. For internal use only */
	_MALI_OSK_LOCKMODE_RW = 0x0, /**< Read-write mode, default. All readers and writers are mutually-exclusive */
	_MALI_OSK_LOCKMODE_RO, /**< Read-only mode, to support multiple concurrent readers, but mutual exclusion in the presence of writers. */
	/** @enum _mali_osk_lock_mode_t
	 *
	 * Lock modes 0x40--0x7F are RESERVED for User-mode */
} _mali_osk_lock_mode_t;

/** @brief Private types for Mutual Exclusion lock objects */
typedef struct _mali_osk_lock_debug_s _mali_osk_lock_debug_t;
typedef struct _mali_osk_spinlock_s _mali_osk_spinlock_t;
typedef struct _mali_osk_spinlock_irq_s _mali_osk_spinlock_irq_t;
typedef struct _mali_osk_mutex_s _mali_osk_mutex_t;
typedef struct _mali_osk_mutex_rw_s _mali_osk_mutex_rw_t;

/** @} */ /* end group _mali_osk_lock */

/** @defgroup _mali_osk_low_level_memory OSK Low-level Memory Operations
 * @{ */

/**
 * @brief Private data type for use in IO accesses to/from devices.
 *
 * This represents some range that is accessible from the device. Examples
 * include:
 * - Device Registers, which could be readable and/or writeable.
 * - Memory that the device has access to, for storing configuration structures.
 *
 * Access to this range must be made through the _mali_osk_mem_ioread32() and
 * _mali_osk_mem_iowrite32() functions.
 */
typedef struct _mali_io_address *mali_io_address;

/** @defgroup _MALI_OSK_CPU_PAGE CPU Physical page size macros.
 *
 * The order of the page size is supplied for
 * ease of use by algorithms that might require it, since it is easier to know
 * it ahead of time rather than calculating it.
 *
 * The Mali Page Mask macro masks off the lower bits of a physical address to
 * give the start address of the page for that physical address.
 *
 * @note The Mali device driver code is designed for systems with 4KB page size.
 * Changing these macros will not make the entire Mali device driver work with
 * page sizes other than 4KB.
 *
 * @note The CPU Physical Page Size has been assumed to be the same as the Mali
 * Physical Page Size.
 *
 * @{
 */

/** CPU Page Order, as log to base 2 of the Page size. @see _MALI_OSK_CPU_PAGE_SIZE */
#define _MALI_OSK_CPU_PAGE_ORDER ((u32)12)
/** CPU Page Size, in bytes. */
#define _MALI_OSK_CPU_PAGE_SIZE (((u32)1) << (_MALI_OSK_CPU_PAGE_ORDER))
/** CPU Page Mask, which masks off the offset within a page */
#define _MALI_OSK_CPU_PAGE_MASK (~((((u32)1) << (_MALI_OSK_CPU_PAGE_ORDER)) - ((u32)1)))
/** @} */ /* end of group _MALI_OSK_CPU_PAGE */

/** @defgroup _MALI_OSK_MALI_PAGE Mali Physical Page size macros
 *
 * Mali Physical page size macros. The order of the page size is supplied for
 * ease of use by algorithms that might require it, since it is easier to know
 * it ahead of time rather than calculating it.
 *
 * The Mali Page Mask macro masks off the lower bits of a physical address to
 * give the start address of the page for that physical address.
 *
 * @note The Mali device driver code is designed for systems with 4KB page size.
 * Changing these macros will not make the entire Mali device driver work with
 * page sizes other than 4KB.
 *
 * @note The Mali Physical Page Size has been assumed to be the same as the CPU
 * Physical Page Size.
 *
 * @{
 */

/** Mali Page Order, as log to base 2 of the Page size. @see _MALI_OSK_MALI_PAGE_SIZE */
#define _MALI_OSK_MALI_PAGE_ORDER PAGE_SHIFT
/** Mali Page Size, in bytes. */
#define _MALI_OSK_MALI_PAGE_SIZE PAGE_SIZE
/** Mali Page Mask, which masks off the offset within a page */
#define _MALI_OSK_MALI_PAGE_MASK PAGE_MASK
/** @} */ /* end of group _MALI_OSK_MALI_PAGE*/

/** @brief flags for mapping a user-accessible memory range
 *
 * Where a function with prefix '_mali_osk_mem_mapregion' accepts flags as one
 * of the function parameters, it will use one of these. These allow per-page
 * control over mappings. Compare with the mali_memory_allocation_flag type,
 * which acts over an entire range
 *
 * These may be OR'd together with bitwise OR (|), but must be cast back into
 * the type after OR'ing.
 */
typedef enum {
	_MALI_OSK_MEM_MAPREGION_FLAG_OS_ALLOCATED_PHYSADDR = 0x1, /**< Physical address is OS Allocated */
} _mali_osk_mem_mapregion_flags_t;
/** @} */ /* end group _mali_osk_low_level_memory */

/** @defgroup _mali_osk_notification OSK Notification Queues
 * @{ */

/** @brief Private type for notification queue objects */
typedef struct _mali_osk_notification_queue_t_struct _mali_osk_notification_queue_t;

/** @brief Public notification data object type */
typedef struct _mali_osk_notification_t_struct {
	u32 notification_type;  /**< The notification type */
	u32 result_buffer_size; /**< Size of the result buffer to copy to user space */
	void *result_buffer;    /**< Buffer containing any type specific data */
} _mali_osk_notification_t;

/** @} */ /* end group _mali_osk_notification */


/** @defgroup _mali_osk_timer OSK Timer Callbacks
 * @{ */

/** @brief Function to call when a timer expires
 *
 * When a timer expires, this function is called. Note that on many systems,
 * a timer callback will be executed in IRQ context. Therefore, restrictions
 * may apply on what can be done inside the timer callback.
 *
 * If a timer requires more work to be done than can be achieved in an IRQ
 * context, then it may defer the work with a work-queue. For example, it may
 * use \ref _mali_osk_wq_schedule_work() to make use of a bottom-half handler
 * to carry out the remaining work.
 *
 * Stopping the timer with \ref _mali_osk_timer_del() blocks on completion of
 * the callback. Therefore, the callback may not obtain any mutexes also held
 * by any callers of _mali_osk_timer_del(). Otherwise, a deadlock may occur.
 *
 * @param arg Function-specific data */
typedef void (*_mali_osk_timer_callback_t)(void *arg);

/** @brief Private type for Timer Callback Objects */
typedef struct _mali_osk_timer_t_struct _mali_osk_timer_t;
/** @} */ /* end group _mali_osk_timer */


/** @addtogroup _mali_osk_list OSK Doubly-Linked Circular Lists
 * @{ */

/** @brief Public List objects.
 *
 * To use, add a _mali_osk_list_t member to the structure that may become part
 * of a list. When traversing the _mali_osk_list_t objects, use the
 * _MALI_OSK_CONTAINER_OF() macro to recover the structure from its
 *_mali_osk_list_t member
 *
 * Each structure may have multiple _mali_osk_list_t members, so that the
 * structure is part of multiple lists. When traversing lists, ensure that the
 * correct _mali_osk_list_t member is used, because type-checking will be
 * lost by the compiler.
 */
typedef struct _mali_osk_list_s {
	struct _mali_osk_list_s *next;
	struct _mali_osk_list_s *prev;
} _mali_osk_list_t;
/** @} */ /* end group _mali_osk_list */

/** @addtogroup _mali_osk_miscellaneous
 * @{ */

/** @brief resource description struct
 *
 * Platform independent representation of a Mali HW resource
 */
typedef struct _mali_osk_resource {
	const char *description;        /**< short description of the resource */
	uintptr_t base;                 /**< Physical base address of the resource, as seen by Mali resources.
*/ + const char *irq_name; /**< Name of irq belong to this resource */ + u32 irq; /**< IRQ number delivered to the CPU, or -1 to tell the driver to probe for it (if possible) */ +} _mali_osk_resource_t; +/** @} */ /* end group _mali_osk_miscellaneous */ + +/** @defgroup _mali_osk_wait_queue OSK Wait Queue functionality + * @{ */ +/** @brief Private type for wait queue objects */ +typedef struct _mali_osk_wait_queue_t_struct _mali_osk_wait_queue_t; +/** @} */ /* end group _mali_osk_wait_queue */ + +/** @} */ /* end group osuapi */ + +/** @} */ /* end group uddapi */ + +/** @brief Mali print ctx type which uses seq_file + */ +typedef struct seq_file _mali_osk_print_ctx; + +#define _MALI_OSK_BITMAP_INVALIDATE_INDEX -1 + +typedef struct _mali_osk_bitmap { + u32 reserve; + u32 last; + u32 max; + u32 avail; + _mali_osk_spinlock_t *lock; + unsigned long *table; +} _mali_osk_bitmap_t; + + +#ifdef __cplusplus +} +#endif + +#endif /* __MALI_OSK_TYPES_H__ */ diff --git a/drivers/gpu/arm/utgard/common/mali_pm.c b/drivers/gpu/arm/utgard/common/mali_pm.c new file mode 100644 index 000000000000..dbd94d310741 --- /dev/null +++ b/drivers/gpu/arm/utgard/common/mali_pm.c @@ -0,0 +1,1362 @@ +/* + * Copyright (C) 2011-2015 ARM Limited. All rights reserved. + * + * This program is free software and is provided to you under the terms of the GNU General Public License version 2 + * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence. + * + * A copy of the licence is included with the program, and can also be obtained from Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. 
 */

#include "mali_pm.h"
#include "mali_kernel_common.h"
#include "mali_osk.h"
#include "mali_osk_mali.h"
#include "mali_scheduler.h"
#include "mali_group.h"
#include "mali_pm_domain.h"
#include "mali_pmu.h"

#include "mali_executor.h"
#include "mali_control_timer.h"

#if defined(DEBUG)
/* Debug-only statistics: number of runtime resumes and PM update passes. */
u32 num_pm_runtime_resume = 0;
u32 num_pm_updates = 0;
u32 num_pm_updates_up = 0;
u32 num_pm_updates_down = 0;
#endif

#define MALI_PM_DOMAIN_DUMMY_MASK (1 << MALI_DOMAIN_INDEX_DUMMY)

/* lock protecting power state (including pm_domains) */
static _mali_osk_spinlock_irq_t *pm_lock_state = NULL;

/* the wanted domain mask (protected by pm_lock_state) */
static u32 pd_mask_wanted = 0;

/* used to defer the actual power changes to a work queue */
static _mali_osk_wq_work_t *pm_work = NULL;

/* lock protecting power change execution */
static _mali_osk_mutex_t *pm_lock_exec = NULL;

/* PMU domains which are actually powered on (protected by pm_lock_exec) */
static u32 pmu_mask_current = 0;

/*
 * domains which are marked as powered on (protected by pm_lock_exec)
 * This can be different from pmu_mask_current right after GPU power on
 * if the PMU domains default to powered up.
 */
static u32 pd_mask_current = 0;

/* Per-domain-index PMU bit masks; last entry is the dummy (non-PMU) domain. */
static u16 domain_config[MALI_MAX_NUMBER_OF_DOMAINS] = {
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	1 << MALI_DOMAIN_INDEX_DUMMY
};

/* The relative core power cost */
#define MALI_GP_COST 3
#define MALI_PP_COST 6
#define MALI_L2_COST 1

/*
 * We have MALI_MAX_NUMBER_OF_PP_PHYSICAL_CORES + 1 rows in this matrix
 * because we must store the mask of different pp cores: 0, 1, 2, 3, 4, 5, 6, 7, 8.
 */
static int mali_pm_domain_power_cost_result[MALI_MAX_NUMBER_OF_PHYSICAL_PP_GROUPS + 1][MALI_MAX_NUMBER_OF_DOMAINS];
/*
 * Keep track of runtime PM state, so that we know
 * how to resume during OS resume.
 */
#ifdef CONFIG_PM_RUNTIME
static mali_bool mali_pm_runtime_active = MALI_FALSE;
#else
/* When the kernel does not enable PM_RUNTIME, keep the flag always true,
 * since the GPU will then never be powered off by runtime PM */
static mali_bool mali_pm_runtime_active = MALI_TRUE;
#endif

static void mali_pm_state_lock(void);
static void mali_pm_state_unlock(void);
static _mali_osk_errcode_t mali_pm_create_pm_domains(void);
static void mali_pm_set_pmu_domain_config(void);
static u32 mali_pm_get_registered_cores_mask(void);
static void mali_pm_update_sync_internal(void);
static mali_bool mali_pm_common_suspend(void);
static void mali_pm_update_work(void *data);
#if defined(DEBUG)
const char *mali_pm_mask_to_string(u32 mask);
const char *mali_pm_group_stats_to_string(void);
#endif

/*
 * Create the PM locks, the deferred-update work item and the power domains.
 * On any failure, mali_pm_terminate() cleans up whatever was created and
 * _MALI_OSK_ERR_FAULT (or the domain-creation error) is returned.
 */
_mali_osk_errcode_t mali_pm_initialize(void)
{
	_mali_osk_errcode_t err;
	struct mali_pmu_core *pmu;

	pm_lock_state = _mali_osk_spinlock_irq_init(_MALI_OSK_LOCKFLAG_ORDERED,
			_MALI_OSK_LOCK_ORDER_PM_STATE);
	if (NULL == pm_lock_state) {
		mali_pm_terminate();
		return _MALI_OSK_ERR_FAULT;
	}

	pm_lock_exec = _mali_osk_mutex_init(_MALI_OSK_LOCKFLAG_ORDERED,
					    _MALI_OSK_LOCK_ORDER_PM_STATE);
	if (NULL == pm_lock_exec) {
		mali_pm_terminate();
		return _MALI_OSK_ERR_FAULT;
	}

	pm_work = _mali_osk_wq_create_work(mali_pm_update_work, NULL);
	if (NULL == pm_work) {
		mali_pm_terminate();
		return _MALI_OSK_ERR_FAULT;
	}

	pmu = mali_pmu_get_global_pmu_core();
	if (NULL != pmu) {
		/*
		 * We have a Mali PMU, set the correct domain
		 * configuration (default or custom)
		 */

		u32 registered_cores_mask;

		mali_pm_set_pmu_domain_config();

		registered_cores_mask = mali_pm_get_registered_cores_mask();
		mali_pmu_set_registered_cores_mask(pmu, registered_cores_mask);

		MALI_DEBUG_ASSERT(0 == pd_mask_wanted);
	}

	/* Create all power domains needed (at least one dummy domain) */
	err = mali_pm_create_pm_domains();
	if (_MALI_OSK_ERR_OK != err) {
		mali_pm_terminate();
		return err;
	}

	return _MALI_OSK_ERR_OK;
}

/* Tear down, in reverse order, everything mali_pm_initialize() created.
 * Safe to call with partially-initialized state (NULL checks throughout). */
void mali_pm_terminate(void)
{
	if (NULL != pm_work) {
		_mali_osk_wq_delete_work(pm_work);
		pm_work = NULL;
	}

	mali_pm_domain_terminate();

	if (NULL != pm_lock_exec) {
		_mali_osk_mutex_term(pm_lock_exec);
		pm_lock_exec = NULL;
	}

	if (NULL != pm_lock_state) {
		_mali_osk_spinlock_irq_term(pm_lock_state);
		pm_lock_state = NULL;
	}
}

/*
 * Register an L2 cache with the PM domain configured for domain_index.
 * If no PMU domain is configured for that index, the L2 is placed in the
 * always-available dummy domain instead.
 */
struct mali_pm_domain *mali_pm_register_l2_cache(u32 domain_index,
		struct mali_l2_cache_core *l2_cache)
{
	struct mali_pm_domain *domain;

	domain = mali_pm_domain_get_from_mask(domain_config[domain_index]);
	if (NULL == domain) {
		MALI_DEBUG_ASSERT(0 == domain_config[domain_index]);
		domain = mali_pm_domain_get_from_index(
				 MALI_DOMAIN_INDEX_DUMMY);
		domain_config[domain_index] = MALI_PM_DOMAIN_DUMMY_MASK;
	} else {
		MALI_DEBUG_ASSERT(0 != domain_config[domain_index]);
	}

	MALI_DEBUG_ASSERT(NULL != domain);

	mali_pm_domain_add_l2_cache(domain, l2_cache);

	return domain; /* return the actual domain this was registered in */
}

/*
 * Register a GP/PP group with the PM domain configured for domain_index.
 * Falls back to the dummy domain when no PMU domain is configured,
 * mirroring mali_pm_register_l2_cache().
 */
struct mali_pm_domain *mali_pm_register_group(u32 domain_index,
		struct mali_group *group)
{
	struct mali_pm_domain *domain;

	domain = mali_pm_domain_get_from_mask(domain_config[domain_index]);
	if (NULL == domain) {
		MALI_DEBUG_ASSERT(0 == domain_config[domain_index]);
		domain = mali_pm_domain_get_from_index(
				 MALI_DOMAIN_INDEX_DUMMY);
		domain_config[domain_index] = MALI_PM_DOMAIN_DUMMY_MASK;
	} else {
		MALI_DEBUG_ASSERT(0 != domain_config[domain_index]);
	}

	MALI_DEBUG_ASSERT(NULL != domain);

	mali_pm_domain_add_group(domain, group);

	return domain; /* return the actual domain this was registered in */
}

/*
 * Take a reference on each of num_domains domains, marking them as wanted.
 * Returns MALI_TRUE only if every referenced domain (and its group, when
 * given) is already powered on; MALI_FALSE means the caller must wait for
 * a deferred power-up.
 */
mali_bool mali_pm_get_domain_refs(struct mali_pm_domain **domains,
				  struct mali_group **groups,
				  u32 num_domains)
{
	mali_bool ret = MALI_TRUE; /* Assume all is powered on instantly */
	u32 i;

	mali_pm_state_lock();

	for (i = 0; i < num_domains; i++) {
		MALI_DEBUG_ASSERT_POINTER(domains[i]);
		pd_mask_wanted |= mali_pm_domain_ref_get(domains[i]);
		if (MALI_FALSE == mali_pm_domain_power_is_on(domains[i])) {
			/*
			 * Tell caller that the corresponding group
			 * was not already powered on.
			 */
			ret = MALI_FALSE;
		} else {
			/*
			 * There is a time gap between we power on the domain and
			 * set the power state of the corresponding groups to be on.
			 */
			if (NULL != groups[i] &&
			    MALI_FALSE == mali_group_power_is_on(groups[i])) {
				ret = MALI_FALSE;
			}
		}
	}

	MALI_DEBUG_PRINT(3, ("PM: wanted domain mask = 0x%08X (get refs)\n", pd_mask_wanted));

	mali_pm_state_unlock();

	return ret;
}

/*
 * Drop a reference on each of num_domains domains. Returns MALI_TRUE when
 * at least one domain reached zero references (i.e. it can now be powered
 * down), MALI_FALSE when all domains must stay on.
 */
mali_bool mali_pm_put_domain_refs(struct mali_pm_domain **domains,
				  u32 num_domains)
{
	u32 mask = 0;
	mali_bool ret;
	u32 i;

	mali_pm_state_lock();

	for (i = 0; i < num_domains; i++) {
		MALI_DEBUG_ASSERT_POINTER(domains[i]);
		mask |= mali_pm_domain_ref_put(domains[i]);
	}

	if (0 == mask) {
		/* return false, all domains should still stay on */
		ret = MALI_FALSE;
	} else {
		/* Assert that we are dealing with a change */
		MALI_DEBUG_ASSERT((pd_mask_wanted & mask) == mask);

		/* Update our desired domain mask */
		pd_mask_wanted &= ~mask;

		/* return true; one or more domains can now be powered down */
		ret = MALI_TRUE;
	}

	MALI_DEBUG_PRINT(3, ("PM: wanted domain mask = 0x%08X (put refs)\n", pd_mask_wanted));

	mali_pm_state_unlock();

	return ret;
}

/* Take a runtime PM reference and force all PMU domains on for HW init. */
void mali_pm_init_begin(void)
{
	struct mali_pmu_core *pmu = mali_pmu_get_global_pmu_core();

	_mali_osk_pm_dev_ref_get_sync();

	/* Ensure all PMU domains are on */
	if (NULL != pmu) {
		mali_pmu_power_up_all(pmu);
	}
}

/* Counterpart of mali_pm_init_begin(): power all PMU domains off and
 * release the runtime PM reference taken there. */
void mali_pm_init_end(void)
{
	struct mali_pmu_core *pmu = mali_pmu_get_global_pmu_core();

	/* Ensure all PMU domains are off */
	if (NULL != pmu) {
		mali_pmu_power_down_all(pmu);
	}

	_mali_osk_pm_dev_ref_put();
}

/* Synchronously execute any pending power domain changes (holds pm_lock_exec). */
void mali_pm_update_sync(void)
{
	mali_pm_exec_lock();

	if (MALI_TRUE ==
	    mali_pm_runtime_active) {
		/*
		 * Only update if GPU is powered on.
		 * Deactivation of the last group will result in both a
		 * deferred runtime PM suspend operation and
		 * deferred execution of this function.
		 * mali_pm_runtime_active will be false if runtime PM
		 * executed first and thus the GPU is now fully powered off.
		 */
		mali_pm_update_sync_internal();
	}

	mali_pm_exec_unlock();
}

/* Queue a deferred mali_pm_update_sync() on the PM work queue. */
void mali_pm_update_async(void)
{
	_mali_osk_wq_schedule_work(pm_work);
}

/*
 * Handle OS (system) suspend: stop job execution, optionally suspend the
 * control timer, and force all power domains down via
 * mali_pm_common_suspend() (which must succeed here — asserted).
 */
void mali_pm_os_suspend(mali_bool os_suspend)
{
	int ret;

	MALI_DEBUG_PRINT(3, ("Mali PM: OS suspend\n"));

	/* Suspend execution of all jobs, and go to inactive state */
	mali_executor_suspend();

	if (os_suspend) {
		mali_control_timer_suspend(MALI_TRUE);
	}

	mali_pm_exec_lock();

	ret = mali_pm_common_suspend();

	MALI_DEBUG_ASSERT(MALI_TRUE == ret);
	MALI_IGNORE(ret);

	mali_pm_exec_unlock();
}

/*
 * Handle OS (system) resume: if runtime PM left the GPU active, reset the
 * PMU and re-apply the wanted power state, then restart job execution.
 */
void mali_pm_os_resume(void)
{
	struct mali_pmu_core *pmu = mali_pmu_get_global_pmu_core();

	MALI_DEBUG_PRINT(3, ("Mali PM: OS resume\n"));

	mali_pm_exec_lock();

#if defined(DEBUG)
	mali_pm_state_lock();

	/* Assert that things are as we left them in os_suspend().
	 */
	MALI_DEBUG_ASSERT(0 == pd_mask_wanted);
	MALI_DEBUG_ASSERT(0 == pd_mask_current);
	MALI_DEBUG_ASSERT(0 == pmu_mask_current);

	MALI_DEBUG_ASSERT(MALI_TRUE == mali_pm_domain_all_unused());

	mali_pm_state_unlock();
#endif

	if (MALI_TRUE == mali_pm_runtime_active) {
		/* Runtime PM was active, so reset PMU */
		if (NULL != pmu) {
			mali_pmu_reset(pmu);
			pmu_mask_current = mali_pmu_get_mask(pmu);

			MALI_DEBUG_PRINT(3, ("Mali PM: OS resume 0x%x \n", pmu_mask_current));
		}

		mali_pm_update_sync_internal();
	}

	mali_pm_exec_unlock();

	/* Start executing jobs again */
	mali_executor_resume();
}

/*
 * Handle runtime PM suspend. Returns MALI_TRUE when the GPU state was
 * successfully moved to "off"; MALI_FALSE when a power-up request raced
 * in and the suspend must be aborted.
 */
mali_bool mali_pm_runtime_suspend(void)
{
	mali_bool ret;

	MALI_DEBUG_PRINT(3, ("Mali PM: Runtime suspend\n"));

	mali_pm_exec_lock();

	/*
	 * Put SW state directly into "off" state, and do not bother to power
	 * down each power domain, because entire GPU will be powered off
	 * when we return.
	 * For runtime PM suspend, in contrast to OS suspend, there is a race
	 * between this function and the mali_pm_update_sync_internal(), which
	 * is fine...
	 */
	ret = mali_pm_common_suspend();
	if (MALI_TRUE == ret) {
		mali_pm_runtime_active = MALI_FALSE;
	} else {
		/*
		 * Process the "power up" instead,
		 * which could have been "lost"
		 */
		mali_pm_update_sync_internal();
	}

	mali_pm_exec_unlock();

	return ret;
}

/* Handle runtime PM resume: reset the PMU and re-apply the wanted domains. */
void mali_pm_runtime_resume(void)
{
	struct mali_pmu_core *pmu = mali_pmu_get_global_pmu_core();

	mali_pm_exec_lock();

	mali_pm_runtime_active = MALI_TRUE;

#if defined(DEBUG)
	++num_pm_runtime_resume;

	mali_pm_state_lock();

	/*
	 * Assert that things are as we left them in runtime_suspend(),
	 * except for pd_mask_wanted which normally will be the reason we
	 * got here (job queued => domains wanted)
	 */
	MALI_DEBUG_ASSERT(0 == pd_mask_current);
	MALI_DEBUG_ASSERT(0 == pmu_mask_current);

	mali_pm_state_unlock();
#endif

	if (NULL != pmu) {
		mali_pmu_reset(pmu);
		pmu_mask_current = mali_pmu_get_mask(pmu);
		MALI_DEBUG_PRINT(3, ("Mali PM: Runtime resume 0x%x \n", pmu_mask_current));
	}

	/*
	 * Normally we are resumed because a job has just been queued.
	 * pd_mask_wanted should thus be != 0.
	 * It is however possible for others to take a Mali Runtime PM ref
	 * without having a job queued.
	 * We should however always call mali_pm_update_sync_internal(),
	 * because this will take care of any potential mismatch between
	 * pmu_mask_current and pd_mask_current.
 */
	mali_pm_update_sync_internal();

	mali_pm_exec_unlock();
}

#if MALI_STATE_TRACKING
/* Format a human-readable dump of one PM domain's state into buf;
 * returns the number of characters written. */
u32 mali_pm_dump_state_domain(struct mali_pm_domain *domain,
			      char *buf, u32 size)
{
	int n = 0;

	n += _mali_osk_snprintf(buf + n, size - n,
				"\tPower domain: id %u\n",
				mali_pm_domain_get_id(domain));

	n += _mali_osk_snprintf(buf + n, size - n,
				"\t\tMask: 0x%04x\n",
				mali_pm_domain_get_mask(domain));

	n += _mali_osk_snprintf(buf + n, size - n,
				"\t\tUse count: %u\n",
				mali_pm_domain_get_use_count(domain));

	n += _mali_osk_snprintf(buf + n, size - n,
				"\t\tCurrent power state: %s\n",
				(mali_pm_domain_get_mask(domain) & pd_mask_current) ?
				"On" : "Off");

	n += _mali_osk_snprintf(buf + n, size - n,
				"\t\tWanted power state: %s\n",
				(mali_pm_domain_get_mask(domain) & pd_mask_wanted) ?
				"On" : "Off");

	return n;
}
#endif

/* Acquire/release the IRQ-safe PM state spinlock. */
static void mali_pm_state_lock(void)
{
	_mali_osk_spinlock_irq_lock(pm_lock_state);
}

static void mali_pm_state_unlock(void)
{
	_mali_osk_spinlock_irq_unlock(pm_lock_state);
}

/* Acquire/release the PM execution mutex (serializes power changes). */
void mali_pm_exec_lock(void)
{
	_mali_osk_mutex_wait(pm_lock_exec);
}

void mali_pm_exec_unlock(void)
{
	_mali_osk_mutex_signal(pm_lock_exec);
}

/*
 * Mark the domains in power_up_mask as powered on and collect the groups
 * and L2 caches that must be notified once the PM state lock is released.
 * Caller must hold both pm_lock_exec and pm_lock_state (asserted below).
 */
static void mali_pm_domain_power_up(u32 power_up_mask,
				    struct mali_group *groups_up[MALI_MAX_NUMBER_OF_GROUPS],
				    u32 *num_groups_up,
				    struct mali_l2_cache_core *l2_up[MALI_MAX_NUMBER_OF_L2_CACHE_CORES],
				    u32 *num_l2_up)
{
	u32 domain_bit;
	u32 notify_mask = power_up_mask;

	MALI_DEBUG_ASSERT(0 != power_up_mask);
	MALI_DEBUG_ASSERT_POINTER(groups_up);
	MALI_DEBUG_ASSERT_POINTER(num_groups_up);
	MALI_DEBUG_ASSERT(0 == *num_groups_up);
	MALI_DEBUG_ASSERT_POINTER(l2_up);
	MALI_DEBUG_ASSERT_POINTER(num_l2_up);
	MALI_DEBUG_ASSERT(0 == *num_l2_up);

	MALI_DEBUG_ASSERT_LOCK_HELD(pm_lock_exec);
	MALI_DEBUG_ASSERT_LOCK_HELD(pm_lock_state);

	MALI_DEBUG_PRINT(5,
			 ("PM update: Powering up domains: . [%s]\n",
			  mali_pm_mask_to_string(power_up_mask)));

	pd_mask_current |= power_up_mask;

	/* Walk the mask from the highest set bit downwards. */
	domain_bit = _mali_osk_fls(notify_mask);
	while (0 != domain_bit) {
		u32 domain_id = domain_bit - 1;
		struct mali_pm_domain *domain =
			mali_pm_domain_get_from_index(
				domain_id);
		struct mali_l2_cache_core *l2_cache;
		struct mali_l2_cache_core *l2_cache_tmp;
		struct mali_group *group;
		struct mali_group *group_tmp;

		/* Mark domain as powered up */
		mali_pm_domain_set_power_on(domain, MALI_TRUE);

		/*
		 * Make a note of the L2 and/or group(s) to notify
		 * (need to release the PM state lock before doing so)
		 */

		_MALI_OSK_LIST_FOREACHENTRY(l2_cache,
					    l2_cache_tmp,
					    mali_pm_domain_get_l2_cache_list(
						    domain),
					    struct mali_l2_cache_core,
					    pm_domain_list) {
			MALI_DEBUG_ASSERT(*num_l2_up <
					  MALI_MAX_NUMBER_OF_L2_CACHE_CORES);
			l2_up[*num_l2_up] = l2_cache;
			(*num_l2_up)++;
		}

		_MALI_OSK_LIST_FOREACHENTRY(group,
					    group_tmp,
					    mali_pm_domain_get_group_list(domain),
					    struct mali_group,
					    pm_domain_list) {
			MALI_DEBUG_ASSERT(*num_groups_up <
					  MALI_MAX_NUMBER_OF_GROUPS);
			groups_up[*num_groups_up] = group;

			(*num_groups_up)++;
		}

		/* Remove current bit and find next */
		notify_mask &= ~(1 << (domain_id));
		domain_bit = _mali_osk_fls(notify_mask);
	}
}

/*
 * Mirror of mali_pm_domain_power_up(): mark the domains in power_down_mask
 * as powered off and collect the groups/L2 caches to notify after the PM
 * state lock is released. Caller holds pm_lock_exec and pm_lock_state.
 */
static void mali_pm_domain_power_down(u32 power_down_mask,
				      struct mali_group *groups_down[MALI_MAX_NUMBER_OF_GROUPS],
				      u32 *num_groups_down,
				      struct mali_l2_cache_core *l2_down[MALI_MAX_NUMBER_OF_L2_CACHE_CORES],
				      u32 *num_l2_down)
{
	u32 domain_bit;
	u32 notify_mask = power_down_mask;

	MALI_DEBUG_ASSERT(0 != power_down_mask);
	MALI_DEBUG_ASSERT_POINTER(groups_down);
	MALI_DEBUG_ASSERT_POINTER(num_groups_down);
	MALI_DEBUG_ASSERT(0 == *num_groups_down);
	MALI_DEBUG_ASSERT_POINTER(l2_down);
	MALI_DEBUG_ASSERT_POINTER(num_l2_down);
	MALI_DEBUG_ASSERT(0 == *num_l2_down);

	MALI_DEBUG_ASSERT_LOCK_HELD(pm_lock_exec);
	MALI_DEBUG_ASSERT_LOCK_HELD(pm_lock_state);

	MALI_DEBUG_PRINT(5,
			 ("PM update: Powering down domains: [%s]\n",
			  mali_pm_mask_to_string(power_down_mask)));

	pd_mask_current &= ~power_down_mask;

	domain_bit = _mali_osk_fls(notify_mask);
	while (0 != domain_bit) {
		u32 domain_id = domain_bit - 1;
		struct mali_pm_domain *domain =
			mali_pm_domain_get_from_index(domain_id);
		struct mali_l2_cache_core *l2_cache;
		struct mali_l2_cache_core *l2_cache_tmp;
		struct mali_group *group;
		struct mali_group *group_tmp;

		/* Mark domain as powered down */
		mali_pm_domain_set_power_on(domain, MALI_FALSE);

		/*
		 * Make a note of the L2s and/or groups to notify
		 * (need to release the PM state lock before doing so)
		 */

		_MALI_OSK_LIST_FOREACHENTRY(l2_cache,
					    l2_cache_tmp,
					    mali_pm_domain_get_l2_cache_list(domain),
					    struct mali_l2_cache_core,
					    pm_domain_list) {
			MALI_DEBUG_ASSERT(*num_l2_down <
					  MALI_MAX_NUMBER_OF_L2_CACHE_CORES);
			l2_down[*num_l2_down] = l2_cache;
			(*num_l2_down)++;
		}

		_MALI_OSK_LIST_FOREACHENTRY(group,
					    group_tmp,
					    mali_pm_domain_get_group_list(domain),
					    struct mali_group,
					    pm_domain_list) {
			MALI_DEBUG_ASSERT(*num_groups_down <
					  MALI_MAX_NUMBER_OF_GROUPS);
			groups_down[*num_groups_down] = group;
			(*num_groups_down)++;
		}

		/* Remove current bit and find next */
		notify_mask &= ~(1 << (domain_id));
		domain_bit = _mali_osk_fls(notify_mask);
	}
}

/*
 * Execute pending power domain changes
 * pm_lock_exec lock must be taken by caller.
 */
static void mali_pm_update_sync_internal(void)
{
	/*
	 * This should only be called in non-atomic context
	 * (normally as deferred work)
	 *
	 * Look at the pending power domain changes, and execute these.
	 * Make sure group and schedulers are notified about changes.
	 */

	struct mali_pmu_core *pmu = mali_pmu_get_global_pmu_core();

	u32 power_down_mask;
	u32 power_up_mask;

	MALI_DEBUG_ASSERT_LOCK_HELD(pm_lock_exec);

#if defined(DEBUG)
	++num_pm_updates;
#endif

	/* Hold PM state lock while we look at (and obey) the wanted state */
	mali_pm_state_lock();

	MALI_DEBUG_PRINT(5, ("PM update pre: Wanted domain mask: .. [%s]\n",
			     mali_pm_mask_to_string(pd_mask_wanted)));
	MALI_DEBUG_PRINT(5, ("PM update pre: Current domain mask: . [%s]\n",
			     mali_pm_mask_to_string(pd_mask_current)));
	MALI_DEBUG_PRINT(5, ("PM update pre: Current PMU mask: .... [%s]\n",
			     mali_pm_mask_to_string(pmu_mask_current)));
	MALI_DEBUG_PRINT(5, ("PM update pre: Group power stats: ... <%s>\n",
			     mali_pm_group_stats_to_string()));

	/* Figure out which cores we need to power on */
	power_up_mask = pd_mask_wanted &
			(pd_mask_wanted ^ pd_mask_current);

	if (0 != power_up_mask) {
		u32 power_up_mask_pmu;
		struct mali_group *groups_up[MALI_MAX_NUMBER_OF_GROUPS];
		u32 num_groups_up = 0;
		struct mali_l2_cache_core *
		l2_up[MALI_MAX_NUMBER_OF_L2_CACHE_CORES];
		u32 num_l2_up = 0;
		u32 i;

#if defined(DEBUG)
		++num_pm_updates_up;
#endif

		/*
		 * Make sure dummy/global domain is always included when
		 * powering up, since this is controlled by runtime PM,
		 * and device power is on at this stage.
		 */
		power_up_mask |= MALI_PM_DOMAIN_DUMMY_MASK;

		/* Power up only real PMU domains */
		power_up_mask_pmu = power_up_mask & ~MALI_PM_DOMAIN_DUMMY_MASK;

		/* But not those that happen to be powered on already */
		power_up_mask_pmu &= (power_up_mask ^ pmu_mask_current) &
				     power_up_mask;

		if (0 != power_up_mask_pmu) {
			MALI_DEBUG_ASSERT(NULL != pmu);
			pmu_mask_current |= power_up_mask_pmu;
			mali_pmu_power_up(pmu, power_up_mask_pmu);
		}

		/*
		 * Put the domains themselves in power up state.
		 * We get the groups and L2s to notify in return.
		 */
		mali_pm_domain_power_up(power_up_mask,
					groups_up, &num_groups_up,
					l2_up, &num_l2_up);

		/* Need to unlock PM state lock before notifying L2 + groups */
		mali_pm_state_unlock();

		/* Notify each L2 cache that it has been powered up */
		for (i = 0; i < num_l2_up; i++) {
			mali_l2_cache_power_up(l2_up[i]);
		}

		/*
		 * Tell execution module about all the groups we have
		 * powered up. Groups will be notified as a result of this.
		 */
		mali_executor_group_power_up(groups_up, num_groups_up);

		/* Lock state again before checking for power down */
		mali_pm_state_lock();
	}

	/* Figure out which cores we need to power off */
	power_down_mask = pd_mask_current &
			  (pd_mask_wanted ^ pd_mask_current);

	/*
	 * Never power down the dummy/global domain here. This is to be done
	 * from a suspend request (since this domain is only physically powered
	 * down at that point)
	 */
	power_down_mask &= ~MALI_PM_DOMAIN_DUMMY_MASK;

	if (0 != power_down_mask) {
		u32 power_down_mask_pmu;
		struct mali_group *groups_down[MALI_MAX_NUMBER_OF_GROUPS];
		u32 num_groups_down = 0;
		struct mali_l2_cache_core *
		l2_down[MALI_MAX_NUMBER_OF_L2_CACHE_CORES];
		u32 num_l2_down = 0;
		u32 i;

#if defined(DEBUG)
		++num_pm_updates_down;
#endif

		/*
		 * Put the domains themselves in power down state.
		 * We get the groups and L2s to notify in return.
		 */
		mali_pm_domain_power_down(power_down_mask,
					  groups_down, &num_groups_down,
					  l2_down, &num_l2_down);

		/* Need to unlock PM state lock before notifying L2 + groups */
		mali_pm_state_unlock();

		/*
		 * Tell execution module about all the groups we will be
		 * powering down. Groups will be notified as a result of this.
		 */
		if (0 < num_groups_down) {
			mali_executor_group_power_down(groups_down, num_groups_down);
		}

		/* Notify each L2 cache that we will be powering down */
		for (i = 0; i < num_l2_down; i++) {
			mali_l2_cache_power_down(l2_down[i]);
		}

		/*
		 * Power down only PMU domains which should not stay on
		 * Some domains might for instance currently be incorrectly
		 * powered up if default domain power state is all on.
		 */
		power_down_mask_pmu = pmu_mask_current & (~pd_mask_current);

		if (0 != power_down_mask_pmu) {
			MALI_DEBUG_ASSERT(NULL != pmu);
			pmu_mask_current &= ~power_down_mask_pmu;
			mali_pmu_power_down(pmu, power_down_mask_pmu);

		}
	} else {
		/*
		 * Power down only PMU domains which should not stay on
		 * Some domains might for instance currently be incorrectly
		 * powered up if default domain power state is all on.
		 */
		u32 power_down_mask_pmu;

		/* No need for state lock since we'll only update PMU */
		mali_pm_state_unlock();

		power_down_mask_pmu = pmu_mask_current & (~pd_mask_current);

		if (0 != power_down_mask_pmu) {
			MALI_DEBUG_ASSERT(NULL != pmu);
			pmu_mask_current &= ~power_down_mask_pmu;
			mali_pmu_power_down(pmu, power_down_mask_pmu);
		}
	}

	MALI_DEBUG_PRINT(5, ("PM update post: Current domain mask: . [%s]\n",
			     mali_pm_mask_to_string(pd_mask_current)));
	MALI_DEBUG_PRINT(5, ("PM update post: Current PMU mask: .... [%s]\n",
			     mali_pm_mask_to_string(pmu_mask_current)));
	MALI_DEBUG_PRINT(5, ("PM update post: Group power stats: ... <%s>\n",
			     mali_pm_group_stats_to_string()));
}

/*
 * Common suspend path for OS and runtime suspend: force all domains into
 * the powered-off SW state. Returns MALI_FALSE (abort) if any domain is
 * still wanted; MALI_TRUE when the SW state is fully off.
 * Caller must hold pm_lock_exec.
 */
static mali_bool mali_pm_common_suspend(void)
{
	mali_pm_state_lock();

	if (0 != pd_mask_wanted) {
		MALI_DEBUG_PRINT(5, ("PM: Aborting suspend operation\n\n\n"));
		mali_pm_state_unlock();
		return MALI_FALSE;
	}

	MALI_DEBUG_PRINT(5, ("PM suspend pre: Wanted domain mask: .. [%s]\n",
			     mali_pm_mask_to_string(pd_mask_wanted)));
	MALI_DEBUG_PRINT(5, ("PM suspend pre: Current domain mask: . [%s]\n",
			     mali_pm_mask_to_string(pd_mask_current)));
	MALI_DEBUG_PRINT(5, ("PM suspend pre: Current PMU mask: .... [%s]\n",
			     mali_pm_mask_to_string(pmu_mask_current)));
	MALI_DEBUG_PRINT(5, ("PM suspend pre: Group power stats: ... <%s>\n",
			     mali_pm_group_stats_to_string()));

	if (0 != pd_mask_current) {
		/*
		 * We have still some domains powered on.
		 * It is for instance very normal that at least the
		 * dummy/global domain is marked as powered on at this point.
		 * (because it is physically powered on until this function
		 * returns)
		 */

		struct mali_group *groups_down[MALI_MAX_NUMBER_OF_GROUPS];
		u32 num_groups_down = 0;
		struct mali_l2_cache_core *
		l2_down[MALI_MAX_NUMBER_OF_L2_CACHE_CORES];
		u32 num_l2_down = 0;
		u32 i;

		/*
		 * Put the domains themselves in power down state.
		 * We get the groups and L2s to notify in return.
		 */
		mali_pm_domain_power_down(pd_mask_current,
					  groups_down,
					  &num_groups_down,
					  l2_down,
					  &num_l2_down);

		MALI_DEBUG_ASSERT(0 == pd_mask_current);
		MALI_DEBUG_ASSERT(MALI_TRUE == mali_pm_domain_all_unused());

		/* Need to unlock PM state lock before notifying L2 + groups */
		mali_pm_state_unlock();

		/*
		 * Tell execution module about all the groups we will be
		 * powering down. Groups will be notified as a result of this.
		 */
		if (0 < num_groups_down) {
			mali_executor_group_power_down(groups_down, num_groups_down);
		}

		/* Notify each L2 cache that we will be powering down */
		for (i = 0; i < num_l2_down; i++) {
			mali_l2_cache_power_down(l2_down[i]);
		}

		pmu_mask_current = 0;
	} else {
		MALI_DEBUG_ASSERT(0 == pmu_mask_current);

		MALI_DEBUG_ASSERT(MALI_TRUE == mali_pm_domain_all_unused());

		mali_pm_state_unlock();
	}

	MALI_DEBUG_PRINT(5, ("PM suspend post: Current domain mask: [%s]\n",
			     mali_pm_mask_to_string(pd_mask_current)));
	MALI_DEBUG_PRINT(5, ("PM suspend post: Current PMU mask: ... [%s]\n",
			     mali_pm_mask_to_string(pmu_mask_current)));
	MALI_DEBUG_PRINT(5, ("PM suspend post: Group power stats: .. <%s>\n",
			     mali_pm_group_stats_to_string()));

	return MALI_TRUE;
}

/* Work-queue entry point for deferred PM updates (see mali_pm_update_async). */
static void mali_pm_update_work(void *data)
{
	MALI_IGNORE(data);
	mali_pm_update_sync();
}

/* Create a mali_pm_domain object for every configured (non-zero) mask. */
static _mali_osk_errcode_t mali_pm_create_pm_domains(void)
{
	int i;

	/* Create all domains (including dummy domain) */
	for (i = 0; i < MALI_MAX_NUMBER_OF_DOMAINS; i++) {
		if (0x0 == domain_config[i]) continue;

		if (NULL == mali_pm_domain_create(domain_config[i])) {
			return _MALI_OSK_ERR_NOMEM;
		}
	}

	return _MALI_OSK_ERR_OK;
}

/*
 * Fill domain_config[] with the default PMU domain layout, probed from the
 * HW resources that are actually present (per Mali-400/450/470 variant).
 */
static void mali_pm_set_default_pm_domain_config(void)
{
	MALI_DEBUG_ASSERT(0 != _mali_osk_resource_base_address());

	/* GP core */
	if (_MALI_OSK_ERR_OK == _mali_osk_resource_find(
		    MALI_OFFSET_GP, NULL)) {
		domain_config[MALI_DOMAIN_INDEX_GP] = 0x01;
	}

	/* PP0 - PP3 core */
	if (_MALI_OSK_ERR_OK == _mali_osk_resource_find(
		    MALI_OFFSET_PP0, NULL)) {
		if (mali_is_mali400()) {
			domain_config[MALI_DOMAIN_INDEX_PP0] = 0x01 << 2;
		} else if (mali_is_mali450()) {
			domain_config[MALI_DOMAIN_INDEX_PP0] = 0x01 << 1;
		} else if (mali_is_mali470()) {
			domain_config[MALI_DOMAIN_INDEX_PP0] = 0x01 << 0;
		}
	}

	if (_MALI_OSK_ERR_OK == _mali_osk_resource_find(
		    MALI_OFFSET_PP1, NULL)) {
		if (mali_is_mali400()) {
			domain_config[MALI_DOMAIN_INDEX_PP1] = 0x01 << 3;
		} else if (mali_is_mali450()) {
			domain_config[MALI_DOMAIN_INDEX_PP1] = 0x01 << 2;
		} else if (mali_is_mali470()) {
			domain_config[MALI_DOMAIN_INDEX_PP1] = 0x01 << 1;
		}
	}

	if (_MALI_OSK_ERR_OK == _mali_osk_resource_find(
		    MALI_OFFSET_PP2, NULL)) {
		if (mali_is_mali400()) {
			domain_config[MALI_DOMAIN_INDEX_PP2] = 0x01 << 4;
		} else if (mali_is_mali450()) {
			domain_config[MALI_DOMAIN_INDEX_PP2] = 0x01 << 2;
		} else if (mali_is_mali470()) {
			domain_config[MALI_DOMAIN_INDEX_PP2] = 0x01 << 1;
		}
	}

	if (_MALI_OSK_ERR_OK ==
_mali_osk_resource_find( + MALI_OFFSET_PP3, NULL)) { + if (mali_is_mali400()) { + domain_config[MALI_DOMAIN_INDEX_PP3] = 0x01 << 5; + } else if (mali_is_mali450()) { + domain_config[MALI_DOMAIN_INDEX_PP3] = 0x01 << 2; + } else if (mali_is_mali470()) { + domain_config[MALI_DOMAIN_INDEX_PP3] = 0x01 << 1; + } + } + + /* PP4 - PP7 */ + if (_MALI_OSK_ERR_OK == _mali_osk_resource_find( + MALI_OFFSET_PP4, NULL)) { + domain_config[MALI_DOMAIN_INDEX_PP4] = 0x01 << 3; + } + + if (_MALI_OSK_ERR_OK == _mali_osk_resource_find( + MALI_OFFSET_PP5, NULL)) { + domain_config[MALI_DOMAIN_INDEX_PP5] = 0x01 << 3; + } + + if (_MALI_OSK_ERR_OK == _mali_osk_resource_find( + MALI_OFFSET_PP6, NULL)) { + domain_config[MALI_DOMAIN_INDEX_PP6] = 0x01 << 3; + } + + if (_MALI_OSK_ERR_OK == _mali_osk_resource_find( + MALI_OFFSET_PP7, NULL)) { + domain_config[MALI_DOMAIN_INDEX_PP7] = 0x01 << 3; + } + + /* L2gp/L2PP0/L2PP4 */ + if (mali_is_mali400()) { + if (_MALI_OSK_ERR_OK == _mali_osk_resource_find( + MALI400_OFFSET_L2_CACHE0, NULL)) { + domain_config[MALI_DOMAIN_INDEX_L20] = 0x01 << 1; + } + } else if (mali_is_mali450()) { + if (_MALI_OSK_ERR_OK == _mali_osk_resource_find( + MALI450_OFFSET_L2_CACHE0, NULL)) { + domain_config[MALI_DOMAIN_INDEX_L20] = 0x01 << 0; + } + + if (_MALI_OSK_ERR_OK == _mali_osk_resource_find( + MALI450_OFFSET_L2_CACHE1, NULL)) { + domain_config[MALI_DOMAIN_INDEX_L21] = 0x01 << 1; + } + + if (_MALI_OSK_ERR_OK == _mali_osk_resource_find( + MALI450_OFFSET_L2_CACHE2, NULL)) { + domain_config[MALI_DOMAIN_INDEX_L22] = 0x01 << 3; + } + } else if (mali_is_mali470()) { + if (_MALI_OSK_ERR_OK == _mali_osk_resource_find( + MALI470_OFFSET_L2_CACHE1, NULL)) { + domain_config[MALI_DOMAIN_INDEX_L21] = 0x01 << 0; + } + } +} + +static u32 mali_pm_get_registered_cores_mask(void) +{ + int i = 0; + u32 mask = 0; + + for (i = 0; i < MALI_DOMAIN_INDEX_DUMMY; i++) { + mask |= domain_config[i]; + } + + return mask; +} + +static void mali_pm_set_pmu_domain_config(void) +{ + int i = 0; + + 
_mali_osk_device_data_pmu_config_get(domain_config, MALI_MAX_NUMBER_OF_DOMAINS - 1); + + for (i = 0; i < MALI_MAX_NUMBER_OF_DOMAINS - 1; i++) { + if (0 != domain_config[i]) { + MALI_DEBUG_PRINT(2, ("Using customer pmu config:\n")); + break; + } + } + + if (MALI_MAX_NUMBER_OF_DOMAINS - 1 == i) { + MALI_DEBUG_PRINT(2, ("Using hw detect pmu config:\n")); + mali_pm_set_default_pm_domain_config(); + } + + for (i = 0; i < MALI_MAX_NUMBER_OF_DOMAINS - 1; i++) { + if (domain_config[i]) { + MALI_DEBUG_PRINT(2, ("domain_config[%d] = 0x%x \n", i, domain_config[i])); + } + } + /* Can't override dummy domain mask */ + domain_config[MALI_DOMAIN_INDEX_DUMMY] = + 1 << MALI_DOMAIN_INDEX_DUMMY; +} + +#if defined(DEBUG) +const char *mali_pm_mask_to_string(u32 mask) +{ + static char bit_str[MALI_MAX_NUMBER_OF_DOMAINS + 1]; + int bit; + int str_pos = 0; + + /* Must be protected by lock since we use shared string buffer */ + if (NULL != pm_lock_exec) { + MALI_DEBUG_ASSERT_LOCK_HELD(pm_lock_exec); + } + + for (bit = MALI_MAX_NUMBER_OF_DOMAINS - 1; bit >= 0; bit--) { + if (mask & (1 << bit)) { + bit_str[str_pos] = 'X'; + } else { + bit_str[str_pos] = '-'; + } + str_pos++; + } + + bit_str[MALI_MAX_NUMBER_OF_DOMAINS] = '\0'; + + return bit_str; +} + +const char *mali_pm_group_stats_to_string(void) +{ + static char bit_str[MALI_MAX_NUMBER_OF_GROUPS + 1]; + u32 num_groups = mali_group_get_glob_num_groups(); + u32 i; + + /* Must be protected by lock since we use shared string buffer */ + if (NULL != pm_lock_exec) { + MALI_DEBUG_ASSERT_LOCK_HELD(pm_lock_exec); + } + + for (i = 0; i < num_groups && i < MALI_MAX_NUMBER_OF_GROUPS; i++) { + struct mali_group *group; + + group = mali_group_get_glob_group(i); + + if (MALI_TRUE == mali_group_power_is_on(group)) { + bit_str[i] = 'X'; + } else { + bit_str[i] = '-'; + } + } + + bit_str[i] = '\0'; + + return bit_str; +} +#endif + +/* + * num_pp is the number of PP cores which will be powered on given this mask + * cost is the total power cost of cores 
which will be powered on given this mask + */ +static void mali_pm_stat_from_mask(u32 mask, u32 *num_pp, u32 *cost) +{ + u32 i; + + /* loop through all cores */ + for (i = 0; i < MALI_MAX_NUMBER_OF_DOMAINS; i++) { + if (!(domain_config[i] & mask)) { + continue; + } + + switch (i) { + case MALI_DOMAIN_INDEX_GP: + *cost += MALI_GP_COST; + + break; + case MALI_DOMAIN_INDEX_PP0: /* Fall through */ + case MALI_DOMAIN_INDEX_PP1: /* Fall through */ + case MALI_DOMAIN_INDEX_PP2: /* Fall through */ + case MALI_DOMAIN_INDEX_PP3: + if (mali_is_mali400()) { + if ((domain_config[MALI_DOMAIN_INDEX_L20] & mask) + || (domain_config[MALI_DOMAIN_INDEX_DUMMY] + == domain_config[MALI_DOMAIN_INDEX_L20])) { + *num_pp += 1; + } + } else { + if ((domain_config[MALI_DOMAIN_INDEX_L21] & mask) + || (domain_config[MALI_DOMAIN_INDEX_DUMMY] + == domain_config[MALI_DOMAIN_INDEX_L21])) { + *num_pp += 1; + } + } + + *cost += MALI_PP_COST; + break; + case MALI_DOMAIN_INDEX_PP4: /* Fall through */ + case MALI_DOMAIN_INDEX_PP5: /* Fall through */ + case MALI_DOMAIN_INDEX_PP6: /* Fall through */ + case MALI_DOMAIN_INDEX_PP7: + MALI_DEBUG_ASSERT(mali_is_mali450()); + + if ((domain_config[MALI_DOMAIN_INDEX_L22] & mask) + || (domain_config[MALI_DOMAIN_INDEX_DUMMY] + == domain_config[MALI_DOMAIN_INDEX_L22])) { + *num_pp += 1; + } + + *cost += MALI_PP_COST; + break; + case MALI_DOMAIN_INDEX_L20: /* Fall through */ + case MALI_DOMAIN_INDEX_L21: /* Fall through */ + case MALI_DOMAIN_INDEX_L22: + *cost += MALI_L2_COST; + + break; + } + } +} + +void mali_pm_power_cost_setup(void) +{ + /* + * Two parallel arrays which store the best domain mask and its cost + * The index is the number of PP cores, E.g. Index 0 is for 1 PP option, + * might have mask 0x2 and with cost of 1, lower cost is better + */ + u32 best_mask[MALI_MAX_NUMBER_OF_PHYSICAL_PP_GROUPS] = { 0 }; + u32 best_cost[MALI_MAX_NUMBER_OF_PHYSICAL_PP_GROUPS] = { 0 }; + /* Array cores_in_domain is used to store the total pp cores in each pm domain. 
*/ + u32 cores_in_domain[MALI_MAX_NUMBER_OF_DOMAINS] = { 0 }; + /* Domain_count is used to represent the max domain we have.*/ + u32 max_domain_mask = 0; + u32 max_domain_id = 0; + u32 always_on_pp_cores = 0; + + u32 num_pp, cost, mask; + u32 i, j , k; + + /* Initialize statistics */ + for (i = 0; i < MALI_MAX_NUMBER_OF_PHYSICAL_PP_GROUPS; i++) { + best_mask[i] = 0; + best_cost[i] = 0xFFFFFFFF; /* lower cost is better */ + } + + for (i = 0; i < MALI_MAX_NUMBER_OF_PHYSICAL_PP_GROUPS + 1; i++) { + for (j = 0; j < MALI_MAX_NUMBER_OF_DOMAINS; j++) { + mali_pm_domain_power_cost_result[i][j] = 0; + } + } + + /* Caculate number of pp cores of a given domain config. */ + for (i = MALI_DOMAIN_INDEX_PP0; i <= MALI_DOMAIN_INDEX_PP7; i++) { + if (0 < domain_config[i]) { + /* Get the max domain mask value used to caculate power cost + * and we don't count in always on pp cores. */ + if (MALI_PM_DOMAIN_DUMMY_MASK != domain_config[i] + && max_domain_mask < domain_config[i]) { + max_domain_mask = domain_config[i]; + } + + if (MALI_PM_DOMAIN_DUMMY_MASK == domain_config[i]) { + always_on_pp_cores++; + } + } + } + max_domain_id = _mali_osk_fls(max_domain_mask); + + /* + * Try all combinations of power domains and check how many PP cores + * they have and their power cost. + */ + for (mask = 0; mask < (1 << max_domain_id); mask++) { + num_pp = 0; + cost = 0; + + mali_pm_stat_from_mask(mask, &num_pp, &cost); + + /* This mask is usable for all MP1 up to num_pp PP cores, check statistics for all */ + for (i = 0; i < num_pp; i++) { + if (best_cost[i] >= cost) { + best_cost[i] = cost; + best_mask[i] = mask; + } + } + } + + /* + * If we want to enable x pp cores, if x is less than number of always_on pp cores, + * all of pp cores we will enable must be always_on pp cores. 
+ */ + for (i = 0; i < mali_executor_get_num_cores_total(); i++) { + if (i < always_on_pp_cores) { + mali_pm_domain_power_cost_result[i + 1][MALI_MAX_NUMBER_OF_DOMAINS - 1] + = i + 1; + } else { + mali_pm_domain_power_cost_result[i + 1][MALI_MAX_NUMBER_OF_DOMAINS - 1] + = always_on_pp_cores; + } + } + + /* In this loop, variable i represent for the number of non-always on pp cores we want to enabled. */ + for (i = 0; i < (mali_executor_get_num_cores_total() - always_on_pp_cores); i++) { + if (best_mask[i] == 0) { + /* This MP variant is not available */ + continue; + } + + for (j = 0; j < MALI_MAX_NUMBER_OF_DOMAINS; j++) { + cores_in_domain[j] = 0; + } + + for (j = MALI_DOMAIN_INDEX_PP0; j <= MALI_DOMAIN_INDEX_PP7; j++) { + if (0 < domain_config[j] + && (MALI_PM_DOMAIN_DUMMY_MASK != domain_config[i])) { + cores_in_domain[_mali_osk_fls(domain_config[j]) - 1]++; + } + } + + /* In this loop, j represent for the number we have already enabled.*/ + for (j = 0; j <= i;) { + /* j used to visit all of domain to get the number of pp cores remained in it. */ + for (k = 0; k < max_domain_id; k++) { + /* If domain k in best_mask[i] is enabled and this domain has extra pp cores, + * we know we must pick at least one pp core from this domain. + * And then we move to next enabled pm domain. */ + if ((best_mask[i] & (0x1 << k)) && (0 < cores_in_domain[k])) { + cores_in_domain[k]--; + mali_pm_domain_power_cost_result[always_on_pp_cores + i + 1][k]++; + j++; + if (j > i) { + break; + } + } + } + } + } +} + +/* + * When we are doing core scaling, + * this function is called to return the best mask to + * achieve the best pp group power cost. 
+ */ +void mali_pm_get_best_power_cost_mask(int num_requested, int *dst) +{ + MALI_DEBUG_ASSERT((mali_executor_get_num_cores_total() >= num_requested) && (0 <= num_requested)); + + _mali_osk_memcpy(dst, mali_pm_domain_power_cost_result[num_requested], MALI_MAX_NUMBER_OF_DOMAINS * sizeof(int)); +} + +u32 mali_pm_get_current_mask(void) +{ + return pd_mask_current; +} + +u32 mali_pm_get_wanted_mask(void) +{ + return pd_mask_wanted; +} diff --git a/drivers/gpu/arm/utgard/common/mali_pm.h b/drivers/gpu/arm/utgard/common/mali_pm.h new file mode 100644 index 000000000000..d72c732e698d --- /dev/null +++ b/drivers/gpu/arm/utgard/common/mali_pm.h @@ -0,0 +1,91 @@ +/* + * Copyright (C) 2011-2015 ARM Limited. All rights reserved. + * + * This program is free software and is provided to you under the terms of the GNU General Public License version 2 + * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence. + * + * A copy of the licence is included with the program, and can also be obtained from Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. + */ + +#ifndef __MALI_PM_H__ +#define __MALI_PM_H__ + +#include "mali_osk.h" +#include "mali_pm_domain.h" + +#define MALI_DOMAIN_INDEX_GP 0 +#define MALI_DOMAIN_INDEX_PP0 1 +#define MALI_DOMAIN_INDEX_PP1 2 +#define MALI_DOMAIN_INDEX_PP2 3 +#define MALI_DOMAIN_INDEX_PP3 4 +#define MALI_DOMAIN_INDEX_PP4 5 +#define MALI_DOMAIN_INDEX_PP5 6 +#define MALI_DOMAIN_INDEX_PP6 7 +#define MALI_DOMAIN_INDEX_PP7 8 +#define MALI_DOMAIN_INDEX_L20 9 +#define MALI_DOMAIN_INDEX_L21 10 +#define MALI_DOMAIN_INDEX_L22 11 +/* + * The dummy domain is used when there is no physical power domain + * (e.g. 
no PMU or always on cores) + */ +#define MALI_DOMAIN_INDEX_DUMMY 12 +#define MALI_MAX_NUMBER_OF_DOMAINS 13 + +/** + * Initialize the Mali PM module + * + * PM module covers Mali PM core, PM domains and Mali PMU + */ +_mali_osk_errcode_t mali_pm_initialize(void); + +/** + * Terminate the Mali PM module + */ +void mali_pm_terminate(void); + +void mali_pm_exec_lock(void); +void mali_pm_exec_unlock(void); + + +struct mali_pm_domain *mali_pm_register_l2_cache(u32 domain_index, + struct mali_l2_cache_core *l2_cache); +struct mali_pm_domain *mali_pm_register_group(u32 domain_index, + struct mali_group *group); + +mali_bool mali_pm_get_domain_refs(struct mali_pm_domain **domains, + struct mali_group **groups, + u32 num_domains); +mali_bool mali_pm_put_domain_refs(struct mali_pm_domain **domains, + u32 num_domains); + +void mali_pm_init_begin(void); +void mali_pm_init_end(void); + +void mali_pm_update_sync(void); +void mali_pm_update_async(void); + +/* Callback functions for system power management */ +void mali_pm_os_suspend(mali_bool os_suspend); +void mali_pm_os_resume(void); + +mali_bool mali_pm_runtime_suspend(void); +void mali_pm_runtime_resume(void); + +#if MALI_STATE_TRACKING +u32 mali_pm_dump_state_domain(struct mali_pm_domain *domain, + char *buf, u32 size); +#endif + +void mali_pm_power_cost_setup(void); + +void mali_pm_get_best_power_cost_mask(int num_requested, int *dst); + +#if defined(DEBUG) +const char *mali_pm_mask_to_string(u32 mask); +#endif + +u32 mali_pm_get_current_mask(void); +u32 mali_pm_get_wanted_mask(void); +#endif /* __MALI_PM_H__ */ diff --git a/drivers/gpu/arm/utgard/common/mali_pm_domain.c b/drivers/gpu/arm/utgard/common/mali_pm_domain.c new file mode 100644 index 000000000000..dbf985e6d37b --- /dev/null +++ b/drivers/gpu/arm/utgard/common/mali_pm_domain.c @@ -0,0 +1,209 @@ +/* + * Copyright (C) 2013-2015 ARM Limited. All rights reserved. 
+ * + * This program is free software and is provided to you under the terms of the GNU General Public License version 2 + * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence. + * + * A copy of the licence is included with the program, and can also be obtained from Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. + */ + +#include "mali_kernel_common.h" +#include "mali_osk.h" +#include "mali_pm_domain.h" +#include "mali_pmu.h" +#include "mali_group.h" +#include "mali_pm.h" + +static struct mali_pm_domain *mali_pm_domains[MALI_MAX_NUMBER_OF_DOMAINS] = +{ NULL, }; + +void mali_pm_domain_initialize(void) +{ + /* Domains will be initialized/created on demand */ +} + +void mali_pm_domain_terminate(void) +{ + int i; + + /* Delete all domains that has been created */ + for (i = 0; i < MALI_MAX_NUMBER_OF_DOMAINS; i++) { + mali_pm_domain_delete(mali_pm_domains[i]); + mali_pm_domains[i] = NULL; + } +} + +struct mali_pm_domain *mali_pm_domain_create(u32 pmu_mask) +{ + struct mali_pm_domain *domain = NULL; + u32 domain_id = 0; + + domain = mali_pm_domain_get_from_mask(pmu_mask); + if (NULL != domain) return domain; + + MALI_DEBUG_PRINT(2, + ("Mali PM domain: Creating Mali PM domain (mask=0x%08X)\n", + pmu_mask)); + + domain = (struct mali_pm_domain *)_mali_osk_malloc( + sizeof(struct mali_pm_domain)); + if (NULL != domain) { + domain->power_is_on = MALI_FALSE; + domain->pmu_mask = pmu_mask; + domain->use_count = 0; + _mali_osk_list_init(&domain->group_list); + _mali_osk_list_init(&domain->l2_cache_list); + + domain_id = _mali_osk_fls(pmu_mask) - 1; + /* Verify the domain_id */ + MALI_DEBUG_ASSERT(MALI_MAX_NUMBER_OF_DOMAINS > domain_id); + /* Verify that pmu_mask only one bit is set */ + MALI_DEBUG_ASSERT((1 << domain_id) == pmu_mask); + mali_pm_domains[domain_id] = domain; + + return domain; + } else { + MALI_DEBUG_PRINT_ERROR(("Unable to create PM 
domain\n")); + } + + return NULL; +} + +void mali_pm_domain_delete(struct mali_pm_domain *domain) +{ + if (NULL == domain) { + return; + } + + _mali_osk_list_delinit(&domain->group_list); + _mali_osk_list_delinit(&domain->l2_cache_list); + + _mali_osk_free(domain); +} + +void mali_pm_domain_add_group(struct mali_pm_domain *domain, + struct mali_group *group) +{ + MALI_DEBUG_ASSERT_POINTER(domain); + MALI_DEBUG_ASSERT_POINTER(group); + + /* + * Use addtail because virtual group is created last and it needs + * to be at the end of the list (in order to be activated after + * all children. + */ + _mali_osk_list_addtail(&group->pm_domain_list, &domain->group_list); +} + +void mali_pm_domain_add_l2_cache(struct mali_pm_domain *domain, + struct mali_l2_cache_core *l2_cache) +{ + MALI_DEBUG_ASSERT_POINTER(domain); + MALI_DEBUG_ASSERT_POINTER(l2_cache); + _mali_osk_list_add(&l2_cache->pm_domain_list, &domain->l2_cache_list); +} + +struct mali_pm_domain *mali_pm_domain_get_from_mask(u32 mask) +{ + u32 id = 0; + + if (0 == mask) { + return NULL; + } + + id = _mali_osk_fls(mask) - 1; + + MALI_DEBUG_ASSERT(MALI_MAX_NUMBER_OF_DOMAINS > id); + /* Verify that pmu_mask only one bit is set */ + MALI_DEBUG_ASSERT((1 << id) == mask); + + return mali_pm_domains[id]; +} + +struct mali_pm_domain *mali_pm_domain_get_from_index(u32 id) +{ + MALI_DEBUG_ASSERT(MALI_MAX_NUMBER_OF_DOMAINS > id); + + return mali_pm_domains[id]; +} + +u32 mali_pm_domain_ref_get(struct mali_pm_domain *domain) +{ + MALI_DEBUG_ASSERT_POINTER(domain); + + if (0 == domain->use_count) { + _mali_osk_pm_dev_ref_get_async(); + } + + ++domain->use_count; + MALI_DEBUG_PRINT(4, ("PM domain %p: ref_get, use_count => %u\n", domain, domain->use_count)); + + /* Return our mask so caller can check this against wanted mask */ + return domain->pmu_mask; +} + +u32 mali_pm_domain_ref_put(struct mali_pm_domain *domain) +{ + MALI_DEBUG_ASSERT_POINTER(domain); + + --domain->use_count; + MALI_DEBUG_PRINT(4, ("PM domain %p: ref_put, 
use_count => %u\n", domain, domain->use_count)); + + if (0 == domain->use_count) { + _mali_osk_pm_dev_ref_put(); + } + + /* + * Return the PMU mask which now could be be powered down + * (the bit for this domain). + * This is the responsibility of the caller (mali_pm) + */ + return (0 == domain->use_count ? domain->pmu_mask : 0); +} + +#if MALI_STATE_TRACKING +u32 mali_pm_domain_get_id(struct mali_pm_domain *domain) +{ + u32 id = 0; + + MALI_DEBUG_ASSERT_POINTER(domain); + MALI_DEBUG_ASSERT(0 != domain->pmu_mask); + + id = _mali_osk_fls(domain->pmu_mask) - 1; + + MALI_DEBUG_ASSERT(MALI_MAX_NUMBER_OF_DOMAINS > id); + /* Verify that pmu_mask only one bit is set */ + MALI_DEBUG_ASSERT((1 << id) == domain->pmu_mask); + /* Verify that we have stored the domain at right id/index */ + MALI_DEBUG_ASSERT(domain == mali_pm_domains[id]); + + return id; +} +#endif + +#if defined(DEBUG) +mali_bool mali_pm_domain_all_unused(void) +{ + int i; + + for (i = 0; i < MALI_MAX_NUMBER_OF_DOMAINS; i++) { + if (NULL == mali_pm_domains[i]) { + /* Nothing to check */ + continue; + } + + if (MALI_TRUE == mali_pm_domains[i]->power_is_on) { + /* Not ready for suspend! */ + return MALI_FALSE; + } + + if (0 != mali_pm_domains[i]->use_count) { + /* Not ready for suspend! */ + return MALI_FALSE; + } + } + + return MALI_TRUE; +} +#endif diff --git a/drivers/gpu/arm/utgard/common/mali_pm_domain.h b/drivers/gpu/arm/utgard/common/mali_pm_domain.h new file mode 100644 index 000000000000..aceb3449359a --- /dev/null +++ b/drivers/gpu/arm/utgard/common/mali_pm_domain.h @@ -0,0 +1,104 @@ +/* + * Copyright (C) 2013-2015 ARM Limited. All rights reserved. + * + * This program is free software and is provided to you under the terms of the GNU General Public License version 2 + * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence. 
+ * + * A copy of the licence is included with the program, and can also be obtained from Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. + */ + +#ifndef __MALI_PM_DOMAIN_H__ +#define __MALI_PM_DOMAIN_H__ + +#include "mali_kernel_common.h" +#include "mali_osk.h" + +#include "mali_l2_cache.h" +#include "mali_group.h" +#include "mali_pmu.h" + +/* Instances are protected by PM state lock */ +struct mali_pm_domain { + mali_bool power_is_on; + s32 use_count; + u32 pmu_mask; + + /* Zero or more groups can belong to this domain */ + _mali_osk_list_t group_list; + + /* Zero or more L2 caches can belong to this domain */ + _mali_osk_list_t l2_cache_list; +}; + + +void mali_pm_domain_initialize(void); +void mali_pm_domain_terminate(void); + +struct mali_pm_domain *mali_pm_domain_create(u32 pmu_mask); +void mali_pm_domain_delete(struct mali_pm_domain *domain); + +void mali_pm_domain_add_l2_cache( + struct mali_pm_domain *domain, + struct mali_l2_cache_core *l2_cache); +void mali_pm_domain_add_group(struct mali_pm_domain *domain, + struct mali_group *group); + +struct mali_pm_domain *mali_pm_domain_get_from_mask(u32 mask); +struct mali_pm_domain *mali_pm_domain_get_from_index(u32 id); + +/* Ref counting */ +u32 mali_pm_domain_ref_get(struct mali_pm_domain *domain); +u32 mali_pm_domain_ref_put(struct mali_pm_domain *domain); + +MALI_STATIC_INLINE _mali_osk_list_t *mali_pm_domain_get_group_list( + struct mali_pm_domain *domain) +{ + MALI_DEBUG_ASSERT_POINTER(domain); + return &domain->group_list; +} + +MALI_STATIC_INLINE _mali_osk_list_t *mali_pm_domain_get_l2_cache_list( + struct mali_pm_domain *domain) +{ + MALI_DEBUG_ASSERT_POINTER(domain); + return &domain->l2_cache_list; +} + +MALI_STATIC_INLINE mali_bool mali_pm_domain_power_is_on( + struct mali_pm_domain *domain) +{ + MALI_DEBUG_ASSERT_POINTER(domain); + return domain->power_is_on; +} + +MALI_STATIC_INLINE void mali_pm_domain_set_power_on( + struct mali_pm_domain *domain, + 
mali_bool power_is_on) +{ + MALI_DEBUG_ASSERT_POINTER(domain); + domain->power_is_on = power_is_on; +} + +MALI_STATIC_INLINE u32 mali_pm_domain_get_use_count( + struct mali_pm_domain *domain) +{ + MALI_DEBUG_ASSERT_POINTER(domain); + return domain->use_count; +} + +#if MALI_STATE_TRACKING +u32 mali_pm_domain_get_id(struct mali_pm_domain *domain); + +MALI_STATIC_INLINE u32 mali_pm_domain_get_mask(struct mali_pm_domain *domain) +{ + MALI_DEBUG_ASSERT_POINTER(domain); + return domain->pmu_mask; +} +#endif + +#if defined(DEBUG) +mali_bool mali_pm_domain_all_unused(void); +#endif + +#endif /* __MALI_PM_DOMAIN_H__ */ diff --git a/drivers/gpu/arm/utgard/common/mali_pmu.c b/drivers/gpu/arm/utgard/common/mali_pmu.c new file mode 100644 index 000000000000..2a3008a6dd83 --- /dev/null +++ b/drivers/gpu/arm/utgard/common/mali_pmu.c @@ -0,0 +1,270 @@ +/* + * Copyright (C) 2010-2015 ARM Limited. All rights reserved. + * + * This program is free software and is provided to you under the terms of the GNU General Public License version 2 + * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence. + * + * A copy of the licence is included with the program, and can also be obtained from Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. 
+ */ + +/** + * @file mali_pmu.c + * Mali driver functions for Mali 400 PMU hardware + */ +#include "mali_hw_core.h" +#include "mali_pmu.h" +#include "mali_pp.h" +#include "mali_kernel_common.h" +#include "mali_osk.h" +#include "mali_pm.h" +#include "mali_osk_mali.h" + +struct mali_pmu_core *mali_global_pmu_core = NULL; + +static _mali_osk_errcode_t mali_pmu_wait_for_command_finish( + struct mali_pmu_core *pmu); + +struct mali_pmu_core *mali_pmu_create(_mali_osk_resource_t *resource) +{ + struct mali_pmu_core *pmu; + + MALI_DEBUG_ASSERT(NULL == mali_global_pmu_core); + MALI_DEBUG_PRINT(2, ("Mali PMU: Creating Mali PMU core\n")); + + pmu = (struct mali_pmu_core *)_mali_osk_malloc( + sizeof(struct mali_pmu_core)); + if (NULL != pmu) { + pmu->registered_cores_mask = 0; /* to be set later */ + + if (_MALI_OSK_ERR_OK == mali_hw_core_create(&pmu->hw_core, + resource, PMU_REGISTER_ADDRESS_SPACE_SIZE)) { + + pmu->switch_delay = _mali_osk_get_pmu_switch_delay(); + + mali_global_pmu_core = pmu; + + return pmu; + } + _mali_osk_free(pmu); + } + + return NULL; +} + +void mali_pmu_delete(struct mali_pmu_core *pmu) +{ + MALI_DEBUG_ASSERT_POINTER(pmu); + MALI_DEBUG_ASSERT(pmu == mali_global_pmu_core); + + MALI_DEBUG_PRINT(2, ("Mali PMU: Deleting Mali PMU core\n")); + + mali_global_pmu_core = NULL; + + mali_hw_core_delete(&pmu->hw_core); + _mali_osk_free(pmu); +} + +void mali_pmu_set_registered_cores_mask(struct mali_pmu_core *pmu, u32 mask) +{ + pmu->registered_cores_mask = mask; +} + +void mali_pmu_reset(struct mali_pmu_core *pmu) +{ + MALI_DEBUG_ASSERT_POINTER(pmu); + MALI_DEBUG_ASSERT(pmu->registered_cores_mask != 0); + + /* Setup the desired defaults */ + mali_hw_core_register_write_relaxed(&pmu->hw_core, + PMU_REG_ADDR_MGMT_INT_MASK, 0); + mali_hw_core_register_write_relaxed(&pmu->hw_core, + PMU_REG_ADDR_MGMT_SW_DELAY, pmu->switch_delay); +} + +void mali_pmu_power_up_all(struct mali_pmu_core *pmu) +{ + u32 stat; + + MALI_DEBUG_ASSERT_POINTER(pmu); + 
MALI_DEBUG_ASSERT(pmu->registered_cores_mask != 0); + + mali_pm_exec_lock(); + + mali_pmu_reset(pmu); + + /* Now simply power up the domains which are marked as powered down */ + stat = mali_hw_core_register_read(&pmu->hw_core, + PMU_REG_ADDR_MGMT_STATUS); + mali_pmu_power_up(pmu, stat); + + mali_pm_exec_unlock(); +} + +void mali_pmu_power_down_all(struct mali_pmu_core *pmu) +{ + u32 stat; + + MALI_DEBUG_ASSERT_POINTER(pmu); + MALI_DEBUG_ASSERT(pmu->registered_cores_mask != 0); + + mali_pm_exec_lock(); + + /* Now simply power down the domains which are marked as powered up */ + stat = mali_hw_core_register_read(&pmu->hw_core, + PMU_REG_ADDR_MGMT_STATUS); + mali_pmu_power_down(pmu, (~stat) & pmu->registered_cores_mask); + + mali_pm_exec_unlock(); +} + +_mali_osk_errcode_t mali_pmu_power_down(struct mali_pmu_core *pmu, u32 mask) +{ + u32 stat; + _mali_osk_errcode_t err; + + MALI_DEBUG_ASSERT_POINTER(pmu); + MALI_DEBUG_ASSERT(pmu->registered_cores_mask != 0); + MALI_DEBUG_ASSERT(mask <= pmu->registered_cores_mask); + MALI_DEBUG_ASSERT(0 == (mali_hw_core_register_read(&pmu->hw_core, + PMU_REG_ADDR_MGMT_INT_RAWSTAT) & + PMU_REG_VAL_IRQ)); + + MALI_DEBUG_PRINT(3, + ("PMU power down: ...................... [%s]\n", + mali_pm_mask_to_string(mask))); + + stat = mali_hw_core_register_read(&pmu->hw_core, + PMU_REG_ADDR_MGMT_STATUS); + + /* + * Assert that we are not powering down domains which are already + * powered down. + */ + MALI_DEBUG_ASSERT(0 == (stat & mask)); + + mask &= ~(0x1 << MALI_DOMAIN_INDEX_DUMMY); + + if (0 == mask || 0 == ((~stat) & mask)) return _MALI_OSK_ERR_OK; + + mali_hw_core_register_write(&pmu->hw_core, + PMU_REG_ADDR_MGMT_POWER_DOWN, mask); + + /* + * Do not wait for interrupt on Mali-300/400 if all domains are + * powered off by our power down command, because the HW will simply + * not generate an interrupt in this case. 
+ */ + if (mali_is_mali450() || mali_is_mali470() || pmu->registered_cores_mask != (mask | stat)) { + err = mali_pmu_wait_for_command_finish(pmu); + if (_MALI_OSK_ERR_OK != err) { + return err; + } + } else { + mali_hw_core_register_write(&pmu->hw_core, + PMU_REG_ADDR_MGMT_INT_CLEAR, PMU_REG_VAL_IRQ); + } + +#if defined(DEBUG) + /* Verify power status of domains after power down */ + stat = mali_hw_core_register_read(&pmu->hw_core, + PMU_REG_ADDR_MGMT_STATUS); + MALI_DEBUG_ASSERT(mask == (stat & mask)); +#endif + + return _MALI_OSK_ERR_OK; +} + +_mali_osk_errcode_t mali_pmu_power_up(struct mali_pmu_core *pmu, u32 mask) +{ + u32 stat; + _mali_osk_errcode_t err; +#if !defined(CONFIG_MALI_PMU_PARALLEL_POWER_UP) + u32 current_domain; +#endif + + MALI_DEBUG_ASSERT_POINTER(pmu); + MALI_DEBUG_ASSERT(pmu->registered_cores_mask != 0); + MALI_DEBUG_ASSERT(mask <= pmu->registered_cores_mask); + MALI_DEBUG_ASSERT(0 == (mali_hw_core_register_read(&pmu->hw_core, + PMU_REG_ADDR_MGMT_INT_RAWSTAT) & + PMU_REG_VAL_IRQ)); + + MALI_DEBUG_PRINT(3, + ("PMU power up: ........................ [%s]\n", + mali_pm_mask_to_string(mask))); + + stat = mali_hw_core_register_read(&pmu->hw_core, + PMU_REG_ADDR_MGMT_STATUS); + stat &= pmu->registered_cores_mask; + + mask &= ~(0x1 << MALI_DOMAIN_INDEX_DUMMY); + if (0 == mask || 0 == (stat & mask)) return _MALI_OSK_ERR_OK; + + /* + * Assert that we are only powering up domains which are currently + * powered down. 
+ */ + MALI_DEBUG_ASSERT(mask == (stat & mask)); + +#if defined(CONFIG_MALI_PMU_PARALLEL_POWER_UP) + mali_hw_core_register_write(&pmu->hw_core, + PMU_REG_ADDR_MGMT_POWER_UP, mask); + + err = mali_pmu_wait_for_command_finish(pmu); + if (_MALI_OSK_ERR_OK != err) { + return err; + } +#else + for (current_domain = 1; + current_domain <= pmu->registered_cores_mask; + current_domain <<= 1) { + if (current_domain & mask & stat) { + mali_hw_core_register_write(&pmu->hw_core, + PMU_REG_ADDR_MGMT_POWER_UP, + current_domain); + + err = mali_pmu_wait_for_command_finish(pmu); + if (_MALI_OSK_ERR_OK != err) { + return err; + } + } + } +#endif + +#if defined(DEBUG) + /* Verify power status of domains after power up */ + stat = mali_hw_core_register_read(&pmu->hw_core, + PMU_REG_ADDR_MGMT_STATUS); + MALI_DEBUG_ASSERT(0 == (stat & mask)); +#endif /* defined(DEBUG) */ + + return _MALI_OSK_ERR_OK; +} + +static _mali_osk_errcode_t mali_pmu_wait_for_command_finish( + struct mali_pmu_core *pmu) +{ + u32 rawstat; + u32 timeout = MALI_REG_POLL_COUNT_SLOW; + + MALI_DEBUG_ASSERT(pmu); + + /* Wait for the command to complete */ + do { + rawstat = mali_hw_core_register_read(&pmu->hw_core, + PMU_REG_ADDR_MGMT_INT_RAWSTAT); + --timeout; + } while (0 == (rawstat & PMU_REG_VAL_IRQ) && 0 < timeout); + + MALI_DEBUG_ASSERT(0 < timeout); + + if (0 == timeout) { + return _MALI_OSK_ERR_TIMEOUT; + } + + mali_hw_core_register_write(&pmu->hw_core, + PMU_REG_ADDR_MGMT_INT_CLEAR, PMU_REG_VAL_IRQ); + + return _MALI_OSK_ERR_OK; +} diff --git a/drivers/gpu/arm/utgard/common/mali_pmu.h b/drivers/gpu/arm/utgard/common/mali_pmu.h new file mode 100644 index 000000000000..5ca78795f535 --- /dev/null +++ b/drivers/gpu/arm/utgard/common/mali_pmu.h @@ -0,0 +1,123 @@ +/* + * Copyright (C) 2010-2015 ARM Limited. All rights reserved. 
+ * + * This program is free software and is provided to you under the terms of the GNU General Public License version 2 + * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence. + * + * A copy of the licence is included with the program, and can also be obtained from Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. + */ + +/** + * @file mali_platform.h + * Platform specific Mali driver functions + */ + +#ifndef __MALI_PMU_H__ +#define __MALI_PMU_H__ + +#include "mali_osk.h" +#include "mali_kernel_common.h" +#include "mali_hw_core.h" + +/** @brief MALI inbuilt PMU hardware info and PMU hardware has knowledge of cores power mask + */ +struct mali_pmu_core { + struct mali_hw_core hw_core; + u32 registered_cores_mask; + u32 switch_delay; +}; + +/** @brief Register layout for hardware PMU + */ +typedef enum { + PMU_REG_ADDR_MGMT_POWER_UP = 0x00, /*< Power up register */ + PMU_REG_ADDR_MGMT_POWER_DOWN = 0x04, /*< Power down register */ + PMU_REG_ADDR_MGMT_STATUS = 0x08, /*< Core sleep status register */ + PMU_REG_ADDR_MGMT_INT_MASK = 0x0C, /*< Interrupt mask register */ + PMU_REG_ADDR_MGMT_INT_RAWSTAT = 0x10, /*< Interrupt raw status register */ + PMU_REG_ADDR_MGMT_INT_CLEAR = 0x18, /*< Interrupt clear register */ + PMU_REG_ADDR_MGMT_SW_DELAY = 0x1C, /*< Switch delay register */ + PMU_REGISTER_ADDRESS_SPACE_SIZE = 0x28, /*< Size of register space */ +} pmu_reg_addr_mgmt_addr; + +#define PMU_REG_VAL_IRQ 1 + +extern struct mali_pmu_core *mali_global_pmu_core; + +/** @brief Initialisation of MALI PMU + * + * This is called from entry point of the driver in order to create and intialize the PMU resource + * + * @param resource it will be a pointer to a PMU resource + * @param number_of_pp_cores Number of found PP resources in configuration + * @param number_of_l2_caches Number of found L2 cache resources in configuration + * @return The created PMU object, 
or NULL in case of failure. + */ +struct mali_pmu_core *mali_pmu_create(_mali_osk_resource_t *resource); + +/** @brief It deallocates the PMU resource + * + * This is called on the exit of the driver to terminate the PMU resource + * + * @param pmu Pointer to PMU core object to delete + */ +void mali_pmu_delete(struct mali_pmu_core *pmu); + +/** @brief Set registered cores mask + * + * @param pmu Pointer to PMU core object + * @param mask All available/valid domain bits + */ +void mali_pmu_set_registered_cores_mask(struct mali_pmu_core *pmu, u32 mask); + +/** @brief Retrieves the Mali PMU core object (if any) + * + * @return The Mali PMU object, or NULL if no PMU exists. + */ +MALI_STATIC_INLINE struct mali_pmu_core *mali_pmu_get_global_pmu_core(void) +{ + return mali_global_pmu_core; +} + +/** @brief Reset PMU core + * + * @param pmu Pointer to PMU core object to reset + */ +void mali_pmu_reset(struct mali_pmu_core *pmu); + +void mali_pmu_power_up_all(struct mali_pmu_core *pmu); + +void mali_pmu_power_down_all(struct mali_pmu_core *pmu); + +/** @brief Returns a mask of the currently powered up domains + * + * @param pmu Pointer to PMU core object + */ +MALI_STATIC_INLINE u32 mali_pmu_get_mask(struct mali_pmu_core *pmu) +{ + u32 stat = mali_hw_core_register_read(&pmu->hw_core, PMU_REG_ADDR_MGMT_STATUS); + return ((~stat) & pmu->registered_cores_mask); +} + +/** @brief MALI GPU power down using MALI in-built PMU + * + * Called to power down the specified cores. + * + * @param pmu Pointer to PMU core object to power down + * @param mask Mask specifying which power domains to power down + * @return _MALI_OSK_ERR_OK on success otherwise, a suitable _mali_osk_errcode_t error. + */ +_mali_osk_errcode_t mali_pmu_power_down(struct mali_pmu_core *pmu, u32 mask); + +/** @brief MALI GPU power up using MALI in-built PMU + * + * Called to power up the specified cores. 
+ * + * @param pmu Pointer to PMU core object to power up + * @param mask Mask specifying which power domains to power up + * @return _MALI_OSK_ERR_OK on success otherwise, a suitable _mali_osk_errcode_t error. + */ +_mali_osk_errcode_t mali_pmu_power_up(struct mali_pmu_core *pmu, u32 mask); + +#endif /* __MALI_PMU_H__ */ diff --git a/drivers/gpu/arm/utgard/common/mali_pp.c b/drivers/gpu/arm/utgard/common/mali_pp.c new file mode 100644 index 000000000000..68bfd50bf9ae --- /dev/null +++ b/drivers/gpu/arm/utgard/common/mali_pp.c @@ -0,0 +1,501 @@ +/* + * Copyright (C) 2011-2015 ARM Limited. All rights reserved. + * + * This program is free software and is provided to you under the terms of the GNU General Public License version 2 + * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence. + * + * A copy of the licence is included with the program, and can also be obtained from Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. 
+ */ + +#include "mali_pp_job.h" +#include "mali_pp.h" +#include "mali_hw_core.h" +#include "mali_group.h" +#include "regs/mali_200_regs.h" +#include "mali_kernel_common.h" +#include "mali_kernel_core.h" +#if defined(CONFIG_MALI400_PROFILING) +#include "mali_osk_profiling.h" +#endif + +/* Number of frame registers on Mali-200 */ +#define MALI_PP_MALI200_NUM_FRAME_REGISTERS ((0x04C/4)+1) +/* Number of frame registers on Mali-300 and later */ +#define MALI_PP_MALI400_NUM_FRAME_REGISTERS ((0x058/4)+1) + +static struct mali_pp_core *mali_global_pp_cores[MALI_MAX_NUMBER_OF_PP_CORES] = { NULL }; +static u32 mali_global_num_pp_cores = 0; + +/* Interrupt handlers */ +static void mali_pp_irq_probe_trigger(void *data); +static _mali_osk_errcode_t mali_pp_irq_probe_ack(void *data); + +struct mali_pp_core *mali_pp_create(const _mali_osk_resource_t *resource, struct mali_group *group, mali_bool is_virtual, u32 bcast_id) +{ + struct mali_pp_core *core = NULL; + + MALI_DEBUG_PRINT(2, ("Mali PP: Creating Mali PP core: %s\n", resource->description)); + MALI_DEBUG_PRINT(2, ("Mali PP: Base address of PP core: 0x%x\n", resource->base)); + + if (mali_global_num_pp_cores >= MALI_MAX_NUMBER_OF_PP_CORES) { + MALI_PRINT_ERROR(("Mali PP: Too many PP core objects created\n")); + return NULL; + } + + core = _mali_osk_calloc(1, sizeof(struct mali_pp_core)); + if (NULL != core) { + core->core_id = mali_global_num_pp_cores; + core->bcast_id = bcast_id; + + if (_MALI_OSK_ERR_OK == mali_hw_core_create(&core->hw_core, resource, MALI200_REG_SIZEOF_REGISTER_BANK)) { + _mali_osk_errcode_t ret; + + if (!is_virtual) { + ret = mali_pp_reset(core); + } else { + ret = _MALI_OSK_ERR_OK; + } + + if (_MALI_OSK_ERR_OK == ret) { + ret = mali_group_add_pp_core(group, core); + if (_MALI_OSK_ERR_OK == ret) { + /* Setup IRQ handlers (which will do IRQ probing if needed) */ + MALI_DEBUG_ASSERT(!is_virtual || -1 != resource->irq); + + core->irq = _mali_osk_irq_init(resource->irq, + mali_group_upper_half_pp, + group, 
+ mali_pp_irq_probe_trigger, + mali_pp_irq_probe_ack, + core, + resource->description); + if (NULL != core->irq) { + mali_global_pp_cores[mali_global_num_pp_cores] = core; + mali_global_num_pp_cores++; + + return core; + } else { + MALI_PRINT_ERROR(("Mali PP: Failed to setup interrupt handlers for PP core %s\n", core->hw_core.description)); + } + mali_group_remove_pp_core(group); + } else { + MALI_PRINT_ERROR(("Mali PP: Failed to add core %s to group\n", core->hw_core.description)); + } + } + mali_hw_core_delete(&core->hw_core); + } + + _mali_osk_free(core); + } else { + MALI_PRINT_ERROR(("Mali PP: Failed to allocate memory for PP core\n")); + } + + return NULL; +} + +void mali_pp_delete(struct mali_pp_core *core) +{ + u32 i; + + MALI_DEBUG_ASSERT_POINTER(core); + + _mali_osk_irq_term(core->irq); + mali_hw_core_delete(&core->hw_core); + + /* Remove core from global list */ + for (i = 0; i < mali_global_num_pp_cores; i++) { + if (mali_global_pp_cores[i] == core) { + mali_global_pp_cores[i] = NULL; + mali_global_num_pp_cores--; + + if (i != mali_global_num_pp_cores) { + /* We removed a PP core from the middle of the array -- move the last + * PP core to the current position to close the gap */ + mali_global_pp_cores[i] = mali_global_pp_cores[mali_global_num_pp_cores]; + mali_global_pp_cores[mali_global_num_pp_cores] = NULL; + } + + break; + } + } + + _mali_osk_free(core); +} + +void mali_pp_stop_bus(struct mali_pp_core *core) +{ + MALI_DEBUG_ASSERT_POINTER(core); + /* Will only send the stop bus command, and not wait for it to complete */ + mali_hw_core_register_write(&core->hw_core, MALI200_REG_ADDR_MGMT_CTRL_MGMT, MALI200_REG_VAL_CTRL_MGMT_STOP_BUS); +} + +_mali_osk_errcode_t mali_pp_stop_bus_wait(struct mali_pp_core *core) +{ + int i; + + MALI_DEBUG_ASSERT_POINTER(core); + + /* Send the stop bus command. 
*/ + mali_pp_stop_bus(core); + + /* Wait for bus to be stopped */ + for (i = 0; i < MALI_REG_POLL_COUNT_FAST; i++) { + if (mali_hw_core_register_read(&core->hw_core, MALI200_REG_ADDR_MGMT_STATUS) & MALI200_REG_VAL_STATUS_BUS_STOPPED) + break; + } + + if (MALI_REG_POLL_COUNT_FAST == i) { + MALI_PRINT_ERROR(("Mali PP: Failed to stop bus on %s. Status: 0x%08x\n", core->hw_core.description, mali_hw_core_register_read(&core->hw_core, MALI200_REG_ADDR_MGMT_STATUS))); + return _MALI_OSK_ERR_FAULT; + } + return _MALI_OSK_ERR_OK; +} + +/* Frame register reset values. + * Taken from the Mali400 TRM, 3.6. Pixel processor control register summary */ +static const u32 mali_frame_registers_reset_values[_MALI_PP_MAX_FRAME_REGISTERS] = { + 0x0, /* Renderer List Address Register */ + 0x0, /* Renderer State Word Base Address Register */ + 0x0, /* Renderer Vertex Base Register */ + 0x2, /* Feature Enable Register */ + 0x0, /* Z Clear Value Register */ + 0x0, /* Stencil Clear Value Register */ + 0x0, /* ABGR Clear Value 0 Register */ + 0x0, /* ABGR Clear Value 1 Register */ + 0x0, /* ABGR Clear Value 2 Register */ + 0x0, /* ABGR Clear Value 3 Register */ + 0x0, /* Bounding Box Left Right Register */ + 0x0, /* Bounding Box Bottom Register */ + 0x0, /* FS Stack Address Register */ + 0x0, /* FS Stack Size and Initial Value Register */ + 0x0, /* Reserved */ + 0x0, /* Reserved */ + 0x0, /* Origin Offset X Register */ + 0x0, /* Origin Offset Y Register */ + 0x75, /* Subpixel Specifier Register */ + 0x0, /* Tiebreak mode Register */ + 0x0, /* Polygon List Format Register */ + 0x0, /* Scaling Register */ + 0x0 /* Tilebuffer configuration Register */ +}; + +/* WBx register reset values */ +static const u32 mali_wb_registers_reset_values[_MALI_PP_MAX_WB_REGISTERS] = { + 0x0, /* WBx Source Select Register */ + 0x0, /* WBx Target Address Register */ + 0x0, /* WBx Target Pixel Format Register */ + 0x0, /* WBx Target AA Format Register */ + 0x0, /* WBx Target Layout */ + 0x0, /* WBx Target Scanline 
Length */ + 0x0, /* WBx Target Flags Register */ + 0x0, /* WBx MRT Enable Register */ + 0x0, /* WBx MRT Offset Register */ + 0x0, /* WBx Global Test Enable Register */ + 0x0, /* WBx Global Test Reference Value Register */ + 0x0 /* WBx Global Test Compare Function Register */ +}; + +/* Performance Counter 0 Enable Register reset value */ +static const u32 mali_perf_cnt_enable_reset_value = 0; + +_mali_osk_errcode_t mali_pp_hard_reset(struct mali_pp_core *core) +{ + /* Bus must be stopped before calling this function */ + const u32 reset_wait_target_register = MALI200_REG_ADDR_MGMT_PERF_CNT_0_LIMIT; + const u32 reset_invalid_value = 0xC0FFE000; + const u32 reset_check_value = 0xC01A0000; + int i; + + MALI_DEBUG_ASSERT_POINTER(core); + MALI_DEBUG_PRINT(2, ("Mali PP: Hard reset of core %s\n", core->hw_core.description)); + + /* Set register to a bogus value. The register will be used to detect when reset is complete */ + mali_hw_core_register_write_relaxed(&core->hw_core, reset_wait_target_register, reset_invalid_value); + mali_hw_core_register_write_relaxed(&core->hw_core, MALI200_REG_ADDR_MGMT_INT_MASK, MALI200_REG_VAL_IRQ_MASK_NONE); + + /* Force core to reset */ + mali_hw_core_register_write(&core->hw_core, MALI200_REG_ADDR_MGMT_CTRL_MGMT, MALI200_REG_VAL_CTRL_MGMT_FORCE_RESET); + + /* Wait for reset to be complete */ + for (i = 0; i < MALI_REG_POLL_COUNT_FAST; i++) { + mali_hw_core_register_write(&core->hw_core, reset_wait_target_register, reset_check_value); + if (reset_check_value == mali_hw_core_register_read(&core->hw_core, reset_wait_target_register)) { + break; + } + } + + if (MALI_REG_POLL_COUNT_FAST == i) { + MALI_PRINT_ERROR(("Mali PP: The hard reset loop didn't work, unable to recover\n")); + } + + mali_hw_core_register_write(&core->hw_core, reset_wait_target_register, 0x00000000); /* set it back to the default */ + /* Re-enable interrupts */ + mali_hw_core_register_write(&core->hw_core, MALI200_REG_ADDR_MGMT_INT_CLEAR, MALI200_REG_VAL_IRQ_MASK_ALL); + 
mali_hw_core_register_write(&core->hw_core, MALI200_REG_ADDR_MGMT_INT_MASK, MALI200_REG_VAL_IRQ_MASK_USED); + + return _MALI_OSK_ERR_OK; +} + +void mali_pp_reset_async(struct mali_pp_core *core) +{ + MALI_DEBUG_ASSERT_POINTER(core); + + MALI_DEBUG_PRINT(4, ("Mali PP: Reset of core %s\n", core->hw_core.description)); + + mali_hw_core_register_write(&core->hw_core, MALI200_REG_ADDR_MGMT_INT_MASK, 0); /* disable the IRQs */ + mali_hw_core_register_write(&core->hw_core, MALI200_REG_ADDR_MGMT_INT_RAWSTAT, MALI200_REG_VAL_IRQ_MASK_ALL); + mali_hw_core_register_write(&core->hw_core, MALI200_REG_ADDR_MGMT_CTRL_MGMT, MALI400PP_REG_VAL_CTRL_MGMT_SOFT_RESET); +} + +_mali_osk_errcode_t mali_pp_reset_wait(struct mali_pp_core *core) +{ + int i; + u32 rawstat = 0; + + for (i = 0; i < MALI_REG_POLL_COUNT_FAST; i++) { + u32 status = mali_hw_core_register_read(&core->hw_core, MALI200_REG_ADDR_MGMT_STATUS); + if (!(status & MALI200_REG_VAL_STATUS_RENDERING_ACTIVE)) { + rawstat = mali_hw_core_register_read(&core->hw_core, MALI200_REG_ADDR_MGMT_INT_RAWSTAT); + if (rawstat == MALI400PP_REG_VAL_IRQ_RESET_COMPLETED) { + break; + } + } + } + + if (i == MALI_REG_POLL_COUNT_FAST) { + MALI_PRINT_ERROR(("Mali PP: Failed to reset core %s, rawstat: 0x%08x\n", + core->hw_core.description, rawstat)); + return _MALI_OSK_ERR_FAULT; + } + + /* Re-enable interrupts */ + mali_hw_core_register_write(&core->hw_core, MALI200_REG_ADDR_MGMT_INT_CLEAR, MALI200_REG_VAL_IRQ_MASK_ALL); + mali_hw_core_register_write(&core->hw_core, MALI200_REG_ADDR_MGMT_INT_MASK, MALI200_REG_VAL_IRQ_MASK_USED); + + return _MALI_OSK_ERR_OK; +} + +_mali_osk_errcode_t mali_pp_reset(struct mali_pp_core *core) +{ + mali_pp_reset_async(core); + return mali_pp_reset_wait(core); +} + +void mali_pp_job_start(struct mali_pp_core *core, struct mali_pp_job *job, u32 sub_job, mali_bool restart_virtual) +{ + u32 relative_address; + u32 start_index; + u32 nr_of_regs; + u32 *frame_registers = mali_pp_job_get_frame_registers(job); + u32 
*wb0_registers = mali_pp_job_get_wb0_registers(job); + u32 *wb1_registers = mali_pp_job_get_wb1_registers(job); + u32 *wb2_registers = mali_pp_job_get_wb2_registers(job); + u32 counter_src0 = mali_pp_job_get_perf_counter_src0(job, sub_job); + u32 counter_src1 = mali_pp_job_get_perf_counter_src1(job, sub_job); + + MALI_DEBUG_ASSERT_POINTER(core); + + /* Write frame registers */ + + /* + * There are two frame registers which are different for each sub job: + * 1. The Renderer List Address Register (MALI200_REG_ADDR_FRAME) + * 2. The FS Stack Address Register (MALI200_REG_ADDR_STACK) + */ + mali_hw_core_register_write_relaxed_conditional(&core->hw_core, MALI200_REG_ADDR_FRAME, mali_pp_job_get_addr_frame(job, sub_job), mali_frame_registers_reset_values[MALI200_REG_ADDR_FRAME / sizeof(u32)]); + + /* For virtual jobs, the stack address shouldn't be broadcast but written individually */ + if (!mali_pp_job_is_virtual(job) || restart_virtual) { + mali_hw_core_register_write_relaxed_conditional(&core->hw_core, MALI200_REG_ADDR_STACK, mali_pp_job_get_addr_stack(job, sub_job), mali_frame_registers_reset_values[MALI200_REG_ADDR_STACK / sizeof(u32)]); + } + + /* Write registers between MALI200_REG_ADDR_FRAME and MALI200_REG_ADDR_STACK */ + relative_address = MALI200_REG_ADDR_RSW; + start_index = MALI200_REG_ADDR_RSW / sizeof(u32); + nr_of_regs = (MALI200_REG_ADDR_STACK - MALI200_REG_ADDR_RSW) / sizeof(u32); + + mali_hw_core_register_write_array_relaxed_conditional(&core->hw_core, + relative_address, &frame_registers[start_index], + nr_of_regs, &mali_frame_registers_reset_values[start_index]); + + /* MALI200_REG_ADDR_STACK_SIZE */ + relative_address = MALI200_REG_ADDR_STACK_SIZE; + start_index = MALI200_REG_ADDR_STACK_SIZE / sizeof(u32); + + mali_hw_core_register_write_relaxed_conditional(&core->hw_core, + relative_address, frame_registers[start_index], + mali_frame_registers_reset_values[start_index]); + + /* Skip 2 reserved registers */ + + /* Write remaining registers */ + 
relative_address = MALI200_REG_ADDR_ORIGIN_OFFSET_X; + start_index = MALI200_REG_ADDR_ORIGIN_OFFSET_X / sizeof(u32); + nr_of_regs = MALI_PP_MALI400_NUM_FRAME_REGISTERS - MALI200_REG_ADDR_ORIGIN_OFFSET_X / sizeof(u32); + + mali_hw_core_register_write_array_relaxed_conditional(&core->hw_core, + relative_address, &frame_registers[start_index], + nr_of_regs, &mali_frame_registers_reset_values[start_index]); + + /* Write WBx registers */ + if (wb0_registers[0]) { /* M200_WB0_REG_SOURCE_SELECT register */ + mali_hw_core_register_write_array_relaxed_conditional(&core->hw_core, MALI200_REG_ADDR_WB0, wb0_registers, _MALI_PP_MAX_WB_REGISTERS, mali_wb_registers_reset_values); + } + + if (wb1_registers[0]) { /* M200_WB1_REG_SOURCE_SELECT register */ + mali_hw_core_register_write_array_relaxed_conditional(&core->hw_core, MALI200_REG_ADDR_WB1, wb1_registers, _MALI_PP_MAX_WB_REGISTERS, mali_wb_registers_reset_values); + } + + if (wb2_registers[0]) { /* M200_WB2_REG_SOURCE_SELECT register */ + mali_hw_core_register_write_array_relaxed_conditional(&core->hw_core, MALI200_REG_ADDR_WB2, wb2_registers, _MALI_PP_MAX_WB_REGISTERS, mali_wb_registers_reset_values); + } + + if (MALI_HW_CORE_NO_COUNTER != counter_src0) { + mali_hw_core_register_write_relaxed(&core->hw_core, MALI200_REG_ADDR_MGMT_PERF_CNT_0_SRC, counter_src0); + mali_hw_core_register_write_relaxed_conditional(&core->hw_core, MALI200_REG_ADDR_MGMT_PERF_CNT_0_ENABLE, MALI200_REG_VAL_PERF_CNT_ENABLE, mali_perf_cnt_enable_reset_value); + } + if (MALI_HW_CORE_NO_COUNTER != counter_src1) { + mali_hw_core_register_write_relaxed(&core->hw_core, MALI200_REG_ADDR_MGMT_PERF_CNT_1_SRC, counter_src1); + mali_hw_core_register_write_relaxed_conditional(&core->hw_core, MALI200_REG_ADDR_MGMT_PERF_CNT_1_ENABLE, MALI200_REG_VAL_PERF_CNT_ENABLE, mali_perf_cnt_enable_reset_value); + } + +#ifdef CONFIG_MALI400_HEATMAPS_ENABLED + if (job->uargs.perf_counter_flag & _MALI_PERFORMANCE_COUNTER_FLAG_HEATMAP_ENABLE) { + 
mali_hw_core_register_write_relaxed(&core->hw_core, MALI200_REG_ADDR_MGMT_PERFMON_CONTR, ((job->uargs.tilesx & 0x3FF) << 16) | 1); + mali_hw_core_register_write_relaxed(&core->hw_core, MALI200_REG_ADDR_MGMT_PERFMON_BASE, job->uargs.heatmap_mem & 0xFFFFFFF8); + } +#endif /* CONFIG_MALI400_HEATMAPS_ENABLED */ + + MALI_DEBUG_PRINT(3, ("Mali PP: Starting job 0x%08X part %u/%u on PP core %s\n", job, sub_job + 1, mali_pp_job_get_sub_job_count(job), core->hw_core.description)); + + /* Adding barrier to make sure all rester writes are finished */ + _mali_osk_write_mem_barrier(); + + /* This is the command that starts the core. + * + * Don't actually run the job if PROFILING_SKIP_PP_JOBS are set, just + * force core to assert the completion interrupt. + */ +#if !defined(PROFILING_SKIP_PP_JOBS) + mali_hw_core_register_write_relaxed(&core->hw_core, MALI200_REG_ADDR_MGMT_CTRL_MGMT, MALI200_REG_VAL_CTRL_MGMT_START_RENDERING); +#else + mali_hw_core_register_write_relaxed(&core->hw_core, MALI200_REG_ADDR_MGMT_INT_RAWSTAT, MALI200_REG_VAL_IRQ_END_OF_FRAME); +#endif + + /* Adding barrier to make sure previous rester writes is finished */ + _mali_osk_write_mem_barrier(); +} + +u32 mali_pp_core_get_version(struct mali_pp_core *core) +{ + MALI_DEBUG_ASSERT_POINTER(core); + return mali_hw_core_register_read(&core->hw_core, MALI200_REG_ADDR_MGMT_VERSION); +} + +struct mali_pp_core *mali_pp_get_global_pp_core(u32 index) +{ + if (mali_global_num_pp_cores > index) { + return mali_global_pp_cores[index]; + } + + return NULL; +} + +u32 mali_pp_get_glob_num_pp_cores(void) +{ + return mali_global_num_pp_cores; +} + +/* ------------- interrupt handling below ------------------ */ +static void mali_pp_irq_probe_trigger(void *data) +{ + struct mali_pp_core *core = (struct mali_pp_core *)data; + mali_hw_core_register_write(&core->hw_core, MALI200_REG_ADDR_MGMT_INT_MASK, MALI200_REG_VAL_IRQ_MASK_USED); + mali_hw_core_register_write(&core->hw_core, MALI200_REG_ADDR_MGMT_INT_RAWSTAT, 
MALI200_REG_VAL_IRQ_BUS_ERROR); + _mali_osk_mem_barrier(); +} + +static _mali_osk_errcode_t mali_pp_irq_probe_ack(void *data) +{ + struct mali_pp_core *core = (struct mali_pp_core *)data; + u32 irq_readout; + + irq_readout = mali_hw_core_register_read(&core->hw_core, MALI200_REG_ADDR_MGMT_INT_STATUS); + if (MALI200_REG_VAL_IRQ_BUS_ERROR & irq_readout) { + mali_hw_core_register_write(&core->hw_core, MALI200_REG_ADDR_MGMT_INT_CLEAR, MALI200_REG_VAL_IRQ_BUS_ERROR); + _mali_osk_mem_barrier(); + return _MALI_OSK_ERR_OK; + } + + return _MALI_OSK_ERR_FAULT; +} + + +#if 0 +static void mali_pp_print_registers(struct mali_pp_core *core) +{ + MALI_DEBUG_PRINT(2, ("Mali PP: Register MALI200_REG_ADDR_MGMT_VERSION = 0x%08X\n", mali_hw_core_register_read(&core->hw_core, MALI200_REG_ADDR_MGMT_VERSION))); + MALI_DEBUG_PRINT(2, ("Mali PP: Register MALI200_REG_ADDR_MGMT_CURRENT_REND_LIST_ADDR = 0x%08X\n", mali_hw_core_register_read(&core->hw_core, MALI200_REG_ADDR_MGMT_CURRENT_REND_LIST_ADDR))); + MALI_DEBUG_PRINT(2, ("Mali PP: Register MALI200_REG_ADDR_MGMT_STATUS = 0x%08X\n", mali_hw_core_register_read(&core->hw_core, MALI200_REG_ADDR_MGMT_STATUS))); + MALI_DEBUG_PRINT(2, ("Mali PP: Register MALI200_REG_ADDR_MGMT_INT_RAWSTAT = 0x%08X\n", mali_hw_core_register_read(&core->hw_core, MALI200_REG_ADDR_MGMT_INT_RAWSTAT))); + MALI_DEBUG_PRINT(2, ("Mali PP: Register MALI200_REG_ADDR_MGMT_INT_MASK = 0x%08X\n", mali_hw_core_register_read(&core->hw_core, MALI200_REG_ADDR_MGMT_INT_MASK))); + MALI_DEBUG_PRINT(2, ("Mali PP: Register MALI200_REG_ADDR_MGMT_INT_STATUS = 0x%08X\n", mali_hw_core_register_read(&core->hw_core, MALI200_REG_ADDR_MGMT_INT_STATUS))); + MALI_DEBUG_PRINT(2, ("Mali PP: Register MALI200_REG_ADDR_MGMT_BUS_ERROR_STATUS = 0x%08X\n", mali_hw_core_register_read(&core->hw_core, MALI200_REG_ADDR_MGMT_BUS_ERROR_STATUS))); + MALI_DEBUG_PRINT(2, ("Mali PP: Register MALI200_REG_ADDR_MGMT_PERF_CNT_0_ENABLE = 0x%08X\n", mali_hw_core_register_read(&core->hw_core, 
MALI200_REG_ADDR_MGMT_PERF_CNT_0_ENABLE))); + MALI_DEBUG_PRINT(2, ("Mali PP: Register MALI200_REG_ADDR_MGMT_PERF_CNT_0_SRC = 0x%08X\n", mali_hw_core_register_read(&core->hw_core, MALI200_REG_ADDR_MGMT_PERF_CNT_0_SRC))); + MALI_DEBUG_PRINT(2, ("Mali PP: Register MALI200_REG_ADDR_MGMT_PERF_CNT_0_VALUE = 0x%08X\n", mali_hw_core_register_read(&core->hw_core, MALI200_REG_ADDR_MGMT_PERF_CNT_0_VALUE))); + MALI_DEBUG_PRINT(2, ("Mali PP: Register MALI200_REG_ADDR_MGMT_PERF_CNT_1_ENABLE = 0x%08X\n", mali_hw_core_register_read(&core->hw_core, MALI200_REG_ADDR_MGMT_PERF_CNT_1_ENABLE))); + MALI_DEBUG_PRINT(2, ("Mali PP: Register MALI200_REG_ADDR_MGMT_PERF_CNT_1_SRC = 0x%08X\n", mali_hw_core_register_read(&core->hw_core, MALI200_REG_ADDR_MGMT_PERF_CNT_1_SRC))); + MALI_DEBUG_PRINT(2, ("Mali PP: Register MALI200_REG_ADDR_MGMT_PERF_CNT_1_VALUE = 0x%08X\n", mali_hw_core_register_read(&core->hw_core, MALI200_REG_ADDR_MGMT_PERF_CNT_1_VALUE))); +} +#endif + +#if 0 +void mali_pp_print_state(struct mali_pp_core *core) +{ + MALI_DEBUG_PRINT(2, ("Mali PP: State: 0x%08x\n", mali_hw_core_register_read(&core->hw_core, MALI200_REG_ADDR_MGMT_STATUS))); +} +#endif + +void mali_pp_update_performance_counters(struct mali_pp_core *parent, struct mali_pp_core *child, struct mali_pp_job *job, u32 subjob) +{ + u32 val0 = 0; + u32 val1 = 0; + u32 counter_src0 = mali_pp_job_get_perf_counter_src0(job, subjob); + u32 counter_src1 = mali_pp_job_get_perf_counter_src1(job, subjob); +#if defined(CONFIG_MALI400_PROFILING) + int counter_index = COUNTER_FP_0_C0 + (2 * child->core_id); +#endif + + if (MALI_HW_CORE_NO_COUNTER != counter_src0) { + val0 = mali_hw_core_register_read(&child->hw_core, MALI200_REG_ADDR_MGMT_PERF_CNT_0_VALUE); + mali_pp_job_set_perf_counter_value0(job, subjob, val0); + +#if defined(CONFIG_MALI400_PROFILING) + _mali_osk_profiling_report_hw_counter(counter_index, val0); + _mali_osk_profiling_record_global_counters(counter_index, val0); +#endif + } + + if (MALI_HW_CORE_NO_COUNTER != 
counter_src1) { + val1 = mali_hw_core_register_read(&child->hw_core, MALI200_REG_ADDR_MGMT_PERF_CNT_1_VALUE); + mali_pp_job_set_perf_counter_value1(job, subjob, val1); + +#if defined(CONFIG_MALI400_PROFILING) + _mali_osk_profiling_report_hw_counter(counter_index + 1, val1); + _mali_osk_profiling_record_global_counters(counter_index + 1, val1); +#endif + } +} + +#if MALI_STATE_TRACKING +u32 mali_pp_dump_state(struct mali_pp_core *core, char *buf, u32 size) +{ + int n = 0; + + n += _mali_osk_snprintf(buf + n, size - n, "\tPP #%d: %s\n", core->core_id, core->hw_core.description); + + return n; +} +#endif diff --git a/drivers/gpu/arm/utgard/common/mali_pp.h b/drivers/gpu/arm/utgard/common/mali_pp.h new file mode 100644 index 000000000000..45712a30e831 --- /dev/null +++ b/drivers/gpu/arm/utgard/common/mali_pp.h @@ -0,0 +1,137 @@ +/* + * Copyright (C) 2011-2015 ARM Limited. All rights reserved. + * + * This program is free software and is provided to you under the terms of the GNU General Public License version 2 + * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence. + * + * A copy of the licence is included with the program, and can also be obtained from Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. + */ + +#ifndef __MALI_PP_H__ +#define __MALI_PP_H__ + +#include "mali_osk.h" +#include "mali_pp_job.h" +#include "mali_hw_core.h" + +struct mali_group; + +#define MALI_MAX_NUMBER_OF_PP_CORES 9 + +/** + * Definition of the PP core struct + * Used to track a PP core in the system. 
+ */ +struct mali_pp_core { + struct mali_hw_core hw_core; /**< Common for all HW cores */ + _mali_osk_irq_t *irq; /**< IRQ handler */ + u32 core_id; /**< Unique core ID */ + u32 bcast_id; /**< The "flag" value used by the Mali-450 broadcast and DLBU unit */ +}; + +_mali_osk_errcode_t mali_pp_initialize(void); +void mali_pp_terminate(void); + +struct mali_pp_core *mali_pp_create(const _mali_osk_resource_t *resource, struct mali_group *group, mali_bool is_virtual, u32 bcast_id); +void mali_pp_delete(struct mali_pp_core *core); + +void mali_pp_stop_bus(struct mali_pp_core *core); +_mali_osk_errcode_t mali_pp_stop_bus_wait(struct mali_pp_core *core); +void mali_pp_reset_async(struct mali_pp_core *core); +_mali_osk_errcode_t mali_pp_reset_wait(struct mali_pp_core *core); +_mali_osk_errcode_t mali_pp_reset(struct mali_pp_core *core); +_mali_osk_errcode_t mali_pp_hard_reset(struct mali_pp_core *core); + +void mali_pp_job_start(struct mali_pp_core *core, struct mali_pp_job *job, u32 sub_job, mali_bool restart_virtual); + +u32 mali_pp_core_get_version(struct mali_pp_core *core); + +MALI_STATIC_INLINE u32 mali_pp_core_get_id(struct mali_pp_core *core) +{ + MALI_DEBUG_ASSERT_POINTER(core); + return core->core_id; +} + +MALI_STATIC_INLINE u32 mali_pp_core_get_bcast_id(struct mali_pp_core *core) +{ + MALI_DEBUG_ASSERT_POINTER(core); + return core->bcast_id; +} + +struct mali_pp_core *mali_pp_get_global_pp_core(u32 index); +u32 mali_pp_get_glob_num_pp_cores(void); + +/* Debug */ +u32 mali_pp_dump_state(struct mali_pp_core *core, char *buf, u32 size); + +/** + * Put instrumented HW counters from the core(s) to the job object (if enabled) + * + * parent and child is always the same, except for virtual jobs on Mali-450. + * In this case, the counters will be enabled on the virtual core (parent), + * but values need to be read from the child cores. 
+ * + * @param parent The core used to see if the counters was enabled + * @param child The core to actually read the values from + * @job Job object to update with counter values (if enabled) + * @subjob Which subjob the counters are applicable for (core ID for virtual jobs) + */ +void mali_pp_update_performance_counters(struct mali_pp_core *parent, struct mali_pp_core *child, struct mali_pp_job *job, u32 subjob); + +MALI_STATIC_INLINE const char *mali_pp_core_description(struct mali_pp_core *core) +{ + return core->hw_core.description; +} + +MALI_STATIC_INLINE enum mali_interrupt_result mali_pp_get_interrupt_result(struct mali_pp_core *core) +{ + u32 rawstat_used = mali_hw_core_register_read(&core->hw_core, MALI200_REG_ADDR_MGMT_INT_RAWSTAT) & + MALI200_REG_VAL_IRQ_MASK_USED; + if (0 == rawstat_used) { + return MALI_INTERRUPT_RESULT_NONE; + } else if (MALI200_REG_VAL_IRQ_END_OF_FRAME == rawstat_used) { + return MALI_INTERRUPT_RESULT_SUCCESS; + } + return MALI_INTERRUPT_RESULT_ERROR; +} + +MALI_STATIC_INLINE u32 mali_pp_get_rawstat(struct mali_pp_core *core) +{ + MALI_DEBUG_ASSERT_POINTER(core); + return mali_hw_core_register_read(&core->hw_core, + MALI200_REG_ADDR_MGMT_INT_RAWSTAT); +} + + +MALI_STATIC_INLINE u32 mali_pp_is_active(struct mali_pp_core *core) +{ + u32 status = mali_hw_core_register_read(&core->hw_core, MALI200_REG_ADDR_MGMT_STATUS); + return (status & MALI200_REG_VAL_STATUS_RENDERING_ACTIVE) ? 
MALI_TRUE : MALI_FALSE; +} + +MALI_STATIC_INLINE void mali_pp_mask_all_interrupts(struct mali_pp_core *core) +{ + mali_hw_core_register_write(&core->hw_core, MALI200_REG_ADDR_MGMT_INT_MASK, MALI200_REG_VAL_IRQ_MASK_NONE); +} + +MALI_STATIC_INLINE void mali_pp_enable_interrupts(struct mali_pp_core *core) +{ + mali_hw_core_register_write(&core->hw_core, MALI200_REG_ADDR_MGMT_INT_MASK, MALI200_REG_VAL_IRQ_MASK_USED); +} + +MALI_STATIC_INLINE void mali_pp_write_addr_renderer_list(struct mali_pp_core *core, + struct mali_pp_job *job, u32 subjob) +{ + u32 addr = mali_pp_job_get_addr_frame(job, subjob); + mali_hw_core_register_write_relaxed(&core->hw_core, MALI200_REG_ADDR_FRAME, addr); +} + + +MALI_STATIC_INLINE void mali_pp_write_addr_stack(struct mali_pp_core *core, struct mali_pp_job *job) +{ + u32 addr = mali_pp_job_get_addr_stack(job, core->core_id); + mali_hw_core_register_write_relaxed(&core->hw_core, MALI200_REG_ADDR_STACK, addr); +} + +#endif /* __MALI_PP_H__ */ diff --git a/drivers/gpu/arm/utgard/common/mali_pp_job.c b/drivers/gpu/arm/utgard/common/mali_pp_job.c new file mode 100644 index 000000000000..5528360841af --- /dev/null +++ b/drivers/gpu/arm/utgard/common/mali_pp_job.c @@ -0,0 +1,308 @@ +/* + * Copyright (C) 2011-2015 ARM Limited. All rights reserved. + * + * This program is free software and is provided to you under the terms of the GNU General Public License version 2 + * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence. + * + * A copy of the licence is included with the program, and can also be obtained from Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. 
+ */ + +#include "mali_pp.h" +#include "mali_pp_job.h" +#include "mali_osk.h" +#include "mali_osk_list.h" +#include "mali_kernel_common.h" +#include "mali_uk_types.h" +#include "mali_executor.h" +#if defined(CONFIG_DMA_SHARED_BUFFER) && !defined(CONFIG_MALI_DMA_BUF_MAP_ON_ATTACH) +#include "linux/mali_memory_dma_buf.h" +#endif +#include "mali_memory_swap_alloc.h" +#include "mali_scheduler.h" + +static u32 pp_counter_src0 = MALI_HW_CORE_NO_COUNTER; /**< Performance counter 0, MALI_HW_CORE_NO_COUNTER for disabled */ +static u32 pp_counter_src1 = MALI_HW_CORE_NO_COUNTER; /**< Performance counter 1, MALI_HW_CORE_NO_COUNTER for disabled */ +static _mali_osk_atomic_t pp_counter_per_sub_job_count; /**< Number of values in the two arrays which is != MALI_HW_CORE_NO_COUNTER */ +static u32 pp_counter_per_sub_job_src0[_MALI_PP_MAX_SUB_JOBS] = { MALI_HW_CORE_NO_COUNTER, MALI_HW_CORE_NO_COUNTER, MALI_HW_CORE_NO_COUNTER, MALI_HW_CORE_NO_COUNTER, MALI_HW_CORE_NO_COUNTER, MALI_HW_CORE_NO_COUNTER, MALI_HW_CORE_NO_COUNTER, MALI_HW_CORE_NO_COUNTER }; +static u32 pp_counter_per_sub_job_src1[_MALI_PP_MAX_SUB_JOBS] = { MALI_HW_CORE_NO_COUNTER, MALI_HW_CORE_NO_COUNTER, MALI_HW_CORE_NO_COUNTER, MALI_HW_CORE_NO_COUNTER, MALI_HW_CORE_NO_COUNTER, MALI_HW_CORE_NO_COUNTER, MALI_HW_CORE_NO_COUNTER, MALI_HW_CORE_NO_COUNTER }; + +void mali_pp_job_initialize(void) +{ + _mali_osk_atomic_init(&pp_counter_per_sub_job_count, 0); +} + +void mali_pp_job_terminate(void) +{ + _mali_osk_atomic_term(&pp_counter_per_sub_job_count); +} + +struct mali_pp_job *mali_pp_job_create(struct mali_session_data *session, + _mali_uk_pp_start_job_s __user *uargs, u32 id) +{ + struct mali_pp_job *job; + u32 perf_counter_flag; + + job = _mali_osk_calloc(1, sizeof(struct mali_pp_job)); + if (NULL != job) { + + _mali_osk_list_init(&job->list); + _mali_osk_list_init(&job->session_fb_lookup_list); + + if (0 != _mali_osk_copy_from_user(&job->uargs, uargs, sizeof(_mali_uk_pp_start_job_s))) { + goto fail; + } + + if 
(job->uargs.num_cores > _MALI_PP_MAX_SUB_JOBS) { + MALI_PRINT_ERROR(("Mali PP job: Too many sub jobs specified in job object\n")); + goto fail; + } + + if (!mali_pp_job_use_no_notification(job)) { + job->finished_notification = _mali_osk_notification_create(_MALI_NOTIFICATION_PP_FINISHED, sizeof(_mali_uk_pp_job_finished_s)); + if (NULL == job->finished_notification) goto fail; + } + + perf_counter_flag = mali_pp_job_get_perf_counter_flag(job); + + /* case when no counters came from user space + * so pass the debugfs / DS-5 provided global ones to the job object */ + if (!((perf_counter_flag & _MALI_PERFORMANCE_COUNTER_FLAG_SRC0_ENABLE) || + (perf_counter_flag & _MALI_PERFORMANCE_COUNTER_FLAG_SRC1_ENABLE))) { + u32 sub_job_count = _mali_osk_atomic_read(&pp_counter_per_sub_job_count); + + /* These counters apply for all virtual jobs, and where no per sub job counter is specified */ + job->uargs.perf_counter_src0 = pp_counter_src0; + job->uargs.perf_counter_src1 = pp_counter_src1; + + /* We only copy the per sub job array if it is enabled with at least one counter */ + if (0 < sub_job_count) { + job->perf_counter_per_sub_job_count = sub_job_count; + _mali_osk_memcpy(job->perf_counter_per_sub_job_src0, pp_counter_per_sub_job_src0, sizeof(pp_counter_per_sub_job_src0)); + _mali_osk_memcpy(job->perf_counter_per_sub_job_src1, pp_counter_per_sub_job_src1, sizeof(pp_counter_per_sub_job_src1)); + } + } + + job->session = session; + job->id = id; + + job->sub_jobs_num = job->uargs.num_cores ? 
job->uargs.num_cores : 1; + job->pid = _mali_osk_get_pid(); + job->tid = _mali_osk_get_tid(); + + _mali_osk_atomic_init(&job->sub_jobs_completed, 0); + _mali_osk_atomic_init(&job->sub_job_errors, 0); + job->swap_status = MALI_NO_SWAP_IN; + job->user_notification = MALI_FALSE; + job->num_pp_cores_in_virtual = 0; + + if (job->uargs.num_memory_cookies > session->allocation_mgr.mali_allocation_num) { + MALI_PRINT_ERROR(("Mali PP job: The number of memory cookies is invalid !\n")); + goto fail; + } + + if (job->uargs.num_memory_cookies > 0) { + u32 size; + u32 __user *memory_cookies = (u32 __user *)(uintptr_t)job->uargs.memory_cookies; + + size = sizeof(*memory_cookies) * (job->uargs.num_memory_cookies); + + job->memory_cookies = _mali_osk_malloc(size); + if (NULL == job->memory_cookies) { + MALI_PRINT_ERROR(("Mali PP job: Failed to allocate %d bytes of memory cookies!\n", size)); + goto fail; + } + + if (0 != _mali_osk_copy_from_user(job->memory_cookies, memory_cookies, size)) { + MALI_PRINT_ERROR(("Mali PP job: Failed to copy %d bytes of memory cookies from user!\n", size)); + goto fail; + } + } + + if (_MALI_OSK_ERR_OK != mali_pp_job_check(job)) { + /* Not a valid job. 
*/ + goto fail; + } + + mali_timeline_tracker_init(&job->tracker, MALI_TIMELINE_TRACKER_PP, NULL, job); + mali_timeline_fence_copy_uk_fence(&(job->tracker.fence), &(job->uargs.fence)); + + mali_mem_swap_in_pages(job); + + return job; + } + +fail: + if (NULL != job) { + mali_pp_job_delete(job); + } + + return NULL; +} + +void mali_pp_job_delete(struct mali_pp_job *job) +{ + MALI_DEBUG_ASSERT_POINTER(job); + MALI_DEBUG_ASSERT(_mali_osk_list_empty(&job->list)); + MALI_DEBUG_ASSERT(_mali_osk_list_empty(&job->session_fb_lookup_list)); + + if (NULL != job->memory_cookies) { +#if defined(CONFIG_DMA_SHARED_BUFFER) && !defined(CONFIG_MALI_DMA_BUF_MAP_ON_ATTACH) + /* Unmap buffers attached to job */ + mali_dma_buf_unmap_job(job); +#endif + if (MALI_NO_SWAP_IN != job->swap_status) { + mali_mem_swap_out_pages(job); + } + + _mali_osk_free(job->memory_cookies); + } + + if (job->user_notification) { + mali_scheduler_return_pp_job_to_user(job, + job->num_pp_cores_in_virtual); + } + + if (NULL != job->finished_notification) { + _mali_osk_notification_delete(job->finished_notification); + } + + _mali_osk_atomic_term(&job->sub_jobs_completed); + _mali_osk_atomic_term(&job->sub_job_errors); + + _mali_osk_free(job); +} + +void mali_pp_job_list_add(struct mali_pp_job *job, _mali_osk_list_t *list) +{ + struct mali_pp_job *iter; + struct mali_pp_job *tmp; + + MALI_DEBUG_ASSERT_POINTER(job); + MALI_DEBUG_ASSERT_SCHEDULER_LOCK_HELD(); + + /* Find position in list/queue where job should be added. */ + _MALI_OSK_LIST_FOREACHENTRY_REVERSE(iter, tmp, list, + struct mali_pp_job, list) { + /* job should be started after iter if iter is in progress. */ + if (0 < iter->sub_jobs_started) { + break; + } + + /* + * job should be started after iter if it has a higher + * job id. A span is used to handle job id wrapping. 
+ */ + if ((mali_pp_job_get_id(job) - + mali_pp_job_get_id(iter)) < + MALI_SCHEDULER_JOB_ID_SPAN) { + break; + } + } + + _mali_osk_list_add(&job->list, &iter->list); +} + + +u32 mali_pp_job_get_perf_counter_src0(struct mali_pp_job *job, u32 sub_job) +{ + /* Virtual jobs always use the global job counter (or if there are per sub job counters at all) */ + if (mali_pp_job_is_virtual(job) || 0 == job->perf_counter_per_sub_job_count) { + return job->uargs.perf_counter_src0; + } + + /* Use per sub job counter if enabled... */ + if (MALI_HW_CORE_NO_COUNTER != job->perf_counter_per_sub_job_src0[sub_job]) { + return job->perf_counter_per_sub_job_src0[sub_job]; + } + + /* ...else default to global job counter */ + return job->uargs.perf_counter_src0; +} + +u32 mali_pp_job_get_perf_counter_src1(struct mali_pp_job *job, u32 sub_job) +{ + /* Virtual jobs always use the global job counter (or if there are per sub job counters at all) */ + if (mali_pp_job_is_virtual(job) || 0 == job->perf_counter_per_sub_job_count) { + /* Virtual jobs always use the global job counter */ + return job->uargs.perf_counter_src1; + } + + /* Use per sub job counter if enabled... 
*/ + if (MALI_HW_CORE_NO_COUNTER != job->perf_counter_per_sub_job_src1[sub_job]) { + return job->perf_counter_per_sub_job_src1[sub_job]; + } + + /* ...else default to global job counter */ + return job->uargs.perf_counter_src1; +} + +void mali_pp_job_set_pp_counter_global_src0(u32 counter) +{ + pp_counter_src0 = counter; +} + +void mali_pp_job_set_pp_counter_global_src1(u32 counter) +{ + pp_counter_src1 = counter; +} + +void mali_pp_job_set_pp_counter_sub_job_src0(u32 sub_job, u32 counter) +{ + MALI_DEBUG_ASSERT(sub_job < _MALI_PP_MAX_SUB_JOBS); + + if (MALI_HW_CORE_NO_COUNTER == pp_counter_per_sub_job_src0[sub_job]) { + /* increment count since existing counter was disabled */ + _mali_osk_atomic_inc(&pp_counter_per_sub_job_count); + } + + if (MALI_HW_CORE_NO_COUNTER == counter) { + /* decrement count since new counter is disabled */ + _mali_osk_atomic_dec(&pp_counter_per_sub_job_count); + } + + /* PS: A change from MALI_HW_CORE_NO_COUNTER to MALI_HW_CORE_NO_COUNTER will inc and dec, result will be 0 change */ + + pp_counter_per_sub_job_src0[sub_job] = counter; +} + +void mali_pp_job_set_pp_counter_sub_job_src1(u32 sub_job, u32 counter) +{ + MALI_DEBUG_ASSERT(sub_job < _MALI_PP_MAX_SUB_JOBS); + + if (MALI_HW_CORE_NO_COUNTER == pp_counter_per_sub_job_src1[sub_job]) { + /* increment count since existing counter was disabled */ + _mali_osk_atomic_inc(&pp_counter_per_sub_job_count); + } + + if (MALI_HW_CORE_NO_COUNTER == counter) { + /* decrement count since new counter is disabled */ + _mali_osk_atomic_dec(&pp_counter_per_sub_job_count); + } + + /* PS: A change from MALI_HW_CORE_NO_COUNTER to MALI_HW_CORE_NO_COUNTER will inc and dec, result will be 0 change */ + + pp_counter_per_sub_job_src1[sub_job] = counter; +} + +u32 mali_pp_job_get_pp_counter_global_src0(void) +{ + return pp_counter_src0; +} + +u32 mali_pp_job_get_pp_counter_global_src1(void) +{ + return pp_counter_src1; +} + +u32 mali_pp_job_get_pp_counter_sub_job_src0(u32 sub_job) +{ + MALI_DEBUG_ASSERT(sub_job 
< _MALI_PP_MAX_SUB_JOBS); + return pp_counter_per_sub_job_src0[sub_job]; +} + +u32 mali_pp_job_get_pp_counter_sub_job_src1(u32 sub_job) +{ + MALI_DEBUG_ASSERT(sub_job < _MALI_PP_MAX_SUB_JOBS); + return pp_counter_per_sub_job_src1[sub_job]; +} diff --git a/drivers/gpu/arm/utgard/common/mali_pp_job.h b/drivers/gpu/arm/utgard/common/mali_pp_job.h new file mode 100644 index 000000000000..7b9d2efa3019 --- /dev/null +++ b/drivers/gpu/arm/utgard/common/mali_pp_job.h @@ -0,0 +1,574 @@ +/* + * Copyright (C) 2011-2015 ARM Limited. All rights reserved. + * + * This program is free software and is provided to you under the terms of the GNU General Public License version 2 + * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence. + * + * A copy of the licence is included with the program, and can also be obtained from Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. + */ + +#ifndef __MALI_PP_JOB_H__ +#define __MALI_PP_JOB_H__ + +#include "mali_osk.h" +#include "mali_osk_list.h" +#include "mali_uk_types.h" +#include "mali_session.h" +#include "mali_kernel_common.h" +#include "regs/mali_200_regs.h" +#include "mali_kernel_core.h" +#include "mali_dlbu.h" +#include "mali_timeline.h" +#include "mali_scheduler.h" +#include "mali_executor.h" +#if defined(CONFIG_DMA_SHARED_BUFFER) && !defined(CONFIG_MALI_DMA_BUF_MAP_ON_ATTACH) +#include "linux/mali_memory_dma_buf.h" +#endif + +typedef enum pp_job_status { + MALI_NO_SWAP_IN, + MALI_SWAP_IN_FAIL, + MALI_SWAP_IN_SUCC, +} pp_job_status; + +/** + * This structure represents a PP job, including all sub jobs. + * + * The PP job object itself is not protected by any single lock, + * but relies on other locks instead (scheduler, executor and timeline lock). + * Think of the job object as moving between these sub systems through-out + * its lifetime. Different part of the PP job struct is used by different + * subsystems. 
Accessor functions ensure that correct lock is taken. + * Do NOT access any data members directly from outside this module! + */ +struct mali_pp_job { + /* + * These members are typically only set at creation, + * and only read later on. + * They do not require any lock protection. + */ + _mali_uk_pp_start_job_s uargs; /**< Arguments from user space */ + struct mali_session_data *session; /**< Session which submitted this job */ + u32 pid; /**< Process ID of submitting process */ + u32 tid; /**< Thread ID of submitting thread */ + u32 id; /**< Identifier for this job in kernel space (sequential numbering) */ + u32 cache_order; /**< Cache order used for L2 cache flushing (sequential numbering) */ + struct mali_timeline_tracker tracker; /**< Timeline tracker for this job */ + _mali_osk_notification_t *finished_notification; /**< Notification sent back to userspace on job complete */ + u32 perf_counter_per_sub_job_count; /**< Number of values in the two arrays which is != MALI_HW_CORE_NO_COUNTER */ + u32 perf_counter_per_sub_job_src0[_MALI_PP_MAX_SUB_JOBS]; /**< Per sub job counters src0 */ + u32 perf_counter_per_sub_job_src1[_MALI_PP_MAX_SUB_JOBS]; /**< Per sub job counters src1 */ + u32 sub_jobs_num; /**< Number of subjobs; set to 1 for Mali-450 if DLBU is used, otherwise equals number of PP cores */ + + pp_job_status swap_status; /**< Used to track each PP job swap status, if fail, we need to drop them in scheduler part */ + mali_bool user_notification; /**< When we deferred delete PP job, we need to judge if we need to send job finish notification to user space */ + u32 num_pp_cores_in_virtual; /**< How many PP cores we have when job finished */ + + /* + * These members are used by both scheduler and executor. + * They are "protected" by atomic operations. 
+ */ + _mali_osk_atomic_t sub_jobs_completed; /**< Number of completed sub-jobs in this superjob */ + _mali_osk_atomic_t sub_job_errors; /**< Bitfield with errors (errors for each single sub-job is or'ed together) */ + + /* + * These members are used by scheduler, but only when no one else + * knows about this job object but the working function. + * No lock is thus needed for these. + */ + u32 *memory_cookies; /**< Memory cookies attached to job */ + + /* + * These members are used by the scheduler, + * protected by scheduler lock + */ + _mali_osk_list_t list; /**< Used to link jobs together in the scheduler queue */ + _mali_osk_list_t session_fb_lookup_list; /**< Used to link jobs together from the same frame builder in the session */ + u32 sub_jobs_started; /**< Total number of sub-jobs started (always started in ascending order) */ + + /* + * Set by executor/group on job completion, read by scheduler when + * returning job to user. Hold executor lock when setting, + * no lock needed when reading + */ + u32 perf_counter_value0[_MALI_PP_MAX_SUB_JOBS]; /**< Value of performance counter 0 (to be returned to user space), one for each sub job */ + u32 perf_counter_value1[_MALI_PP_MAX_SUB_JOBS]; /**< Value of performance counter 1 (to be returned to user space), one for each sub job */ +}; + +void mali_pp_job_initialize(void); +void mali_pp_job_terminate(void); + +struct mali_pp_job *mali_pp_job_create(struct mali_session_data *session, _mali_uk_pp_start_job_s *uargs, u32 id); +void mali_pp_job_delete(struct mali_pp_job *job); + +u32 mali_pp_job_get_perf_counter_src0(struct mali_pp_job *job, u32 sub_job); +u32 mali_pp_job_get_perf_counter_src1(struct mali_pp_job *job, u32 sub_job); + +void mali_pp_job_set_pp_counter_global_src0(u32 counter); +void mali_pp_job_set_pp_counter_global_src1(u32 counter); +void mali_pp_job_set_pp_counter_sub_job_src0(u32 sub_job, u32 counter); +void mali_pp_job_set_pp_counter_sub_job_src1(u32 sub_job, u32 counter); + +u32 
mali_pp_job_get_pp_counter_global_src0(void); +u32 mali_pp_job_get_pp_counter_global_src1(void); +u32 mali_pp_job_get_pp_counter_sub_job_src0(u32 sub_job); +u32 mali_pp_job_get_pp_counter_sub_job_src1(u32 sub_job); + +MALI_STATIC_INLINE u32 mali_pp_job_get_id(struct mali_pp_job *job) +{ + MALI_DEBUG_ASSERT_POINTER(job); + return (NULL == job) ? 0 : job->id; +} + +MALI_STATIC_INLINE void mali_pp_job_set_cache_order(struct mali_pp_job *job, + u32 cache_order) +{ + MALI_DEBUG_ASSERT_POINTER(job); + MALI_DEBUG_ASSERT_SCHEDULER_LOCK_HELD(); + job->cache_order = cache_order; +} + +MALI_STATIC_INLINE u32 mali_pp_job_get_cache_order(struct mali_pp_job *job) +{ + MALI_DEBUG_ASSERT_POINTER(job); + return (NULL == job) ? 0 : job->cache_order; +} + +MALI_STATIC_INLINE u64 mali_pp_job_get_user_id(struct mali_pp_job *job) +{ + MALI_DEBUG_ASSERT_POINTER(job); + return job->uargs.user_job_ptr; +} + +MALI_STATIC_INLINE u32 mali_pp_job_get_frame_builder_id(struct mali_pp_job *job) +{ + MALI_DEBUG_ASSERT_POINTER(job); + return job->uargs.frame_builder_id; +} + +MALI_STATIC_INLINE u32 mali_pp_job_get_flush_id(struct mali_pp_job *job) +{ + MALI_DEBUG_ASSERT_POINTER(job); + return job->uargs.flush_id; +} + +MALI_STATIC_INLINE u32 mali_pp_job_get_pid(struct mali_pp_job *job) +{ + MALI_DEBUG_ASSERT_POINTER(job); + return job->pid; +} + +MALI_STATIC_INLINE u32 mali_pp_job_get_tid(struct mali_pp_job *job) +{ + MALI_DEBUG_ASSERT_POINTER(job); + return job->tid; +} + +MALI_STATIC_INLINE u32 *mali_pp_job_get_frame_registers(struct mali_pp_job *job) +{ + MALI_DEBUG_ASSERT_POINTER(job); + return job->uargs.frame_registers; +} + +MALI_STATIC_INLINE u32 *mali_pp_job_get_dlbu_registers(struct mali_pp_job *job) +{ + MALI_DEBUG_ASSERT_POINTER(job); + return job->uargs.dlbu_registers; +} + +MALI_STATIC_INLINE mali_bool mali_pp_job_is_virtual(struct mali_pp_job *job) +{ +#if (defined(CONFIG_MALI450) || defined(CONFIG_MALI470)) + MALI_DEBUG_ASSERT_POINTER(job); + return (0 == job->uargs.num_cores) ? 
MALI_TRUE : MALI_FALSE; +#else + return MALI_FALSE; +#endif +} + +MALI_STATIC_INLINE u32 mali_pp_job_get_addr_frame(struct mali_pp_job *job, u32 sub_job) +{ + MALI_DEBUG_ASSERT_POINTER(job); + + if (mali_pp_job_is_virtual(job)) { + return MALI_DLBU_VIRT_ADDR; + } else if (0 == sub_job) { + return job->uargs.frame_registers[MALI200_REG_ADDR_FRAME / sizeof(u32)]; + } else if (sub_job < _MALI_PP_MAX_SUB_JOBS) { + return job->uargs.frame_registers_addr_frame[sub_job - 1]; + } + + return 0; +} + +MALI_STATIC_INLINE u32 mali_pp_job_get_addr_stack(struct mali_pp_job *job, u32 sub_job) +{ + MALI_DEBUG_ASSERT_POINTER(job); + + if (0 == sub_job) { + return job->uargs.frame_registers[MALI200_REG_ADDR_STACK / sizeof(u32)]; + } else if (sub_job < _MALI_PP_MAX_SUB_JOBS) { + return job->uargs.frame_registers_addr_stack[sub_job - 1]; + } + + return 0; +} + +void mali_pp_job_list_add(struct mali_pp_job *job, _mali_osk_list_t *list); + +MALI_STATIC_INLINE void mali_pp_job_list_addtail(struct mali_pp_job *job, + _mali_osk_list_t *list) +{ + _mali_osk_list_addtail(&job->list, list); +} + +MALI_STATIC_INLINE void mali_pp_job_list_move(struct mali_pp_job *job, + _mali_osk_list_t *list) +{ + MALI_DEBUG_ASSERT_POINTER(job); + MALI_DEBUG_ASSERT_SCHEDULER_LOCK_HELD(); + MALI_DEBUG_ASSERT(!_mali_osk_list_empty(&job->list)); + _mali_osk_list_move(&job->list, list); +} + +MALI_STATIC_INLINE void mali_pp_job_list_remove(struct mali_pp_job *job) +{ + MALI_DEBUG_ASSERT_POINTER(job); + MALI_DEBUG_ASSERT_SCHEDULER_LOCK_HELD(); + _mali_osk_list_delinit(&job->list); +} + +MALI_STATIC_INLINE u32 *mali_pp_job_get_wb0_registers(struct mali_pp_job *job) +{ + MALI_DEBUG_ASSERT_POINTER(job); + return job->uargs.wb0_registers; +} + +MALI_STATIC_INLINE u32 *mali_pp_job_get_wb1_registers(struct mali_pp_job *job) +{ + MALI_DEBUG_ASSERT_POINTER(job); + return job->uargs.wb1_registers; +} + +MALI_STATIC_INLINE u32 *mali_pp_job_get_wb2_registers(struct mali_pp_job *job) +{ + MALI_DEBUG_ASSERT_POINTER(job); + 
return job->uargs.wb2_registers; +} + +MALI_STATIC_INLINE u32 mali_pp_job_get_wb0_source_addr(struct mali_pp_job *job) +{ + MALI_DEBUG_ASSERT_POINTER(job); + return job->uargs.wb0_registers[MALI200_REG_ADDR_WB_SOURCE_ADDR / sizeof(u32)]; +} + +MALI_STATIC_INLINE u32 mali_pp_job_get_wb1_source_addr(struct mali_pp_job *job) +{ + MALI_DEBUG_ASSERT_POINTER(job); + return job->uargs.wb1_registers[MALI200_REG_ADDR_WB_SOURCE_ADDR / sizeof(u32)]; +} + +MALI_STATIC_INLINE u32 mali_pp_job_get_wb2_source_addr(struct mali_pp_job *job) +{ + MALI_DEBUG_ASSERT_POINTER(job); + return job->uargs.wb2_registers[MALI200_REG_ADDR_WB_SOURCE_ADDR / sizeof(u32)]; +} + +MALI_STATIC_INLINE void mali_pp_job_disable_wb0(struct mali_pp_job *job) +{ + MALI_DEBUG_ASSERT_POINTER(job); + job->uargs.wb0_registers[MALI200_REG_ADDR_WB_SOURCE_SELECT] = 0; +} + +MALI_STATIC_INLINE void mali_pp_job_disable_wb1(struct mali_pp_job *job) +{ + MALI_DEBUG_ASSERT_POINTER(job); + job->uargs.wb1_registers[MALI200_REG_ADDR_WB_SOURCE_SELECT] = 0; +} + +MALI_STATIC_INLINE void mali_pp_job_disable_wb2(struct mali_pp_job *job) +{ + MALI_DEBUG_ASSERT_POINTER(job); + job->uargs.wb2_registers[MALI200_REG_ADDR_WB_SOURCE_SELECT] = 0; +} + +MALI_STATIC_INLINE mali_bool mali_pp_job_all_writeback_unit_disabled(struct mali_pp_job *job) +{ + MALI_DEBUG_ASSERT_POINTER(job); + + if (job->uargs.wb0_registers[MALI200_REG_ADDR_WB_SOURCE_SELECT] || + job->uargs.wb1_registers[MALI200_REG_ADDR_WB_SOURCE_SELECT] || + job->uargs.wb2_registers[MALI200_REG_ADDR_WB_SOURCE_SELECT] + ) { + /* At least one output unit active */ + return MALI_FALSE; + } + + /* All outputs are disabled - we can abort the job */ + return MALI_TRUE; +} + +MALI_STATIC_INLINE void mali_pp_job_fb_lookup_add(struct mali_pp_job *job) +{ + u32 fb_lookup_id; + + MALI_DEBUG_ASSERT_POINTER(job); + MALI_DEBUG_ASSERT_SCHEDULER_LOCK_HELD(); + + fb_lookup_id = MALI_PP_JOB_FB_LOOKUP_LIST_MASK & job->uargs.frame_builder_id; + + MALI_DEBUG_ASSERT(MALI_PP_JOB_FB_LOOKUP_LIST_SIZE 
> fb_lookup_id); + + _mali_osk_list_addtail(&job->session_fb_lookup_list, + &job->session->pp_job_fb_lookup_list[fb_lookup_id]); +} + +MALI_STATIC_INLINE void mali_pp_job_fb_lookup_remove(struct mali_pp_job *job) +{ + MALI_DEBUG_ASSERT_POINTER(job); + MALI_DEBUG_ASSERT_SCHEDULER_LOCK_HELD(); + _mali_osk_list_delinit(&job->session_fb_lookup_list); +} + +MALI_STATIC_INLINE struct mali_session_data *mali_pp_job_get_session(struct mali_pp_job *job) +{ + MALI_DEBUG_ASSERT_POINTER(job); + return job->session; +} + +MALI_STATIC_INLINE mali_bool mali_pp_job_has_started_sub_jobs(struct mali_pp_job *job) +{ + MALI_DEBUG_ASSERT_POINTER(job); + MALI_DEBUG_ASSERT_SCHEDULER_LOCK_HELD(); + return (0 < job->sub_jobs_started) ? MALI_TRUE : MALI_FALSE; +} + +MALI_STATIC_INLINE mali_bool mali_pp_job_has_unstarted_sub_jobs(struct mali_pp_job *job) +{ + MALI_DEBUG_ASSERT_POINTER(job); + MALI_DEBUG_ASSERT_SCHEDULER_LOCK_HELD(); + return (job->sub_jobs_started < job->sub_jobs_num) ? MALI_TRUE : MALI_FALSE; +} + +/* Function used when we are terminating a session with jobs. Return TRUE if it has a rendering job. + Makes sure that no new subjobs are started. */ +MALI_STATIC_INLINE void mali_pp_job_mark_unstarted_failed(struct mali_pp_job *job) +{ + u32 jobs_remaining; + u32 i; + + MALI_DEBUG_ASSERT_POINTER(job); + MALI_DEBUG_ASSERT_SCHEDULER_LOCK_HELD(); + + jobs_remaining = job->sub_jobs_num - job->sub_jobs_started; + job->sub_jobs_started += jobs_remaining; + + /* Not the most optimal way, but this is only used in error cases */ + for (i = 0; i < jobs_remaining; i++) { + _mali_osk_atomic_inc(&job->sub_jobs_completed); + _mali_osk_atomic_inc(&job->sub_job_errors); + } +} + +MALI_STATIC_INLINE mali_bool mali_pp_job_is_complete(struct mali_pp_job *job) +{ + MALI_DEBUG_ASSERT_POINTER(job); + return (job->sub_jobs_num == + _mali_osk_atomic_read(&job->sub_jobs_completed)) ? 
+ MALI_TRUE : MALI_FALSE; +} + +MALI_STATIC_INLINE u32 mali_pp_job_get_first_unstarted_sub_job(struct mali_pp_job *job) +{ + MALI_DEBUG_ASSERT_POINTER(job); + MALI_DEBUG_ASSERT_SCHEDULER_LOCK_HELD(); + return job->sub_jobs_started; +} + +MALI_STATIC_INLINE u32 mali_pp_job_get_sub_job_count(struct mali_pp_job *job) +{ + MALI_DEBUG_ASSERT_POINTER(job); + return job->sub_jobs_num; +} + +MALI_STATIC_INLINE u32 mali_pp_job_unstarted_sub_job_count(struct mali_pp_job *job) +{ + MALI_DEBUG_ASSERT_POINTER(job); + MALI_DEBUG_ASSERT_SCHEDULER_LOCK_HELD(); + MALI_DEBUG_ASSERT(job->sub_jobs_num >= job->sub_jobs_started); + return (job->sub_jobs_num - job->sub_jobs_started); +} + +MALI_STATIC_INLINE u32 mali_pp_job_num_memory_cookies(struct mali_pp_job *job) +{ + MALI_DEBUG_ASSERT_POINTER(job); + return job->uargs.num_memory_cookies; +} + +MALI_STATIC_INLINE u32 mali_pp_job_get_memory_cookie( + struct mali_pp_job *job, u32 index) +{ + MALI_DEBUG_ASSERT_POINTER(job); + MALI_DEBUG_ASSERT(index < job->uargs.num_memory_cookies); + MALI_DEBUG_ASSERT_POINTER(job->memory_cookies); + return job->memory_cookies[index]; +} + +MALI_STATIC_INLINE mali_bool mali_pp_job_needs_dma_buf_mapping(struct mali_pp_job *job) +{ + MALI_DEBUG_ASSERT_POINTER(job); + + if (0 < job->uargs.num_memory_cookies) { + return MALI_TRUE; + } + + return MALI_FALSE; +} + +MALI_STATIC_INLINE void mali_pp_job_mark_sub_job_started(struct mali_pp_job *job, u32 sub_job) +{ + MALI_DEBUG_ASSERT_POINTER(job); + MALI_DEBUG_ASSERT_SCHEDULER_LOCK_HELD(); + + /* Assert that we are marking the "first unstarted sub job" as started */ + MALI_DEBUG_ASSERT(job->sub_jobs_started == sub_job); + + job->sub_jobs_started++; +} + +MALI_STATIC_INLINE void mali_pp_job_mark_sub_job_completed(struct mali_pp_job *job, mali_bool success) +{ + MALI_DEBUG_ASSERT_POINTER(job); + + _mali_osk_atomic_inc(&job->sub_jobs_completed); + if (MALI_FALSE == success) { + _mali_osk_atomic_inc(&job->sub_job_errors); + } +} + +MALI_STATIC_INLINE mali_bool 
mali_pp_job_was_success(struct mali_pp_job *job) +{ + MALI_DEBUG_ASSERT_POINTER(job); + if (0 == _mali_osk_atomic_read(&job->sub_job_errors)) { + return MALI_TRUE; + } + return MALI_FALSE; +} + +MALI_STATIC_INLINE mali_bool mali_pp_job_use_no_notification( + struct mali_pp_job *job) +{ + MALI_DEBUG_ASSERT_POINTER(job); + return (job->uargs.flags & _MALI_PP_JOB_FLAG_NO_NOTIFICATION) ? + MALI_TRUE : MALI_FALSE; +} + +MALI_STATIC_INLINE mali_bool mali_pp_job_is_pilot_job(struct mali_pp_job *job) +{ + /* + * A pilot job is currently identified as jobs which + * require no callback notification. + */ + return mali_pp_job_use_no_notification(job); +} + +MALI_STATIC_INLINE _mali_osk_notification_t * +mali_pp_job_get_finished_notification(struct mali_pp_job *job) +{ + _mali_osk_notification_t *notification; + + MALI_DEBUG_ASSERT_POINTER(job); + MALI_DEBUG_ASSERT_POINTER(job->finished_notification); + + notification = job->finished_notification; + job->finished_notification = NULL; + + return notification; +} + +MALI_STATIC_INLINE mali_bool mali_pp_job_is_window_surface( + struct mali_pp_job *job) +{ + MALI_DEBUG_ASSERT_POINTER(job); + return (job->uargs.flags & _MALI_PP_JOB_FLAG_IS_WINDOW_SURFACE) + ? 
MALI_TRUE : MALI_FALSE; +} + +MALI_STATIC_INLINE u32 mali_pp_job_get_perf_counter_flag(struct mali_pp_job *job) +{ + MALI_DEBUG_ASSERT_POINTER(job); + return job->uargs.perf_counter_flag; +} + +MALI_STATIC_INLINE u32 mali_pp_job_get_perf_counter_value0(struct mali_pp_job *job, u32 sub_job) +{ + MALI_DEBUG_ASSERT_POINTER(job); + return job->perf_counter_value0[sub_job]; +} + +MALI_STATIC_INLINE u32 mali_pp_job_get_perf_counter_value1(struct mali_pp_job *job, u32 sub_job) +{ + MALI_DEBUG_ASSERT_POINTER(job); + return job->perf_counter_value1[sub_job]; +} + +MALI_STATIC_INLINE void mali_pp_job_set_perf_counter_value0(struct mali_pp_job *job, u32 sub_job, u32 value) +{ + MALI_DEBUG_ASSERT_POINTER(job); + MALI_DEBUG_ASSERT_EXECUTOR_LOCK_HELD(); + job->perf_counter_value0[sub_job] = value; +} + +MALI_STATIC_INLINE void mali_pp_job_set_perf_counter_value1(struct mali_pp_job *job, u32 sub_job, u32 value) +{ + MALI_DEBUG_ASSERT_POINTER(job); + MALI_DEBUG_ASSERT_EXECUTOR_LOCK_HELD(); + job->perf_counter_value1[sub_job] = value; +} + +MALI_STATIC_INLINE _mali_osk_errcode_t mali_pp_job_check(struct mali_pp_job *job) +{ + MALI_DEBUG_ASSERT_POINTER(job); + if (mali_pp_job_is_virtual(job) && job->sub_jobs_num != 1) { + return _MALI_OSK_ERR_FAULT; + } + return _MALI_OSK_ERR_OK; +} + +/** + * Returns MALI_TRUE if this job has more than two sub jobs and all sub jobs are unstarted. + * + * @param job Job to check. + * @return MALI_TRUE if job has more than two sub jobs and all sub jobs are unstarted, MALI_FALSE if not. + */ +MALI_STATIC_INLINE mali_bool mali_pp_job_is_large_and_unstarted(struct mali_pp_job *job) +{ + MALI_DEBUG_ASSERT_POINTER(job); + MALI_DEBUG_ASSERT_SCHEDULER_LOCK_HELD(); + MALI_DEBUG_ASSERT(!mali_pp_job_is_virtual(job)); + + return (0 == job->sub_jobs_started && 2 < job->sub_jobs_num); +} + +/** + * Get PP job's Timeline tracker. + * + * @param job PP job. + * @return Pointer to Timeline tracker for the job. 
+ */ +MALI_STATIC_INLINE struct mali_timeline_tracker *mali_pp_job_get_tracker(struct mali_pp_job *job) +{ + MALI_DEBUG_ASSERT_POINTER(job); + return &(job->tracker); +} + +MALI_STATIC_INLINE u32 *mali_pp_job_get_timeline_point_ptr( + struct mali_pp_job *job) +{ + MALI_DEBUG_ASSERT_POINTER(job); + return (u32 __user *)(uintptr_t)job->uargs.timeline_point_ptr; +} + + +#endif /* __MALI_PP_JOB_H__ */ diff --git a/drivers/gpu/arm/utgard/common/mali_scheduler.c b/drivers/gpu/arm/utgard/common/mali_scheduler.c new file mode 100644 index 000000000000..5547159db94c --- /dev/null +++ b/drivers/gpu/arm/utgard/common/mali_scheduler.c @@ -0,0 +1,1354 @@ +/* + * Copyright (C) 2012-2015 ARM Limited. All rights reserved. + * + * This program is free software and is provided to you under the terms of the GNU General Public License version 2 + * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence. + * + * A copy of the licence is included with the program, and can also be obtained from Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. + */ + +#include "mali_scheduler.h" +#include "mali_kernel_common.h" +#include "mali_osk.h" +#include "mali_osk_profiling.h" +#include "mali_kernel_utilization.h" +#include "mali_timeline.h" +#include "mali_gp_job.h" +#include "mali_pp_job.h" +#include "mali_executor.h" +#include "mali_group.h" +#include <linux/wait.h> +#include <linux/sched.h> + + +#if defined(CONFIG_DMA_SHARED_BUFFER) +#include "mali_memory_dma_buf.h" +#endif + +#if defined(CONFIG_GPU_TRACEPOINTS) && defined(CONFIG_TRACEPOINTS) +#include <linux/sched.h> +#include <trace/events/gpu.h> +#endif +/* + * ---------- static defines/constants ---------- + */ + +/* + * If dma_buf with map on demand is used, we defer job queue + * if in atomic context, since both might sleep. 
+ */ +#if defined(CONFIG_DMA_SHARED_BUFFER) +#if !defined(CONFIG_MALI_DMA_BUF_MAP_ON_ATTACH) +#define MALI_SCHEDULER_USE_DEFERRED_PP_JOB_QUEUE 1 +#endif +#endif + + +/* + * ---------- global variables (exported due to inline functions) ---------- + */ + +/* Lock protecting this module */ +_mali_osk_spinlock_irq_t *mali_scheduler_lock_obj = NULL; + +/* Queue of jobs to be executed on the GP group */ +struct mali_scheduler_job_queue job_queue_gp; + +/* Queue of PP jobs */ +struct mali_scheduler_job_queue job_queue_pp; + +_mali_osk_atomic_t mali_job_id_autonumber; +_mali_osk_atomic_t mali_job_cache_order_autonumber; +/* + * ---------- static variables ---------- + */ + +_mali_osk_wq_work_t *scheduler_wq_pp_job_delete = NULL; +_mali_osk_spinlock_irq_t *scheduler_pp_job_delete_lock = NULL; +static _MALI_OSK_LIST_HEAD_STATIC_INIT(scheduler_pp_job_deletion_queue); + +#if defined(MALI_SCHEDULER_USE_DEFERRED_PP_JOB_QUEUE) +static _mali_osk_wq_work_t *scheduler_wq_pp_job_queue = NULL; +static _mali_osk_spinlock_irq_t *scheduler_pp_job_queue_lock = NULL; +static _MALI_OSK_LIST_HEAD_STATIC_INIT(scheduler_pp_job_queue_list); +#endif + +/* + * ---------- Forward declaration of static functions ---------- + */ + +static mali_timeline_point mali_scheduler_submit_gp_job( + struct mali_session_data *session, struct mali_gp_job *job); +static mali_timeline_point mali_scheduler_submit_pp_job( + struct mali_session_data *session, struct mali_pp_job *job); + +static mali_bool mali_scheduler_queue_gp_job(struct mali_gp_job *job); +static mali_bool mali_scheduler_queue_pp_job(struct mali_pp_job *job); + +static void mali_scheduler_return_gp_job_to_user(struct mali_gp_job *job, + mali_bool success); + +static void mali_scheduler_deferred_pp_job_delete(struct mali_pp_job *job); +void mali_scheduler_do_pp_job_delete(void *arg); + +#if defined(MALI_SCHEDULER_USE_DEFERRED_PP_JOB_QUEUE) +static void mali_scheduler_deferred_pp_job_queue(struct mali_pp_job *job); +static void 
mali_scheduler_do_pp_job_queue(void *arg); +#endif /* defined(MALI_SCHEDULER_USE_DEFERRED_PP_JOB_QUEUE) */ + +/* + * ---------- Actual implementation ---------- + */ + +_mali_osk_errcode_t mali_scheduler_initialize(void) +{ + _mali_osk_atomic_init(&mali_job_id_autonumber, 0); + _mali_osk_atomic_init(&mali_job_cache_order_autonumber, 0); + + _MALI_OSK_INIT_LIST_HEAD(&job_queue_gp.normal_pri); + _MALI_OSK_INIT_LIST_HEAD(&job_queue_gp.high_pri); + job_queue_gp.depth = 0; + job_queue_gp.big_job_num = 0; + + _MALI_OSK_INIT_LIST_HEAD(&job_queue_pp.normal_pri); + _MALI_OSK_INIT_LIST_HEAD(&job_queue_pp.high_pri); + job_queue_pp.depth = 0; + job_queue_pp.big_job_num = 0; + + mali_scheduler_lock_obj = _mali_osk_spinlock_irq_init( + _MALI_OSK_LOCKFLAG_ORDERED, + _MALI_OSK_LOCK_ORDER_SCHEDULER); + if (NULL == mali_scheduler_lock_obj) { + mali_scheduler_terminate(); + } + + scheduler_wq_pp_job_delete = _mali_osk_wq_create_work( + mali_scheduler_do_pp_job_delete, NULL); + if (NULL == scheduler_wq_pp_job_delete) { + mali_scheduler_terminate(); + return _MALI_OSK_ERR_FAULT; + } + + scheduler_pp_job_delete_lock = _mali_osk_spinlock_irq_init( + _MALI_OSK_LOCKFLAG_ORDERED, + _MALI_OSK_LOCK_ORDER_SCHEDULER_DEFERRED); + if (NULL == scheduler_pp_job_delete_lock) { + mali_scheduler_terminate(); + return _MALI_OSK_ERR_FAULT; + } + +#if defined(MALI_SCHEDULER_USE_DEFERRED_PP_JOB_QUEUE) + scheduler_wq_pp_job_queue = _mali_osk_wq_create_work( + mali_scheduler_do_pp_job_queue, NULL); + if (NULL == scheduler_wq_pp_job_queue) { + mali_scheduler_terminate(); + return _MALI_OSK_ERR_FAULT; + } + + scheduler_pp_job_queue_lock = _mali_osk_spinlock_irq_init( + _MALI_OSK_LOCKFLAG_ORDERED, + _MALI_OSK_LOCK_ORDER_SCHEDULER_DEFERRED); + if (NULL == scheduler_pp_job_queue_lock) { + mali_scheduler_terminate(); + return _MALI_OSK_ERR_FAULT; + } +#endif /* defined(MALI_SCHEDULER_USE_DEFERRED_PP_JOB_QUEUE) */ + + return _MALI_OSK_ERR_OK; +} + +void mali_scheduler_terminate(void) +{ +#if 
defined(MALI_SCHEDULER_USE_DEFERRED_PP_JOB_QUEUE) + if (NULL != scheduler_pp_job_queue_lock) { + _mali_osk_spinlock_irq_term(scheduler_pp_job_queue_lock); + scheduler_pp_job_queue_lock = NULL; + } + + if (NULL != scheduler_wq_pp_job_queue) { + _mali_osk_wq_delete_work(scheduler_wq_pp_job_queue); + scheduler_wq_pp_job_queue = NULL; + } +#endif /* defined(MALI_SCHEDULER_USE_DEFERRED_PP_JOB_QUEUE) */ + + if (NULL != scheduler_pp_job_delete_lock) { + _mali_osk_spinlock_irq_term(scheduler_pp_job_delete_lock); + scheduler_pp_job_delete_lock = NULL; + } + + if (NULL != scheduler_wq_pp_job_delete) { + _mali_osk_wq_delete_work(scheduler_wq_pp_job_delete); + scheduler_wq_pp_job_delete = NULL; + } + + if (NULL != mali_scheduler_lock_obj) { + _mali_osk_spinlock_irq_term(mali_scheduler_lock_obj); + mali_scheduler_lock_obj = NULL; + } + + _mali_osk_atomic_term(&mali_job_cache_order_autonumber); + _mali_osk_atomic_term(&mali_job_id_autonumber); +} + +u32 mali_scheduler_job_physical_head_count(void) +{ + /* + * Count how many physical sub jobs are present from the head of queue + * until the first virtual job is present. 
+ * Early out when we have reached maximum number of PP cores (8) + */ + u32 count = 0; + struct mali_pp_job *job; + struct mali_pp_job *temp; + + /* Check for partially started normal pri jobs */ + if (!_mali_osk_list_empty(&job_queue_pp.normal_pri)) { + MALI_DEBUG_ASSERT(0 < job_queue_pp.depth); + + job = _MALI_OSK_LIST_ENTRY(job_queue_pp.normal_pri.next, + struct mali_pp_job, list); + + MALI_DEBUG_ASSERT_POINTER(job); + + if (MALI_TRUE == mali_pp_job_has_started_sub_jobs(job)) { + /* + * Remember; virtual jobs can't be queued and started + * at the same time, so this must be a physical job + */ + count += mali_pp_job_unstarted_sub_job_count(job); + if (MALI_MAX_NUMBER_OF_PHYSICAL_PP_GROUPS <= count) { + return MALI_MAX_NUMBER_OF_PHYSICAL_PP_GROUPS; + } + } + } + + _MALI_OSK_LIST_FOREACHENTRY(job, temp, &job_queue_pp.high_pri, + struct mali_pp_job, list) { + if (MALI_FALSE == mali_pp_job_is_virtual(job)) { + count += mali_pp_job_unstarted_sub_job_count(job); + if (MALI_MAX_NUMBER_OF_PHYSICAL_PP_GROUPS <= count) { + return MALI_MAX_NUMBER_OF_PHYSICAL_PP_GROUPS; + } + } else { + /* Came across a virtual job, so stop counting */ + return count; + } + } + + _MALI_OSK_LIST_FOREACHENTRY(job, temp, &job_queue_pp.normal_pri, + struct mali_pp_job, list) { + if (MALI_FALSE == mali_pp_job_is_virtual(job)) { + /* any partially started is already counted */ + if (MALI_FALSE == mali_pp_job_has_started_sub_jobs(job)) { + count += mali_pp_job_unstarted_sub_job_count(job); + if (MALI_MAX_NUMBER_OF_PHYSICAL_PP_GROUPS <= + count) { + return MALI_MAX_NUMBER_OF_PHYSICAL_PP_GROUPS; + } + } + } else { + /* Came across a virtual job, so stop counting */ + return count; + } + } + + return count; +} + +mali_bool mali_scheduler_job_next_is_virtual(void) +{ + struct mali_pp_job *job; + + job = mali_scheduler_job_pp_virtual_peek(); + if (NULL != job) { + MALI_DEBUG_ASSERT(mali_pp_job_is_virtual(job)); + + return MALI_TRUE; + } + + return MALI_FALSE; +} + +struct mali_gp_job 
*mali_scheduler_job_gp_get(void) +{ + _mali_osk_list_t *queue; + struct mali_gp_job *job = NULL; + + MALI_DEBUG_ASSERT_LOCK_HELD(mali_scheduler_lock_obj); + MALI_DEBUG_ASSERT(0 < job_queue_gp.depth); + MALI_DEBUG_ASSERT(job_queue_gp.big_job_num <= job_queue_gp.depth); + + if (!_mali_osk_list_empty(&job_queue_gp.high_pri)) { + queue = &job_queue_gp.high_pri; + } else { + queue = &job_queue_gp.normal_pri; + MALI_DEBUG_ASSERT(!_mali_osk_list_empty(queue)); + } + + job = _MALI_OSK_LIST_ENTRY(queue->next, struct mali_gp_job, list); + + MALI_DEBUG_ASSERT_POINTER(job); + + mali_gp_job_list_remove(job); + job_queue_gp.depth--; + if (job->big_job) { + job_queue_gp.big_job_num --; + if (job_queue_gp.big_job_num < MALI_MAX_PENDING_BIG_JOB) { + /* wake up process */ + wait_queue_head_t *queue = mali_session_get_wait_queue(); + wake_up(queue); + } + } + return job; +} + +struct mali_pp_job *mali_scheduler_job_pp_physical_peek(void) +{ + struct mali_pp_job *job = NULL; + struct mali_pp_job *tmp_job = NULL; + + MALI_DEBUG_ASSERT_LOCK_HELD(mali_scheduler_lock_obj); + + /* + * For PP jobs we favour partially started jobs in normal + * priority queue over unstarted jobs in high priority queue + */ + + if (!_mali_osk_list_empty(&job_queue_pp.normal_pri)) { + MALI_DEBUG_ASSERT(0 < job_queue_pp.depth); + + tmp_job = _MALI_OSK_LIST_ENTRY(job_queue_pp.normal_pri.next, + struct mali_pp_job, list); + MALI_DEBUG_ASSERT(NULL != tmp_job); + + if (MALI_FALSE == mali_pp_job_is_virtual(tmp_job)) { + job = tmp_job; + } + } + + if (NULL == job || + MALI_FALSE == mali_pp_job_has_started_sub_jobs(job)) { + /* + * There isn't a partially started job in normal queue, so + * look in high priority queue. 
+ */ + if (!_mali_osk_list_empty(&job_queue_pp.high_pri)) { + MALI_DEBUG_ASSERT(0 < job_queue_pp.depth); + + tmp_job = _MALI_OSK_LIST_ENTRY(job_queue_pp.high_pri.next, + struct mali_pp_job, list); + MALI_DEBUG_ASSERT(NULL != tmp_job); + + if (MALI_FALSE == mali_pp_job_is_virtual(tmp_job)) { + job = tmp_job; + } + } + } + + return job; +} + +struct mali_pp_job *mali_scheduler_job_pp_virtual_peek(void) +{ + struct mali_pp_job *job = NULL; + struct mali_pp_job *tmp_job = NULL; + + MALI_DEBUG_ASSERT_LOCK_HELD(mali_scheduler_lock_obj); + + if (!_mali_osk_list_empty(&job_queue_pp.high_pri)) { + MALI_DEBUG_ASSERT(0 < job_queue_pp.depth); + + tmp_job = _MALI_OSK_LIST_ENTRY(job_queue_pp.high_pri.next, + struct mali_pp_job, list); + + if (MALI_TRUE == mali_pp_job_is_virtual(tmp_job)) { + job = tmp_job; + } + } + + if (NULL == job) { + if (!_mali_osk_list_empty(&job_queue_pp.normal_pri)) { + MALI_DEBUG_ASSERT(0 < job_queue_pp.depth); + + tmp_job = _MALI_OSK_LIST_ENTRY(job_queue_pp.normal_pri.next, + struct mali_pp_job, list); + + if (MALI_TRUE == mali_pp_job_is_virtual(tmp_job)) { + job = tmp_job; + } + } + } + + return job; +} + +struct mali_pp_job *mali_scheduler_job_pp_physical_get(u32 *sub_job) +{ + struct mali_pp_job *job = mali_scheduler_job_pp_physical_peek(); + + MALI_DEBUG_ASSERT(MALI_FALSE == mali_pp_job_is_virtual(job)); + + if (NULL != job) { + *sub_job = mali_pp_job_get_first_unstarted_sub_job(job); + + mali_pp_job_mark_sub_job_started(job, *sub_job); + if (MALI_FALSE == mali_pp_job_has_unstarted_sub_jobs(job)) { + /* Remove from queue when last sub job has been retrieved */ + mali_pp_job_list_remove(job); + } + + job_queue_pp.depth--; + + /* + * Job about to start so it is no longer be + * possible to discard WB + */ + mali_pp_job_fb_lookup_remove(job); + } + + return job; +} + +struct mali_pp_job *mali_scheduler_job_pp_virtual_get(void) +{ + struct mali_pp_job *job = mali_scheduler_job_pp_virtual_peek(); + + MALI_DEBUG_ASSERT(MALI_TRUE == 
mali_pp_job_is_virtual(job)); + + if (NULL != job) { + MALI_DEBUG_ASSERT(0 == + mali_pp_job_get_first_unstarted_sub_job(job)); + MALI_DEBUG_ASSERT(1 == + mali_pp_job_get_sub_job_count(job)); + + mali_pp_job_mark_sub_job_started(job, 0); + + mali_pp_job_list_remove(job); + + job_queue_pp.depth--; + + /* + * Job about to start so it is no longer be + * possible to discard WB + */ + mali_pp_job_fb_lookup_remove(job); + } + + return job; +} + +mali_scheduler_mask mali_scheduler_activate_gp_job(struct mali_gp_job *job) +{ + MALI_DEBUG_ASSERT_POINTER(job); + + MALI_DEBUG_PRINT(4, ("Mali GP scheduler: Timeline activation for job %u (0x%08X).\n", + mali_gp_job_get_id(job), job)); + + mali_scheduler_lock(); + + if (!mali_scheduler_queue_gp_job(job)) { + /* Failed to enqueue job, release job (with error) */ + + mali_scheduler_unlock(); + + mali_timeline_tracker_release(mali_gp_job_get_tracker(job)); + mali_gp_job_signal_pp_tracker(job, MALI_FALSE); + + /* This will notify user space and close the job object */ + mali_scheduler_complete_gp_job(job, MALI_FALSE, + MALI_TRUE, MALI_FALSE); + + return MALI_SCHEDULER_MASK_EMPTY; + } + + mali_scheduler_unlock(); + + return MALI_SCHEDULER_MASK_GP; +} + +mali_scheduler_mask mali_scheduler_activate_pp_job(struct mali_pp_job *job) +{ + MALI_DEBUG_ASSERT_POINTER(job); + + MALI_DEBUG_PRINT(4, ("Mali PP scheduler: Timeline activation for job %u (0x%08X).\n", + mali_pp_job_get_id(job), job)); + + if (MALI_TRUE == mali_timeline_tracker_activation_error( + mali_pp_job_get_tracker(job))) { + MALI_DEBUG_PRINT(3, ("Mali PP scheduler: Job %u (0x%08X) activated with error, aborting.\n", + mali_pp_job_get_id(job), job)); + + mali_scheduler_lock(); + mali_pp_job_fb_lookup_remove(job); + mali_pp_job_mark_unstarted_failed(job); + mali_scheduler_unlock(); + + mali_timeline_tracker_release(mali_pp_job_get_tracker(job)); + + /* This will notify user space and close the job object */ + mali_scheduler_complete_pp_job(job, 0, MALI_TRUE, MALI_FALSE); + + 
return MALI_SCHEDULER_MASK_EMPTY; + } + +#if defined(MALI_SCHEDULER_USE_DEFERRED_PP_JOB_QUEUE) + if (mali_pp_job_needs_dma_buf_mapping(job)) { + mali_scheduler_deferred_pp_job_queue(job); + return MALI_SCHEDULER_MASK_EMPTY; + } +#endif /* defined(MALI_SCHEDULER_USE_DEFERRED_PP_JOB_QUEUE) */ + + mali_scheduler_lock(); + + if (!mali_scheduler_queue_pp_job(job)) { + /* Failed to enqueue job, release job (with error) */ + mali_pp_job_fb_lookup_remove(job); + mali_pp_job_mark_unstarted_failed(job); + mali_scheduler_unlock(); + + mali_timeline_tracker_release(mali_pp_job_get_tracker(job)); + + /* This will notify user space and close the job object */ + mali_scheduler_complete_pp_job(job, 0, MALI_TRUE, MALI_FALSE); + + return MALI_SCHEDULER_MASK_EMPTY; + } + + mali_scheduler_unlock(); + return MALI_SCHEDULER_MASK_PP; +} + +void mali_scheduler_complete_gp_job(struct mali_gp_job *job, + mali_bool success, + mali_bool user_notification, + mali_bool dequeued) +{ + if (user_notification) { + mali_scheduler_return_gp_job_to_user(job, success); + } + + if (dequeued) { + _mali_osk_pm_dev_ref_put(); + + if (mali_utilization_enabled()) { + mali_utilization_gp_end(); + } + } + + mali_gp_job_delete(job); +} + +void mali_scheduler_complete_pp_job(struct mali_pp_job *job, + u32 num_cores_in_virtual, + mali_bool user_notification, + mali_bool dequeued) +{ + job->user_notification = user_notification; + job->num_pp_cores_in_virtual = num_cores_in_virtual; + + if (dequeued) { +#if defined(CONFIG_MALI_DVFS) + if (mali_pp_job_is_window_surface(job)) { + struct mali_session_data *session; + session = mali_pp_job_get_session(job); + mali_session_inc_num_window_jobs(session); + } +#endif + + _mali_osk_pm_dev_ref_put(); + + if (mali_utilization_enabled()) { + mali_utilization_pp_end(); + } + } + + /* With ZRAM feature enabled, all pp jobs will be force to use deferred delete. 
*/ + mali_scheduler_deferred_pp_job_delete(job); +} + +void mali_scheduler_abort_session(struct mali_session_data *session) +{ + struct mali_gp_job *gp_job; + struct mali_gp_job *gp_tmp; + struct mali_pp_job *pp_job; + struct mali_pp_job *pp_tmp; + _MALI_OSK_LIST_HEAD_STATIC_INIT(removed_jobs_gp); + _MALI_OSK_LIST_HEAD_STATIC_INIT(removed_jobs_pp); + + MALI_DEBUG_ASSERT_POINTER(session); + MALI_DEBUG_ASSERT(session->is_aborting); + + MALI_DEBUG_PRINT(3, ("Mali scheduler: Aborting all queued jobs from session 0x%08X.\n", + session)); + + mali_scheduler_lock(); + + /* Remove from GP normal priority queue */ + _MALI_OSK_LIST_FOREACHENTRY(gp_job, gp_tmp, &job_queue_gp.normal_pri, + struct mali_gp_job, list) { + if (mali_gp_job_get_session(gp_job) == session) { + mali_gp_job_list_move(gp_job, &removed_jobs_gp); + job_queue_gp.depth--; + job_queue_gp.big_job_num -= gp_job->big_job ? 1 : 0; + } + } + + /* Remove from GP high priority queue */ + _MALI_OSK_LIST_FOREACHENTRY(gp_job, gp_tmp, &job_queue_gp.high_pri, + struct mali_gp_job, list) { + if (mali_gp_job_get_session(gp_job) == session) { + mali_gp_job_list_move(gp_job, &removed_jobs_gp); + job_queue_gp.depth--; + job_queue_gp.big_job_num -= gp_job->big_job ? 
1 : 0; + } + } + + /* Remove from PP normal priority queue */ + _MALI_OSK_LIST_FOREACHENTRY(pp_job, pp_tmp, + &job_queue_pp.normal_pri, + struct mali_pp_job, list) { + if (mali_pp_job_get_session(pp_job) == session) { + mali_pp_job_fb_lookup_remove(pp_job); + + job_queue_pp.depth -= + mali_pp_job_unstarted_sub_job_count( + pp_job); + mali_pp_job_mark_unstarted_failed(pp_job); + + if (MALI_FALSE == mali_pp_job_has_unstarted_sub_jobs(pp_job)) { + if (mali_pp_job_is_complete(pp_job)) { + mali_pp_job_list_move(pp_job, + &removed_jobs_pp); + } else { + mali_pp_job_list_remove(pp_job); + } + } + } + } + + /* Remove from PP high priority queue */ + _MALI_OSK_LIST_FOREACHENTRY(pp_job, pp_tmp, + &job_queue_pp.high_pri, + struct mali_pp_job, list) { + if (mali_pp_job_get_session(pp_job) == session) { + mali_pp_job_fb_lookup_remove(pp_job); + + job_queue_pp.depth -= + mali_pp_job_unstarted_sub_job_count( + pp_job); + mali_pp_job_mark_unstarted_failed(pp_job); + + if (MALI_FALSE == mali_pp_job_has_unstarted_sub_jobs(pp_job)) { + if (mali_pp_job_is_complete(pp_job)) { + mali_pp_job_list_move(pp_job, + &removed_jobs_pp); + } else { + mali_pp_job_list_remove(pp_job); + } + } + } + } + + /* + * Release scheduler lock so we can release trackers + * (which will potentially queue new jobs) + */ + mali_scheduler_unlock(); + + /* Release and complete all (non-running) found GP jobs */ + _MALI_OSK_LIST_FOREACHENTRY(gp_job, gp_tmp, &removed_jobs_gp, + struct mali_gp_job, list) { + mali_timeline_tracker_release(mali_gp_job_get_tracker(gp_job)); + mali_gp_job_signal_pp_tracker(gp_job, MALI_FALSE); + _mali_osk_list_delinit(&gp_job->list); + mali_scheduler_complete_gp_job(gp_job, + MALI_FALSE, MALI_FALSE, MALI_TRUE); + } + + /* Release and complete non-running PP jobs */ + _MALI_OSK_LIST_FOREACHENTRY(pp_job, pp_tmp, &removed_jobs_pp, + struct mali_pp_job, list) { + mali_timeline_tracker_release(mali_pp_job_get_tracker(pp_job)); + _mali_osk_list_delinit(&pp_job->list); + 
mali_scheduler_complete_pp_job(pp_job, 0, + MALI_FALSE, MALI_TRUE); + } +} + +_mali_osk_errcode_t _mali_ukk_gp_start_job(void *ctx, + _mali_uk_gp_start_job_s *uargs) +{ + struct mali_session_data *session; + struct mali_gp_job *job; + mali_timeline_point point; + u32 __user *point_ptr = NULL; + + MALI_DEBUG_ASSERT_POINTER(uargs); + MALI_DEBUG_ASSERT_POINTER(ctx); + + session = (struct mali_session_data *)(uintptr_t)ctx; + + job = mali_gp_job_create(session, uargs, mali_scheduler_get_new_id(), + NULL); + if (NULL == job) { + MALI_PRINT_ERROR(("Failed to create GP job.\n")); + return _MALI_OSK_ERR_NOMEM; + } + + point_ptr = (u32 __user *)(uintptr_t)mali_gp_job_get_timeline_point_ptr(job); + + point = mali_scheduler_submit_gp_job(session, job); + + if (0 != _mali_osk_put_user(((u32) point), point_ptr)) { + /* + * Let user space know that something failed + * after the job was started. + */ + return _MALI_OSK_ERR_ITEM_NOT_FOUND; + } + + return _MALI_OSK_ERR_OK; +} + +_mali_osk_errcode_t _mali_ukk_pp_start_job(void *ctx, + _mali_uk_pp_start_job_s *uargs) +{ + struct mali_session_data *session; + struct mali_pp_job *job; + mali_timeline_point point; + u32 __user *point_ptr = NULL; + + MALI_DEBUG_ASSERT_POINTER(uargs); + MALI_DEBUG_ASSERT_POINTER(ctx); + + session = (struct mali_session_data *)(uintptr_t)ctx; + + job = mali_pp_job_create(session, uargs, mali_scheduler_get_new_id()); + if (NULL == job) { + MALI_PRINT_ERROR(("Failed to create PP job.\n")); + return _MALI_OSK_ERR_NOMEM; + } + + point_ptr = (u32 __user *)(uintptr_t)mali_pp_job_get_timeline_point_ptr(job); + + point = mali_scheduler_submit_pp_job(session, job); + job = NULL; + + if (0 != _mali_osk_put_user(((u32) point), point_ptr)) { + /* + * Let user space know that something failed + * after the job was started. 
+ */ + return _MALI_OSK_ERR_ITEM_NOT_FOUND; + } + + return _MALI_OSK_ERR_OK; +} + +_mali_osk_errcode_t _mali_ukk_pp_and_gp_start_job(void *ctx, + _mali_uk_pp_and_gp_start_job_s *uargs) +{ + struct mali_session_data *session; + _mali_uk_pp_and_gp_start_job_s kargs; + struct mali_pp_job *pp_job; + struct mali_gp_job *gp_job; + u32 __user *point_ptr = NULL; + mali_timeline_point point; + _mali_uk_pp_start_job_s __user *pp_args; + _mali_uk_gp_start_job_s __user *gp_args; + + MALI_DEBUG_ASSERT_POINTER(ctx); + MALI_DEBUG_ASSERT_POINTER(uargs); + + session = (struct mali_session_data *) ctx; + + if (0 != _mali_osk_copy_from_user(&kargs, uargs, + sizeof(_mali_uk_pp_and_gp_start_job_s))) { + return _MALI_OSK_ERR_NOMEM; + } + + pp_args = (_mali_uk_pp_start_job_s __user *)(uintptr_t)kargs.pp_args; + gp_args = (_mali_uk_gp_start_job_s __user *)(uintptr_t)kargs.gp_args; + + pp_job = mali_pp_job_create(session, pp_args, + mali_scheduler_get_new_id()); + if (NULL == pp_job) { + MALI_PRINT_ERROR(("Failed to create PP job.\n")); + return _MALI_OSK_ERR_NOMEM; + } + + gp_job = mali_gp_job_create(session, gp_args, + mali_scheduler_get_new_id(), + mali_pp_job_get_tracker(pp_job)); + if (NULL == gp_job) { + MALI_PRINT_ERROR(("Failed to create GP job.\n")); + mali_pp_job_delete(pp_job); + return _MALI_OSK_ERR_NOMEM; + } + + point_ptr = (u32 __user *)(uintptr_t)mali_pp_job_get_timeline_point_ptr(pp_job); + + /* Submit GP job. */ + mali_scheduler_submit_gp_job(session, gp_job); + gp_job = NULL; + + /* Submit PP job. */ + point = mali_scheduler_submit_pp_job(session, pp_job); + pp_job = NULL; + + if (0 != _mali_osk_put_user(((u32) point), point_ptr)) { + /* + * Let user space know that something failed + * after the jobs were started. 
+ */ + return _MALI_OSK_ERR_ITEM_NOT_FOUND; + } + + return _MALI_OSK_ERR_OK; +} + +void _mali_ukk_pp_job_disable_wb(_mali_uk_pp_disable_wb_s *args) +{ + struct mali_session_data *session; + struct mali_pp_job *job; + struct mali_pp_job *tmp; + u32 fb_lookup_id; + + MALI_DEBUG_ASSERT_POINTER(args); + MALI_DEBUG_ASSERT(NULL != (void *)(uintptr_t)args->ctx); + + session = (struct mali_session_data *)(uintptr_t)args->ctx; + + fb_lookup_id = args->fb_id & MALI_PP_JOB_FB_LOOKUP_LIST_MASK; + + mali_scheduler_lock(); + + /* Iterate over all jobs for given frame builder_id. */ + _MALI_OSK_LIST_FOREACHENTRY(job, tmp, + &session->pp_job_fb_lookup_list[fb_lookup_id], + struct mali_pp_job, session_fb_lookup_list) { + MALI_DEBUG_CODE(u32 disable_mask = 0); + + if (mali_pp_job_get_frame_builder_id(job) != + (u32) args->fb_id) { + MALI_DEBUG_PRINT(4, ("Mali PP scheduler: Disable WB mismatching FB.\n")); + continue; + } + + MALI_DEBUG_CODE(disable_mask |= 0xD << (4 * 3)); + + if (mali_pp_job_get_wb0_source_addr(job) == args->wb0_memory) { + MALI_DEBUG_CODE(disable_mask |= 0x1 << (4 * 1)); + mali_pp_job_disable_wb0(job); + } + + if (mali_pp_job_get_wb1_source_addr(job) == args->wb1_memory) { + MALI_DEBUG_CODE(disable_mask |= 0x2 << (4 * 2)); + mali_pp_job_disable_wb1(job); + } + + if (mali_pp_job_get_wb2_source_addr(job) == args->wb2_memory) { + MALI_DEBUG_CODE(disable_mask |= 0x3 << (4 * 3)); + mali_pp_job_disable_wb2(job); + } + MALI_DEBUG_PRINT(3, ("Mali PP scheduler: Disable WB: 0x%X.\n", + disable_mask)); + } + + mali_scheduler_unlock(); +} + +#if MALI_STATE_TRACKING +u32 mali_scheduler_dump_state(char *buf, u32 size) +{ + int n = 0; + + n += _mali_osk_snprintf(buf + n, size - n, "GP queues\n"); + n += _mali_osk_snprintf(buf + n, size - n, + "\tQueue depth: %u\n", job_queue_gp.depth); + n += _mali_osk_snprintf(buf + n, size - n, + "\tNormal priority queue is %s\n", + _mali_osk_list_empty(&job_queue_gp.normal_pri) ? 
+ "empty" : "not empty"); + n += _mali_osk_snprintf(buf + n, size - n, + "\tHigh priority queue is %s\n", + _mali_osk_list_empty(&job_queue_gp.high_pri) ? + "empty" : "not empty"); + + n += _mali_osk_snprintf(buf + n, size - n, + "PP queues\n"); + n += _mali_osk_snprintf(buf + n, size - n, + "\tQueue depth: %u\n", job_queue_pp.depth); + n += _mali_osk_snprintf(buf + n, size - n, + "\tNormal priority queue is %s\n", + _mali_osk_list_empty(&job_queue_pp.normal_pri) + ? "empty" : "not empty"); + n += _mali_osk_snprintf(buf + n, size - n, + "\tHigh priority queue is %s\n", + _mali_osk_list_empty(&job_queue_pp.high_pri) + ? "empty" : "not empty"); + + n += _mali_osk_snprintf(buf + n, size - n, "\n"); + + return n; +} +#endif + +/* + * ---------- Implementation of static functions ---------- + */ + +static mali_timeline_point mali_scheduler_submit_gp_job( + struct mali_session_data *session, struct mali_gp_job *job) +{ + mali_timeline_point point; + + MALI_DEBUG_ASSERT_POINTER(session); + MALI_DEBUG_ASSERT_POINTER(job); + + /* Add job to Timeline system. */ + point = mali_timeline_system_add_tracker(session->timeline_system, + mali_gp_job_get_tracker(job), MALI_TIMELINE_GP); + + return point; +} + +static mali_timeline_point mali_scheduler_submit_pp_job( + struct mali_session_data *session, struct mali_pp_job *job) +{ + mali_timeline_point point; + + MALI_DEBUG_ASSERT_POINTER(session); + MALI_DEBUG_ASSERT_POINTER(job); + + mali_scheduler_lock(); + /* + * Adding job to the lookup list used to quickly discard + * writeback units of queued jobs. + */ + mali_pp_job_fb_lookup_add(job); + mali_scheduler_unlock(); + + /* Add job to Timeline system. 
*/ + point = mali_timeline_system_add_tracker(session->timeline_system, + mali_pp_job_get_tracker(job), MALI_TIMELINE_PP); + + return point; +} + +static mali_bool mali_scheduler_queue_gp_job(struct mali_gp_job *job) +{ + struct mali_session_data *session; + _mali_osk_list_t *queue; + + MALI_DEBUG_ASSERT_SCHEDULER_LOCK_HELD(); + MALI_DEBUG_ASSERT_POINTER(job); + + session = mali_gp_job_get_session(job); + MALI_DEBUG_ASSERT_POINTER(session); + + if (unlikely(session->is_aborting)) { + MALI_DEBUG_PRINT(4, ("Mali GP scheduler: Job %u (0x%08X) queued while session is aborting.\n", + mali_gp_job_get_id(job), job)); + return MALI_FALSE; /* job not queued */ + } + + mali_gp_job_set_cache_order(job, mali_scheduler_get_new_cache_order()); + + /* Determine which queue the job should be added to. */ + if (session->use_high_priority_job_queue) { + queue = &job_queue_gp.high_pri; + } else { + queue = &job_queue_gp.normal_pri; + } + + job_queue_gp.depth += 1; + job_queue_gp.big_job_num += (job->big_job) ? 1 : 0; + + /* Add job to queue (mali_gp_job_queue_add find correct place). */ + mali_gp_job_list_add(job, queue); + + /* + * We hold a PM reference for every job we hold queued (and running) + * It is important that we take this reference after job has been + * added the the queue so that any runtime resume could schedule this + * job right there and then. + */ + _mali_osk_pm_dev_ref_get_async(); + + if (mali_utilization_enabled()) { + /* + * We cheat a little bit by counting the GP as busy from the + * time a GP job is queued. 
This will be fine because we only + * loose the tiny idle gap between jobs, but we will instead + * get less utilization work to do (less locks taken) + */ + mali_utilization_gp_start(); + } + + /* Add profiling events for job enqueued */ + _mali_osk_profiling_add_event( + MALI_PROFILING_EVENT_TYPE_SINGLE | + MALI_PROFILING_EVENT_CHANNEL_SOFTWARE | + MALI_PROFILING_EVENT_REASON_SINGLE_SW_GP_ENQUEUE, + mali_gp_job_get_pid(job), + mali_gp_job_get_tid(job), + mali_gp_job_get_frame_builder_id(job), + mali_gp_job_get_flush_id(job), + 0); + +#if defined(CONFIG_GPU_TRACEPOINTS) && defined(CONFIG_TRACEPOINTS) + trace_gpu_job_enqueue(mali_gp_job_get_tid(job), + mali_gp_job_get_id(job), "GP"); +#endif + + MALI_DEBUG_PRINT(3, ("Mali GP scheduler: Job %u (0x%08X) queued\n", + mali_gp_job_get_id(job), job)); + + return MALI_TRUE; /* job queued */ +} + +static mali_bool mali_scheduler_queue_pp_job(struct mali_pp_job *job) +{ + struct mali_session_data *session; + _mali_osk_list_t *queue = NULL; + + MALI_DEBUG_ASSERT_SCHEDULER_LOCK_HELD(); + MALI_DEBUG_ASSERT_POINTER(job); + + session = mali_pp_job_get_session(job); + MALI_DEBUG_ASSERT_POINTER(session); + + if (unlikely(session->is_aborting)) { + MALI_DEBUG_PRINT(2, ("Mali PP scheduler: Job %u (0x%08X) queued while session is aborting.\n", + mali_pp_job_get_id(job), job)); + return MALI_FALSE; /* job not queued */ + } else if (unlikely(MALI_SWAP_IN_FAIL == job->swap_status)) { + MALI_DEBUG_PRINT(2, ("Mali PP scheduler: Job %u (0x%08X) queued while swap in failed.\n", + mali_pp_job_get_id(job), job)); + return MALI_FALSE; + } + + mali_pp_job_set_cache_order(job, mali_scheduler_get_new_cache_order()); + + if (session->use_high_priority_job_queue) { + queue = &job_queue_pp.high_pri; + } else { + queue = &job_queue_pp.normal_pri; + } + + job_queue_pp.depth += + mali_pp_job_get_sub_job_count(job); + + /* Add job to queue (mali_gp_job_queue_add find correct place). 
*/ + mali_pp_job_list_add(job, queue); + + /* + * We hold a PM reference for every job we hold queued (and running) + * It is important that we take this reference after job has been + * added the the queue so that any runtime resume could schedule this + * job right there and then. + */ + _mali_osk_pm_dev_ref_get_async(); + + if (mali_utilization_enabled()) { + /* + * We cheat a little bit by counting the PP as busy from the + * time a PP job is queued. This will be fine because we only + * loose the tiny idle gap between jobs, but we will instead + * get less utilization work to do (less locks taken) + */ + mali_utilization_pp_start(); + } + + /* Add profiling events for job enqueued */ + + _mali_osk_profiling_add_event( + MALI_PROFILING_EVENT_TYPE_SINGLE | + MALI_PROFILING_EVENT_CHANNEL_SOFTWARE | + MALI_PROFILING_EVENT_REASON_SINGLE_SW_PP_ENQUEUE, + mali_pp_job_get_pid(job), + mali_pp_job_get_tid(job), + mali_pp_job_get_frame_builder_id(job), + mali_pp_job_get_flush_id(job), + 0); + +#if defined(CONFIG_GPU_TRACEPOINTS) && defined(CONFIG_TRACEPOINTS) + trace_gpu_job_enqueue(mali_pp_job_get_tid(job), + mali_pp_job_get_id(job), "PP"); +#endif + + MALI_DEBUG_PRINT(3, ("Mali PP scheduler: %s job %u (0x%08X) with %u parts queued.\n", + mali_pp_job_is_virtual(job) + ? 
"Virtual" : "Physical", + mali_pp_job_get_id(job), job, + mali_pp_job_get_sub_job_count(job))); + + return MALI_TRUE; /* job queued */ +} + +static void mali_scheduler_return_gp_job_to_user(struct mali_gp_job *job, + mali_bool success) +{ + _mali_uk_gp_job_finished_s *jobres; + struct mali_session_data *session; + _mali_osk_notification_t *notification; + + MALI_DEBUG_ASSERT_POINTER(job); + + session = mali_gp_job_get_session(job); + MALI_DEBUG_ASSERT_POINTER(session); + + notification = mali_gp_job_get_finished_notification(job); + MALI_DEBUG_ASSERT_POINTER(notification); + + jobres = notification->result_buffer; + MALI_DEBUG_ASSERT_POINTER(jobres); + + jobres->pending_big_job_num = mali_scheduler_job_gp_big_job_count(); + + jobres->user_job_ptr = mali_gp_job_get_user_id(job); + if (MALI_TRUE == success) { + jobres->status = _MALI_UK_JOB_STATUS_END_SUCCESS; + } else { + jobres->status = _MALI_UK_JOB_STATUS_END_UNKNOWN_ERR; + } + jobres->heap_current_addr = mali_gp_job_get_current_heap_addr(job); + jobres->perf_counter0 = mali_gp_job_get_perf_counter_value0(job); + jobres->perf_counter1 = mali_gp_job_get_perf_counter_value1(job); + + mali_session_send_notification(session, notification); +} + +void mali_scheduler_return_pp_job_to_user(struct mali_pp_job *job, + u32 num_cores_in_virtual) +{ + u32 i; + u32 num_counters_to_copy; + _mali_uk_pp_job_finished_s *jobres; + struct mali_session_data *session; + _mali_osk_notification_t *notification; + + if (MALI_TRUE == mali_pp_job_use_no_notification(job)) { + return; + } + + MALI_DEBUG_ASSERT_POINTER(job); + + session = mali_pp_job_get_session(job); + MALI_DEBUG_ASSERT_POINTER(session); + + notification = mali_pp_job_get_finished_notification(job); + MALI_DEBUG_ASSERT_POINTER(notification); + + jobres = notification->result_buffer; + MALI_DEBUG_ASSERT_POINTER(jobres); + + jobres->user_job_ptr = mali_pp_job_get_user_id(job); + if (MALI_TRUE == mali_pp_job_was_success(job)) { + jobres->status = 
_MALI_UK_JOB_STATUS_END_SUCCESS; + } else { + jobres->status = _MALI_UK_JOB_STATUS_END_UNKNOWN_ERR; + } + + if (mali_pp_job_is_virtual(job)) { + num_counters_to_copy = num_cores_in_virtual; + } else { + num_counters_to_copy = mali_pp_job_get_sub_job_count(job); + } + + for (i = 0; i < num_counters_to_copy; i++) { + jobres->perf_counter0[i] = + mali_pp_job_get_perf_counter_value0(job, i); + jobres->perf_counter1[i] = + mali_pp_job_get_perf_counter_value1(job, i); + jobres->perf_counter_src0 = + mali_pp_job_get_pp_counter_global_src0(); + jobres->perf_counter_src1 = + mali_pp_job_get_pp_counter_global_src1(); + } + + mali_session_send_notification(session, notification); +} + +static void mali_scheduler_deferred_pp_job_delete(struct mali_pp_job *job) +{ + MALI_DEBUG_ASSERT_POINTER(job); + + _mali_osk_spinlock_irq_lock(scheduler_pp_job_delete_lock); + mali_pp_job_list_addtail(job, &scheduler_pp_job_deletion_queue); + _mali_osk_spinlock_irq_unlock(scheduler_pp_job_delete_lock); + + _mali_osk_wq_schedule_work(scheduler_wq_pp_job_delete); +} + +void mali_scheduler_do_pp_job_delete(void *arg) +{ + _MALI_OSK_LIST_HEAD_STATIC_INIT(list); + struct mali_pp_job *job; + struct mali_pp_job *tmp; + + MALI_IGNORE(arg); + + /* + * Quickly "unhook" the jobs pending to be deleted, so we can release + * the lock before we start deleting the job objects + * (without any locks held) + */ + _mali_osk_spinlock_irq_lock(scheduler_pp_job_delete_lock); + _mali_osk_list_move_list(&scheduler_pp_job_deletion_queue, &list); + _mali_osk_spinlock_irq_unlock(scheduler_pp_job_delete_lock); + + _MALI_OSK_LIST_FOREACHENTRY(job, tmp, &list, + struct mali_pp_job, list) { + _mali_osk_list_delinit(&job->list); + + mali_pp_job_delete(job); /* delete the job object itself */ + } +} + +#if defined(MALI_SCHEDULER_USE_DEFERRED_PP_JOB_QUEUE) + +static void mali_scheduler_deferred_pp_job_queue(struct mali_pp_job *job) +{ + MALI_DEBUG_ASSERT_POINTER(job); + + 
_mali_osk_spinlock_irq_lock(scheduler_pp_job_queue_lock); + mali_pp_job_list_addtail(job, &scheduler_pp_job_queue_list); + _mali_osk_spinlock_irq_unlock(scheduler_pp_job_queue_lock); + + _mali_osk_wq_schedule_work(scheduler_wq_pp_job_queue); +} + +static void mali_scheduler_do_pp_job_queue(void *arg) +{ + _MALI_OSK_LIST_HEAD_STATIC_INIT(list); + struct mali_pp_job *job; + struct mali_pp_job *tmp; + mali_scheduler_mask schedule_mask = MALI_SCHEDULER_MASK_EMPTY; + + MALI_IGNORE(arg); + + /* + * Quickly "unhook" the jobs pending to be queued, so we can release + * the lock before we start queueing the job objects + * (without any locks held) + */ + _mali_osk_spinlock_irq_lock(scheduler_pp_job_queue_lock); + _mali_osk_list_move_list(&scheduler_pp_job_queue_list, &list); + _mali_osk_spinlock_irq_unlock(scheduler_pp_job_queue_lock); + + /* First loop through all jobs and do the pre-work (no locks needed) */ + _MALI_OSK_LIST_FOREACHENTRY(job, tmp, &list, + struct mali_pp_job, list) { + if (mali_pp_job_needs_dma_buf_mapping(job)) { + /* + * This operation could fail, but we continue anyway, + * because the worst that could happen is that this + * job will fail due to a Mali page fault. 
+ */ + mali_dma_buf_map_job(job); + } + } + + mali_scheduler_lock(); + + /* Then loop through all jobs again to queue them (lock needed) */ + _MALI_OSK_LIST_FOREACHENTRY(job, tmp, &list, + struct mali_pp_job, list) { + + /* Remove from scheduler_pp_job_queue_list before queueing */ + mali_pp_job_list_remove(job); + + if (mali_scheduler_queue_pp_job(job)) { + /* Job queued successfully */ + schedule_mask |= MALI_SCHEDULER_MASK_PP; + } else { + /* Failed to enqueue job, release job (with error) */ + mali_pp_job_fb_lookup_remove(job); + mali_pp_job_mark_unstarted_failed(job); + + /* unlock scheduler in this uncommon case */ + mali_scheduler_unlock(); + + schedule_mask |= mali_timeline_tracker_release( + mali_pp_job_get_tracker(job)); + + /* Notify user space and close the job object */ + mali_scheduler_complete_pp_job(job, 0, MALI_TRUE, + MALI_FALSE); + + mali_scheduler_lock(); + } + } + + mali_scheduler_unlock(); + + /* Trigger scheduling of jobs */ + mali_executor_schedule_from_mask(schedule_mask, MALI_FALSE); +} + +#endif /* defined(MALI_SCHEDULER_USE_DEFERRED_PP_JOB_QUEUE) */ + +void mali_scheduler_gp_pp_job_queue_print(void) +{ + struct mali_gp_job *gp_job = NULL; + struct mali_gp_job *tmp_gp_job = NULL; + struct mali_pp_job *pp_job = NULL; + struct mali_pp_job *tmp_pp_job = NULL; + + MALI_DEBUG_ASSERT_LOCK_HELD(mali_scheduler_lock_obj); + MALI_DEBUG_ASSERT_LOCK_HELD(mali_executor_lock_obj); + + /* dump job queup status */ + if ((0 == job_queue_gp.depth) && (0 == job_queue_pp.depth)) { + MALI_PRINT(("No GP&PP job in the job queue.\n")); + return; + } + + MALI_PRINT(("Total (%d) GP job in the job queue.\n", job_queue_gp.depth)); + if (job_queue_gp.depth > 0) { + if (!_mali_osk_list_empty(&job_queue_gp.high_pri)) { + _MALI_OSK_LIST_FOREACHENTRY(gp_job, tmp_gp_job, &job_queue_gp.high_pri, + struct mali_gp_job, list) { + MALI_PRINT(("GP job(%p) id = %d tid = %d pid = %d in the gp job high_pri queue\n", gp_job, gp_job->id, gp_job->tid, gp_job->pid)); + } + } + + if 
(!_mali_osk_list_empty(&job_queue_gp.normal_pri)) { + _MALI_OSK_LIST_FOREACHENTRY(gp_job, tmp_gp_job, &job_queue_gp.normal_pri, + struct mali_gp_job, list) { + MALI_PRINT(("GP job(%p) id = %d tid = %d pid = %d in the gp job normal_pri queue\n", gp_job, gp_job->id, gp_job->tid, gp_job->pid)); + } + } + } + + MALI_PRINT(("Total (%d) PP job in the job queue.\n", job_queue_pp.depth)); + if (job_queue_pp.depth > 0) { + if (!_mali_osk_list_empty(&job_queue_pp.high_pri)) { + _MALI_OSK_LIST_FOREACHENTRY(pp_job, tmp_pp_job, &job_queue_pp.high_pri, + struct mali_pp_job, list) { + if (mali_pp_job_is_virtual(pp_job)) { + MALI_PRINT(("PP Virtual job(%p) id = %d tid = %d pid = %d in the pp job high_pri queue\n", pp_job, pp_job->id, pp_job->tid, pp_job->pid)); + } else { + MALI_PRINT(("PP Physical job(%p) id = %d tid = %d pid = %d in the pp job high_pri queue\n", pp_job, pp_job->id, pp_job->tid, pp_job->pid)); + } + } + } + + if (!_mali_osk_list_empty(&job_queue_pp.normal_pri)) { + _MALI_OSK_LIST_FOREACHENTRY(pp_job, tmp_pp_job, &job_queue_pp.normal_pri, + struct mali_pp_job, list) { + if (mali_pp_job_is_virtual(pp_job)) { + MALI_PRINT(("PP Virtual job(%p) id = %d tid = %d pid = %d in the pp job normal_pri queue\n", pp_job, pp_job->id, pp_job->tid, pp_job->pid)); + } else { + MALI_PRINT(("PP Physical job(%p) id = %d tid = %d pid = %d in the pp job normal_pri queue\n", pp_job, pp_job->id, pp_job->tid, pp_job->pid)); + } + } + } + } + + /* dump group running job status */ + mali_executor_running_status_print(); +} diff --git a/drivers/gpu/arm/utgard/common/mali_scheduler.h b/drivers/gpu/arm/utgard/common/mali_scheduler.h new file mode 100644 index 000000000000..f24cf42b8a79 --- /dev/null +++ b/drivers/gpu/arm/utgard/common/mali_scheduler.h @@ -0,0 +1,130 @@ +/* + * Copyright (C) 2012-2015 ARM Limited. All rights reserved. 
+ * + * This program is free software and is provided to you under the terms of the GNU General Public License version 2 + * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence. + * + * A copy of the licence is included with the program, and can also be obtained from Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. + */ + +#ifndef __MALI_SCHEDULER_H__ +#define __MALI_SCHEDULER_H__ + +#include "mali_osk.h" +#include "mali_osk_list.h" +#include "mali_scheduler_types.h" +#include "mali_session.h" + +struct mali_scheduler_job_queue { + _MALI_OSK_LIST_HEAD(normal_pri); /* Queued jobs with normal priority */ + _MALI_OSK_LIST_HEAD(high_pri); /* Queued jobs with high priority */ + u32 depth; /* Depth of combined queues. */ + u32 big_job_num; +}; + +extern _mali_osk_spinlock_irq_t *mali_scheduler_lock_obj; + +/* Queue of jobs to be executed on the GP group */ +extern struct mali_scheduler_job_queue job_queue_gp; + +/* Queue of PP jobs */ +extern struct mali_scheduler_job_queue job_queue_pp; + +extern _mali_osk_atomic_t mali_job_id_autonumber; +extern _mali_osk_atomic_t mali_job_cache_order_autonumber; + +#define MALI_DEBUG_ASSERT_SCHEDULER_LOCK_HELD() MALI_DEBUG_ASSERT_LOCK_HELD(mali_scheduler_lock_obj); + +_mali_osk_errcode_t mali_scheduler_initialize(void); +void mali_scheduler_terminate(void); + +MALI_STATIC_INLINE void mali_scheduler_lock(void) +{ + _mali_osk_spinlock_irq_lock(mali_scheduler_lock_obj); + MALI_DEBUG_PRINT(5, ("Mali scheduler: scheduler lock taken.\n")); +} + +MALI_STATIC_INLINE void mali_scheduler_unlock(void) +{ + MALI_DEBUG_PRINT(5, ("Mali scheduler: Releasing scheduler lock.\n")); + _mali_osk_spinlock_irq_unlock(mali_scheduler_lock_obj); +} + +MALI_STATIC_INLINE u32 mali_scheduler_job_gp_count(void) +{ + return job_queue_gp.depth; +} +MALI_STATIC_INLINE u32 mali_scheduler_job_gp_big_job_count(void) +{ + return 
job_queue_gp.big_job_num; +} + +u32 mali_scheduler_job_physical_head_count(void); + +mali_bool mali_scheduler_job_next_is_virtual(void); + +struct mali_gp_job *mali_scheduler_job_gp_get(void); +struct mali_pp_job *mali_scheduler_job_pp_physical_peek(void); +struct mali_pp_job *mali_scheduler_job_pp_virtual_peek(void); +struct mali_pp_job *mali_scheduler_job_pp_physical_get(u32 *sub_job); +struct mali_pp_job *mali_scheduler_job_pp_virtual_get(void); + +MALI_STATIC_INLINE u32 mali_scheduler_get_new_id(void) +{ + return _mali_osk_atomic_inc_return(&mali_job_id_autonumber); +} + +MALI_STATIC_INLINE u32 mali_scheduler_get_new_cache_order(void) +{ + return _mali_osk_atomic_inc_return(&mali_job_cache_order_autonumber); +} + +/** + * @brief Used by the Timeline system to queue a GP job. + * + * @note @ref mali_executor_schedule_from_mask() should be called if this + * function returns non-zero. + * + * @param job The GP job that is being activated. + * + * @return A scheduling bitmask that can be used to decide if scheduling is + * necessary after this call. + */ +mali_scheduler_mask mali_scheduler_activate_gp_job(struct mali_gp_job *job); + +/** + * @brief Used by the Timeline system to queue a PP job. + * + * @note @ref mali_executor_schedule_from_mask() should be called if this + * function returns non-zero. + * + * @param job The PP job that is being activated. + * + * @return A scheduling bitmask that can be used to decide if scheduling is + * necessary after this call. 
+ */ +mali_scheduler_mask mali_scheduler_activate_pp_job(struct mali_pp_job *job); + +void mali_scheduler_complete_gp_job(struct mali_gp_job *job, + mali_bool success, + mali_bool user_notification, + mali_bool dequeued); + +void mali_scheduler_complete_pp_job(struct mali_pp_job *job, + u32 num_cores_in_virtual, + mali_bool user_notification, + mali_bool dequeued); + +void mali_scheduler_abort_session(struct mali_session_data *session); + +void mali_scheduler_return_pp_job_to_user(struct mali_pp_job *job, + u32 num_cores_in_virtual); + +#if MALI_STATE_TRACKING +u32 mali_scheduler_dump_state(char *buf, u32 size); +#endif + +void mali_scheduler_gp_pp_job_queue_print(void); + +#endif /* __MALI_SCHEDULER_H__ */ diff --git a/drivers/gpu/arm/utgard/common/mali_scheduler_types.h b/drivers/gpu/arm/utgard/common/mali_scheduler_types.h new file mode 100644 index 000000000000..f862961d146e --- /dev/null +++ b/drivers/gpu/arm/utgard/common/mali_scheduler_types.h @@ -0,0 +1,29 @@ +/* + * Copyright (C) 2013-2015 ARM Limited. All rights reserved. + * + * This program is free software and is provided to you under the terms of the GNU General Public License version 2 + * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence. + * + * A copy of the licence is included with the program, and can also be obtained from Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. + */ + +#ifndef __MALI_SCHEDULER_TYPES_H__ +#define __MALI_SCHEDULER_TYPES_H__ + +#include "mali_osk.h" + +#define MALI_SCHEDULER_JOB_ID_SPAN 65535 + +/** + * Bitmask used for defered scheduling of subsystems. 
+ */ +typedef u32 mali_scheduler_mask; + +#define MALI_SCHEDULER_MASK_GP (1<<0) +#define MALI_SCHEDULER_MASK_PP (1<<1) + +#define MALI_SCHEDULER_MASK_EMPTY 0 +#define MALI_SCHEDULER_MASK_ALL (MALI_SCHEDULER_MASK_GP | MALI_SCHEDULER_MASK_PP) + +#endif /* __MALI_SCHEDULER_TYPES_H__ */ diff --git a/drivers/gpu/arm/utgard/common/mali_session.c b/drivers/gpu/arm/utgard/common/mali_session.c new file mode 100644 index 000000000000..e0a2805b13d3 --- /dev/null +++ b/drivers/gpu/arm/utgard/common/mali_session.c @@ -0,0 +1,144 @@ +/* + * Copyright (C) 2012-2015 ARM Limited. All rights reserved. + * + * This program is free software and is provided to you under the terms of the GNU General Public License version 2 + * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence. + * + * A copy of the licence is included with the program, and can also be obtained from Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. 
+ */ + +#include "mali_osk.h" +#include "mali_osk_list.h" +#include "mali_session.h" +#include "mali_ukk.h" +#ifdef MALI_MEM_SWAP_TRACKING +#include "mali_memory_swap_alloc.h" +#endif + +_MALI_OSK_LIST_HEAD(mali_sessions); +static u32 mali_session_count = 0; + +_mali_osk_spinlock_irq_t *mali_sessions_lock = NULL; +wait_queue_head_t pending_queue; + +_mali_osk_errcode_t mali_session_initialize(void) +{ + _MALI_OSK_INIT_LIST_HEAD(&mali_sessions); + /* init wait queue for big varying job */ + init_waitqueue_head(&pending_queue); + + mali_sessions_lock = _mali_osk_spinlock_irq_init( + _MALI_OSK_LOCKFLAG_ORDERED, + _MALI_OSK_LOCK_ORDER_SESSIONS); + if (NULL == mali_sessions_lock) { + return _MALI_OSK_ERR_NOMEM; + } + + return _MALI_OSK_ERR_OK; +} + +void mali_session_terminate(void) +{ + if (NULL != mali_sessions_lock) { + _mali_osk_spinlock_irq_term(mali_sessions_lock); + mali_sessions_lock = NULL; + } +} + +void mali_session_add(struct mali_session_data *session) +{ + mali_session_lock(); + _mali_osk_list_add(&session->link, &mali_sessions); + mali_session_count++; + mali_session_unlock(); +} + +void mali_session_remove(struct mali_session_data *session) +{ + mali_session_lock(); + _mali_osk_list_delinit(&session->link); + mali_session_count--; + mali_session_unlock(); +} + +u32 mali_session_get_count(void) +{ + return mali_session_count; +} + +wait_queue_head_t *mali_session_get_wait_queue(void) +{ + return &pending_queue; +} + +/* + * Get the max completed window jobs from all active session, + * which will be used in window render frame per sec calculate + */ +#if defined(CONFIG_MALI_DVFS) +u32 mali_session_max_window_num(void) +{ + struct mali_session_data *session, *tmp; + u32 max_window_num = 0; + u32 tmp_number = 0; + + mali_session_lock(); + + MALI_SESSION_FOREACH(session, tmp, link) { + tmp_number = _mali_osk_atomic_xchg( + &session->number_of_window_jobs, 0); + if (max_window_num < tmp_number) { + max_window_num = tmp_number; + } + } + + 
mali_session_unlock(); + + return max_window_num; +} +#endif + +void mali_session_memory_tracking(_mali_osk_print_ctx *print_ctx) +{ + struct mali_session_data *session, *tmp; + u32 mali_mem_usage; + u32 total_mali_mem_size; +#ifdef MALI_MEM_SWAP_TRACKING + u32 swap_pool_size; + u32 swap_unlock_size; +#endif + + MALI_DEBUG_ASSERT_POINTER(print_ctx); + mali_session_lock(); + MALI_SESSION_FOREACH(session, tmp, link) { +#ifdef MALI_MEM_SWAP_TRACKING + _mali_osk_ctxprintf(print_ctx, " %-25s %-10u %-10u %-15u %-15u %-10u %-10u %-10u\n", + session->comm, session->pid, + (atomic_read(&session->mali_mem_allocated_pages)) * _MALI_OSK_MALI_PAGE_SIZE, + session->max_mali_mem_allocated_size, + (atomic_read(&session->mali_mem_array[MALI_MEM_EXTERNAL])) * _MALI_OSK_MALI_PAGE_SIZE, + (atomic_read(&session->mali_mem_array[MALI_MEM_UMP])) * _MALI_OSK_MALI_PAGE_SIZE, + (atomic_read(&session->mali_mem_array[MALI_MEM_DMA_BUF])) * _MALI_OSK_MALI_PAGE_SIZE, + (atomic_read(&session->mali_mem_array[MALI_MEM_SWAP])) * _MALI_OSK_MALI_PAGE_SIZE + ); +#else + _mali_osk_ctxprintf(print_ctx, " %-25s %-10u %-10u %-15u %-15u %-10u %-10u \n", + session->comm, session->pid, + (atomic_read(&session->mali_mem_allocated_pages)) * _MALI_OSK_MALI_PAGE_SIZE, + session->max_mali_mem_allocated_size, + (atomic_read(&session->mali_mem_array[MALI_MEM_EXTERNAL])) * _MALI_OSK_MALI_PAGE_SIZE, + (atomic_read(&session->mali_mem_array[MALI_MEM_UMP])) * _MALI_OSK_MALI_PAGE_SIZE, + (atomic_read(&session->mali_mem_array[MALI_MEM_DMA_BUF])) * _MALI_OSK_MALI_PAGE_SIZE + ); +#endif + } + mali_session_unlock(); + mali_mem_usage = _mali_ukk_report_memory_usage(); + total_mali_mem_size = _mali_ukk_report_total_memory_size(); + _mali_osk_ctxprintf(print_ctx, "Mali mem usage: %u\nMali mem limit: %u\n", mali_mem_usage, total_mali_mem_size); +#ifdef MALI_MEM_SWAP_TRACKING + mali_mem_swap_tracking(&swap_pool_size, &swap_unlock_size); + _mali_osk_ctxprintf(print_ctx, "Mali swap mem pool : %u\nMali swap mem unlock: %u\n", 
swap_pool_size, swap_unlock_size); +#endif +} diff --git a/drivers/gpu/arm/utgard/common/mali_session.h b/drivers/gpu/arm/utgard/common/mali_session.h new file mode 100644 index 000000000000..6791b2b5f110 --- /dev/null +++ b/drivers/gpu/arm/utgard/common/mali_session.h @@ -0,0 +1,127 @@ +/* + * Copyright (C) 2010-2015 ARM Limited. All rights reserved. + * + * This program is free software and is provided to you under the terms of the GNU General Public License version 2 + * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence. + * + * A copy of the licence is included with the program, and can also be obtained from Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. + */ + +#ifndef __MALI_SESSION_H__ +#define __MALI_SESSION_H__ + +#include "mali_mmu_page_directory.h" +#include "mali_osk.h" +#include "mali_osk_list.h" +#include "mali_memory_types.h" +#include "mali_memory_manager.h" + +struct mali_timeline_system; +struct mali_soft_system; + +/* Number of frame builder job lists per session. 
*/ +#define MALI_PP_JOB_FB_LOOKUP_LIST_SIZE 16 +#define MALI_PP_JOB_FB_LOOKUP_LIST_MASK (MALI_PP_JOB_FB_LOOKUP_LIST_SIZE - 1) +/*Max pending big job allowed in kernel*/ +#define MALI_MAX_PENDING_BIG_JOB (2) + +struct mali_session_data { + _mali_osk_notification_queue_t *ioctl_queue; + + _mali_osk_mutex_t *memory_lock; /**< Lock protecting the vm manipulation */ +#if 0 + _mali_osk_list_t memory_head; /**< Track all the memory allocated in this session, for freeing on abnormal termination */ +#endif + struct mali_page_directory *page_directory; /**< MMU page directory for this session */ + + _MALI_OSK_LIST_HEAD(link); /**< Link for list of all sessions */ + _MALI_OSK_LIST_HEAD(pp_job_list); /**< List of all PP jobs on this session */ + +#if defined(CONFIG_MALI_DVFS) + _mali_osk_atomic_t number_of_window_jobs; /**< Record the window jobs completed on this session in a period */ +#endif + + _mali_osk_list_t pp_job_fb_lookup_list[MALI_PP_JOB_FB_LOOKUP_LIST_SIZE]; /**< List of PP job lists per frame builder id. Used to link jobs from same frame builder. */ + + struct mali_soft_job_system *soft_job_system; /**< Soft job system for this session. */ + struct mali_timeline_system *timeline_system; /**< Timeline system for this session. */ + + mali_bool is_aborting; /**< MALI_TRUE if the session is aborting, MALI_FALSE if not. */ + mali_bool use_high_priority_job_queue; /**< If MALI_TRUE, jobs added from this session will use the high priority job queues. */ + u32 pid; + char *comm; + atomic_t mali_mem_array[MALI_MEM_TYPE_MAX]; /**< The array to record mem types' usage for this session. */ + atomic_t mali_mem_allocated_pages; /** The current allocated mali memory pages, which include mali os memory and mali dedicated memory.*/ + size_t max_mali_mem_allocated_size; /**< The past max mali memory allocated size, which include mali os memory and mali dedicated memory. 
*/ + /* Added for new memroy system */ + struct mali_allocation_manager allocation_mgr; +}; + +_mali_osk_errcode_t mali_session_initialize(void); +void mali_session_terminate(void); + +/* List of all sessions. Actual list head in mali_kernel_core.c */ +extern _mali_osk_list_t mali_sessions; +/* Lock to protect modification and access to the mali_sessions list */ +extern _mali_osk_spinlock_irq_t *mali_sessions_lock; + +MALI_STATIC_INLINE void mali_session_lock(void) +{ + _mali_osk_spinlock_irq_lock(mali_sessions_lock); +} + +MALI_STATIC_INLINE void mali_session_unlock(void) +{ + _mali_osk_spinlock_irq_unlock(mali_sessions_lock); +} + +void mali_session_add(struct mali_session_data *session); +void mali_session_remove(struct mali_session_data *session); +u32 mali_session_get_count(void); +wait_queue_head_t *mali_session_get_wait_queue(void); + +#define MALI_SESSION_FOREACH(session, tmp, link) \ + _MALI_OSK_LIST_FOREACHENTRY(session, tmp, &mali_sessions, struct mali_session_data, link) + +MALI_STATIC_INLINE struct mali_page_directory *mali_session_get_page_directory(struct mali_session_data *session) +{ + return session->page_directory; +} + +MALI_STATIC_INLINE void mali_session_memory_lock(struct mali_session_data *session) +{ + MALI_DEBUG_ASSERT_POINTER(session); + _mali_osk_mutex_wait(session->memory_lock); +} + +MALI_STATIC_INLINE void mali_session_memory_unlock(struct mali_session_data *session) +{ + MALI_DEBUG_ASSERT_POINTER(session); + _mali_osk_mutex_signal(session->memory_lock); +} + +MALI_STATIC_INLINE void mali_session_send_notification(struct mali_session_data *session, _mali_osk_notification_t *object) +{ + _mali_osk_notification_queue_send(session->ioctl_queue, object); +} + +#if defined(CONFIG_MALI_DVFS) + +MALI_STATIC_INLINE void mali_session_inc_num_window_jobs(struct mali_session_data *session) +{ + MALI_DEBUG_ASSERT_POINTER(session); + _mali_osk_atomic_inc(&session->number_of_window_jobs); +} + +/* + * Get the max completed window jobs from all 
active session, + * which will be used in window render frame per sec calculate + */ +u32 mali_session_max_window_num(void); + +#endif + +void mali_session_memory_tracking(_mali_osk_print_ctx *print_ctx); + +#endif /* __MALI_SESSION_H__ */ diff --git a/drivers/gpu/arm/utgard/common/mali_soft_job.c b/drivers/gpu/arm/utgard/common/mali_soft_job.c new file mode 100644 index 000000000000..36ac982e1df0 --- /dev/null +++ b/drivers/gpu/arm/utgard/common/mali_soft_job.c @@ -0,0 +1,438 @@ +/* + * Copyright (C) 2013-2015 ARM Limited. All rights reserved. + * + * This program is free software and is provided to you under the terms of the GNU General Public License version 2 + * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence. + * + * A copy of the licence is included with the program, and can also be obtained from Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. + */ + +#include "mali_soft_job.h" +#include "mali_osk.h" +#include "mali_timeline.h" +#include "mali_session.h" +#include "mali_kernel_common.h" +#include "mali_uk_types.h" +#include "mali_scheduler.h" +#include "mali_executor.h" + +MALI_STATIC_INLINE void mali_soft_job_system_lock(struct mali_soft_job_system *system) +{ + MALI_DEBUG_ASSERT_POINTER(system); + _mali_osk_spinlock_irq_lock(system->lock); + MALI_DEBUG_PRINT(5, ("Mali Soft Job: soft system %p lock taken\n", system)); + MALI_DEBUG_ASSERT(0 == system->lock_owner); + MALI_DEBUG_CODE(system->lock_owner = _mali_osk_get_tid()); +} + +MALI_STATIC_INLINE void mali_soft_job_system_unlock(struct mali_soft_job_system *system) +{ + MALI_DEBUG_ASSERT_POINTER(system); + MALI_DEBUG_PRINT(5, ("Mali Soft Job: releasing soft system %p lock\n", system)); + MALI_DEBUG_ASSERT(_mali_osk_get_tid() == system->lock_owner); + MALI_DEBUG_CODE(system->lock_owner = 0); + _mali_osk_spinlock_irq_unlock(system->lock); +} + +#if defined(DEBUG) +MALI_STATIC_INLINE 
void mali_soft_job_system_assert_locked(struct mali_soft_job_system *system) +{ + MALI_DEBUG_ASSERT_POINTER(system); + MALI_DEBUG_ASSERT(_mali_osk_get_tid() == system->lock_owner); +} +#define MALI_ASSERT_SOFT_JOB_SYSTEM_LOCKED(system) mali_soft_job_system_assert_locked(system) +#else +#define MALI_ASSERT_SOFT_JOB_SYSTEM_LOCKED(system) +#endif /* defined(DEBUG) */ + +struct mali_soft_job_system *mali_soft_job_system_create(struct mali_session_data *session) +{ + struct mali_soft_job_system *system; + + MALI_DEBUG_ASSERT_POINTER(session); + + system = (struct mali_soft_job_system *) _mali_osk_calloc(1, sizeof(struct mali_soft_job_system)); + if (NULL == system) { + return NULL; + } + + system->session = session; + + system->lock = _mali_osk_spinlock_irq_init(_MALI_OSK_LOCKFLAG_ORDERED, _MALI_OSK_LOCK_ORDER_SCHEDULER); + if (NULL == system->lock) { + mali_soft_job_system_destroy(system); + return NULL; + } + system->lock_owner = 0; + system->last_job_id = 0; + + _MALI_OSK_INIT_LIST_HEAD(&(system->jobs_used)); + + return system; +} + +void mali_soft_job_system_destroy(struct mali_soft_job_system *system) +{ + MALI_DEBUG_ASSERT_POINTER(system); + + /* All jobs should be free at this point. 
*/ + MALI_DEBUG_ASSERT(_mali_osk_list_empty(&(system->jobs_used))); + + if (NULL != system) { + if (NULL != system->lock) { + _mali_osk_spinlock_irq_term(system->lock); + } + _mali_osk_free(system); + } +} + +static void mali_soft_job_system_free_job(struct mali_soft_job_system *system, struct mali_soft_job *job) +{ + MALI_DEBUG_ASSERT_POINTER(job); + MALI_DEBUG_ASSERT_POINTER(system); + + mali_soft_job_system_lock(job->system); + + MALI_DEBUG_ASSERT(MALI_SOFT_JOB_INVALID_ID != job->id); + MALI_DEBUG_ASSERT(system == job->system); + + _mali_osk_list_del(&(job->system_list)); + + mali_soft_job_system_unlock(job->system); + + _mali_osk_free(job); +} + +MALI_STATIC_INLINE struct mali_soft_job *mali_soft_job_system_lookup_job(struct mali_soft_job_system *system, u32 job_id) +{ + struct mali_soft_job *job, *tmp; + + MALI_DEBUG_ASSERT_POINTER(system); + MALI_ASSERT_SOFT_JOB_SYSTEM_LOCKED(system); + + _MALI_OSK_LIST_FOREACHENTRY(job, tmp, &system->jobs_used, struct mali_soft_job, system_list) { + if (job->id == job_id) + return job; + } + + return NULL; +} + +void mali_soft_job_destroy(struct mali_soft_job *job) +{ + MALI_DEBUG_ASSERT_POINTER(job); + MALI_DEBUG_ASSERT_POINTER(job->system); + + MALI_DEBUG_PRINT(4, ("Mali Soft Job: destroying soft job %u (0x%08X)\n", job->id, job)); + + if (NULL != job) { + if (0 < _mali_osk_atomic_dec_return(&job->refcount)) return; + + _mali_osk_atomic_term(&job->refcount); + + if (NULL != job->activated_notification) { + _mali_osk_notification_delete(job->activated_notification); + job->activated_notification = NULL; + } + + mali_soft_job_system_free_job(job->system, job); + } +} + +struct mali_soft_job *mali_soft_job_create(struct mali_soft_job_system *system, mali_soft_job_type type, u64 user_job) +{ + struct mali_soft_job *job; + _mali_osk_notification_t *notification = NULL; + + MALI_DEBUG_ASSERT_POINTER(system); + MALI_DEBUG_ASSERT((MALI_SOFT_JOB_TYPE_USER_SIGNALED == type) || + (MALI_SOFT_JOB_TYPE_SELF_SIGNALED == type)); + + 
notification = _mali_osk_notification_create(_MALI_NOTIFICATION_SOFT_ACTIVATED, sizeof(_mali_uk_soft_job_activated_s)); + if (unlikely(NULL == notification)) { + MALI_PRINT_ERROR(("Mali Soft Job: failed to allocate notification")); + return NULL; + } + + job = _mali_osk_malloc(sizeof(struct mali_soft_job)); + if (unlikely(NULL == job)) { + MALI_DEBUG_PRINT(2, ("Mali Soft Job: system alloc job failed. \n")); + return NULL; + } + + mali_soft_job_system_lock(system); + + job->system = system; + job->id = system->last_job_id++; + job->state = MALI_SOFT_JOB_STATE_ALLOCATED; + + _mali_osk_list_add(&(job->system_list), &(system->jobs_used)); + + job->type = type; + job->user_job = user_job; + job->activated = MALI_FALSE; + + job->activated_notification = notification; + + _mali_osk_atomic_init(&job->refcount, 1); + + MALI_DEBUG_ASSERT(MALI_SOFT_JOB_STATE_ALLOCATED == job->state); + MALI_DEBUG_ASSERT(system == job->system); + MALI_DEBUG_ASSERT(MALI_SOFT_JOB_INVALID_ID != job->id); + + mali_soft_job_system_unlock(system); + + return job; +} + +mali_timeline_point mali_soft_job_start(struct mali_soft_job *job, struct mali_timeline_fence *fence) +{ + mali_timeline_point point; + struct mali_soft_job_system *system; + + MALI_DEBUG_ASSERT_POINTER(job); + MALI_DEBUG_ASSERT_POINTER(fence); + + MALI_DEBUG_ASSERT_POINTER(job->system); + system = job->system; + + MALI_DEBUG_ASSERT_POINTER(system->session); + MALI_DEBUG_ASSERT_POINTER(system->session->timeline_system); + + mali_soft_job_system_lock(system); + + MALI_DEBUG_ASSERT(MALI_SOFT_JOB_STATE_ALLOCATED == job->state); + job->state = MALI_SOFT_JOB_STATE_STARTED; + + mali_soft_job_system_unlock(system); + + MALI_DEBUG_PRINT(4, ("Mali Soft Job: starting soft job %u (0x%08X)\n", job->id, job)); + + mali_timeline_tracker_init(&job->tracker, MALI_TIMELINE_TRACKER_SOFT, fence, job); + point = mali_timeline_system_add_tracker(system->session->timeline_system, &job->tracker, MALI_TIMELINE_SOFT); + + return point; +} + +static mali_bool 
mali_soft_job_is_activated(void *data) +{ + struct mali_soft_job *job; + + job = (struct mali_soft_job *) data; + MALI_DEBUG_ASSERT_POINTER(job); + + return job->activated; +} + +_mali_osk_errcode_t mali_soft_job_system_signal_job(struct mali_soft_job_system *system, u32 job_id) +{ + struct mali_soft_job *job; + struct mali_timeline_system *timeline_system; + mali_scheduler_mask schedule_mask; + + MALI_DEBUG_ASSERT_POINTER(system); + + mali_soft_job_system_lock(system); + + job = mali_soft_job_system_lookup_job(system, job_id); + + if ((NULL == job) || (MALI_SOFT_JOB_TYPE_USER_SIGNALED != job->type) + || !(MALI_SOFT_JOB_STATE_STARTED == job->state || MALI_SOFT_JOB_STATE_TIMED_OUT == job->state)) { + mali_soft_job_system_unlock(system); + MALI_PRINT_ERROR(("Mali Soft Job: invalid soft job id %u", job_id)); + return _MALI_OSK_ERR_ITEM_NOT_FOUND; + } + + if (MALI_SOFT_JOB_STATE_TIMED_OUT == job->state) { + job->state = MALI_SOFT_JOB_STATE_SIGNALED; + mali_soft_job_system_unlock(system); + + MALI_DEBUG_ASSERT(MALI_TRUE == job->activated); + MALI_DEBUG_PRINT(4, ("Mali Soft Job: soft job %u (0x%08X) was timed out\n", job->id, job)); + mali_soft_job_destroy(job); + + return _MALI_OSK_ERR_TIMEOUT; + } + + MALI_DEBUG_ASSERT(MALI_SOFT_JOB_STATE_STARTED == job->state); + + job->state = MALI_SOFT_JOB_STATE_SIGNALED; + mali_soft_job_system_unlock(system); + + /* Since the job now is in signaled state, timeouts from the timeline system will be + * ignored, and it is not possible to signal this job again. */ + + timeline_system = system->session->timeline_system; + MALI_DEBUG_ASSERT_POINTER(timeline_system); + + /* Wait until activated. 
*/ + _mali_osk_wait_queue_wait_event(timeline_system->wait_queue, mali_soft_job_is_activated, (void *) job); + + MALI_DEBUG_PRINT(4, ("Mali Soft Job: signaling soft job %u (0x%08X)\n", job->id, job)); + + schedule_mask = mali_timeline_tracker_release(&job->tracker); + mali_executor_schedule_from_mask(schedule_mask, MALI_FALSE); + + mali_soft_job_destroy(job); + + return _MALI_OSK_ERR_OK; +} + +static void mali_soft_job_send_activated_notification(struct mali_soft_job *job) +{ + if (NULL != job->activated_notification) { + _mali_uk_soft_job_activated_s *res = job->activated_notification->result_buffer; + res->user_job = job->user_job; + mali_session_send_notification(job->system->session, job->activated_notification); + } + job->activated_notification = NULL; +} + +mali_scheduler_mask mali_soft_job_system_activate_job(struct mali_soft_job *job) +{ + mali_scheduler_mask schedule_mask = MALI_SCHEDULER_MASK_EMPTY; + + MALI_DEBUG_ASSERT_POINTER(job); + MALI_DEBUG_ASSERT_POINTER(job->system); + MALI_DEBUG_ASSERT_POINTER(job->system->session); + + MALI_DEBUG_PRINT(4, ("Mali Soft Job: Timeline activation for soft job %u (0x%08X).\n", job->id, job)); + + mali_soft_job_system_lock(job->system); + + if (unlikely(job->system->session->is_aborting)) { + MALI_DEBUG_PRINT(3, ("Mali Soft Job: Soft job %u (0x%08X) activated while session is aborting.\n", job->id, job)); + + mali_soft_job_system_unlock(job->system); + + /* Since we are in shutdown, we can ignore the scheduling bitmask. */ + mali_timeline_tracker_release(&job->tracker); + mali_soft_job_destroy(job); + return schedule_mask; + } + + /* Send activated notification. */ + mali_soft_job_send_activated_notification(job); + + /* Wake up sleeping signaler. 
*/ + job->activated = MALI_TRUE; + + /* If job type is self signaled, release tracker, move soft job to free list, and scheduler at once */ + if (MALI_SOFT_JOB_TYPE_SELF_SIGNALED == job->type) { + MALI_DEBUG_ASSERT(MALI_SOFT_JOB_STATE_STARTED == job->state); + + job->state = MALI_SOFT_JOB_STATE_SIGNALED; + mali_soft_job_system_unlock(job->system); + + schedule_mask |= mali_timeline_tracker_release(&job->tracker); + + mali_soft_job_destroy(job); + } else { + _mali_osk_wait_queue_wake_up(job->tracker.system->wait_queue); + + mali_soft_job_system_unlock(job->system); + } + + return schedule_mask; +} + +mali_scheduler_mask mali_soft_job_system_timeout_job(struct mali_soft_job *job) +{ + mali_scheduler_mask schedule_mask = MALI_SCHEDULER_MASK_EMPTY; + + MALI_DEBUG_ASSERT_POINTER(job); + MALI_DEBUG_ASSERT_POINTER(job->system); + MALI_DEBUG_ASSERT_POINTER(job->system->session); + MALI_DEBUG_ASSERT(MALI_TRUE == job->activated); + + MALI_DEBUG_PRINT(4, ("Mali Soft Job: Timeline timeout for soft job %u (0x%08X).\n", job->id, job)); + + mali_soft_job_system_lock(job->system); + + MALI_DEBUG_ASSERT(MALI_SOFT_JOB_STATE_STARTED == job->state || + MALI_SOFT_JOB_STATE_SIGNALED == job->state); + + if (unlikely(job->system->session->is_aborting)) { + /* The session is aborting. This job will be released and destroyed by @ref + * mali_soft_job_system_abort(). */ + mali_soft_job_system_unlock(job->system); + + return MALI_SCHEDULER_MASK_EMPTY; + } + + if (MALI_SOFT_JOB_STATE_STARTED != job->state) { + MALI_DEBUG_ASSERT(MALI_SOFT_JOB_STATE_SIGNALED == job->state); + + /* The job is about to be signaled, ignore timeout. 
*/ + MALI_DEBUG_PRINT(4, ("Mali Soft Job: Timeout on soft job %u (0x%08X) in signaled state.\n", job->id, job)); + mali_soft_job_system_unlock(job->system); + return schedule_mask; + } + + MALI_DEBUG_ASSERT(MALI_SOFT_JOB_STATE_STARTED == job->state); + + job->state = MALI_SOFT_JOB_STATE_TIMED_OUT; + _mali_osk_atomic_inc(&job->refcount); + + mali_soft_job_system_unlock(job->system); + + schedule_mask = mali_timeline_tracker_release(&job->tracker); + + mali_soft_job_destroy(job); + + return schedule_mask; +} + +void mali_soft_job_system_abort(struct mali_soft_job_system *system) +{ + struct mali_soft_job *job, *tmp; + _MALI_OSK_LIST_HEAD_STATIC_INIT(jobs); + + MALI_DEBUG_ASSERT_POINTER(system); + MALI_DEBUG_ASSERT_POINTER(system->session); + MALI_DEBUG_ASSERT(system->session->is_aborting); + + MALI_DEBUG_PRINT(3, ("Mali Soft Job: Aborting soft job system for session 0x%08X.\n", system->session)); + + mali_soft_job_system_lock(system); + + _MALI_OSK_LIST_FOREACHENTRY(job, tmp, &system->jobs_used, struct mali_soft_job, system_list) { + MALI_DEBUG_ASSERT(MALI_SOFT_JOB_STATE_STARTED == job->state || + MALI_SOFT_JOB_STATE_TIMED_OUT == job->state); + + if (MALI_SOFT_JOB_STATE_STARTED == job->state) { + /* If the job has been activated, we have to release the tracker and destroy + * the job. If not, the tracker will be released and the job destroyed when + * it is activated. */ + if (MALI_TRUE == job->activated) { + MALI_DEBUG_PRINT(3, ("Mali Soft Job: Aborting unsignaled soft job %u (0x%08X).\n", job->id, job)); + + job->state = MALI_SOFT_JOB_STATE_SIGNALED; + _mali_osk_list_move(&job->system_list, &jobs); + } + } else if (MALI_SOFT_JOB_STATE_TIMED_OUT == job->state) { + MALI_DEBUG_PRINT(3, ("Mali Soft Job: Aborting timed out soft job %u (0x%08X).\n", job->id, job)); + + /* We need to destroy this soft job. */ + _mali_osk_list_move(&job->system_list, &jobs); + } + } + + mali_soft_job_system_unlock(system); + + /* Release and destroy jobs. 
*/ + _MALI_OSK_LIST_FOREACHENTRY(job, tmp, &jobs, struct mali_soft_job, system_list) { + MALI_DEBUG_ASSERT(MALI_SOFT_JOB_STATE_SIGNALED == job->state || + MALI_SOFT_JOB_STATE_TIMED_OUT == job->state); + + if (MALI_SOFT_JOB_STATE_SIGNALED == job->state) { + mali_timeline_tracker_release(&job->tracker); + } + + /* Move job back to used list before destroying. */ + _mali_osk_list_move(&job->system_list, &system->jobs_used); + + mali_soft_job_destroy(job); + } +} diff --git a/drivers/gpu/arm/utgard/common/mali_soft_job.h b/drivers/gpu/arm/utgard/common/mali_soft_job.h new file mode 100644 index 000000000000..f35394e60384 --- /dev/null +++ b/drivers/gpu/arm/utgard/common/mali_soft_job.h @@ -0,0 +1,190 @@ +/* + * Copyright (C) 2013-2015 ARM Limited. All rights reserved. + * + * This program is free software and is provided to you under the terms of the GNU General Public License version 2 + * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence. + * + * A copy of the licence is included with the program, and can also be obtained from Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. + */ + +#ifndef __MALI_SOFT_JOB_H__ +#define __MALI_SOFT_JOB_H__ + +#include "mali_osk.h" + +#include "mali_timeline.h" + +struct mali_timeline_fence; +struct mali_session_data; +struct mali_soft_job; +struct mali_soft_job_system; + +/** + * Soft job types. + * + * Soft jobs of type MALI_SOFT_JOB_TYPE_USER_SIGNALED will only complete after activation if either + * they are signaled by user-space (@ref mali_soft_job_system_signaled_job) or if they are timed out + * by the Timeline system. + * Soft jobs of type MALI_SOFT_JOB_TYPE_SELF_SIGNALED will release job resource automatically + * in kernel when the job is activated. 
 */
typedef enum mali_soft_job_type {
	MALI_SOFT_JOB_TYPE_SELF_SIGNALED,
	MALI_SOFT_JOB_TYPE_USER_SIGNALED,
} mali_soft_job_type;

/**
 * Soft job state.
 *
 * Through @ref mali_soft_job_system_start_job a job will first be allocated.  The job's state is
 * set to MALI_SOFT_JOB_STATE_ALLOCATED.
 * Once the job is added to the timeline system, the state changes to MALI_SOFT_JOB_STATE_STARTED.
 *
 * For soft jobs of type MALI_SOFT_JOB_TYPE_USER_SIGNALED the state is changed to
 * MALI_SOFT_JOB_STATE_SIGNALED when @ref mali_soft_job_system_signal_job is called and the soft
 * job's state is MALI_SOFT_JOB_STATE_STARTED or MALI_SOFT_JOB_STATE_TIMED_OUT.
 *
 * If a soft job of type MALI_SOFT_JOB_TYPE_USER_SIGNALED is timed out before being signaled, the
 * state is changed to MALI_SOFT_JOB_STATE_TIMED_OUT.  This can only happen to soft jobs in state
 * MALI_SOFT_JOB_STATE_STARTED.
 */
typedef enum mali_soft_job_state {
	MALI_SOFT_JOB_STATE_ALLOCATED,
	MALI_SOFT_JOB_STATE_STARTED,
	MALI_SOFT_JOB_STATE_SIGNALED,
	MALI_SOFT_JOB_STATE_TIMED_OUT,
} mali_soft_job_state;

#define MALI_SOFT_JOB_INVALID_ID ((u32) -1)

/**
 * Soft job struct.
 *
 * Soft job can be used to represent any kind of CPU work done in kernel-space.
 */
typedef struct mali_soft_job {
	mali_soft_job_type type;                          /**< Soft job type.  Must be one of MALI_SOFT_JOB_TYPE_*. */
	u64 user_job;                                     /**< Identifier for soft job in user space. */
	_mali_osk_atomic_t refcount;                      /**< Soft jobs are reference counted to prevent premature deletion. */
	struct mali_timeline_tracker tracker;             /**< Timeline tracker for soft job. */
	mali_bool activated;                              /**< MALI_TRUE if the job has been activated, MALI_FALSE if not. */
	_mali_osk_notification_t *activated_notification; /**< Pre-allocated notification object for ACTIVATED_NOTIFICATION. */

	/* Protected by soft job system lock. */
	u32 id;                                           /**< Used by user-space to find corresponding soft job in kernel-space. */
	mali_soft_job_state state;                        /**< State of soft job, must be one of MALI_SOFT_JOB_STATE_*. */
	struct mali_soft_job_system *system;              /**< The soft job system this job is in. */
	_mali_osk_list_t system_list;                     /**< List element used by soft job system. */
} mali_soft_job;

/**
 * Per-session soft job system.
 *
 * The soft job system is used to manage all soft jobs that belong to a session.
 */
typedef struct mali_soft_job_system {
	struct mali_session_data *session;                /**< The session this soft job system belongs to. */
	_MALI_OSK_LIST_HEAD(jobs_used);                   /**< List of all allocated soft jobs. */

	_mali_osk_spinlock_irq_t *lock;                   /**< Lock used to protect soft job system and its soft jobs. */
	u32 lock_owner;                                   /**< Contains tid of thread that locked the system or 0, if not locked. */
	u32 last_job_id;                                  /**< Records the last allocated job id.  Protected by lock. */
} mali_soft_job_system;

/**
 * Create a soft job system.
 *
 * @param session The session this soft job system will belong to.
 * @return The new soft job system, or NULL if unsuccessful.
 */
struct mali_soft_job_system *mali_soft_job_system_create(struct mali_session_data *session);

/**
 * Destroy a soft job system.
 *
 * @note The soft job system must not have any started or activated jobs.  Call @ref
 * mali_soft_job_system_abort first.
 *
 * @param system The soft job system we are destroying.
 */
void mali_soft_job_system_destroy(struct mali_soft_job_system *system);

/**
 * Create a soft job.
 *
 * @param system Soft job system to create soft job from.
 * @param type Type of the soft job.
 * @param user_job Identifier for soft job in user space.
 * @return New soft job if successful, NULL if not.
 */
struct mali_soft_job *mali_soft_job_create(struct mali_soft_job_system *system, mali_soft_job_type type, u64 user_job);

/**
 * Destroy soft job.
 *
 * @param job Soft job to destroy.
 */
void mali_soft_job_destroy(struct mali_soft_job *job);

/**
 * Start a soft job.
 *
 * The soft job will be added to the Timeline system which will then activate it after all
 * dependencies have been resolved.
 *
 * Create soft jobs with @ref mali_soft_job_create before starting them.
 *
 * @param job Soft job to start.
 * @param fence Fence representing dependencies for this soft job.
 * @return Point on soft job timeline.
 */
mali_timeline_point mali_soft_job_start(struct mali_soft_job *job, struct mali_timeline_fence *fence);

/**
 * Used by user-space to signal that a soft job has completed.
 *
 * @note Only valid for soft jobs with type MALI_SOFT_JOB_TYPE_USER_SIGNALED.
 *
 * @note The soft job must be in state MALI_SOFT_JOB_STATE_STARTED for the signal to be successful.
 *
 * @note If the soft job was signaled successfully, or it received a time out, the soft job will be
 * destroyed after this call and should no longer be used.
 *
 * @note This function will block until the soft job has been activated.
 *
 * @param system The soft job system the job was started in.
 * @param job_id ID of soft job we are signaling.
 *
 * @return _MALI_OSK_ERR_ITEM_NOT_FOUND if the soft job ID was invalid, _MALI_OSK_ERR_TIMEOUT if the
 * soft job was timed out or _MALI_OSK_ERR_OK if we successfully signaled the soft job.
 */
_mali_osk_errcode_t mali_soft_job_system_signal_job(struct mali_soft_job_system *system, u32 job_id);

/**
 * Used by the Timeline system to activate a soft job.
 *
 * @param job The soft job that is being activated.
 * @return A scheduling bitmask.
 */
mali_scheduler_mask mali_soft_job_system_activate_job(struct mali_soft_job *job);

/**
 * Used by the Timeline system to timeout a soft job.
 *
 * A soft job is timed out if it completes or is signaled later than MALI_TIMELINE_TIMEOUT_HZ after
 * activation.
 *
 * @param job The soft job that is being timed out.
 * @return A scheduling bitmask.
 */
mali_scheduler_mask mali_soft_job_system_timeout_job(struct mali_soft_job *job);

/**
 * Used to clean up activated soft jobs in the soft job system on session abort.
 *
 * @param system The soft job system that is being aborted.
 */
void mali_soft_job_system_abort(struct mali_soft_job_system *system);

#endif /* __MALI_SOFT_JOB_H__ */
diff --git a/drivers/gpu/arm/utgard/common/mali_spinlock_reentrant.c b/drivers/gpu/arm/utgard/common/mali_spinlock_reentrant.c
new file mode 100644
index 000000000000..178abaf43ba1
--- /dev/null
+++ b/drivers/gpu/arm/utgard/common/mali_spinlock_reentrant.c
@@ -0,0 +1,77 @@
/*
 * Copyright (C) 2013, 2015 ARM Limited. All rights reserved.
 *
 * This program is free software and is provided to you under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
 *
 * A copy of the licence is included with the program, and can also be obtained from Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */ + +#include "mali_spinlock_reentrant.h" + +#include "mali_osk.h" +#include "mali_kernel_common.h" + +struct mali_spinlock_reentrant *mali_spinlock_reentrant_init(_mali_osk_lock_order_t lock_order) +{ + struct mali_spinlock_reentrant *spinlock; + + spinlock = _mali_osk_calloc(1, sizeof(struct mali_spinlock_reentrant)); + if (NULL == spinlock) { + return NULL; + } + + spinlock->lock = _mali_osk_spinlock_irq_init(_MALI_OSK_LOCKFLAG_ORDERED, lock_order); + if (NULL == spinlock->lock) { + mali_spinlock_reentrant_term(spinlock); + return NULL; + } + + return spinlock; +} + +void mali_spinlock_reentrant_term(struct mali_spinlock_reentrant *spinlock) +{ + MALI_DEBUG_ASSERT_POINTER(spinlock); + MALI_DEBUG_ASSERT(0 == spinlock->counter && 0 == spinlock->owner); + + if (NULL != spinlock->lock) { + _mali_osk_spinlock_irq_term(spinlock->lock); + } + + _mali_osk_free(spinlock); +} + +void mali_spinlock_reentrant_wait(struct mali_spinlock_reentrant *spinlock, u32 tid) +{ + MALI_DEBUG_ASSERT_POINTER(spinlock); + MALI_DEBUG_ASSERT_POINTER(spinlock->lock); + MALI_DEBUG_ASSERT(0 != tid); + + MALI_DEBUG_PRINT(5, ("%s ^\n", __FUNCTION__)); + + if (tid != spinlock->owner) { + _mali_osk_spinlock_irq_lock(spinlock->lock); + MALI_DEBUG_ASSERT(0 == spinlock->owner && 0 == spinlock->counter); + spinlock->owner = tid; + } + + MALI_DEBUG_PRINT(5, ("%s v\n", __FUNCTION__)); + + ++spinlock->counter; +} + +void mali_spinlock_reentrant_signal(struct mali_spinlock_reentrant *spinlock, u32 tid) +{ + MALI_DEBUG_ASSERT_POINTER(spinlock); + MALI_DEBUG_ASSERT_POINTER(spinlock->lock); + MALI_DEBUG_ASSERT(0 != tid && tid == spinlock->owner); + + --spinlock->counter; + if (0 == spinlock->counter) { + spinlock->owner = 0; + MALI_DEBUG_PRINT(5, ("%s release last\n", __FUNCTION__)); + _mali_osk_spinlock_irq_unlock(spinlock->lock); + } +} diff --git a/drivers/gpu/arm/utgard/common/mali_spinlock_reentrant.h b/drivers/gpu/arm/utgard/common/mali_spinlock_reentrant.h new file mode 100644 index 
000000000000..6a62df850b2f
--- /dev/null
+++ b/drivers/gpu/arm/utgard/common/mali_spinlock_reentrant.h
@@ -0,0 +1,70 @@
/*
 * Copyright (C) 2013, 2015 ARM Limited. All rights reserved.
 *
 * This program is free software and is provided to you under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
 *
 * A copy of the licence is included with the program, and can also be obtained from Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
 */

#ifndef __MALI_SPINLOCK_REENTRANT_H__
#define __MALI_SPINLOCK_REENTRANT_H__

#include "mali_osk.h"
#include "mali_kernel_common.h"

/**
 * Reentrant spinlock.
 */
struct mali_spinlock_reentrant {
	_mali_osk_spinlock_irq_t *lock; /**< Underlying non-reentrant IRQ spinlock. */
	u32 owner;                      /**< Thread id of current owner, or 0 when unlocked. */
	u32 counter;                    /**< Recursion depth for the owning thread. */
};

/**
 * Create a new reentrant spinlock.
 *
 * @param lock_order Lock order.
 * @return New reentrant spinlock, or NULL if allocation or lock creation failed.
 */
struct mali_spinlock_reentrant *mali_spinlock_reentrant_init(_mali_osk_lock_order_t lock_order);

/**
 * Terminate reentrant spinlock and free any associated resources.
 *
 * The spinlock must not be held by any thread when it is terminated.
 *
 * @param spinlock Reentrant spinlock to terminate.
 */
void mali_spinlock_reentrant_term(struct mali_spinlock_reentrant *spinlock);

/**
 * Wait for reentrant spinlock to be signaled (i.e. acquire it).
 *
 * Nested calls from the thread that already owns the lock return immediately.
 *
 * @param spinlock Reentrant spinlock.
 * @param tid Thread ID.
 */
void mali_spinlock_reentrant_wait(struct mali_spinlock_reentrant *spinlock, u32 tid);

/**
 * Signal reentrant spinlock (i.e. release one level of nesting).
 *
 * The underlying lock is released when the nesting count reaches zero.
 *
 * @param spinlock Reentrant spinlock.
 * @param tid Thread ID.
 */
void mali_spinlock_reentrant_signal(struct mali_spinlock_reentrant *spinlock, u32 tid);

/**
 * Check if thread is holding reentrant spinlock.
 *
 * @param spinlock Reentrant spinlock.
 * @param tid Thread ID.
 * @return MALI_TRUE if thread is holding spinlock, MALI_FALSE if not.
+ */ +MALI_STATIC_INLINE mali_bool mali_spinlock_reentrant_is_held(struct mali_spinlock_reentrant *spinlock, u32 tid) +{ + MALI_DEBUG_ASSERT_POINTER(spinlock->lock); + return (tid == spinlock->owner && 0 < spinlock->counter); +} + +#endif /* __MALI_SPINLOCK_REENTRANT_H__ */ diff --git a/drivers/gpu/arm/utgard/common/mali_timeline.c b/drivers/gpu/arm/utgard/common/mali_timeline.c new file mode 100644 index 000000000000..5a767b39d56e --- /dev/null +++ b/drivers/gpu/arm/utgard/common/mali_timeline.c @@ -0,0 +1,1586 @@ +/* + * Copyright (C) 2013-2015 ARM Limited. All rights reserved. + * + * This program is free software and is provided to you under the terms of the GNU General Public License version 2 + * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence. + * + * A copy of the licence is included with the program, and can also be obtained from Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. + */ + +#include "mali_timeline.h" +#include "mali_kernel_common.h" +#include "mali_scheduler.h" +#include "mali_soft_job.h" +#include "mali_timeline_fence_wait.h" +#include "mali_timeline_sync_fence.h" +#include "mali_executor.h" +#include "mali_pp_job.h" + +#define MALI_TIMELINE_SYSTEM_LOCKED(system) (mali_spinlock_reentrant_is_held((system)->spinlock, _mali_osk_get_tid())) + +/* + * Following three elements are used to record how many + * gp, physical pp or virtual pp jobs are delayed in the whole + * timeline system, we can use these three value to decide + * if need to deactivate idle group. 
+ */ +_mali_osk_atomic_t gp_tracker_count; +_mali_osk_atomic_t phy_pp_tracker_count; +_mali_osk_atomic_t virt_pp_tracker_count; + +static mali_scheduler_mask mali_timeline_system_release_waiter(struct mali_timeline_system *system, + struct mali_timeline_waiter *waiter); + +#if defined(CONFIG_SYNC) +#include <linux/version.h> +#if LINUX_VERSION_CODE < KERNEL_VERSION(3,5,0) +#include <linux/list.h> +#include <linux/workqueue.h> +#include <linux/spinlock.h> + +struct mali_deferred_fence_put_entry { + struct hlist_node list; + struct sync_fence *fence; +}; + +static HLIST_HEAD(mali_timeline_sync_fence_to_free_list); +static DEFINE_SPINLOCK(mali_timeline_sync_fence_to_free_lock); + +static void put_sync_fences(struct work_struct *ignore) +{ + struct hlist_head list; + struct hlist_node *tmp, *pos; + unsigned long flags; + struct mali_deferred_fence_put_entry *o; + + spin_lock_irqsave(&mali_timeline_sync_fence_to_free_lock, flags); + hlist_move_list(&mali_timeline_sync_fence_to_free_list, &list); + spin_unlock_irqrestore(&mali_timeline_sync_fence_to_free_lock, flags); + + hlist_for_each_entry_safe(o, pos, tmp, &list, list) { + sync_fence_put(o->fence); + kfree(o); + } +} + +static DECLARE_DELAYED_WORK(delayed_sync_fence_put, put_sync_fences); +#endif /* LINUX_VERSION_CODE < KERNEL_VERSION(3,5,0) */ + +/* Callback that is called when a sync fence a tracker is waiting on is signaled. 
*/ +static void mali_timeline_sync_fence_callback(struct sync_fence *sync_fence, struct sync_fence_waiter *sync_fence_waiter) +{ + struct mali_timeline_system *system; + struct mali_timeline_waiter *waiter; + struct mali_timeline_tracker *tracker; + mali_scheduler_mask schedule_mask = MALI_SCHEDULER_MASK_EMPTY; + u32 tid = _mali_osk_get_tid(); + mali_bool is_aborting = MALI_FALSE; +#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 17, 0) + int fence_status = sync_fence->status; +#else + int fence_status = atomic_read(&sync_fence->status); +#endif + + MALI_DEBUG_ASSERT_POINTER(sync_fence); + MALI_DEBUG_ASSERT_POINTER(sync_fence_waiter); + + tracker = _MALI_OSK_CONTAINER_OF(sync_fence_waiter, struct mali_timeline_tracker, sync_fence_waiter); + MALI_DEBUG_ASSERT_POINTER(tracker); + + system = tracker->system; + MALI_DEBUG_ASSERT_POINTER(system); + MALI_DEBUG_ASSERT_POINTER(system->session); + + mali_spinlock_reentrant_wait(system->spinlock, tid); + + is_aborting = system->session->is_aborting; + if (!is_aborting && (0 > fence_status)) { + MALI_PRINT_ERROR(("Mali Timeline: sync fence fd %d signaled with error %d\n", tracker->fence.sync_fd, fence_status)); + tracker->activation_error |= MALI_TIMELINE_ACTIVATION_ERROR_SYNC_BIT; + } + + waiter = tracker->waiter_sync; + MALI_DEBUG_ASSERT_POINTER(waiter); + + tracker->sync_fence = NULL; + tracker->fence.sync_fd = -1; + + schedule_mask |= mali_timeline_system_release_waiter(system, waiter); + + /* If aborting, wake up sleepers that are waiting for sync fence callbacks to complete. */ + if (is_aborting) { + _mali_osk_wait_queue_wake_up(system->wait_queue); + } + + mali_spinlock_reentrant_signal(system->spinlock, tid); + + /* + * Older versions of Linux, before 3.5, doesn't support fput() in interrupt + * context. For those older kernels, allocate a list object and put the + * fence object on that and defer the call to sync_fence_put() to a workqueue. 
+ */ +#if LINUX_VERSION_CODE < KERNEL_VERSION(3,5,0) + { + struct mali_deferred_fence_put_entry *obj; + + obj = kzalloc(sizeof(struct mali_deferred_fence_put_entry), GFP_ATOMIC); + if (obj) { + unsigned long flags; + mali_bool schedule = MALI_FALSE; + + obj->fence = sync_fence; + + spin_lock_irqsave(&mali_timeline_sync_fence_to_free_lock, flags); + if (hlist_empty(&mali_timeline_sync_fence_to_free_list)) + schedule = MALI_TRUE; + hlist_add_head(&obj->list, &mali_timeline_sync_fence_to_free_list); + spin_unlock_irqrestore(&mali_timeline_sync_fence_to_free_lock, flags); + + if (schedule) + schedule_delayed_work(&delayed_sync_fence_put, 0); + } + } +#else + sync_fence_put(sync_fence); +#endif /* LINUX_VERSION_CODE < KERNEL_VERSION(3,5,0) */ + + if (!is_aborting) { + mali_executor_schedule_from_mask(schedule_mask, MALI_TRUE); + } +} +#endif /* defined(CONFIG_SYNC) */ + +static mali_scheduler_mask mali_timeline_tracker_time_out(struct mali_timeline_tracker *tracker) +{ + MALI_DEBUG_ASSERT_POINTER(tracker); + MALI_DEBUG_ASSERT(MALI_TIMELINE_TRACKER_SOFT == tracker->type); + + return mali_soft_job_system_timeout_job((struct mali_soft_job *) tracker->job); +} + +static void mali_timeline_timer_callback(void *data) +{ + struct mali_timeline_system *system; + struct mali_timeline_tracker *tracker; + struct mali_timeline *timeline; + mali_scheduler_mask schedule_mask = MALI_SCHEDULER_MASK_EMPTY; + u32 tid = _mali_osk_get_tid(); + + timeline = (struct mali_timeline *) data; + MALI_DEBUG_ASSERT_POINTER(timeline); + + system = timeline->system; + MALI_DEBUG_ASSERT_POINTER(system); + + mali_spinlock_reentrant_wait(system->spinlock, tid); + + if (!system->timer_enabled) { + mali_spinlock_reentrant_signal(system->spinlock, tid); + return; + } + + tracker = timeline->tracker_tail; + timeline->timer_active = MALI_FALSE; + + if (NULL != tracker && MALI_TRUE == tracker->timer_active) { + /* This is likely the delayed work that has been schedule out before cancelled. 
*/ + if (MALI_TIMELINE_TIMEOUT_HZ > (_mali_osk_time_tickcount() - tracker->os_tick_activate)) { + mali_spinlock_reentrant_signal(system->spinlock, tid); + return; + } + + schedule_mask = mali_timeline_tracker_time_out(tracker); + tracker->timer_active = MALI_FALSE; + } else { + MALI_PRINT_ERROR(("Mali Timeline: Soft job timer callback without a waiting tracker.\n")); + } + + mali_spinlock_reentrant_signal(system->spinlock, tid); + + mali_executor_schedule_from_mask(schedule_mask, MALI_FALSE); +} + +void mali_timeline_system_stop_timer(struct mali_timeline_system *system) +{ + u32 i; + u32 tid = _mali_osk_get_tid(); + + MALI_DEBUG_ASSERT_POINTER(system); + + mali_spinlock_reentrant_wait(system->spinlock, tid); + system->timer_enabled = MALI_FALSE; + mali_spinlock_reentrant_signal(system->spinlock, tid); + + for (i = 0; i < MALI_TIMELINE_MAX; ++i) { + struct mali_timeline *timeline = system->timelines[i]; + + MALI_DEBUG_ASSERT_POINTER(timeline); + + if (NULL != timeline->delayed_work) { + _mali_osk_wq_delayed_cancel_work_sync(timeline->delayed_work); + timeline->timer_active = MALI_FALSE; + } + } +} + +static void mali_timeline_destroy(struct mali_timeline *timeline) +{ + MALI_DEBUG_ASSERT_POINTER(timeline); + if (NULL != timeline) { + /* Assert that the timeline object has been properly cleaned up before destroying it. 
*/ + MALI_DEBUG_ASSERT(timeline->point_oldest == timeline->point_next); + MALI_DEBUG_ASSERT(NULL == timeline->tracker_head); + MALI_DEBUG_ASSERT(NULL == timeline->tracker_tail); + MALI_DEBUG_ASSERT(NULL == timeline->waiter_head); + MALI_DEBUG_ASSERT(NULL == timeline->waiter_tail); + MALI_DEBUG_ASSERT(NULL != timeline->system); + MALI_DEBUG_ASSERT(MALI_TIMELINE_MAX > timeline->id); + +#if defined(CONFIG_SYNC) + if (NULL != timeline->sync_tl) { + sync_timeline_destroy(timeline->sync_tl); + } +#endif /* defined(CONFIG_SYNC) */ + + if (NULL != timeline->delayed_work) { + _mali_osk_wq_delayed_cancel_work_sync(timeline->delayed_work); + _mali_osk_wq_delayed_delete_work_nonflush(timeline->delayed_work); + } + +#ifndef CONFIG_SYNC + _mali_osk_free(timeline); +#endif + } +} + +static struct mali_timeline *mali_timeline_create(struct mali_timeline_system *system, enum mali_timeline_id id) +{ + struct mali_timeline *timeline; + + MALI_DEBUG_ASSERT_POINTER(system); + MALI_DEBUG_ASSERT(id < MALI_TIMELINE_MAX); + + timeline = (struct mali_timeline *) _mali_osk_calloc(1, sizeof(struct mali_timeline)); + if (NULL == timeline) { + return NULL; + } + + /* Initially the timeline is empty. */ +#if defined(MALI_TIMELINE_DEBUG_START_POINT) + /* Start the timeline a bit before wrapping when debugging. */ + timeline->point_next = UINT_MAX - MALI_TIMELINE_MAX_POINT_SPAN - 128; +#else + timeline->point_next = 1; +#endif + timeline->point_oldest = timeline->point_next; + + /* The tracker and waiter lists will initially be empty. 
*/ + + timeline->system = system; + timeline->id = id; + + timeline->delayed_work = _mali_osk_wq_delayed_create_work(mali_timeline_timer_callback, timeline); + if (NULL == timeline->delayed_work) { + mali_timeline_destroy(timeline); + return NULL; + } + + timeline->timer_active = MALI_FALSE; + +#if defined(CONFIG_SYNC) + { + char timeline_name[32]; + + switch (id) { + case MALI_TIMELINE_GP: + _mali_osk_snprintf(timeline_name, 32, "mali-%u-gp", _mali_osk_get_pid()); + break; + case MALI_TIMELINE_PP: + _mali_osk_snprintf(timeline_name, 32, "mali-%u-pp", _mali_osk_get_pid()); + break; + case MALI_TIMELINE_SOFT: + _mali_osk_snprintf(timeline_name, 32, "mali-%u-soft", _mali_osk_get_pid()); + break; + default: + MALI_PRINT_ERROR(("Mali Timeline: Invalid timeline id %d\n", id)); + mali_timeline_destroy(timeline); + return NULL; + } + + timeline->destroyed = MALI_FALSE; + + timeline->sync_tl = mali_sync_timeline_create(timeline, timeline_name); + if (NULL == timeline->sync_tl) { + mali_timeline_destroy(timeline); + return NULL; + } + + timeline->spinlock = mali_spinlock_reentrant_init(_MALI_OSK_LOCK_ORDER_TIMELINE_SYSTEM); + if (NULL == timeline->spinlock) { + mali_timeline_destroy(timeline); + return NULL; + } + } +#endif /* defined(CONFIG_SYNC) */ + + return timeline; +} + +static void mali_timeline_insert_tracker(struct mali_timeline *timeline, struct mali_timeline_tracker *tracker) +{ + MALI_DEBUG_ASSERT_POINTER(timeline); + MALI_DEBUG_ASSERT_POINTER(tracker); + + if (mali_timeline_is_full(timeline)) { + /* Don't add tracker if timeline is full. */ + tracker->point = MALI_TIMELINE_NO_POINT; + return; + } + + tracker->timeline = timeline; + tracker->point = timeline->point_next; + + /* Find next available point. 
*/ + timeline->point_next++; + if (MALI_TIMELINE_NO_POINT == timeline->point_next) { + timeline->point_next++; + } + + MALI_DEBUG_ASSERT(!mali_timeline_is_empty(timeline)); + + if (MALI_TIMELINE_TRACKER_GP == tracker->type) { + _mali_osk_atomic_inc(&gp_tracker_count); + } else if (MALI_TIMELINE_TRACKER_PP == tracker->type) { + if (mali_pp_job_is_virtual((struct mali_pp_job *)tracker->job)) { + _mali_osk_atomic_inc(&virt_pp_tracker_count); + } else { + _mali_osk_atomic_inc(&phy_pp_tracker_count); + } + } + + /* Add tracker as new head on timeline's tracker list. */ + if (NULL == timeline->tracker_head) { + /* Tracker list is empty. */ + MALI_DEBUG_ASSERT(NULL == timeline->tracker_tail); + + timeline->tracker_tail = tracker; + + MALI_DEBUG_ASSERT(NULL == tracker->timeline_next); + MALI_DEBUG_ASSERT(NULL == tracker->timeline_prev); + } else { + MALI_DEBUG_ASSERT(NULL == timeline->tracker_head->timeline_next); + + tracker->timeline_prev = timeline->tracker_head; + timeline->tracker_head->timeline_next = tracker; + + MALI_DEBUG_ASSERT(NULL == tracker->timeline_next); + } + timeline->tracker_head = tracker; + + MALI_DEBUG_ASSERT(NULL == timeline->tracker_head->timeline_next); + MALI_DEBUG_ASSERT(NULL == timeline->tracker_tail->timeline_prev); +} + +/* Inserting the waiter object into the given timeline */ +static void mali_timeline_insert_waiter(struct mali_timeline *timeline, struct mali_timeline_waiter *waiter_new) +{ + struct mali_timeline_waiter *waiter_prev; + struct mali_timeline_waiter *waiter_next; + + /* Waiter time must be between timeline head and tail, and there must + * be less than MALI_TIMELINE_MAX_POINT_SPAN elements between */ + MALI_DEBUG_ASSERT((waiter_new->point - timeline->point_oldest) < MALI_TIMELINE_MAX_POINT_SPAN); + MALI_DEBUG_ASSERT((-waiter_new->point + timeline->point_next) < MALI_TIMELINE_MAX_POINT_SPAN); + + /* Finding out where to put this waiter, in the linked waiter list of the given timeline **/ + waiter_prev = timeline->waiter_head; /* 
Insert new after waiter_prev */ + waiter_next = NULL; /* Insert new before waiter_next */ + + /* Iterating backwards from head (newest) to tail (oldest) until we + * find the correct spot to insert the new waiter */ + while (waiter_prev && mali_timeline_point_after(waiter_prev->point, waiter_new->point)) { + waiter_next = waiter_prev; + waiter_prev = waiter_prev->timeline_prev; + } + + if (NULL == waiter_prev && NULL == waiter_next) { + /* list is empty */ + timeline->waiter_head = waiter_new; + timeline->waiter_tail = waiter_new; + } else if (NULL == waiter_next) { + /* insert at head */ + waiter_new->timeline_prev = timeline->waiter_head; + timeline->waiter_head->timeline_next = waiter_new; + timeline->waiter_head = waiter_new; + } else if (NULL == waiter_prev) { + /* insert at tail */ + waiter_new->timeline_next = timeline->waiter_tail; + timeline->waiter_tail->timeline_prev = waiter_new; + timeline->waiter_tail = waiter_new; + } else { + /* insert between */ + waiter_new->timeline_next = waiter_next; + waiter_new->timeline_prev = waiter_prev; + waiter_next->timeline_prev = waiter_new; + waiter_prev->timeline_next = waiter_new; + } +} + +static void mali_timeline_update_delayed_work(struct mali_timeline *timeline) +{ + struct mali_timeline_system *system; + struct mali_timeline_tracker *oldest_tracker; + + MALI_DEBUG_ASSERT_POINTER(timeline); + MALI_DEBUG_ASSERT(MALI_TIMELINE_SOFT == timeline->id); + + system = timeline->system; + MALI_DEBUG_ASSERT_POINTER(system); + + MALI_DEBUG_ASSERT(MALI_TIMELINE_SYSTEM_LOCKED(system)); + + /* Timer is disabled, early out. 
*/ + if (!system->timer_enabled) return; + + oldest_tracker = timeline->tracker_tail; + if (NULL != oldest_tracker && 0 == oldest_tracker->trigger_ref_count) { + if (MALI_FALSE == oldest_tracker->timer_active) { + if (MALI_TRUE == timeline->timer_active) { + _mali_osk_wq_delayed_cancel_work_async(timeline->delayed_work); + } + _mali_osk_wq_delayed_schedule_work(timeline->delayed_work, MALI_TIMELINE_TIMEOUT_HZ); + oldest_tracker->timer_active = MALI_TRUE; + timeline->timer_active = MALI_TRUE; + } + } else if (MALI_TRUE == timeline->timer_active) { + _mali_osk_wq_delayed_cancel_work_async(timeline->delayed_work); + timeline->timer_active = MALI_FALSE; + } +} + +static mali_scheduler_mask mali_timeline_update_oldest_point(struct mali_timeline *timeline) +{ + mali_scheduler_mask schedule_mask = MALI_SCHEDULER_MASK_EMPTY; + + MALI_DEBUG_ASSERT_POINTER(timeline); + + MALI_DEBUG_CODE({ + struct mali_timeline_system *system = timeline->system; + MALI_DEBUG_ASSERT_POINTER(system); + + MALI_DEBUG_ASSERT(MALI_TIMELINE_SYSTEM_LOCKED(system)); + }); + + if (NULL != timeline->tracker_tail) { + /* Set oldest point to oldest tracker's point */ + timeline->point_oldest = timeline->tracker_tail->point; + } else { + /* No trackers, mark point list as empty */ + timeline->point_oldest = timeline->point_next; + } + + /* Release all waiters no longer on the timeline's point list. + * Releasing a waiter can trigger this function to be called again, so + * we do not store any pointers on stack. */ + while (NULL != timeline->waiter_tail) { + u32 waiter_time_relative; + u32 time_head_relative; + struct mali_timeline_waiter *waiter = timeline->waiter_tail; + + time_head_relative = timeline->point_next - timeline->point_oldest; + waiter_time_relative = waiter->point - timeline->point_oldest; + + if (waiter_time_relative < time_head_relative) { + /* This and all following waiters are on the point list, so we are done. */ + break; + } + + /* Remove waiter from timeline's waiter list. 
*/ + if (NULL != waiter->timeline_next) { + waiter->timeline_next->timeline_prev = NULL; + } else { + /* This was the last waiter */ + timeline->waiter_head = NULL; + } + timeline->waiter_tail = waiter->timeline_next; + + /* Release waiter. This could activate a tracker, if this was + * the last waiter for the tracker. */ + schedule_mask |= mali_timeline_system_release_waiter(timeline->system, waiter); + } + + return schedule_mask; +} + +void mali_timeline_tracker_init(struct mali_timeline_tracker *tracker, + mali_timeline_tracker_type type, + struct mali_timeline_fence *fence, + void *job) +{ + MALI_DEBUG_ASSERT_POINTER(tracker); + MALI_DEBUG_ASSERT_POINTER(job); + + MALI_DEBUG_ASSERT(MALI_TIMELINE_TRACKER_MAX > type); + + /* Zero out all tracker members. */ + _mali_osk_memset(tracker, 0, sizeof(*tracker)); + + tracker->type = type; + tracker->job = job; + tracker->trigger_ref_count = 1; /* Prevents any callback from trigging while adding it */ + tracker->os_tick_create = _mali_osk_time_tickcount(); + MALI_DEBUG_CODE(tracker->magic = MALI_TIMELINE_TRACKER_MAGIC); + + tracker->activation_error = MALI_TIMELINE_ACTIVATION_ERROR_NONE; + + /* Copy fence. */ + if (NULL != fence) { + _mali_osk_memcpy(&tracker->fence, fence, sizeof(struct mali_timeline_fence)); + } +} + +mali_scheduler_mask mali_timeline_tracker_release(struct mali_timeline_tracker *tracker) +{ + struct mali_timeline *timeline; + struct mali_timeline_system *system; + struct mali_timeline_tracker *tracker_next, *tracker_prev; + mali_scheduler_mask schedule_mask = MALI_SCHEDULER_MASK_EMPTY; + u32 tid = _mali_osk_get_tid(); + + /* Upon entry a group lock will be held, but not a scheduler lock. 
*/ + MALI_DEBUG_ASSERT_POINTER(tracker); + MALI_DEBUG_ASSERT(MALI_TIMELINE_TRACKER_MAGIC == tracker->magic); + + /* Tracker should have been triggered */ + MALI_DEBUG_ASSERT(0 == tracker->trigger_ref_count); + + /* All waiters should have been released at this point */ + MALI_DEBUG_ASSERT(NULL == tracker->waiter_head); + MALI_DEBUG_ASSERT(NULL == tracker->waiter_tail); + + MALI_DEBUG_PRINT(3, ("Mali Timeline: releasing tracker for job 0x%08X\n", tracker->job)); + + timeline = tracker->timeline; + if (NULL == timeline) { + /* Tracker was not on a timeline, there is nothing to release. */ + return MALI_SCHEDULER_MASK_EMPTY; + } + + system = timeline->system; + MALI_DEBUG_ASSERT_POINTER(system); + + mali_spinlock_reentrant_wait(system->spinlock, tid); + + /* Tracker should still be on timeline */ + MALI_DEBUG_ASSERT(!mali_timeline_is_empty(timeline)); + MALI_DEBUG_ASSERT(mali_timeline_is_point_on(timeline, tracker->point)); + + /* Tracker is no longer valid. */ + MALI_DEBUG_CODE(tracker->magic = 0); + + tracker_next = tracker->timeline_next; + tracker_prev = tracker->timeline_prev; + tracker->timeline_next = NULL; + tracker->timeline_prev = NULL; + + /* Removing tracker from timeline's tracker list */ + if (NULL == tracker_next) { + /* This tracker was the head */ + timeline->tracker_head = tracker_prev; + } else { + tracker_next->timeline_prev = tracker_prev; + } + + if (NULL == tracker_prev) { + /* This tracker was the tail */ + timeline->tracker_tail = tracker_next; + MALI_DEBUG_ASSERT(MALI_TIMELINE_SYSTEM_LOCKED(system)); + /* Update the timeline's oldest time and release any waiters */ + schedule_mask |= mali_timeline_update_oldest_point(timeline); + MALI_DEBUG_ASSERT(MALI_TIMELINE_SYSTEM_LOCKED(system)); + } else { + tracker_prev->timeline_next = tracker_next; + } + + MALI_DEBUG_ASSERT(MALI_TIMELINE_SYSTEM_LOCKED(system)); + + /* Update delayed work only when it is the soft job timeline */ + if (MALI_TIMELINE_SOFT == tracker->timeline->id) { + 
mali_timeline_update_delayed_work(tracker->timeline); + } + + mali_spinlock_reentrant_signal(system->spinlock, tid); + + return schedule_mask; +} + +void mali_timeline_system_release_waiter_list(struct mali_timeline_system *system, + struct mali_timeline_waiter *tail, + struct mali_timeline_waiter *head) +{ + MALI_DEBUG_ASSERT_POINTER(system); + MALI_DEBUG_ASSERT_POINTER(head); + MALI_DEBUG_ASSERT_POINTER(tail); + MALI_DEBUG_ASSERT(MALI_TIMELINE_SYSTEM_LOCKED(system)); + + head->tracker_next = system->waiter_empty_list; + system->waiter_empty_list = tail; +} + +static mali_scheduler_mask mali_timeline_tracker_activate(struct mali_timeline_tracker *tracker) +{ + mali_scheduler_mask schedule_mask = MALI_SCHEDULER_MASK_EMPTY; + struct mali_timeline_system *system; + struct mali_timeline *timeline; + u32 tid = _mali_osk_get_tid(); + + MALI_DEBUG_ASSERT_POINTER(tracker); + MALI_DEBUG_ASSERT(MALI_TIMELINE_TRACKER_MAGIC == tracker->magic); + + system = tracker->system; + MALI_DEBUG_ASSERT_POINTER(system); + MALI_DEBUG_ASSERT(MALI_TIMELINE_SYSTEM_LOCKED(system)); + + tracker->os_tick_activate = _mali_osk_time_tickcount(); + + if (NULL != tracker->waiter_head) { + mali_timeline_system_release_waiter_list(system, tracker->waiter_tail, tracker->waiter_head); + tracker->waiter_head = NULL; + tracker->waiter_tail = NULL; + } + + switch (tracker->type) { + case MALI_TIMELINE_TRACKER_GP: + schedule_mask = mali_scheduler_activate_gp_job((struct mali_gp_job *) tracker->job); + + _mali_osk_atomic_dec(&gp_tracker_count); + break; + case MALI_TIMELINE_TRACKER_PP: + if (mali_pp_job_is_virtual((struct mali_pp_job *)tracker->job)) { + _mali_osk_atomic_dec(&virt_pp_tracker_count); + } else { + _mali_osk_atomic_dec(&phy_pp_tracker_count); + } + schedule_mask = mali_scheduler_activate_pp_job((struct mali_pp_job *) tracker->job); + break; + case MALI_TIMELINE_TRACKER_SOFT: + timeline = tracker->timeline; + MALI_DEBUG_ASSERT_POINTER(timeline); + + schedule_mask |= 
mali_soft_job_system_activate_job((struct mali_soft_job *) tracker->job); + + /* Start a soft timer to make sure the soft job be released in a limited time */ + mali_spinlock_reentrant_wait(system->spinlock, tid); + mali_timeline_update_delayed_work(timeline); + mali_spinlock_reentrant_signal(system->spinlock, tid); + break; + case MALI_TIMELINE_TRACKER_WAIT: + mali_timeline_fence_wait_activate((struct mali_timeline_fence_wait_tracker *) tracker->job); + break; + case MALI_TIMELINE_TRACKER_SYNC: +#if defined(CONFIG_SYNC) + mali_timeline_sync_fence_activate((struct mali_timeline_sync_fence_tracker *) tracker->job); +#else + MALI_PRINT_ERROR(("Mali Timeline: sync tracker not supported\n", tracker->type)); +#endif /* defined(CONFIG_SYNC) */ + break; + default: + MALI_PRINT_ERROR(("Mali Timeline - Illegal tracker type: %d\n", tracker->type)); + break; + } + + return schedule_mask; +} + +void mali_timeline_system_tracker_get(struct mali_timeline_system *system, struct mali_timeline_tracker *tracker) +{ + u32 tid = _mali_osk_get_tid(); + + MALI_DEBUG_ASSERT_POINTER(tracker); + MALI_DEBUG_ASSERT_POINTER(system); + + mali_spinlock_reentrant_wait(system->spinlock, tid); + + MALI_DEBUG_ASSERT(0 < tracker->trigger_ref_count); + tracker->trigger_ref_count++; + + mali_spinlock_reentrant_signal(system->spinlock, tid); +} + +mali_scheduler_mask mali_timeline_system_tracker_put(struct mali_timeline_system *system, struct mali_timeline_tracker *tracker, mali_timeline_activation_error activation_error) +{ + u32 tid = _mali_osk_get_tid(); + mali_scheduler_mask schedule_mask = MALI_SCHEDULER_MASK_EMPTY; + + MALI_DEBUG_ASSERT_POINTER(tracker); + MALI_DEBUG_ASSERT_POINTER(system); + + mali_spinlock_reentrant_wait(system->spinlock, tid); + + MALI_DEBUG_ASSERT(0 < tracker->trigger_ref_count); + tracker->trigger_ref_count--; + + tracker->activation_error |= activation_error; + + if (0 == tracker->trigger_ref_count) { + schedule_mask |= mali_timeline_tracker_activate(tracker); + tracker = 
NULL; + } + + mali_spinlock_reentrant_signal(system->spinlock, tid); + + return schedule_mask; +} + +void mali_timeline_fence_copy_uk_fence(struct mali_timeline_fence *fence, _mali_uk_fence_t *uk_fence) +{ + u32 i; + + MALI_DEBUG_ASSERT_POINTER(fence); + MALI_DEBUG_ASSERT_POINTER(uk_fence); + + for (i = 0; i < MALI_TIMELINE_MAX; ++i) { + fence->points[i] = uk_fence->points[i]; + } + + fence->sync_fd = uk_fence->sync_fd; +} + +struct mali_timeline_system *mali_timeline_system_create(struct mali_session_data *session) +{ + u32 i; + struct mali_timeline_system *system; + + MALI_DEBUG_ASSERT_POINTER(session); + MALI_DEBUG_PRINT(4, ("Mali Timeline: creating timeline system\n")); + + system = (struct mali_timeline_system *) _mali_osk_calloc(1, sizeof(struct mali_timeline_system)); + if (NULL == system) { + return NULL; + } + + system->spinlock = mali_spinlock_reentrant_init(_MALI_OSK_LOCK_ORDER_TIMELINE_SYSTEM); + if (NULL == system->spinlock) { + mali_timeline_system_destroy(system); + return NULL; + } + + for (i = 0; i < MALI_TIMELINE_MAX; ++i) { + system->timelines[i] = mali_timeline_create(system, (enum mali_timeline_id)i); + if (NULL == system->timelines[i]) { + mali_timeline_system_destroy(system); + return NULL; + } + } + +#if defined(CONFIG_SYNC) + system->signaled_sync_tl = mali_sync_timeline_create(NULL, "mali-always-signaled"); + if (NULL == system->signaled_sync_tl) { + mali_timeline_system_destroy(system); + return NULL; + } +#endif /* defined(CONFIG_SYNC) */ + + system->waiter_empty_list = NULL; + system->session = session; + system->timer_enabled = MALI_TRUE; + + system->wait_queue = _mali_osk_wait_queue_init(); + if (NULL == system->wait_queue) { + mali_timeline_system_destroy(system); + return NULL; + } + + return system; +} + +#if defined(CONFIG_SYNC) + +/** + * Check if there are any trackers left on timeline. + * + * Used as a wait queue conditional. + * + * @param data Timeline. 
+ * @return MALI_TRUE if there are no trackers on timeline, MALI_FALSE if not. + */ +static mali_bool mali_timeline_has_no_trackers(void *data) +{ + struct mali_timeline *timeline = (struct mali_timeline *) data; + + MALI_DEBUG_ASSERT_POINTER(timeline); + + return mali_timeline_is_empty(timeline); +} + +/** + * Cancel sync fence waiters waited upon by trackers on all timelines. + * + * Will return after all timelines have no trackers left. + * + * @param system Timeline system. + */ +static void mali_timeline_cancel_sync_fence_waiters(struct mali_timeline_system *system) +{ + u32 i; + u32 tid = _mali_osk_get_tid(); + struct mali_timeline_tracker *tracker, *tracker_next; + _MALI_OSK_LIST_HEAD_STATIC_INIT(tracker_list); + + MALI_DEBUG_ASSERT_POINTER(system); + MALI_DEBUG_ASSERT_POINTER(system->session); + MALI_DEBUG_ASSERT(system->session->is_aborting); + + mali_spinlock_reentrant_wait(system->spinlock, tid); + + /* Cancel sync fence waiters. */ + for (i = 0; i < MALI_TIMELINE_MAX; ++i) { + struct mali_timeline *timeline = system->timelines[i]; + + MALI_DEBUG_ASSERT_POINTER(timeline); + + tracker_next = timeline->tracker_tail; + while (NULL != tracker_next) { + tracker = tracker_next; + tracker_next = tracker->timeline_next; + + if (NULL == tracker->sync_fence) continue; + + MALI_DEBUG_PRINT(3, ("Mali Timeline: Cancelling sync fence wait for tracker 0x%08X.\n", tracker)); + + /* Cancel sync fence waiter. */ + if (0 == sync_fence_cancel_async(tracker->sync_fence, &tracker->sync_fence_waiter)) { + /* Callback was not called, move tracker to local list. */ + _mali_osk_list_add(&tracker->sync_fence_cancel_list, &tracker_list); + } + } + } + + mali_spinlock_reentrant_signal(system->spinlock, tid); + + /* Manually call sync fence callback in order to release waiter and trigger activation of tracker. 
*/ + _MALI_OSK_LIST_FOREACHENTRY(tracker, tracker_next, &tracker_list, struct mali_timeline_tracker, sync_fence_cancel_list) { + mali_timeline_sync_fence_callback(tracker->sync_fence, &tracker->sync_fence_waiter); + } + + /* Sleep until all sync fence callbacks are done and all timelines are empty. */ + for (i = 0; i < MALI_TIMELINE_MAX; ++i) { + struct mali_timeline *timeline = system->timelines[i]; + + MALI_DEBUG_ASSERT_POINTER(timeline); + + _mali_osk_wait_queue_wait_event(system->wait_queue, mali_timeline_has_no_trackers, (void *) timeline); + } +} + +#endif /* defined(CONFIG_SYNC) */ + +void mali_timeline_system_abort(struct mali_timeline_system *system) +{ + MALI_DEBUG_CODE(u32 tid = _mali_osk_get_tid();); + + MALI_DEBUG_ASSERT_POINTER(system); + MALI_DEBUG_ASSERT_POINTER(system->session); + MALI_DEBUG_ASSERT(system->session->is_aborting); + + MALI_DEBUG_PRINT(3, ("Mali Timeline: Aborting timeline system for session 0x%08X.\n", system->session)); + +#if defined(CONFIG_SYNC) + mali_timeline_cancel_sync_fence_waiters(system); +#endif /* defined(CONFIG_SYNC) */ + + /* Should not be any waiters or trackers left at this point. 
*/ + MALI_DEBUG_CODE({ + u32 i; + mali_spinlock_reentrant_wait(system->spinlock, tid); + for (i = 0; i < MALI_TIMELINE_MAX; ++i) + { + struct mali_timeline *timeline = system->timelines[i]; + MALI_DEBUG_ASSERT_POINTER(timeline); + MALI_DEBUG_ASSERT(timeline->point_oldest == timeline->point_next); + MALI_DEBUG_ASSERT(NULL == timeline->tracker_head); + MALI_DEBUG_ASSERT(NULL == timeline->tracker_tail); + MALI_DEBUG_ASSERT(NULL == timeline->waiter_head); + MALI_DEBUG_ASSERT(NULL == timeline->waiter_tail); + } + mali_spinlock_reentrant_signal(system->spinlock, tid); + }); +} + +void mali_timeline_system_destroy(struct mali_timeline_system *system) +{ + u32 i; + struct mali_timeline_waiter *waiter, *next; +#if defined(CONFIG_SYNC) + u32 tid = _mali_osk_get_tid(); +#endif + + MALI_DEBUG_ASSERT_POINTER(system); + MALI_DEBUG_ASSERT_POINTER(system->session); + + MALI_DEBUG_PRINT(4, ("Mali Timeline: destroying timeline system\n")); + + if (NULL != system) { + + /* There should be no waiters left on this queue. 
*/ + if (NULL != system->wait_queue) { + _mali_osk_wait_queue_term(system->wait_queue); + system->wait_queue = NULL; + } + + /* Free all waiters in empty list */ + waiter = system->waiter_empty_list; + while (NULL != waiter) { + next = waiter->tracker_next; + _mali_osk_free(waiter); + waiter = next; + } + +#if defined(CONFIG_SYNC) + if (NULL != system->signaled_sync_tl) { + sync_timeline_destroy(system->signaled_sync_tl); + } + + for (i = 0; i < MALI_TIMELINE_MAX; ++i) { + if ((NULL != system->timelines[i]) && (NULL != system->timelines[i]->spinlock)) { + mali_spinlock_reentrant_wait(system->timelines[i]->spinlock, tid); + system->timelines[i]->destroyed = MALI_TRUE; + mali_spinlock_reentrant_signal(system->timelines[i]->spinlock, tid); + } + } +#endif /* defined(CONFIG_SYNC) */ + + for (i = 0; i < MALI_TIMELINE_MAX; ++i) { + if (NULL != system->timelines[i]) { + mali_timeline_destroy(system->timelines[i]); + } + } + + if (NULL != system->spinlock) { + mali_spinlock_reentrant_term(system->spinlock); + } + + _mali_osk_free(system); + } +} + +/** + * Find how many waiters are needed for a given fence. + * + * @param fence The fence to check. + * @return Number of waiters needed for fence. 
+ */ +static u32 mali_timeline_fence_num_waiters(struct mali_timeline_fence *fence) +{ + u32 i, num_waiters = 0; + + MALI_DEBUG_ASSERT_POINTER(fence); + + for (i = 0; i < MALI_TIMELINE_MAX; ++i) { + if (MALI_TIMELINE_NO_POINT != fence->points[i]) { + ++num_waiters; + } + } + +#if defined(CONFIG_SYNC) + if (-1 != fence->sync_fd) ++num_waiters; +#endif /* defined(CONFIG_SYNC) */ + + return num_waiters; +} + +static struct mali_timeline_waiter *mali_timeline_system_get_zeroed_waiter(struct mali_timeline_system *system) +{ + struct mali_timeline_waiter *waiter; + + MALI_DEBUG_ASSERT_POINTER(system); + MALI_DEBUG_ASSERT(MALI_TIMELINE_SYSTEM_LOCKED(system)); + + waiter = system->waiter_empty_list; + if (NULL != waiter) { + /* Remove waiter from empty list and zero it */ + system->waiter_empty_list = waiter->tracker_next; + _mali_osk_memset(waiter, 0, sizeof(*waiter)); + } + + /* Return NULL if list was empty. */ + return waiter; +} + +static void mali_timeline_system_allocate_waiters(struct mali_timeline_system *system, + struct mali_timeline_waiter **tail, + struct mali_timeline_waiter **head, + int max_num_waiters) +{ + u32 i, tid = _mali_osk_get_tid(); + mali_bool do_alloc; + struct mali_timeline_waiter *waiter; + + MALI_DEBUG_ASSERT_POINTER(system); + MALI_DEBUG_ASSERT_POINTER(tail); + MALI_DEBUG_ASSERT_POINTER(head); + + MALI_DEBUG_ASSERT(MALI_TIMELINE_SYSTEM_LOCKED(system)); + + *head = *tail = NULL; + do_alloc = MALI_FALSE; + i = 0; + while (i < max_num_waiters) { + if (MALI_FALSE == do_alloc) { + waiter = mali_timeline_system_get_zeroed_waiter(system); + if (NULL == waiter) { + do_alloc = MALI_TRUE; + mali_spinlock_reentrant_signal(system->spinlock, tid); + continue; + } + } else { + waiter = _mali_osk_calloc(1, sizeof(struct mali_timeline_waiter)); + if (NULL == waiter) break; + } + ++i; + if (NULL == *tail) { + *tail = waiter; + *head = waiter; + } else { + (*head)->tracker_next = waiter; + *head = waiter; + } + } + if (MALI_TRUE == do_alloc) { + 
mali_spinlock_reentrant_wait(system->spinlock, tid); + } +} + +/** + * Create waiters for the given tracker. The tracker is activated when all waiters are release. + * + * @note Tracker can potentially be activated before this function returns. + * + * @param system Timeline system. + * @param tracker Tracker we will create waiters for. + * @param waiter_tail List of pre-allocated waiters. + * @param waiter_head List of pre-allocated waiters. + */ +static void mali_timeline_system_create_waiters_and_unlock(struct mali_timeline_system *system, + struct mali_timeline_tracker *tracker, + struct mali_timeline_waiter *waiter_tail, + struct mali_timeline_waiter *waiter_head) +{ + int i; + u32 tid = _mali_osk_get_tid(); + mali_scheduler_mask schedule_mask = MALI_SCHEDULER_MASK_EMPTY; +#if defined(CONFIG_SYNC) + struct sync_fence *sync_fence = NULL; +#endif /* defined(CONFIG_SYNC) */ + + MALI_DEBUG_ASSERT_POINTER(system); + MALI_DEBUG_ASSERT_POINTER(tracker); + + MALI_DEBUG_ASSERT(MALI_TIMELINE_SYSTEM_LOCKED(system)); + + MALI_DEBUG_ASSERT(NULL == tracker->waiter_head); + MALI_DEBUG_ASSERT(NULL == tracker->waiter_tail); + MALI_DEBUG_ASSERT(NULL != tracker->job); + + /* Creating waiter object for all the timelines the fence is put on. Inserting this waiter + * into the timelines sorted list of waiters */ + for (i = 0; i < MALI_TIMELINE_MAX; ++i) { + mali_timeline_point point; + struct mali_timeline *timeline; + struct mali_timeline_waiter *waiter; + + /* Get point on current timeline from tracker's fence. */ + point = tracker->fence.points[i]; + + if (likely(MALI_TIMELINE_NO_POINT == point)) { + /* Fence contains no point on this timeline so we don't need a waiter. 
*/ + continue; + } + + timeline = system->timelines[i]; + MALI_DEBUG_ASSERT_POINTER(timeline); + + if (unlikely(!mali_timeline_is_point_valid(timeline, point))) { + MALI_PRINT_ERROR(("Mali Timeline: point %d is not valid (oldest=%d, next=%d)\n", + point, timeline->point_oldest, timeline->point_next)); + continue; + } + + if (likely(mali_timeline_is_point_released(timeline, point))) { + /* Tracker representing the point has been released so we don't need a + * waiter. */ + continue; + } + + /* The point is on timeline. */ + MALI_DEBUG_ASSERT(mali_timeline_is_point_on(timeline, point)); + + /* Get a new zeroed waiter object. */ + if (likely(NULL != waiter_tail)) { + waiter = waiter_tail; + waiter_tail = waiter_tail->tracker_next; + } else { + MALI_PRINT_ERROR(("Mali Timeline: failed to allocate memory for waiter\n")); + continue; + } + + /* Yanking the trigger ref count of the tracker. */ + tracker->trigger_ref_count++; + + waiter->point = point; + waiter->tracker = tracker; + + /* Insert waiter on tracker's singly-linked waiter list. */ + if (NULL == tracker->waiter_head) { + /* list is empty */ + MALI_DEBUG_ASSERT(NULL == tracker->waiter_tail); + tracker->waiter_tail = waiter; + } else { + tracker->waiter_head->tracker_next = waiter; + } + tracker->waiter_head = waiter; + + /* Add waiter to timeline. */ + mali_timeline_insert_waiter(timeline, waiter); + } +#if defined(CONFIG_SYNC) + if (-1 != tracker->fence.sync_fd) { + int ret; + struct mali_timeline_waiter *waiter; + + sync_fence = sync_fence_fdget(tracker->fence.sync_fd); + if (unlikely(NULL == sync_fence)) { + MALI_PRINT_ERROR(("Mali Timeline: failed to get sync fence from fd %d\n", tracker->fence.sync_fd)); + goto exit; + } + + /* Check if we have a zeroed waiter object available. */ + if (unlikely(NULL == waiter_tail)) { + MALI_PRINT_ERROR(("Mali Timeline: failed to allocate memory for waiter\n")); + goto exit; + } + + /* Start asynchronous wait that will release waiter when the fence is signaled. 
*/ + sync_fence_waiter_init(&tracker->sync_fence_waiter, mali_timeline_sync_fence_callback); + ret = sync_fence_wait_async(sync_fence, &tracker->sync_fence_waiter); + if (1 == ret) { + /* Fence already signaled, no waiter needed. */ + tracker->fence.sync_fd = -1; + goto exit; + } else if (0 != ret) { + MALI_PRINT_ERROR(("Mali Timeline: sync fence fd %d signaled with error %d\n", tracker->fence.sync_fd, ret)); + tracker->activation_error |= MALI_TIMELINE_ACTIVATION_ERROR_SYNC_BIT; + goto exit; + } + + /* Grab new zeroed waiter object. */ + waiter = waiter_tail; + waiter_tail = waiter_tail->tracker_next; + + /* Increase the trigger ref count of the tracker. */ + tracker->trigger_ref_count++; + + waiter->point = MALI_TIMELINE_NO_POINT; + waiter->tracker = tracker; + + /* Insert waiter on tracker's singly-linked waiter list. */ + if (NULL == tracker->waiter_head) { + /* list is empty */ + MALI_DEBUG_ASSERT(NULL == tracker->waiter_tail); + tracker->waiter_tail = waiter; + } else { + tracker->waiter_head->tracker_next = waiter; + } + tracker->waiter_head = waiter; + + /* Also store waiter in separate field for easy access by sync callback. */ + tracker->waiter_sync = waiter; + + /* Store the sync fence in tracker so we can retrieve in abort session, if needed. */ + tracker->sync_fence = sync_fence; + + sync_fence = NULL; + } +exit: +#endif /* defined(CONFIG_SYNC) */ + + if (NULL != waiter_tail) { + mali_timeline_system_release_waiter_list(system, waiter_tail, waiter_head); + } + + /* Release the initial trigger ref count. */ + tracker->trigger_ref_count--; + + /* If there were no waiters added to this tracker we activate immediately. 
*/ + if (0 == tracker->trigger_ref_count) { + schedule_mask |= mali_timeline_tracker_activate(tracker); + } + + mali_spinlock_reentrant_signal(system->spinlock, tid); + +#if defined(CONFIG_SYNC) + if (NULL != sync_fence) { + sync_fence_put(sync_fence); + } +#endif /* defined(CONFIG_SYNC) */ + + mali_executor_schedule_from_mask(schedule_mask, MALI_FALSE); +} + +mali_timeline_point mali_timeline_system_add_tracker(struct mali_timeline_system *system, + struct mali_timeline_tracker *tracker, + enum mali_timeline_id timeline_id) +{ + int num_waiters = 0; + struct mali_timeline_waiter *waiter_tail, *waiter_head; + u32 tid = _mali_osk_get_tid(); + mali_timeline_point point = MALI_TIMELINE_NO_POINT; + + MALI_DEBUG_ASSERT_POINTER(system); + MALI_DEBUG_ASSERT_POINTER(system->session); + MALI_DEBUG_ASSERT_POINTER(tracker); + + MALI_DEBUG_ASSERT(MALI_FALSE == system->session->is_aborting); + MALI_DEBUG_ASSERT(MALI_TIMELINE_TRACKER_MAX > tracker->type); + MALI_DEBUG_ASSERT(MALI_TIMELINE_TRACKER_MAGIC == tracker->magic); + + MALI_DEBUG_PRINT(4, ("Mali Timeline: adding tracker for job %p, timeline: %d\n", tracker->job, timeline_id)); + + MALI_DEBUG_ASSERT(0 < tracker->trigger_ref_count); + tracker->system = system; + + mali_spinlock_reentrant_wait(system->spinlock, tid); + + num_waiters = mali_timeline_fence_num_waiters(&tracker->fence); + + /* Allocate waiters. */ + mali_timeline_system_allocate_waiters(system, &waiter_tail, &waiter_head, num_waiters); + MALI_DEBUG_ASSERT(MALI_TIMELINE_SYSTEM_LOCKED(system)); + + /* Add tracker to timeline. This will allocate a point for the tracker on the timeline. If + * timeline ID is MALI_TIMELINE_NONE the tracker will NOT be added to a timeline and the + * point will be MALI_TIMELINE_NO_POINT. + * + * NOTE: the tracker can fail to be added if the timeline is full. If this happens, the + * point will be MALI_TIMELINE_NO_POINT. 
*/ + MALI_DEBUG_ASSERT(timeline_id < MALI_TIMELINE_MAX || timeline_id == MALI_TIMELINE_NONE); + if (likely(timeline_id < MALI_TIMELINE_MAX)) { + struct mali_timeline *timeline = system->timelines[timeline_id]; + mali_timeline_insert_tracker(timeline, tracker); + MALI_DEBUG_ASSERT(!mali_timeline_is_empty(timeline)); + } + + point = tracker->point; + + /* Create waiters for tracker based on supplied fence. Each waiter will increase the + * trigger ref count. */ + mali_timeline_system_create_waiters_and_unlock(system, tracker, waiter_tail, waiter_head); + tracker = NULL; + + /* At this point the tracker object might have been freed so we should no longer + * access it. */ + + + /* The tracker will always be activated after calling add_tracker, even if NO_POINT is + * returned. */ + return point; +} + +static mali_scheduler_mask mali_timeline_system_release_waiter(struct mali_timeline_system *system, + struct mali_timeline_waiter *waiter) +{ + struct mali_timeline_tracker *tracker; + mali_scheduler_mask schedule_mask = MALI_SCHEDULER_MASK_EMPTY; + + MALI_DEBUG_ASSERT_POINTER(system); + MALI_DEBUG_ASSERT_POINTER(waiter); + + MALI_DEBUG_ASSERT(MALI_TIMELINE_SYSTEM_LOCKED(system)); + + tracker = waiter->tracker; + MALI_DEBUG_ASSERT_POINTER(tracker); + + /* At this point the waiter has been removed from the timeline's waiter list, but it is + * still on the tracker's waiter list. All of the tracker's waiters will be released when + * the tracker is activated. 
*/ + + waiter->point = MALI_TIMELINE_NO_POINT; + waiter->tracker = NULL; + + tracker->trigger_ref_count--; + if (0 == tracker->trigger_ref_count) { + /* This was the last waiter; activate tracker */ + schedule_mask |= mali_timeline_tracker_activate(tracker); + tracker = NULL; + } + + return schedule_mask; +} + +mali_timeline_point mali_timeline_system_get_latest_point(struct mali_timeline_system *system, + enum mali_timeline_id timeline_id) +{ + mali_timeline_point point; + struct mali_timeline *timeline; + u32 tid = _mali_osk_get_tid(); + + MALI_DEBUG_ASSERT_POINTER(system); + + if (MALI_TIMELINE_MAX <= timeline_id) { + return MALI_TIMELINE_NO_POINT; + } + + mali_spinlock_reentrant_wait(system->spinlock, tid); + + timeline = system->timelines[timeline_id]; + MALI_DEBUG_ASSERT_POINTER(timeline); + + point = MALI_TIMELINE_NO_POINT; + if (timeline->point_oldest != timeline->point_next) { + point = timeline->point_next - 1; + if (MALI_TIMELINE_NO_POINT == point) point--; + } + + mali_spinlock_reentrant_signal(system->spinlock, tid); + + return point; +} + +void mali_timeline_initialize(void) +{ + _mali_osk_atomic_init(&gp_tracker_count, 0); + _mali_osk_atomic_init(&phy_pp_tracker_count, 0); + _mali_osk_atomic_init(&virt_pp_tracker_count, 0); +} + +void mali_timeline_terminate(void) +{ + _mali_osk_atomic_term(&gp_tracker_count); + _mali_osk_atomic_term(&phy_pp_tracker_count); + _mali_osk_atomic_term(&virt_pp_tracker_count); +} + +#if defined(MALI_TIMELINE_DEBUG_FUNCTIONS) + +static mali_bool is_waiting_on_timeline(struct mali_timeline_tracker *tracker, enum mali_timeline_id id) +{ + struct mali_timeline *timeline; + struct mali_timeline_system *system; + + MALI_DEBUG_ASSERT_POINTER(tracker); + + MALI_DEBUG_ASSERT_POINTER(tracker->timeline); + timeline = tracker->timeline; + + MALI_DEBUG_ASSERT_POINTER(timeline->system); + system = timeline->system; + + if (MALI_TIMELINE_MAX > id) { + if (MALI_TIMELINE_NO_POINT != tracker->fence.points[id]) { + return 
mali_timeline_is_point_on(system->timelines[id], tracker->fence.points[id]); + } else { + return MALI_FALSE; + } + } else { + MALI_DEBUG_ASSERT(MALI_TIMELINE_NONE == id); + return MALI_FALSE; + } +} + +static const char *timeline_id_to_string(enum mali_timeline_id id) +{ + switch (id) { + case MALI_TIMELINE_GP: + return "GP"; + case MALI_TIMELINE_PP: + return "PP"; + case MALI_TIMELINE_SOFT: + return "SOFT"; + default: + return "NONE"; + } +} + +static const char *timeline_tracker_type_to_string(enum mali_timeline_tracker_type type) +{ + switch (type) { + case MALI_TIMELINE_TRACKER_GP: + return "GP"; + case MALI_TIMELINE_TRACKER_PP: + return "PP"; + case MALI_TIMELINE_TRACKER_SOFT: + return "SOFT"; + case MALI_TIMELINE_TRACKER_WAIT: + return "WAIT"; + case MALI_TIMELINE_TRACKER_SYNC: + return "SYNC"; + default: + return "INVALID"; + } +} + +mali_timeline_tracker_state mali_timeline_debug_get_tracker_state(struct mali_timeline_tracker *tracker) +{ + struct mali_timeline *timeline = NULL; + + MALI_DEBUG_ASSERT_POINTER(tracker); + timeline = tracker->timeline; + + if (0 != tracker->trigger_ref_count) { + return MALI_TIMELINE_TS_WAITING; + } + + if (timeline && (timeline->tracker_tail == tracker || NULL != tracker->timeline_prev)) { + return MALI_TIMELINE_TS_ACTIVE; + } + + if (timeline && (MALI_TIMELINE_NO_POINT == tracker->point)) { + return MALI_TIMELINE_TS_INIT; + } + + return MALI_TIMELINE_TS_FINISH; +} + +void mali_timeline_debug_print_tracker(struct mali_timeline_tracker *tracker, _mali_osk_print_ctx *print_ctx) +{ + const char *tracker_state = "IWAF"; + char state_char = 'I'; + char tracker_type[32] = {0}; + + MALI_DEBUG_ASSERT_POINTER(tracker); + + state_char = *(tracker_state + mali_timeline_debug_get_tracker_state(tracker)); + _mali_osk_snprintf(tracker_type, sizeof(tracker_type), "%s", timeline_tracker_type_to_string(tracker->type)); + +#if defined(CONFIG_SYNC) + if (0 != tracker->trigger_ref_count) { + _mali_osk_ctxprintf(print_ctx, "TL: %s %u %c - 
ref_wait:%u [%s(%u),%s(%u),%s(%u), fd:%d, fence:(0x%08X)] job:(0x%08X)\n", + tracker_type, tracker->point, state_char, tracker->trigger_ref_count, + is_waiting_on_timeline(tracker, MALI_TIMELINE_GP) ? "WaitGP" : " ", tracker->fence.points[0], + is_waiting_on_timeline(tracker, MALI_TIMELINE_PP) ? "WaitPP" : " ", tracker->fence.points[1], + is_waiting_on_timeline(tracker, MALI_TIMELINE_SOFT) ? "WaitSOFT" : " ", tracker->fence.points[2], + tracker->fence.sync_fd, tracker->sync_fence, tracker->job); + } else { + _mali_osk_ctxprintf(print_ctx, "TL: %s %u %c fd:%d fence:(0x%08X) job:(0x%08X)\n", + tracker_type, tracker->point, state_char, + tracker->fence.sync_fd, tracker->sync_fence, tracker->job); + } +#else + if (0 != tracker->trigger_ref_count) { + _mali_osk_ctxprintf(print_ctx, "TL: %s %u %c - ref_wait:%u [%s(%u),%s(%u),%s(%u)] job:(0x%08X)\n", + tracker_type, tracker->point, state_char, tracker->trigger_ref_count, + is_waiting_on_timeline(tracker, MALI_TIMELINE_GP) ? "WaitGP" : " ", tracker->fence.points[0], + is_waiting_on_timeline(tracker, MALI_TIMELINE_PP) ? "WaitPP" : " ", tracker->fence.points[1], + is_waiting_on_timeline(tracker, MALI_TIMELINE_SOFT) ? 
"WaitSOFT" : " ", tracker->fence.points[2], + tracker->job); + } else { + _mali_osk_ctxprintf(print_ctx, "TL: %s %u %c job:(0x%08X)\n", + tracker_type, tracker->point, state_char, + tracker->job); + } +#endif +} + +void mali_timeline_debug_print_timeline(struct mali_timeline *timeline, _mali_osk_print_ctx *print_ctx) +{ + struct mali_timeline_tracker *tracker = NULL; + + MALI_DEBUG_ASSERT_POINTER(timeline); + + tracker = timeline->tracker_tail; + while (NULL != tracker) { + mali_timeline_debug_print_tracker(tracker, print_ctx); + tracker = tracker->timeline_next; + } +} + +#if !(LINUX_VERSION_CODE < KERNEL_VERSION(3, 17, 0)) +void mali_timeline_debug_direct_print_tracker(struct mali_timeline_tracker *tracker) +{ + const char *tracker_state = "IWAF"; + char state_char = 'I'; + char tracker_type[32] = {0}; + + MALI_DEBUG_ASSERT_POINTER(tracker); + + state_char = *(tracker_state + mali_timeline_debug_get_tracker_state(tracker)); + _mali_osk_snprintf(tracker_type, sizeof(tracker_type), "%s", timeline_tracker_type_to_string(tracker->type)); + +#if defined(CONFIG_SYNC) + if (0 != tracker->trigger_ref_count) { + MALI_PRINT(("TL: %s %u %c - ref_wait:%u [%s(%u),%s(%u),%s(%u), fd:%d, fence:(0x%08X)] job:(0x%08X)\n", + tracker_type, tracker->point, state_char, tracker->trigger_ref_count, + is_waiting_on_timeline(tracker, MALI_TIMELINE_GP) ? "WaitGP" : " ", tracker->fence.points[0], + is_waiting_on_timeline(tracker, MALI_TIMELINE_PP) ? "WaitPP" : " ", tracker->fence.points[1], + is_waiting_on_timeline(tracker, MALI_TIMELINE_SOFT) ? 
"WaitSOFT" : " ", tracker->fence.points[2], + tracker->fence.sync_fd, tracker->sync_fence, tracker->job)); + } else { + MALI_PRINT(("TL: %s %u %c fd:%d fence:(0x%08X) job:(0x%08X)\n", + tracker_type, tracker->point, state_char, + tracker->fence.sync_fd, tracker->sync_fence, tracker->job)); + } +#else + if (0 != tracker->trigger_ref_count) { + MALI_PRINT(("TL: %s %u %c - ref_wait:%u [%s(%u),%s(%u),%s(%u)] job:(0x%08X)\n", + tracker_type, tracker->point, state_char, tracker->trigger_ref_count, + is_waiting_on_timeline(tracker, MALI_TIMELINE_GP) ? "WaitGP" : " ", tracker->fence.points[0], + is_waiting_on_timeline(tracker, MALI_TIMELINE_PP) ? "WaitPP" : " ", tracker->fence.points[1], + is_waiting_on_timeline(tracker, MALI_TIMELINE_SOFT) ? "WaitSOFT" : " ", tracker->fence.points[2], + tracker->job)); + } else { + MALI_PRINT(("TL: %s %u %c job:(0x%08X)\n", + tracker_type, tracker->point, state_char, + tracker->job)); + } +#endif +} + +void mali_timeline_debug_direct_print_timeline(struct mali_timeline *timeline) +{ + struct mali_timeline_tracker *tracker = NULL; + + MALI_DEBUG_ASSERT_POINTER(timeline); + + tracker = timeline->tracker_tail; + while (NULL != tracker) { + mali_timeline_debug_direct_print_tracker(tracker); + tracker = tracker->timeline_next; + } +} + +#endif + +void mali_timeline_debug_print_system(struct mali_timeline_system *system, _mali_osk_print_ctx *print_ctx) +{ + int i; + int num_printed = 0; + u32 tid = _mali_osk_get_tid(); + + MALI_DEBUG_ASSERT_POINTER(system); + + mali_spinlock_reentrant_wait(system->spinlock, tid); + + /* Print all timelines */ + for (i = 0; i < MALI_TIMELINE_MAX; ++i) { + struct mali_timeline *timeline = system->timelines[i]; + + MALI_DEBUG_ASSERT_POINTER(timeline); + + if (NULL == timeline->tracker_head) continue; + + _mali_osk_ctxprintf(print_ctx, "TL: Timeline %s:\n", + timeline_id_to_string((enum mali_timeline_id)i)); + + mali_timeline_debug_print_timeline(timeline, print_ctx); + num_printed++; + } + + if (0 == num_printed) 
{ + _mali_osk_ctxprintf(print_ctx, "TL: All timelines empty\n"); + } + + mali_spinlock_reentrant_signal(system->spinlock, tid); +} + +#endif /* defined(MALI_TIMELINE_DEBUG_FUNCTIONS) */ diff --git a/drivers/gpu/arm/utgard/common/mali_timeline.h b/drivers/gpu/arm/utgard/common/mali_timeline.h new file mode 100644 index 000000000000..58d83839f4fe --- /dev/null +++ b/drivers/gpu/arm/utgard/common/mali_timeline.h @@ -0,0 +1,535 @@ +/* + * Copyright (C) 2013-2015 ARM Limited. All rights reserved. + * + * This program is free software and is provided to you under the terms of the GNU General Public License version 2 + * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence. + * + * A copy of the licence is included with the program, and can also be obtained from Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. + */ + +#ifndef __MALI_TIMELINE_H__ +#define __MALI_TIMELINE_H__ + +#include "mali_osk.h" +#include "mali_ukk.h" +#include "mali_session.h" +#include "mali_kernel_common.h" +#include "mali_spinlock_reentrant.h" +#include "mali_sync.h" +#include "mali_scheduler_types.h" +#include <linux/version.h> + +/** + * Soft job timeout. + * + * Soft jobs have to be signaled as complete after activation. Normally this is done by user space, + * but in order to guarantee that every soft job is completed, we also have a timer. + */ +#define MALI_TIMELINE_TIMEOUT_HZ ((unsigned long) (HZ * 3 / 2)) /* 1500 ms. */ + +/** + * Timeline type. + */ +typedef enum mali_timeline_id { + MALI_TIMELINE_GP = MALI_UK_TIMELINE_GP, /**< GP job timeline. */ + MALI_TIMELINE_PP = MALI_UK_TIMELINE_PP, /**< PP job timeline. */ + MALI_TIMELINE_SOFT = MALI_UK_TIMELINE_SOFT, /**< Soft job timeline. */ + MALI_TIMELINE_MAX = MALI_UK_TIMELINE_MAX +} mali_timeline_id; + +/** + * Used by trackers that should not be added to a timeline (@ref mali_timeline_system_add_tracker). 
+ */ +#define MALI_TIMELINE_NONE MALI_TIMELINE_MAX + +/** + * Tracker type. + */ +typedef enum mali_timeline_tracker_type { + MALI_TIMELINE_TRACKER_GP = 0, /**< Tracker used by GP jobs. */ + MALI_TIMELINE_TRACKER_PP = 1, /**< Tracker used by PP jobs. */ + MALI_TIMELINE_TRACKER_SOFT = 2, /**< Tracker used by soft jobs. */ + MALI_TIMELINE_TRACKER_WAIT = 3, /**< Tracker used for fence wait. */ + MALI_TIMELINE_TRACKER_SYNC = 4, /**< Tracker used for sync fence. */ + MALI_TIMELINE_TRACKER_MAX = 5, +} mali_timeline_tracker_type; + +/** + * Tracker activation error. + */ +typedef u32 mali_timeline_activation_error; +#define MALI_TIMELINE_ACTIVATION_ERROR_NONE 0 +#define MALI_TIMELINE_ACTIVATION_ERROR_SYNC_BIT (1<<1) +#define MALI_TIMELINE_ACTIVATION_ERROR_FATAL_BIT (1<<0) + +/** + * Type used to represent a point on a timeline. + */ +typedef u32 mali_timeline_point; + +/** + * Used to represent that no point on a timeline. + */ +#define MALI_TIMELINE_NO_POINT ((mali_timeline_point) 0) + +/** + * The maximum span of points on a timeline. A timeline will be considered full if the difference + * between the oldest and newest points is equal or larger to this value. + */ +#define MALI_TIMELINE_MAX_POINT_SPAN 65536 + +/** + * Magic value used to assert on validity of trackers. + */ +#define MALI_TIMELINE_TRACKER_MAGIC 0xabcdabcd + +struct mali_timeline; +struct mali_timeline_waiter; +struct mali_timeline_tracker; + +/** + * Timeline fence. + */ +struct mali_timeline_fence { + mali_timeline_point points[MALI_TIMELINE_MAX]; /**< For each timeline, a point or MALI_TIMELINE_NO_POINT. */ + s32 sync_fd; /**< A file descriptor representing a sync fence, or -1. */ +}; + +/** + * Timeline system. + * + * The Timeline system has a set of timelines associated with a session. 
+ */ +struct mali_timeline_system { + struct mali_spinlock_reentrant *spinlock; /**< Spin lock protecting the timeline system */ + struct mali_timeline *timelines[MALI_TIMELINE_MAX]; /**< The timelines in this system */ + + /* Single-linked list of unused waiter objects. Uses the tracker_next field in tracker. */ + struct mali_timeline_waiter *waiter_empty_list; + + struct mali_session_data *session; /**< Session that owns this system. */ + + mali_bool timer_enabled; /**< Set to MALI_TRUE if soft job timer should be enabled, MALI_FALSE if not. */ + + _mali_osk_wait_queue_t *wait_queue; /**< Wait queue. */ + +#if defined(CONFIG_SYNC) + struct sync_timeline *signaled_sync_tl; /**< Special sync timeline used to create pre-signaled sync fences */ +#endif /* defined(CONFIG_SYNC) */ +}; + +/** + * Timeline. Each Timeline system will have MALI_TIMELINE_MAX timelines. + */ +struct mali_timeline { + mali_timeline_point point_next; /**< The next available point. */ + mali_timeline_point point_oldest; /**< The oldest point not released. */ + + /* Double-linked list of trackers. Sorted in ascending order by tracker->time_number with + * tail pointing to the tracker with the oldest time. */ + struct mali_timeline_tracker *tracker_head; + struct mali_timeline_tracker *tracker_tail; + + /* Double-linked list of waiters. Sorted in ascending order by waiter->time_number_wait + * with tail pointing to the waiter with oldest wait time. */ + struct mali_timeline_waiter *waiter_head; + struct mali_timeline_waiter *waiter_tail; + + struct mali_timeline_system *system; /**< Timeline system this timeline belongs to. */ + enum mali_timeline_id id; /**< Timeline type. */ + +#if defined(CONFIG_SYNC) + struct sync_timeline *sync_tl; /**< Sync timeline that corresponds to this timeline. 
*/ + mali_bool destroyed; + struct mali_spinlock_reentrant *spinlock; /**< Spin lock protecting the timeline system */ +#endif /* defined(CONFIG_SYNC) */ + + /* The following fields are used to time out soft job trackers. */ + _mali_osk_wq_delayed_work_t *delayed_work; + mali_bool timer_active; +}; + +/** + * Timeline waiter. + */ +struct mali_timeline_waiter { + mali_timeline_point point; /**< Point on timeline we are waiting for to be released. */ + struct mali_timeline_tracker *tracker; /**< Tracker that is waiting. */ + + struct mali_timeline_waiter *timeline_next; /**< Next waiter on timeline's waiter list. */ + struct mali_timeline_waiter *timeline_prev; /**< Previous waiter on timeline's waiter list. */ + + struct mali_timeline_waiter *tracker_next; /**< Next waiter on tracker's waiter list. */ +}; + +/** + * Timeline tracker. + */ +struct mali_timeline_tracker { + MALI_DEBUG_CODE(u32 magic); /**< Should always be MALI_TIMELINE_TRACKER_MAGIC for a valid tracker. */ + + mali_timeline_point point; /**< Point on timeline for this tracker */ + + struct mali_timeline_tracker *timeline_next; /**< Next tracker on timeline's tracker list */ + struct mali_timeline_tracker *timeline_prev; /**< Previous tracker on timeline's tracker list */ + + u32 trigger_ref_count; /**< When zero tracker will be activated */ + mali_timeline_activation_error activation_error; /**< Activation error. */ + struct mali_timeline_fence fence; /**< Fence used to create this tracker */ + + /* Single-linked list of waiters. Sorted in order of insertions with + * tail pointing to first waiter. */ + struct mali_timeline_waiter *waiter_head; + struct mali_timeline_waiter *waiter_tail; + +#if defined(CONFIG_SYNC) + /* These are only used if the tracker is waiting on a sync fence. */ + struct mali_timeline_waiter *waiter_sync; /**< A direct pointer to timeline waiter representing sync fence. 
*/ + struct sync_fence_waiter sync_fence_waiter; /**< Used to connect sync fence and tracker in sync fence wait callback. */ + struct sync_fence *sync_fence; /**< The sync fence this tracker is waiting on. */ + _mali_osk_list_t sync_fence_cancel_list; /**< List node used to cancel sync fence waiters. */ +#endif /* defined(CONFIG_SYNC) */ + + struct mali_timeline_system *system; /**< Timeline system. */ + struct mali_timeline *timeline; /**< Timeline, or NULL if not on a timeline. */ + enum mali_timeline_tracker_type type; /**< Type of tracker. */ + void *job; /**< Owner of tracker. */ + + /* The following fields are used to time out soft job trackers. */ + unsigned long os_tick_create; + unsigned long os_tick_activate; + mali_bool timer_active; +}; + +extern _mali_osk_atomic_t gp_tracker_count; +extern _mali_osk_atomic_t phy_pp_tracker_count; +extern _mali_osk_atomic_t virt_pp_tracker_count; + +/** + * What follows is a set of functions to check the state of a timeline and to determine where on a + * timeline a given point is. Most of these checks will translate the timeline so the oldest point + * on the timeline is aligned with zero. Remember that all of these calculation are done on + * unsigned integers. + * + * The following example illustrates the three different states a point can be in. The timeline has + * been translated to put the oldest point at zero: + * + * + * + * [ point is in forbidden zone ] + * 64k wide + * MALI_TIMELINE_MAX_POINT_SPAN + * + * [ point is on timeline ) ( point is released ] + * + * 0--------------------------##############################--------------------2^32 - 1 + * ^ ^ + * \ | + * oldest point on timeline | + * \ + * next point on timeline + */ + +/** + * Compare two timeline points + * + * Returns true if a is after b, false if a is before or equal to b. + * + * This funcion ignores MALI_TIMELINE_MAX_POINT_SPAN. Wrapping is supported and + * the result will be correct if the points is less then UINT_MAX/2 apart. 
+ * + * @param a Point on timeline + * @param b Point on timeline + * @return MALI_TRUE if a is after b + */ +MALI_STATIC_INLINE mali_bool mali_timeline_point_after(mali_timeline_point a, mali_timeline_point b) +{ + return 0 > ((s32)b) - ((s32)a); +} + +/** + * Check if a point is on timeline. A point is on a timeline if it is greater than, or equal to, + * the oldest point, and less than the next point. + * + * @param timeline Timeline. + * @param point Point on timeline. + * @return MALI_TRUE if point is on timeline, MALI_FALSE if not. + */ +MALI_STATIC_INLINE mali_bool mali_timeline_is_point_on(struct mali_timeline *timeline, mali_timeline_point point) +{ + MALI_DEBUG_ASSERT_POINTER(timeline); + MALI_DEBUG_ASSERT(MALI_TIMELINE_NO_POINT != point); + + return (point - timeline->point_oldest) < (timeline->point_next - timeline->point_oldest); +} + +/** + * Check if a point has been released. A point is released if it is older than the oldest point on + * the timeline, newer than the next point, and also not in the forbidden zone. + * + * @param timeline Timeline. + * @param point Point on timeline. + * @return MALI_TRUE if point has been release, MALI_FALSE if not. + */ +MALI_STATIC_INLINE mali_bool mali_timeline_is_point_released(struct mali_timeline *timeline, mali_timeline_point point) +{ + mali_timeline_point point_normalized; + mali_timeline_point next_normalized; + + MALI_DEBUG_ASSERT_POINTER(timeline); + MALI_DEBUG_ASSERT(MALI_TIMELINE_NO_POINT != point); + + point_normalized = point - timeline->point_oldest; + next_normalized = timeline->point_next - timeline->point_oldest; + + return point_normalized > (next_normalized + MALI_TIMELINE_MAX_POINT_SPAN); +} + +/** + * Check if a point is valid. A point is valid if is on the timeline or has been released. + * + * @param timeline Timeline. + * @param point Point on timeline. + * @return MALI_TRUE if point is valid, MALI_FALSE if not. 
+ */ +MALI_STATIC_INLINE mali_bool mali_timeline_is_point_valid(struct mali_timeline *timeline, mali_timeline_point point) +{ + MALI_DEBUG_ASSERT_POINTER(timeline); + return mali_timeline_is_point_on(timeline, point) || mali_timeline_is_point_released(timeline, point); +} + +/** + * Check if timeline is empty (has no points on it). A timeline is empty if next == oldest. + * + * @param timeline Timeline. + * @return MALI_TRUE if timeline is empty, MALI_FALSE if not. + */ +MALI_STATIC_INLINE mali_bool mali_timeline_is_empty(struct mali_timeline *timeline) +{ + MALI_DEBUG_ASSERT_POINTER(timeline); + return timeline->point_next == timeline->point_oldest; +} + +/** + * Check if timeline is full. A valid timeline cannot span more than 64k points (@ref + * MALI_TIMELINE_MAX_POINT_SPAN). + * + * @param timeline Timeline. + * @return MALI_TRUE if timeline is full, MALI_FALSE if not. + */ +MALI_STATIC_INLINE mali_bool mali_timeline_is_full(struct mali_timeline *timeline) +{ + MALI_DEBUG_ASSERT_POINTER(timeline); + return MALI_TIMELINE_MAX_POINT_SPAN <= (timeline->point_next - timeline->point_oldest); +} + +/** + * Create a new timeline system. + * + * @param session The session this timeline system will belong to. + * @return New timeline system. + */ +struct mali_timeline_system *mali_timeline_system_create(struct mali_session_data *session); + +/** + * Abort timeline system. + * + * This will release all pending waiters in the timeline system causing all trackers to be + * activated. + * + * @param system Timeline system to abort all jobs from. + */ +void mali_timeline_system_abort(struct mali_timeline_system *system); + +/** + * Destroy an empty timeline system. + * + * @note @ref mali_timeline_system_abort() should be called prior to this function. + * + * @param system Timeline system to destroy. + */ +void mali_timeline_system_destroy(struct mali_timeline_system *system); + +/** + * Stop the soft job timer. 
+ * + * @param system Timeline system + */ +void mali_timeline_system_stop_timer(struct mali_timeline_system *system); + +/** + * Add a tracker to a timeline system and optionally also on a timeline. + * + * Once added to the timeline system, the tracker is guaranteed to be activated. The tracker can be + * activated before this function returns. Thus, it is also possible that the tracker is released + * before this function returns, depending on the tracker type. + * + * @note Tracker must be initialized (@ref mali_timeline_tracker_init) before being added to the + * timeline system. + * + * @param system Timeline system the tracker will be added to. + * @param tracker The tracker to be added. + * @param timeline_id Id of the timeline the tracker will be added to, or + * MALI_TIMELINE_NONE if it should not be added on a timeline. + * @return Point on timeline identifying this tracker, or MALI_TIMELINE_NO_POINT if not on timeline. + */ +mali_timeline_point mali_timeline_system_add_tracker(struct mali_timeline_system *system, + struct mali_timeline_tracker *tracker, + enum mali_timeline_id timeline_id); + +/** + * Get latest point on timeline. + * + * @param system Timeline system. + * @param timeline_id Id of timeline to get latest point from. + * @return Latest point on timeline, or MALI_TIMELINE_NO_POINT if the timeline is empty. + */ +mali_timeline_point mali_timeline_system_get_latest_point(struct mali_timeline_system *system, + enum mali_timeline_id timeline_id); + +/** + * Initialize tracker. + * + * Must be called before tracker is added to timeline system (@ref mali_timeline_system_add_tracker). + * + * @param tracker Tracker to initialize. + * @param type Type of tracker. + * @param fence Fence used to set up dependencies for tracker. + * @param job Pointer to job struct this tracker is associated with. 
+ */ +void mali_timeline_tracker_init(struct mali_timeline_tracker *tracker, + mali_timeline_tracker_type type, + struct mali_timeline_fence *fence, + void *job); + +/** + * Grab trigger ref count on tracker. + * + * This will prevent tracker from being activated until the trigger ref count reaches zero. + * + * @note Tracker must have been initialized (@ref mali_timeline_tracker_init). + * + * @param system Timeline system. + * @param tracker Tracker. + */ +void mali_timeline_system_tracker_get(struct mali_timeline_system *system, struct mali_timeline_tracker *tracker); + +/** + * Release trigger ref count on tracker. + * + * If the trigger ref count reaches zero, the tracker will be activated. + * + * @param system Timeline system. + * @param tracker Tracker. + * @param activation_error Error bitmask if activated with error, or MALI_TIMELINE_ACTIVATION_ERROR_NONE if no error. + * @return Scheduling bitmask. + */ +mali_scheduler_mask mali_timeline_system_tracker_put(struct mali_timeline_system *system, struct mali_timeline_tracker *tracker, mali_timeline_activation_error activation_error); + +/** + * Release a tracker from the timeline system. + * + * This is used to signal that the job being tracker is finished, either due to normal circumstances + * (job complete/abort) or due to a timeout. + * + * We may need to schedule some subsystems after a tracker has been released and the returned + * bitmask will tell us if it is necessary. If the return value is non-zero, this value needs to be + * sent as an input parameter to @ref mali_scheduler_schedule_from_mask() to do the scheduling. + * + * @note Tracker must have been activated before being released. + * @warning Not calling @ref mali_scheduler_schedule_from_mask() after releasing a tracker can lead + * to a deadlock. + * + * @param tracker Tracker being released. + * @return Scheduling bitmask. 
+ */ +mali_scheduler_mask mali_timeline_tracker_release(struct mali_timeline_tracker *tracker); + +MALI_STATIC_INLINE mali_bool mali_timeline_tracker_activation_error( + struct mali_timeline_tracker *tracker) +{ + MALI_DEBUG_ASSERT_POINTER(tracker); + return (MALI_TIMELINE_ACTIVATION_ERROR_FATAL_BIT & + tracker->activation_error) ? MALI_TRUE : MALI_FALSE; +} + +/** + * Copy data from a UK fence to a Timeline fence. + * + * @param fence Timeline fence. + * @param uk_fence UK fence. + */ +void mali_timeline_fence_copy_uk_fence(struct mali_timeline_fence *fence, _mali_uk_fence_t *uk_fence); + +void mali_timeline_initialize(void); + +void mali_timeline_terminate(void); + +MALI_STATIC_INLINE mali_bool mali_timeline_has_gp_job(void) +{ + return 0 < _mali_osk_atomic_read(&gp_tracker_count); +} + +MALI_STATIC_INLINE mali_bool mali_timeline_has_physical_pp_job(void) +{ + return 0 < _mali_osk_atomic_read(&phy_pp_tracker_count); +} + +MALI_STATIC_INLINE mali_bool mali_timeline_has_virtual_pp_job(void) +{ + return 0 < _mali_osk_atomic_read(&virt_pp_tracker_count); +} + +#if defined(DEBUG) +#define MALI_TIMELINE_DEBUG_FUNCTIONS +#endif /* DEBUG */ +#if defined(MALI_TIMELINE_DEBUG_FUNCTIONS) + +/** + * Tracker state. Used for debug printing. + */ +typedef enum mali_timeline_tracker_state { + MALI_TIMELINE_TS_INIT = 0, + MALI_TIMELINE_TS_WAITING = 1, + MALI_TIMELINE_TS_ACTIVE = 2, + MALI_TIMELINE_TS_FINISH = 3, +} mali_timeline_tracker_state; + +/** + * Get tracker state. + * + * @param tracker Tracker to check. + * @return State of tracker. + */ +mali_timeline_tracker_state mali_timeline_debug_get_tracker_state(struct mali_timeline_tracker *tracker); + +/** + * Print debug information about tracker. + * + * @param tracker Tracker to print. + */ +void mali_timeline_debug_print_tracker(struct mali_timeline_tracker *tracker, _mali_osk_print_ctx *print_ctx); + +/** + * Print debug information about timeline. + * + * @param timeline Timeline to print. 
+ */ +void mali_timeline_debug_print_timeline(struct mali_timeline *timeline, _mali_osk_print_ctx *print_ctx); + +#if !(LINUX_VERSION_CODE < KERNEL_VERSION(3, 17, 0)) +void mali_timeline_debug_direct_print_tracker(struct mali_timeline_tracker *tracker); +void mali_timeline_debug_direct_print_timeline(struct mali_timeline *timeline); +#endif + +/** + * Print debug information about timeline system. + * + * @param system Timeline system to print. + */ +void mali_timeline_debug_print_system(struct mali_timeline_system *system, _mali_osk_print_ctx *print_ctx); + +#endif /* defined(MALI_TIMELINE_DEBUG_FUNCTIONS) */ + +#endif /* __MALI_TIMELINE_H__ */ diff --git a/drivers/gpu/arm/utgard/common/mali_timeline_fence_wait.c b/drivers/gpu/arm/utgard/common/mali_timeline_fence_wait.c new file mode 100644 index 000000000000..3c58928dd3a2 --- /dev/null +++ b/drivers/gpu/arm/utgard/common/mali_timeline_fence_wait.c @@ -0,0 +1,202 @@ +/* + * Copyright (C) 2013-2015 ARM Limited. All rights reserved. + * + * This program is free software and is provided to you under the terms of the GNU General Public License version 2 + * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence. + * + * A copy of the licence is included with the program, and can also be obtained from Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. + */ + +#include "mali_timeline_fence_wait.h" + +#include "mali_osk.h" +#include "mali_kernel_common.h" +#include "mali_spinlock_reentrant.h" + +/** + * Allocate a fence waiter tracker. + * + * @return New fence waiter if successful, NULL if not. + */ +static struct mali_timeline_fence_wait_tracker *mali_timeline_fence_wait_tracker_alloc(void) +{ + return (struct mali_timeline_fence_wait_tracker *) _mali_osk_calloc(1, sizeof(struct mali_timeline_fence_wait_tracker)); +} + +/** + * Free fence waiter tracker. + * + * @param wait Fence wait tracker to free. 
+ */ +static void mali_timeline_fence_wait_tracker_free(struct mali_timeline_fence_wait_tracker *wait) +{ + MALI_DEBUG_ASSERT_POINTER(wait); + _mali_osk_atomic_term(&wait->refcount); + _mali_osk_free(wait); +} + +/** + * Check if fence wait tracker has been activated. Used as a wait queue condition. + * + * @param data Fence waiter. + * @return MALI_TRUE if tracker has been activated, MALI_FALSE if not. + */ +static mali_bool mali_timeline_fence_wait_tracker_is_activated(void *data) +{ + struct mali_timeline_fence_wait_tracker *wait; + + wait = (struct mali_timeline_fence_wait_tracker *) data; + MALI_DEBUG_ASSERT_POINTER(wait); + + return wait->activated; +} + +/** + * Check if fence has been signaled. + * + * @param system Timeline system. + * @param fence Timeline fence. + * @return MALI_TRUE if fence is signaled, MALI_FALSE if not. + */ +static mali_bool mali_timeline_fence_wait_check_status(struct mali_timeline_system *system, struct mali_timeline_fence *fence) +{ + int i; + u32 tid = _mali_osk_get_tid(); + mali_bool ret = MALI_TRUE; +#if defined(CONFIG_SYNC) + struct sync_fence *sync_fence = NULL; +#endif + + MALI_DEBUG_ASSERT_POINTER(system); + MALI_DEBUG_ASSERT_POINTER(fence); + + mali_spinlock_reentrant_wait(system->spinlock, tid); + + for (i = 0; i < MALI_TIMELINE_MAX; ++i) { + struct mali_timeline *timeline; + mali_timeline_point point; + + point = fence->points[i]; + + if (likely(MALI_TIMELINE_NO_POINT == point)) { + /* Fence contains no point on this timeline. 
*/ + continue; + } + + timeline = system->timelines[i]; + MALI_DEBUG_ASSERT_POINTER(timeline); + + if (unlikely(!mali_timeline_is_point_valid(timeline, point))) { + MALI_PRINT_ERROR(("Mali Timeline: point %d is not valid (oldest=%d, next=%d)\n", point, timeline->point_oldest, timeline->point_next)); + } + + if (!mali_timeline_is_point_released(timeline, point)) { + ret = MALI_FALSE; + goto exit; + } + } + +#if defined(CONFIG_SYNC) + if (-1 != fence->sync_fd) { + sync_fence = sync_fence_fdget(fence->sync_fd); + if (likely(NULL != sync_fence)) { +#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 17, 0) + if (0 == sync_fence->status) { +#else + if (0 == atomic_read(&sync_fence->status)) { +#endif + ret = MALI_FALSE; + } + } else { + MALI_PRINT_ERROR(("Mali Timeline: failed to get sync fence from fd %d\n", fence->sync_fd)); + } + } +#endif /* defined(CONFIG_SYNC) */ + +exit: + mali_spinlock_reentrant_signal(system->spinlock, tid); + +#if defined(CONFIG_SYNC) + if (NULL != sync_fence) { + sync_fence_put(sync_fence); + } +#endif /* defined(CONFIG_SYNC) */ + + return ret; +} + +mali_bool mali_timeline_fence_wait(struct mali_timeline_system *system, struct mali_timeline_fence *fence, u32 timeout) +{ + struct mali_timeline_fence_wait_tracker *wait; + mali_timeline_point point; + mali_bool ret; + + MALI_DEBUG_ASSERT_POINTER(system); + MALI_DEBUG_ASSERT_POINTER(fence); + + MALI_DEBUG_PRINT(4, ("Mali Timeline: wait on fence\n")); + + if (MALI_TIMELINE_FENCE_WAIT_TIMEOUT_IMMEDIATELY == timeout) { + return mali_timeline_fence_wait_check_status(system, fence); + } + + wait = mali_timeline_fence_wait_tracker_alloc(); + if (unlikely(NULL == wait)) { + MALI_PRINT_ERROR(("Mali Timeline: failed to allocate data for fence wait\n")); + return MALI_FALSE; + } + + wait->activated = MALI_FALSE; + wait->system = system; + + /* Initialize refcount to two references. The reference first will be released by this + * function after the wait is over. 
The second reference will be released when the tracker + * is activated. */ + _mali_osk_atomic_init(&wait->refcount, 2); + + /* Add tracker to timeline system, but not to a timeline. */ + mali_timeline_tracker_init(&wait->tracker, MALI_TIMELINE_TRACKER_WAIT, fence, wait); + point = mali_timeline_system_add_tracker(system, &wait->tracker, MALI_TIMELINE_NONE); + MALI_DEBUG_ASSERT(MALI_TIMELINE_NO_POINT == point); + MALI_IGNORE(point); + + /* Wait for the tracker to be activated or time out. */ + if (MALI_TIMELINE_FENCE_WAIT_TIMEOUT_NEVER == timeout) { + _mali_osk_wait_queue_wait_event(system->wait_queue, mali_timeline_fence_wait_tracker_is_activated, (void *) wait); + } else { + _mali_osk_wait_queue_wait_event_timeout(system->wait_queue, mali_timeline_fence_wait_tracker_is_activated, (void *) wait, timeout); + } + + ret = wait->activated; + + if (0 == _mali_osk_atomic_dec_return(&wait->refcount)) { + mali_timeline_fence_wait_tracker_free(wait); + } + + return ret; +} + +void mali_timeline_fence_wait_activate(struct mali_timeline_fence_wait_tracker *wait) +{ + mali_scheduler_mask schedule_mask = MALI_SCHEDULER_MASK_EMPTY; + + MALI_DEBUG_ASSERT_POINTER(wait); + MALI_DEBUG_ASSERT_POINTER(wait->system); + + MALI_DEBUG_PRINT(4, ("Mali Timeline: activation for fence wait tracker\n")); + + MALI_DEBUG_ASSERT(MALI_FALSE == wait->activated); + wait->activated = MALI_TRUE; + + _mali_osk_wait_queue_wake_up(wait->system->wait_queue); + + /* Nothing can wait on this tracker, so nothing to schedule after release. 
*/ + schedule_mask = mali_timeline_tracker_release(&wait->tracker); + MALI_DEBUG_ASSERT(MALI_SCHEDULER_MASK_EMPTY == schedule_mask); + MALI_IGNORE(schedule_mask); + + if (0 == _mali_osk_atomic_dec_return(&wait->refcount)) { + mali_timeline_fence_wait_tracker_free(wait); + } +} diff --git a/drivers/gpu/arm/utgard/common/mali_timeline_fence_wait.h b/drivers/gpu/arm/utgard/common/mali_timeline_fence_wait.h new file mode 100644 index 000000000000..f5440ab6fc6d --- /dev/null +++ b/drivers/gpu/arm/utgard/common/mali_timeline_fence_wait.h @@ -0,0 +1,67 @@ +/* + * Copyright (C) 2013, 2015 ARM Limited. All rights reserved. + * + * This program is free software and is provided to you under the terms of the GNU General Public License version 2 + * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence. + * + * A copy of the licence is included with the program, and can also be obtained from Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. + */ + +/** + * @file mali_timeline_fence_wait.h + * + * This file contains functions used to wait until a Timeline fence is signaled. + */ + +#ifndef __MALI_TIMELINE_FENCE_WAIT_H__ +#define __MALI_TIMELINE_FENCE_WAIT_H__ + +#include "mali_osk.h" +#include "mali_timeline.h" + +/** + * If used as the timeout argument in @ref mali_timeline_fence_wait, a timer is not used and the + * function only returns when the fence is signaled. + */ +#define MALI_TIMELINE_FENCE_WAIT_TIMEOUT_NEVER ((u32) -1) + +/** + * If used as the timeout argument in @ref mali_timeline_fence_wait, the function will return + * immediately with the current state of the fence. + */ +#define MALI_TIMELINE_FENCE_WAIT_TIMEOUT_IMMEDIATELY 0 + +/** + * Fence wait tracker. + * + * The fence wait tracker is added to the Timeline system with the fence we are waiting on as a + * dependency. 
We will then perform a blocking wait, possibly with a timeout, until the tracker is + * activated, which happens when the fence is signaled. + */ +struct mali_timeline_fence_wait_tracker { + mali_bool activated; /**< MALI_TRUE if the tracker has been activated, MALI_FALSE if not. */ + _mali_osk_atomic_t refcount; /**< Reference count. */ + struct mali_timeline_system *system; /**< Timeline system. */ + struct mali_timeline_tracker tracker; /**< Timeline tracker. */ +}; + +/** + * Wait for a fence to be signaled, or timeout is reached. + * + * @param system Timeline system. + * @param fence Fence to wait on. + * @param timeout Timeout in ms, or MALI_TIMELINE_FENCE_WAIT_TIMEOUT_NEVER or + * MALI_TIMELINE_FENCE_WAIT_TIMEOUT_IMMEDIATELY. + * @return MALI_TRUE if signaled, MALI_FALSE if timed out. + */ +mali_bool mali_timeline_fence_wait(struct mali_timeline_system *system, struct mali_timeline_fence *fence, u32 timeout); + +/** + * Used by the Timeline system to activate a fence wait tracker. + * + * @param fence_wait_tracker Fence waiter tracker. + */ +void mali_timeline_fence_wait_activate(struct mali_timeline_fence_wait_tracker *fence_wait_tracker); + +#endif /* __MALI_TIMELINE_FENCE_WAIT_H__ */ diff --git a/drivers/gpu/arm/utgard/common/mali_timeline_sync_fence.c b/drivers/gpu/arm/utgard/common/mali_timeline_sync_fence.c new file mode 100644 index 000000000000..73843f07c990 --- /dev/null +++ b/drivers/gpu/arm/utgard/common/mali_timeline_sync_fence.c @@ -0,0 +1,158 @@ +/* + * Copyright (C) 2013, 2015 ARM Limited. All rights reserved. + * + * This program is free software and is provided to you under the terms of the GNU General Public License version 2 + * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence. + * + * A copy of the licence is included with the program, and can also be obtained from Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. 
+ */ + +#include "mali_timeline_sync_fence.h" + +#include "mali_osk.h" +#include "mali_kernel_common.h" +#include "mali_sync.h" + +#if defined(CONFIG_SYNC) + +/** + * Creates a sync fence tracker and a sync fence. Adds sync fence tracker to Timeline system and + * returns sync fence. The sync fence will be signaled when the sync fence tracker is activated. + * + * @param timeline Timeline. + * @param point Point on timeline. + * @return Sync fence that will be signaled when tracker is activated. + */ +static struct sync_fence *mali_timeline_sync_fence_create_and_add_tracker(struct mali_timeline *timeline, mali_timeline_point point) +{ + struct mali_timeline_sync_fence_tracker *sync_fence_tracker; + struct sync_fence *sync_fence; + struct mali_timeline_fence fence; + + MALI_DEBUG_ASSERT_POINTER(timeline); + MALI_DEBUG_ASSERT(MALI_TIMELINE_NO_POINT != point); + + /* Allocate sync fence tracker. */ + sync_fence_tracker = _mali_osk_calloc(1, sizeof(struct mali_timeline_sync_fence_tracker)); + if (NULL == sync_fence_tracker) { + MALI_PRINT_ERROR(("Mali Timeline: sync_fence_tracker allocation failed\n")); + return NULL; + } + + /* Create sync flag. */ + MALI_DEBUG_ASSERT_POINTER(timeline->sync_tl); + sync_fence_tracker->flag = mali_sync_flag_create(timeline->sync_tl, point); + if (NULL == sync_fence_tracker->flag) { + MALI_PRINT_ERROR(("Mali Timeline: sync_flag creation failed\n")); + _mali_osk_free(sync_fence_tracker); + return NULL; + } + + /* Create sync fence from sync flag. */ + sync_fence = mali_sync_flag_create_fence(sync_fence_tracker->flag); + if (NULL == sync_fence) { + MALI_PRINT_ERROR(("Mali Timeline: sync_fence creation failed\n")); + mali_sync_flag_put(sync_fence_tracker->flag); + _mali_osk_free(sync_fence_tracker); + return NULL; + } + + /* Setup fence for tracker. */ + _mali_osk_memset(&fence, 0, sizeof(struct mali_timeline_fence)); + fence.sync_fd = -1; + fence.points[timeline->id] = point; + + /* Finally, add the tracker to Timeline system. 
*/ + mali_timeline_tracker_init(&sync_fence_tracker->tracker, MALI_TIMELINE_TRACKER_SYNC, &fence, sync_fence_tracker); + point = mali_timeline_system_add_tracker(timeline->system, &sync_fence_tracker->tracker, MALI_TIMELINE_NONE); + MALI_DEBUG_ASSERT(MALI_TIMELINE_NO_POINT == point); + + return sync_fence; +} + +s32 mali_timeline_sync_fence_create(struct mali_timeline_system *system, struct mali_timeline_fence *fence) +{ + u32 i; + struct sync_fence *sync_fence_acc = NULL; + + MALI_DEBUG_ASSERT_POINTER(system); + MALI_DEBUG_ASSERT_POINTER(fence); + + for (i = 0; i < MALI_TIMELINE_MAX; ++i) { + struct mali_timeline *timeline; + struct sync_fence *sync_fence; + + if (MALI_TIMELINE_NO_POINT == fence->points[i]) continue; + + timeline = system->timelines[i]; + MALI_DEBUG_ASSERT_POINTER(timeline); + + sync_fence = mali_timeline_sync_fence_create_and_add_tracker(timeline, fence->points[i]); + if (NULL == sync_fence) goto error; + + if (NULL != sync_fence_acc) { + /* Merge sync fences. */ + sync_fence_acc = mali_sync_fence_merge(sync_fence_acc, sync_fence); + if (NULL == sync_fence_acc) goto error; + } else { + /* This was the first sync fence created. */ + sync_fence_acc = sync_fence; + } + } + + if (-1 != fence->sync_fd) { + struct sync_fence *sync_fence; + + sync_fence = sync_fence_fdget(fence->sync_fd); + if (NULL == sync_fence) goto error; + + if (NULL != sync_fence_acc) { + sync_fence_acc = mali_sync_fence_merge(sync_fence_acc, sync_fence); + if (NULL == sync_fence_acc) goto error; + } else { + sync_fence_acc = sync_fence; + } + } + + if (NULL == sync_fence_acc) { + MALI_DEBUG_ASSERT_POINTER(system->signaled_sync_tl); + + /* There was nothing to wait on, so return an already signaled fence. */ + + sync_fence_acc = mali_sync_timeline_create_signaled_fence(system->signaled_sync_tl); + if (NULL == sync_fence_acc) goto error; + } + + /* Return file descriptor for the accumulated sync fence. 
*/ + return mali_sync_fence_fd_alloc(sync_fence_acc); + +error: + if (NULL != sync_fence_acc) { + sync_fence_put(sync_fence_acc); + } + + return -1; +} + +void mali_timeline_sync_fence_activate(struct mali_timeline_sync_fence_tracker *sync_fence_tracker) +{ + mali_scheduler_mask schedule_mask = MALI_SCHEDULER_MASK_EMPTY; + + MALI_DEBUG_ASSERT_POINTER(sync_fence_tracker); + MALI_DEBUG_ASSERT_POINTER(sync_fence_tracker->flag); + + MALI_DEBUG_PRINT(4, ("Mali Timeline: activation for sync fence tracker\n")); + + /* Signal flag and release reference. */ + mali_sync_flag_signal(sync_fence_tracker->flag, 0); + mali_sync_flag_put(sync_fence_tracker->flag); + + /* Nothing can wait on this tracker, so nothing to schedule after release. */ + schedule_mask = mali_timeline_tracker_release(&sync_fence_tracker->tracker); + MALI_DEBUG_ASSERT(MALI_SCHEDULER_MASK_EMPTY == schedule_mask); + + _mali_osk_free(sync_fence_tracker); +} + +#endif /* defined(CONFIG_SYNC) */ diff --git a/drivers/gpu/arm/utgard/common/mali_timeline_sync_fence.h b/drivers/gpu/arm/utgard/common/mali_timeline_sync_fence.h new file mode 100644 index 000000000000..29a3822457e9 --- /dev/null +++ b/drivers/gpu/arm/utgard/common/mali_timeline_sync_fence.h @@ -0,0 +1,51 @@ +/* + * Copyright (C) 2013, 2015 ARM Limited. All rights reserved. + * + * This program is free software and is provided to you under the terms of the GNU General Public License version 2 + * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence. + * + * A copy of the licence is included with the program, and can also be obtained from Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. + */ + +/** + * @file mali_timeline_sync_fence.h + * + * This file contains code related to creating sync fences from timeline fences. 
+ */ + +#ifndef __MALI_TIMELINE_SYNC_FENCE_H__ +#define __MALI_TIMELINE_SYNC_FENCE_H__ + +#include "mali_timeline.h" + +#if defined(CONFIG_SYNC) + +/** + * Sync fence tracker. + */ +struct mali_timeline_sync_fence_tracker { + struct mali_sync_flag *flag; /**< Sync flag used to connect tracker and sync fence. */ + struct mali_timeline_tracker tracker; /**< Timeline tracker. */ +}; + +/** + * Create a sync fence that will be signaled when @ref fence is signaled. + * + * @param system Timeline system. + * @param fence Fence to create sync fence from. + * @return File descriptor for new sync fence, or -1 on error. + */ +s32 mali_timeline_sync_fence_create(struct mali_timeline_system *system, struct mali_timeline_fence *fence); + +/** + * Used by the Timeline system to activate a sync fence tracker. + * + * @param sync_fence_tracker Sync fence tracker. + * + */ +void mali_timeline_sync_fence_activate(struct mali_timeline_sync_fence_tracker *sync_fence_tracker); + +#endif /* defined(CONFIG_SYNC) */ + +#endif /* __MALI_TIMELINE_SYNC_FENCE_H__ */ diff --git a/drivers/gpu/arm/utgard/common/mali_ukk.h b/drivers/gpu/arm/utgard/common/mali_ukk.h new file mode 100644 index 000000000000..597685a53f3b --- /dev/null +++ b/drivers/gpu/arm/utgard/common/mali_ukk.h @@ -0,0 +1,551 @@ +/* + * Copyright (C) 2010-2015 ARM Limited. All rights reserved. + * + * This program is free software and is provided to you under the terms of the GNU General Public License version 2 + * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence. + * + * A copy of the licence is included with the program, and can also be obtained from Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. 
+ */ + +/** + * @file mali_ukk.h + * Defines the kernel-side interface of the user-kernel interface + */ + +#ifndef __MALI_UKK_H__ +#define __MALI_UKK_H__ + +#include "mali_osk.h" +#include "mali_uk_types.h" + +#ifdef __cplusplus +extern "C" { +#endif + +/** + * @addtogroup uddapi Unified Device Driver (UDD) APIs + * + * @{ + */ + +/** + * @addtogroup u_k_api UDD User/Kernel Interface (U/K) APIs + * + * - The _mali_uk functions are an abstraction of the interface to the device + * driver. On certain OSs, this would be implemented via the IOCTL interface. + * On other OSs, it could be via extension of some Device Driver Class, or + * direct function call for Bare metal/RTOSs. + * - It is important to note that: + * - The Device Driver has implemented the _mali_ukk set of functions + * - The Base Driver calls the corresponding set of _mali_uku functions. + * - What requires porting is solely the calling mechanism from User-side to + * Kernel-side, and propagating back the results. + * - Each U/K function is associated with a (group, number) pair from + * \ref _mali_uk_functions to make it possible for a common function in the + * Base Driver and Device Driver to route User/Kernel calls from/to the + * correct _mali_uk function. For example, in an IOCTL system, the IOCTL number + * would be formed based on the group and number assigned to the _mali_uk + * function, as listed in \ref _mali_uk_functions. On the user-side, each + * _mali_uku function would just make an IOCTL with the IOCTL-code being an + * encoded form of the (group, number) pair. On the kernel-side, the Device + * Driver's IOCTL handler decodes the IOCTL-code back into a (group, number) + * pair, and uses this to determine which corresponding _mali_ukk should be + * called. + * - Refer to \ref _mali_uk_functions for more information about this + * (group, number) pairing. 
+ * - In a system where there is no distinction between user and kernel-side, + * the U/K interface may be implemented as:@code + * MALI_STATIC_INLINE _mali_osk_errcode_t _mali_uku_examplefunction( _mali_uk_examplefunction_s *args ) + * { + * return mali_ukk_examplefunction( args ); + * } + * @endcode + * - Therefore, all U/K calls behave \em as \em though they were direct + * function calls (but the \b implementation \em need \em not be a direct + * function calls) + * + * @note Naming the _mali_uk functions the same on both User and Kernel sides + * on non-RTOS systems causes debugging issues when setting breakpoints. In + * this case, it is not clear which function the breakpoint is put on. + * Therefore the _mali_uk functions in user space are prefixed with \c _mali_uku + * and in kernel space with \c _mali_ukk. The naming for the argument + * structures is unaffected. + * + * - The _mali_uk functions are synchronous. + * - Arguments to the _mali_uk functions are passed in a structure. The only + * parameter passed to the _mali_uk functions is a pointer to this structure. + * This first member of this structure, ctx, is a pointer to a context returned + * by _mali_uku_open(). For example:@code + * typedef struct + * { + * void *ctx; + * u32 number_of_cores; + * } _mali_uk_get_gp_number_of_cores_s; + * @endcode + * + * - Each _mali_uk function has its own argument structure named after the + * function. The argument is distinguished by the _s suffix. + * - The argument types are defined by the base driver and user-kernel + * interface. + * - All _mali_uk functions return a standard \ref _mali_osk_errcode_t. + * - Only arguments of type input or input/output need be initialized before + * calling a _mali_uk function. + * - Arguments of type output and input/output are only valid when the + * _mali_uk function returns \ref _MALI_OSK_ERR_OK. 
+ * - The \c ctx member is always invalid after it has been used by a
+ * _mali_uk function, except for the context management functions
+ *
+ *
+ * \b Interface \b restrictions
+ *
+ * The requirements of the interface mean that an implementation of the
+ * User-kernel interface may do no 'real' work. For example, the following are
+ * illegal in the User-kernel implementation:
+ * - Calling functions necessary for operation on all systems, which would
+ * not otherwise get called on RTOS systems.
+ * - For example, a U/K interface that calls multiple _mali_ukk functions
+ * during one particular U/K call. This could not be achieved by the same code
+ * which uses direct function calls for the U/K interface.
+ * - Writing in values to the args members, when otherwise these members would
+ * not hold a useful value for a direct function call U/K interface.
+ * - For example, U/K interface implementations that take NULL members in
+ * their arguments structure from the user side, but those members are
+ * replaced with non-NULL values in the kernel-side of the U/K interface
+ * implementation. A scratch area for writing data is one such example. In this
+ * case, a direct function call U/K interface would segfault, because no code
+ * would be present to replace the NULL pointer with a meaningful pointer.
+ * - Note that we discourage the case where the U/K implementation changes
+ * a NULL argument member to non-NULL, and then the Device Driver code (outside
+ * of the U/K layer) re-checks this member for NULL, and corrects it when
+ * necessary. Whilst such code works even on direct function call U/K
+ * interfaces, it reduces the testing coverage of the Device Driver code. This
+ * is because we have no way of testing the NULL == value path on an OS
+ * implementation. 
+ * + * A number of allowable examples exist where U/K interfaces do 'real' work: + * - The 'pointer switching' technique for \ref _mali_ukk_get_system_info + * - In this case, without the pointer switching on direct function call + * U/K interface, the Device Driver code still sees the same thing: a pointer + * to which it can write memory. This is because such a system has no + * distinction between a user and kernel pointer. + * - Writing an OS-specific value into the ukk_private member for + * _mali_ukk_mem_mmap(). + * - In this case, this value is passed around by Device Driver code, but + * its actual value is never checked. Device Driver code simply passes it from + * the U/K layer to the OSK layer, where it can be acted upon. In this case, + * \em some OS implementations of the U/K (_mali_ukk_mem_mmap()) and OSK + * (_mali_osk_mem_mapregion_init()) functions will collaborate on the + * meaning of ukk_private member. On other OSs, it may be unused by both + * U/K and OSK layers + * - Therefore, on error inside the U/K interface implementation itself, + * it will be as though the _mali_ukk function itself had failed, and cleaned + * up after itself. + * - Compare this to a direct function call U/K implementation, where all + * error cleanup is handled by the _mali_ukk function itself. The direct + * function call U/K interface implementation is automatically atomic. + * + * The last example highlights a consequence of all U/K interface + * implementations: they must be atomic with respect to the Device Driver code. + * And therefore, should Device Driver code succeed but the U/K implementation + * fail afterwards (but before return to user-space), then the U/K + * implementation must cause appropriate cleanup actions to preserve the + * atomicity of the interface. + * + * @{ + */ + + +/** @defgroup _mali_uk_context U/K Context management + * + * These functions allow for initialisation of the user-kernel interface once per process. 
+ * + * Generally the context will store the OS specific object to communicate with the kernel device driver and further + * state information required by the specific implementation. The context is shareable among all threads in the caller process. + * + * On IOCTL systems, this is likely to be a file descriptor as a result of opening the kernel device driver. + * + * On a bare-metal/RTOS system with no distinction between kernel and + * user-space, the U/K interface simply calls the _mali_ukk variant of the + * function by direct function call. In this case, the context returned is the + * mali_session_data from _mali_ukk_open(). + * + * The kernel side implementations of the U/K interface expect the first member of the argument structure to + * be the context created by _mali_uku_open(). On some OS implementations, the meaning of this context + * will be different between user-side and kernel-side. In which case, the kernel-side will need to replace this context + * with the kernel-side equivalent, because user-side will not have access to kernel-side data. The context parameter + * in the argument structure therefore has to be of type input/output. + * + * It should be noted that the caller cannot reuse the \c ctx member of U/K + * argument structure after a U/K call, because it may be overwritten. Instead, + * the context handle must always be stored elsewhere, and copied into + * the appropriate U/K argument structure for each user-side call to + * the U/K interface. This is not usually a problem, since U/K argument + * structures are usually placed on the stack. + * + * @{ */ + +/** @brief Begin a new Mali Device Driver session + * + * This is used to obtain a per-process context handle for all future U/K calls. + * + * @param context pointer to storage to return a (void*)context handle. + * @return _MALI_OSK_ERR_OK on success, otherwise a suitable _mali_osk_errcode_t on failure. 
+ */ +_mali_osk_errcode_t _mali_ukk_open(void **context); + +/** @brief End a Mali Device Driver session + * + * This should be called when the process no longer requires use of the Mali Device Driver. + * + * The context handle must not be used after it has been closed. + * + * @param context pointer to a stored (void*)context handle. + * @return _MALI_OSK_ERR_OK on success, otherwise a suitable _mali_osk_errcode_t on failure. + */ +_mali_osk_errcode_t _mali_ukk_close(void **context); + +/** @} */ /* end group _mali_uk_context */ + + +/** @addtogroup _mali_uk_core U/K Core + * + * The core functions provide the following functionality: + * - verify that the user and kernel API are compatible + * - retrieve information about the cores and memory banks in the system + * - wait for the result of jobs started on a core + * + * @{ */ + +/** @brief Waits for a job notification. + * + * Sleeps until notified or a timeout occurs. Returns information about the notification. + * + * @param args see _mali_uk_wait_for_notification_s in "mali_utgard_uk_types.h" + * @return _MALI_OSK_ERR_OK on success, otherwise a suitable _mali_osk_errcode_t on failure. + */ +_mali_osk_errcode_t _mali_ukk_wait_for_notification(_mali_uk_wait_for_notification_s *args); + +/** @brief Post a notification to the notification queue of this application. + * + * @param args see _mali_uk_post_notification_s in "mali_utgard_uk_types.h" + * @return _MALI_OSK_ERR_OK on success, otherwise a suitable _mali_osk_errcode_t on failure. + */ +_mali_osk_errcode_t _mali_ukk_post_notification(_mali_uk_post_notification_s *args); + +/** @brief Verifies if the user and kernel side of this API are compatible. + * + * This function is obsolete, but kept to allow old, incompatible user space + * clients to robustly detect the incompatibility. + * + * @param args see _mali_uk_get_api_version_s in "mali_utgard_uk_types.h" + * @return _MALI_OSK_ERR_OK on success, otherwise a suitable _mali_osk_errcode_t on failure. 
+ */
+_mali_osk_errcode_t _mali_ukk_get_api_version(_mali_uk_get_api_version_s *args);
+
+/** @brief Verifies if the user and kernel side of this API are compatible.
+ *
+ * @param args see _mali_uk_get_api_version_v2_s in "mali_utgard_uk_types.h"
+ * @return _MALI_OSK_ERR_OK on success, otherwise a suitable _mali_osk_errcode_t on failure.
+ */
+_mali_osk_errcode_t _mali_ukk_get_api_version_v2(_mali_uk_get_api_version_v2_s *args);
+
+/** @brief Get the user space settings applicable for calling process.
+ *
+ * @param args see _mali_uk_get_user_settings_s in "mali_utgard_uk_types.h"
+ * @return _MALI_OSK_ERR_OK on success, otherwise a suitable _mali_osk_errcode_t on failure.
+ */
+_mali_osk_errcode_t _mali_ukk_get_user_settings(_mali_uk_get_user_settings_s *args);
+
+/** @brief Get a user space setting applicable for calling process.
+ *
+ * @param args see _mali_uk_get_user_setting_s in "mali_utgard_uk_types.h"
+ * @return _MALI_OSK_ERR_OK on success, otherwise a suitable _mali_osk_errcode_t on failure.
+ */
+_mali_osk_errcode_t _mali_ukk_get_user_setting(_mali_uk_get_user_setting_s *args);
+
+/** @brief Grant or deny high priority scheduling for this session.
+ *
+ * @param args see _mali_uk_request_high_priority_s in "mali_utgard_uk_types.h"
+ * @return _MALI_OSK_ERR_OK on success, otherwise a suitable _mali_osk_errcode_t on failure.
+ */
+_mali_osk_errcode_t _mali_ukk_request_high_priority(_mali_uk_request_high_priority_s *args);
+
+/** @brief Make process sleep if the pending big job in kernel >= MALI_MAX_PENDING_BIG_JOB
+ *
+ */
+_mali_osk_errcode_t _mali_ukk_pending_submit(_mali_uk_pending_submit_s *args);
+
+/** @} */ /* end group _mali_uk_core */
+
+
+/** @addtogroup _mali_uk_memory U/K Memory
+ *
+ * The memory functions provide functionality with and without a Mali-MMU present. 
+ *
+ * For Mali-MMU based systems, the following functionality is provided:
+ * - Initialize and terminate MALI virtual address space
+ * - Allocate/deallocate physical memory to a MALI virtual address range and map into/unmap from the
+ * current process address space
+ * - Map/unmap external physical memory into the MALI virtual address range
+ *
+ * For Mali-nonMMU based systems:
+ * - Allocate/deallocate MALI memory
+ *
+ * @{ */
+
+/** @brief Map Mali Memory into the current user process
+ *
+ * Maps Mali memory into the current user process in a generic way.
+ *
+ * This function is to be used for Mali-MMU mode. The function is available in both Mali-MMU and Mali-nonMMU modes,
+ * but should not be called by a user process in Mali-nonMMU mode.
+ *
+ * The implementation and operation of _mali_ukk_mem_mmap() is dependent on whether the driver is built for Mali-MMU
+ * or Mali-nonMMU:
+ * - In the nonMMU case, _mali_ukk_mem_mmap() requires a physical address to be specified. For this reason, an OS U/K
+ * implementation should not allow this to be called from user-space. In any case, nonMMU implementations are
+ * inherently insecure, and so the overall impact is minimal. Mali-MMU mode should be used if security is desired.
+ * - In the MMU case, in _mali_ukk_mem_mmap() the _mali_uk_mem_mmap_s::phys_addr
+ * member is used for the \em Mali-virtual address desired for the mapping. The
+ * implementation of _mali_ukk_mem_mmap() will allocate both the CPU-virtual
+ * and CPU-physical addresses, and can cope with mapping a contiguous virtual
+ * address range to a sequence of non-contiguous physical pages. In this case,
+ * the CPU-physical addresses are not communicated back to the user-side, as
+ * they are unnecessary; the \em Mali-virtual address range must be used for
+ * programming Mali structures. 
+ * + * In the second (MMU) case, _mali_ukk_mem_mmap() handles management of + * CPU-virtual and CPU-physical ranges, but the \em caller must manage the + * \em Mali-virtual address range from the user-side. + * + * @note Mali-virtual address ranges are entirely separate between processes. + * It is not possible for a process to accidentally corrupt another process' + * \em Mali-virtual address space. + * + * @param args see _mali_uk_mem_mmap_s in "mali_utgard_uk_types.h" + * @return _MALI_OSK_ERR_OK on success, otherwise a suitable _mali_osk_errcode_t on failure. + */ +_mali_osk_errcode_t _mali_ukk_mem_mmap(_mali_uk_mem_mmap_s *args); + +/** @brief Unmap Mali Memory from the current user process + * + * Unmaps Mali memory from the current user process in a generic way. This only operates on Mali memory supplied + * from _mali_ukk_mem_mmap(). + * + * @param args see _mali_uk_mem_munmap_s in "mali_utgard_uk_types.h" + * @return _MALI_OSK_ERR_OK on success, otherwise a suitable _mali_osk_errcode_t on failure. + */ +_mali_osk_errcode_t _mali_ukk_mem_munmap(_mali_uk_mem_munmap_s *args); + +/** @brief Determine the buffer size necessary for an MMU page table dump. + * @param args see _mali_uk_query_mmu_page_table_dump_size_s in mali_utgard_uk_types.h + * @return _MALI_OSK_ERR_OK on success, otherwise a suitable _mali_osk_errcode_t on failure. + */ +_mali_osk_errcode_t _mali_ukk_query_mmu_page_table_dump_size(_mali_uk_query_mmu_page_table_dump_size_s *args); +/** @brief Dump MMU Page tables. + * @param args see _mali_uk_dump_mmu_page_table_s in mali_utgard_uk_types.h + * @return _MALI_OSK_ERR_OK on success, otherwise a suitable _mali_osk_errcode_t on failure. + */ +_mali_osk_errcode_t _mali_ukk_dump_mmu_page_table(_mali_uk_dump_mmu_page_table_s *args); + +/** @brief Write user data to specified Mali memory without causing segfaults. 
+ * @param args see _mali_uk_mem_write_safe_s in mali_utgard_uk_types.h + * @return _MALI_OSK_ERR_OK on success, otherwise a suitable _mali_osk_errcode_t on failure. + */ +_mali_osk_errcode_t _mali_ukk_mem_write_safe(_mali_uk_mem_write_safe_s *args); + +/** @} */ /* end group _mali_uk_memory */ + + +/** @addtogroup _mali_uk_pp U/K Fragment Processor + * + * The Fragment Processor (aka PP (Pixel Processor)) functions provide the following functionality: + * - retrieving version of the fragment processors + * - determine number of fragment processors + * - starting a job on a fragment processor + * + * @{ */ + +/** @brief Issue a request to start a new job on a Fragment Processor. + * + * If the request fails args->status is set to _MALI_UK_START_JOB_NOT_STARTED_DO_REQUEUE and you can + * try to start the job again. + * + * An existing job could be returned for requeueing if the new job has a higher priority than a previously started job + * which the hardware hasn't actually started processing yet. In this case the new job will be started instead and the + * existing one returned, otherwise the new job is started and the status field args->status is set to + * _MALI_UK_START_JOB_STARTED. + * + * Job completion can be awaited with _mali_ukk_wait_for_notification(). + * + * @param ctx user-kernel context (mali_session) + * @param uargs see _mali_uk_pp_start_job_s in "mali_utgard_uk_types.h". Use _mali_osk_copy_from_user to retrieve data! + * @return _MALI_OSK_ERR_OK on success, otherwise a suitable _mali_osk_errcode_t on failure. + */ +_mali_osk_errcode_t _mali_ukk_pp_start_job(void *ctx, _mali_uk_pp_start_job_s *uargs); + +/** + * @brief Issue a request to start new jobs on both Vertex Processor and Fragment Processor. + * + * @note Will call into @ref _mali_ukk_pp_start_job and @ref _mali_ukk_gp_start_job. + * + * @param ctx user-kernel context (mali_session) + * @param uargs see _mali_uk_pp_and_gp_start_job_s in "mali_utgard_uk_types.h". 
Use _mali_osk_copy_from_user to retrieve data! + * @return _MALI_OSK_ERR_OK on success, otherwise a suitable _mali_osk_errcode_t on failure. + */ +_mali_osk_errcode_t _mali_ukk_pp_and_gp_start_job(void *ctx, _mali_uk_pp_and_gp_start_job_s *uargs); + +/** @brief Returns the number of Fragment Processors in the system + * + * @param args see _mali_uk_get_pp_number_of_cores_s in "mali_utgard_uk_types.h" + * @return _MALI_OSK_ERR_OK on success, otherwise a suitable _mali_osk_errcode_t on failure. + */ +_mali_osk_errcode_t _mali_ukk_get_pp_number_of_cores(_mali_uk_get_pp_number_of_cores_s *args); + +/** @brief Returns the version that all Fragment Processor cores are compatible with. + * + * This function may only be called when _mali_ukk_get_pp_number_of_cores() indicated at least one Fragment + * Processor core is available. + * + * @param args see _mali_uk_get_pp_core_version_s in "mali_utgard_uk_types.h" + * @return _MALI_OSK_ERR_OK on success, otherwise a suitable _mali_osk_errcode_t on failure. + */ +_mali_osk_errcode_t _mali_ukk_get_pp_core_version(_mali_uk_get_pp_core_version_s *args); + +/** @brief Disable Write-back unit(s) on specified job + * + * @param args see _mali_uk_get_pp_core_version_s in "mali_utgard_uk_types.h" + */ +void _mali_ukk_pp_job_disable_wb(_mali_uk_pp_disable_wb_s *args); + + +/** @} */ /* end group _mali_uk_pp */ + + +/** @addtogroup _mali_uk_gp U/K Vertex Processor + * + * The Vertex Processor (aka GP (Geometry Processor)) functions provide the following functionality: + * - retrieving version of the Vertex Processors + * - determine number of Vertex Processors available + * - starting a job on a Vertex Processor + * + * @{ */ + +/** @brief Issue a request to start a new job on a Vertex Processor. + * + * If the request fails args->status is set to _MALI_UK_START_JOB_NOT_STARTED_DO_REQUEUE and you can + * try to start the job again. 
+ * + * An existing job could be returned for requeueing if the new job has a higher priority than a previously started job + * which the hardware hasn't actually started processing yet. In this case the new job will be started and the + * existing one returned, otherwise the new job is started and the status field args->status is set to + * _MALI_UK_START_JOB_STARTED. + * + * Job completion can be awaited with _mali_ukk_wait_for_notification(). + * + * @param ctx user-kernel context (mali_session) + * @param uargs see _mali_uk_gp_start_job_s in "mali_utgard_uk_types.h". Use _mali_osk_copy_from_user to retrieve data! + * @return _MALI_OSK_ERR_OK on success, otherwise a suitable _mali_osk_errcode_t on failure. + */ +_mali_osk_errcode_t _mali_ukk_gp_start_job(void *ctx, _mali_uk_gp_start_job_s *uargs); + +/** @brief Returns the number of Vertex Processors in the system. + * + * @param args see _mali_uk_get_gp_number_of_cores_s in "mali_utgard_uk_types.h" + * @return _MALI_OSK_ERR_OK on success, otherwise a suitable _mali_osk_errcode_t on failure. + */ +_mali_osk_errcode_t _mali_ukk_get_gp_number_of_cores(_mali_uk_get_gp_number_of_cores_s *args); + +/** @brief Returns the version that all Vertex Processor cores are compatible with. + * + * This function may only be called when _mali_uk_get_gp_number_of_cores() indicated at least one Vertex + * Processor core is available. + * + * @param args see _mali_uk_get_gp_core_version_s in "mali_utgard_uk_types.h" + * @return _MALI_OSK_ERR_OK on success, otherwise a suitable _mali_osk_errcode_t on failure. + */ +_mali_osk_errcode_t _mali_ukk_get_gp_core_version(_mali_uk_get_gp_core_version_s *args); + +/** @brief Resume or abort suspended Vertex Processor jobs. + * + * After receiving notification that a Vertex Processor job was suspended from + * _mali_ukk_wait_for_notification() you can use this function to resume or abort the job. 
+ * + * @param args see _mali_uk_gp_suspend_response_s in "mali_utgard_uk_types.h" + * @return _MALI_OSK_ERR_OK on success, otherwise a suitable _mali_osk_errcode_t on failure. + */ +_mali_osk_errcode_t _mali_ukk_gp_suspend_response(_mali_uk_gp_suspend_response_s *args); + +/** @} */ /* end group _mali_uk_gp */ + +#if defined(CONFIG_MALI400_PROFILING) +/** @addtogroup _mali_uk_profiling U/K Timeline profiling module + * @{ */ + +/** @brief Add event to profiling buffer. + * + * @param args see _mali_uk_profiling_add_event_s in "mali_utgard_uk_types.h" + */ +_mali_osk_errcode_t _mali_ukk_profiling_add_event(_mali_uk_profiling_add_event_s *args); + +/** @brief Get profiling stream fd. + * + * @param args see _mali_uk_profiling_stream_fd_get_s in "mali_utgard_uk_types.h" + */ +_mali_osk_errcode_t _mali_ukk_profiling_stream_fd_get(_mali_uk_profiling_stream_fd_get_s *args); + +/** @brief Profiling control set. + * + * @param args see _mali_uk_profiling_control_set_s in "mali_utgard_uk_types.h" + */ +_mali_osk_errcode_t _mali_ukk_profiling_control_set(_mali_uk_profiling_control_set_s *args); + +/** @} */ /* end group _mali_uk_profiling */ +#endif + +/** @addtogroup _mali_uk_vsync U/K VSYNC reporting module + * @{ */ + +/** @brief Report events related to vsync. + * + * @note Events should be reported when starting to wait for vsync and when the + * waiting is finished. This information can then be used in kernel space to + * complement the GPU utilization metric. + * + * @param args see _mali_uk_vsync_event_report_s in "mali_utgard_uk_types.h" + */ +_mali_osk_errcode_t _mali_ukk_vsync_event_report(_mali_uk_vsync_event_report_s *args); + +/** @} */ /* end group _mali_uk_vsync */ + +/** @addtogroup _mali_sw_counters_report U/K Software counter reporting + * @{ */ + +/** @brief Report software counters. 
+ * + * @param args see _mali_uk_sw_counters_report_s in "mali_uk_types.h" + */ +_mali_osk_errcode_t _mali_ukk_sw_counters_report(_mali_uk_sw_counters_report_s *args); + +/** @} */ /* end group _mali_sw_counters_report */ + +/** @} */ /* end group u_k_api */ + +/** @} */ /* end group uddapi */ + +u32 _mali_ukk_report_memory_usage(void); + +u32 _mali_ukk_report_total_memory_size(void); + +u32 _mali_ukk_utilization_gp_pp(void); + +u32 _mali_ukk_utilization_gp(void); + +u32 _mali_ukk_utilization_pp(void); + +#ifdef __cplusplus +} +#endif + +#endif /* __MALI_UKK_H__ */ diff --git a/drivers/gpu/arm/utgard/common/mali_user_settings_db.c b/drivers/gpu/arm/utgard/common/mali_user_settings_db.c new file mode 100644 index 000000000000..54e1580fad1a --- /dev/null +++ b/drivers/gpu/arm/utgard/common/mali_user_settings_db.c @@ -0,0 +1,147 @@ +/** + * Copyright (C) 2012-2015 ARM Limited. All rights reserved. + * + * This program is free software and is provided to you under the terms of the GNU General Public License version 2 + * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence. + * + * A copy of the licence is included with the program, and can also be obtained from Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. + */ + +#include "mali_kernel_common.h" +#include "mali_osk.h" +#include "mali_ukk.h" +#include "mali_uk_types.h" +#include "mali_user_settings_db.h" +#include "mali_session.h" + +static u32 mali_user_settings[_MALI_UK_USER_SETTING_MAX]; +const char *_mali_uk_user_setting_descriptions[] = _MALI_UK_USER_SETTING_DESCRIPTIONS; + +static void mali_user_settings_notify(_mali_uk_user_setting_t setting, u32 value) +{ + mali_bool done = MALI_FALSE; + + /* + * This function gets a bit complicated because we can't hold the session lock while + * allocating notification objects. 
+ */ + + while (!done) { + u32 i; + u32 num_sessions_alloc; + u32 num_sessions_with_lock; + u32 used_notification_objects = 0; + _mali_osk_notification_t **notobjs; + + /* Pre allocate the number of notifications objects we need right now (might change after lock has been taken) */ + num_sessions_alloc = mali_session_get_count(); + if (0 == num_sessions_alloc) { + /* No sessions to report to */ + return; + } + + notobjs = (_mali_osk_notification_t **)_mali_osk_malloc(sizeof(_mali_osk_notification_t *) * num_sessions_alloc); + if (NULL == notobjs) { + MALI_PRINT_ERROR(("Failed to notify user space session about num PP core change (alloc failure)\n")); + return; + } + + for (i = 0; i < num_sessions_alloc; i++) { + notobjs[i] = _mali_osk_notification_create(_MALI_NOTIFICATION_SETTINGS_CHANGED, + sizeof(_mali_uk_settings_changed_s)); + if (NULL != notobjs[i]) { + _mali_uk_settings_changed_s *data; + data = notobjs[i]->result_buffer; + + data->setting = setting; + data->value = value; + } else { + MALI_PRINT_ERROR(("Failed to notify user space session about setting change (alloc failure %u)\n", i)); + } + } + + mali_session_lock(); + + /* number of sessions will not change while we hold the lock */ + num_sessions_with_lock = mali_session_get_count(); + + if (num_sessions_alloc >= num_sessions_with_lock) { + /* We have allocated enough notification objects for all the sessions atm */ + struct mali_session_data *session, *tmp; + MALI_SESSION_FOREACH(session, tmp, link) { + MALI_DEBUG_ASSERT(used_notification_objects < num_sessions_alloc); + if (NULL != notobjs[used_notification_objects]) { + mali_session_send_notification(session, notobjs[used_notification_objects]); + notobjs[used_notification_objects] = NULL; /* Don't track this notification object any more */ + } + used_notification_objects++; + } + done = MALI_TRUE; + } + + mali_session_unlock(); + + /* Delete any remaining/unused notification objects */ + for (; used_notification_objects < num_sessions_alloc; 
used_notification_objects++) { + if (NULL != notobjs[used_notification_objects]) { + _mali_osk_notification_delete(notobjs[used_notification_objects]); + } + } + + _mali_osk_free(notobjs); + } +} + +void mali_set_user_setting(_mali_uk_user_setting_t setting, u32 value) +{ + mali_bool notify = MALI_FALSE; + + if (setting >= _MALI_UK_USER_SETTING_MAX) { + MALI_DEBUG_PRINT_ERROR(("Invalid user setting %ud\n")); + return; + } + + if (mali_user_settings[setting] != value) { + notify = MALI_TRUE; + } + + mali_user_settings[setting] = value; + + if (notify) { + mali_user_settings_notify(setting, value); + } +} + +u32 mali_get_user_setting(_mali_uk_user_setting_t setting) +{ + if (setting >= _MALI_UK_USER_SETTING_MAX) { + return 0; + } + + return mali_user_settings[setting]; +} + +_mali_osk_errcode_t _mali_ukk_get_user_setting(_mali_uk_get_user_setting_s *args) +{ + _mali_uk_user_setting_t setting; + MALI_DEBUG_ASSERT_POINTER(args); + + setting = args->setting; + + if (_MALI_UK_USER_SETTING_MAX > setting) { + args->value = mali_user_settings[setting]; + return _MALI_OSK_ERR_OK; + } else { + return _MALI_OSK_ERR_INVALID_ARGS; + } +} + +_mali_osk_errcode_t _mali_ukk_get_user_settings(_mali_uk_get_user_settings_s *args) +{ + MALI_DEBUG_ASSERT_POINTER(args); + + _mali_osk_memcpy(args->settings, mali_user_settings, sizeof(mali_user_settings)); + + return _MALI_OSK_ERR_OK; +} diff --git a/drivers/gpu/arm/utgard/common/mali_user_settings_db.h b/drivers/gpu/arm/utgard/common/mali_user_settings_db.h new file mode 100644 index 000000000000..0732c3e56e2a --- /dev/null +++ b/drivers/gpu/arm/utgard/common/mali_user_settings_db.h @@ -0,0 +1,39 @@ +/** + * Copyright (C) 2012-2013, 2015 ARM Limited. All rights reserved. + * + * This program is free software and is provided to you under the terms of the GNU General Public License version 2 + * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence. 
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#ifndef __MALI_USER_SETTINGS_DB_H__
+#define __MALI_USER_SETTINGS_DB_H__
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include "mali_uk_types.h"
+
+/** @brief Set Mali user setting in DB
+ *
+ * Update the DB with a new value for \a setting. If the value is different from the previous set value running sessions will be notified of the change.
+ *
+ * @param setting the setting to be changed
+ * @param value the new value to set
+ */
+void mali_set_user_setting(_mali_uk_user_setting_t setting, u32 value);
+
+/** @brief Get current Mali user setting value from DB
+ *
+ * @param setting the setting to extract
+ * @return the value of the selected setting
+ */
+u32 mali_get_user_setting(_mali_uk_user_setting_t setting);
+
+#ifdef __cplusplus
+}
+#endif
+#endif /* __MALI_USER_SETTINGS_DB_H__ */ |