path: root/drivers/gpu/drm/i915/i915_perf.c
Diffstat (limited to 'drivers/gpu/drm/i915/i915_perf.c')
-rw-r--r--  drivers/gpu/drm/i915/i915_perf.c | 114
1 file changed, 56 insertions(+), 58 deletions(-)
diff --git a/drivers/gpu/drm/i915/i915_perf.c b/drivers/gpu/drm/i915/i915_perf.c
index 9ebf99f3d8d3..39a4804091d7 100644
--- a/drivers/gpu/drm/i915/i915_perf.c
+++ b/drivers/gpu/drm/i915/i915_perf.c
@@ -1202,7 +1202,7 @@ static int i915_oa_read(struct i915_perf_stream *stream,
static struct intel_context *oa_pin_context(struct drm_i915_private *i915,
struct i915_gem_context *ctx)
{
- struct intel_engine_cs *engine = i915->engine[RCS];
+ struct intel_engine_cs *engine = i915->engine[RCS0];
struct intel_context *ce;
int ret;
@@ -1364,7 +1364,7 @@ static void i915_oa_stream_destroy(struct i915_perf_stream *stream)
free_oa_buffer(dev_priv);
- intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
+ intel_uncore_forcewake_put(&dev_priv->uncore, FORCEWAKE_ALL);
intel_runtime_pm_put(dev_priv, stream->wakeref);
if (stream->ctx)
@@ -1509,9 +1509,7 @@ static int alloc_oa_buffer(struct drm_i915_private *dev_priv)
goto unlock;
}
- ret = i915_gem_object_set_cache_level(bo, I915_CACHE_LLC);
- if (ret)
- goto err_unref;
+ i915_gem_object_set_cache_coherency(bo, I915_CACHE_LLC);
/* PreHSW required 512K alignment, HSW requires 16M */
vma = i915_gem_object_ggtt_pin(bo, NULL, 0, SZ_16M, 0);
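Sketch (not part of the patch): the point of the hunk above is that i915_gem_object_set_cache_coherency() returns void, so the old error branch used for i915_gem_object_set_cache_level() disappears. Assuming the i915_gem_object_create()/OA_BUFFER_SIZE calls that alloc_oa_buffer() already makes, the resulting allocation path looks roughly like:

	bo = i915_gem_object_create(dev_priv, OA_BUFFER_SIZE);
	if (IS_ERR(bo)) {
		ret = PTR_ERR(bo);
		goto unlock;
	}

	/* No return value to check: the coherency helper cannot fail. */
	i915_gem_object_set_cache_coherency(bo, I915_CACHE_LLC);

	/* PreHSW required 512K alignment, HSW requires 16M */
	vma = i915_gem_object_ggtt_pin(bo, NULL, 0, SZ_16M, 0);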
@@ -1629,13 +1627,14 @@ static void hsw_disable_metric_set(struct drm_i915_private *dev_priv)
* It's fine to put out-of-date values into these per-context registers
* in the case that the OA unit has been disabled.
*/
-static void gen8_update_reg_state_unlocked(struct i915_gem_context *ctx,
- u32 *reg_state,
- const struct i915_oa_config *oa_config)
+static void
+gen8_update_reg_state_unlocked(struct intel_context *ce,
+ u32 *reg_state,
+ const struct i915_oa_config *oa_config)
{
- struct drm_i915_private *dev_priv = ctx->i915;
- u32 ctx_oactxctrl = dev_priv->perf.oa.ctx_oactxctrl_offset;
- u32 ctx_flexeu0 = dev_priv->perf.oa.ctx_flexeu0_offset;
+ struct drm_i915_private *i915 = ce->gem_context->i915;
+ u32 ctx_oactxctrl = i915->perf.oa.ctx_oactxctrl_offset;
+ u32 ctx_flexeu0 = i915->perf.oa.ctx_flexeu0_offset;
/* The MMIO offsets for Flex EU registers aren't contiguous */
i915_reg_t flex_regs[] = {
EU_PERF_CNTL0,
@@ -1649,8 +1648,8 @@ static void gen8_update_reg_state_unlocked(struct i915_gem_context *ctx,
int i;
CTX_REG(reg_state, ctx_oactxctrl, GEN8_OACTXCONTROL,
- (dev_priv->perf.oa.period_exponent << GEN8_OA_TIMER_PERIOD_SHIFT) |
- (dev_priv->perf.oa.periodic ? GEN8_OA_TIMER_ENABLE : 0) |
+ (i915->perf.oa.period_exponent << GEN8_OA_TIMER_PERIOD_SHIFT) |
+ (i915->perf.oa.periodic ? GEN8_OA_TIMER_ENABLE : 0) |
GEN8_OA_COUNTER_RESUME);
for (i = 0; i < ARRAY_SIZE(flex_regs); i++) {
@@ -1678,10 +1677,9 @@ static void gen8_update_reg_state_unlocked(struct i915_gem_context *ctx,
CTX_REG(reg_state, state_offset, flex_regs[i], value);
}
- CTX_REG(reg_state, CTX_R_PWR_CLK_STATE, GEN8_R_PWR_CLK_STATE,
- gen8_make_rpcs(dev_priv,
- &to_intel_context(ctx,
- dev_priv->engine[RCS])->sseu));
+ CTX_REG(reg_state,
+ CTX_R_PWR_CLK_STATE, GEN8_R_PWR_CLK_STATE,
+ gen8_make_rpcs(i915, &ce->sseu));
}
/*
@@ -1711,7 +1709,7 @@ static void gen8_update_reg_state_unlocked(struct i915_gem_context *ctx,
static int gen8_configure_all_contexts(struct drm_i915_private *dev_priv,
const struct i915_oa_config *oa_config)
{
- struct intel_engine_cs *engine = dev_priv->engine[RCS];
+ struct intel_engine_cs *engine = dev_priv->engine[RCS0];
unsigned int map_type = i915_coherent_map_type(dev_priv);
struct i915_gem_context *ctx;
struct i915_request *rq;
@@ -1740,11 +1738,11 @@ static int gen8_configure_all_contexts(struct drm_i915_private *dev_priv,
/* Update all contexts now that we've stalled the submission. */
list_for_each_entry(ctx, &dev_priv->contexts.list, link) {
- struct intel_context *ce = to_intel_context(ctx, engine);
+ struct intel_context *ce = intel_context_lookup(ctx, engine);
u32 *regs;
/* OA settings will be set upon first use */
- if (!ce->state)
+ if (!ce || !ce->state)
continue;
regs = i915_gem_object_pin_map(ce->state->obj, map_type);
@@ -1754,7 +1752,7 @@ static int gen8_configure_all_contexts(struct drm_i915_private *dev_priv,
ce->state->obj->mm.dirty = true;
regs += LRC_STATE_PN * PAGE_SIZE / sizeof(*regs);
- gen8_update_reg_state_unlocked(ctx, regs, oa_config);
+ gen8_update_reg_state_unlocked(ce, regs, oa_config);
i915_gem_object_unpin_map(ce->state->obj);
}
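Sketch (not part of the patch): intel_context_lookup() may return NULL for an engine the context has never been instantiated on, which is why the loop above checks both the context and its backing state before touching the register image. A hypothetical helper capturing that pattern:

static struct intel_context *
oa_context_state(struct i915_gem_context *ctx,
		 struct intel_engine_cs *engine)
{
	struct intel_context *ce = intel_context_lookup(ctx, engine);

	/* OA settings will be set upon first use of a fresh context. */
	if (!ce || !ce->state)
		return NULL;

	return ce;
}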
@@ -1922,10 +1920,10 @@ static void i915_oa_stream_enable(struct i915_perf_stream *stream)
static void gen7_oa_disable(struct i915_perf_stream *stream)
{
- struct drm_i915_private *dev_priv = stream->dev_priv;
+ struct intel_uncore *uncore = &stream->dev_priv->uncore;
- I915_WRITE(GEN7_OACONTROL, 0);
- if (intel_wait_for_register(dev_priv,
+ intel_uncore_write(uncore, GEN7_OACONTROL, 0);
+ if (intel_wait_for_register(uncore,
GEN7_OACONTROL, GEN7_OACONTROL_ENABLE, 0,
50))
DRM_ERROR("wait for OA to be disabled timed out\n");
@@ -1933,10 +1931,10 @@ static void gen7_oa_disable(struct i915_perf_stream *stream)
static void gen8_oa_disable(struct i915_perf_stream *stream)
{
- struct drm_i915_private *dev_priv = stream->dev_priv;
+ struct intel_uncore *uncore = &stream->dev_priv->uncore;
- I915_WRITE(GEN8_OACONTROL, 0);
- if (intel_wait_for_register(dev_priv,
+ intel_uncore_write(uncore, GEN8_OACONTROL, 0);
+ if (intel_wait_for_register(uncore,
GEN8_OACONTROL, GEN8_OA_COUNTER_ENABLE, 0,
50))
DRM_ERROR("wait for OA to be disabled timed out\n");
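Sketch (not part of the patch): the gen7 and gen8 disable paths above follow one pattern, with the mmio helpers now taking struct intel_uncore instead of drm_i915_private. A hypothetical common helper, where reg/enable_bit stand in for the per-gen OACONTROL register and enable flag, using the same 50 ms timeout as the hunks:

static void oa_control_disable(struct intel_uncore *uncore,
			       i915_reg_t reg, u32 enable_bit)
{
	intel_uncore_write(uncore, reg, 0);
	if (intel_wait_for_register(uncore, reg, enable_bit, 0, 50))
		DRM_ERROR("wait for OA to be disabled timed out\n");
}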
@@ -2093,7 +2091,7 @@ static int i915_oa_stream_init(struct i915_perf_stream *stream,
* references will effectively disable RC6.
*/
stream->wakeref = intel_runtime_pm_get(dev_priv);
- intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
+ intel_uncore_forcewake_get(&dev_priv->uncore, FORCEWAKE_ALL);
ret = alloc_oa_buffer(dev_priv);
if (ret)
@@ -2127,7 +2125,7 @@ err_lock:
err_oa_buf_alloc:
put_oa_config(dev_priv, stream->oa_config);
- intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
+ intel_uncore_forcewake_put(&dev_priv->uncore, FORCEWAKE_ALL);
intel_runtime_pm_put(dev_priv, stream->wakeref);
err_config:
@@ -2138,17 +2136,17 @@ err_config:
}
void i915_oa_init_reg_state(struct intel_engine_cs *engine,
- struct i915_gem_context *ctx,
- u32 *reg_state)
+ struct intel_context *ce,
+ u32 *regs)
{
struct i915_perf_stream *stream;
- if (engine->id != RCS)
+ if (engine->class != RENDER_CLASS)
return;
stream = engine->i915->perf.oa.exclusive_stream;
if (stream)
- gen8_update_reg_state_unlocked(ctx, reg_state, stream->oa_config);
+ gen8_update_reg_state_unlocked(ce, regs, stream->oa_config);
}
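Sketch (not part of the patch): filtering on engine->class rather than a fixed engine id covers every render-class instance regardless of how engines are enumerated after the RCS0 renaming. A hypothetical predicate expressing the same test:

static bool engine_carries_oa_state(const struct intel_engine_cs *engine)
{
	/* Only render-class engines carry OA state in their context image. */
	return engine->class == RENDER_CLASS;
}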
/**
@@ -2881,12 +2879,24 @@ void i915_perf_register(struct drm_i915_private *dev_priv)
sysfs_attr_init(&dev_priv->perf.oa.test_config.sysfs_metric_id.attr);
- if (IS_HASWELL(dev_priv)) {
- i915_perf_load_test_config_hsw(dev_priv);
- } else if (IS_BROADWELL(dev_priv)) {
- i915_perf_load_test_config_bdw(dev_priv);
- } else if (IS_CHERRYVIEW(dev_priv)) {
- i915_perf_load_test_config_chv(dev_priv);
+ if (INTEL_GEN(dev_priv) >= 11) {
+ i915_perf_load_test_config_icl(dev_priv);
+ } else if (IS_CANNONLAKE(dev_priv)) {
+ i915_perf_load_test_config_cnl(dev_priv);
+ } else if (IS_COFFEELAKE(dev_priv)) {
+ if (IS_CFL_GT2(dev_priv))
+ i915_perf_load_test_config_cflgt2(dev_priv);
+ if (IS_CFL_GT3(dev_priv))
+ i915_perf_load_test_config_cflgt3(dev_priv);
+ } else if (IS_GEMINILAKE(dev_priv)) {
+ i915_perf_load_test_config_glk(dev_priv);
+ } else if (IS_KABYLAKE(dev_priv)) {
+ if (IS_KBL_GT2(dev_priv))
+ i915_perf_load_test_config_kblgt2(dev_priv);
+ else if (IS_KBL_GT3(dev_priv))
+ i915_perf_load_test_config_kblgt3(dev_priv);
+ } else if (IS_BROXTON(dev_priv)) {
+ i915_perf_load_test_config_bxt(dev_priv);
} else if (IS_SKYLAKE(dev_priv)) {
if (IS_SKL_GT2(dev_priv))
i915_perf_load_test_config_sklgt2(dev_priv);
@@ -2894,25 +2904,13 @@ void i915_perf_register(struct drm_i915_private *dev_priv)
i915_perf_load_test_config_sklgt3(dev_priv);
else if (IS_SKL_GT4(dev_priv))
i915_perf_load_test_config_sklgt4(dev_priv);
- } else if (IS_BROXTON(dev_priv)) {
- i915_perf_load_test_config_bxt(dev_priv);
- } else if (IS_KABYLAKE(dev_priv)) {
- if (IS_KBL_GT2(dev_priv))
- i915_perf_load_test_config_kblgt2(dev_priv);
- else if (IS_KBL_GT3(dev_priv))
- i915_perf_load_test_config_kblgt3(dev_priv);
- } else if (IS_GEMINILAKE(dev_priv)) {
- i915_perf_load_test_config_glk(dev_priv);
- } else if (IS_COFFEELAKE(dev_priv)) {
- if (IS_CFL_GT2(dev_priv))
- i915_perf_load_test_config_cflgt2(dev_priv);
- if (IS_CFL_GT3(dev_priv))
- i915_perf_load_test_config_cflgt3(dev_priv);
- } else if (IS_CANNONLAKE(dev_priv)) {
- i915_perf_load_test_config_cnl(dev_priv);
- } else if (IS_ICELAKE(dev_priv)) {
- i915_perf_load_test_config_icl(dev_priv);
- }
+ } else if (IS_CHERRYVIEW(dev_priv)) {
+ i915_perf_load_test_config_chv(dev_priv);
+ } else if (IS_BROADWELL(dev_priv)) {
+ i915_perf_load_test_config_bdw(dev_priv);
+ } else if (IS_HASWELL(dev_priv)) {
+ i915_perf_load_test_config_hsw(dev_priv);
+	}
if (dev_priv->perf.oa.test_config.id == 0)
goto sysfs_error;