Diffstat (limited to 'drivers/gpu/drm/amd/display/dc/dcn20')
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn20/Makefile | 13
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dccg.c | 79
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dccg.h | 178
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dpp.c | 163
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dpp.h | 170
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dpp_cm.c | 169
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dsc.c | 199
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dsc.h | 19
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dwb_scl.c | 8
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubbub.c | 167
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubbub.h | 46
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubp.c | 1156
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubp.h | 125
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c | 1976
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.h | 152
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn20/dcn20_init.c | 148
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn20/dcn20_init.h | 33
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn20/dcn20_link_encoder.c | 63
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn20/dcn20_link_encoder.h | 199
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn20/dcn20_mmhubbub.h | 20
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn20/dcn20_mpc.c | 110
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn20/dcn20_mpc.h | 31
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn20/dcn20_opp.c | 37
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn20/dcn20_opp.h | 14
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn20/dcn20_optc.c | 372
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn20/dcn20_optc.h | 34
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c | 2003
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.h | 65
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn20/dcn20_stream_encoder.c | 108
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn20/dcn20_stream_encoder.h | 12
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn20/dcn20_vmid.c | 37
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn20/dcn20_vmid.h | 7
32 files changed, 5164 insertions, 2749 deletions
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/Makefile b/drivers/gpu/drm/amd/display/dc/dcn20/Makefile
index e9721a906592..abaed2121feb 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/Makefile
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/Makefile
@@ -1,22 +1,13 @@
+# SPDX-License-Identifier: MIT
#
# Makefile for DCN.
-DCN20 = dcn20_resource.o dcn20_hwseq.o dcn20_dpp.o dcn20_dpp_cm.o dcn20_hubp.o \
+DCN20 = dcn20_resource.o dcn20_init.o dcn20_hwseq.o dcn20_dpp.o dcn20_dpp_cm.o dcn20_hubp.o \
dcn20_mpc.o dcn20_opp.o dcn20_hubbub.o dcn20_optc.o dcn20_mmhubbub.o \
dcn20_stream_encoder.o dcn20_link_encoder.o dcn20_dccg.o \
dcn20_vmid.o dcn20_dwb.o dcn20_dwb_scl.o
-ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT
DCN20 += dcn20_dsc.o
-endif
-
-ifneq ($(call cc-option, -mpreferred-stack-boundary=4),)
- cc_stack_align := -mpreferred-stack-boundary=4
-else ifneq ($(call cc-option, -mstack-alignment=16),)
- cc_stack_align := -mstack-alignment=16
-endif
-
-CFLAGS_dcn20_resource.o := -mhard-float -msse $(cc_stack_align)
AMD_DAL_DCN20 = $(addprefix $(AMDDALPATH)/dc/dcn20/,$(DCN20))
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dccg.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dccg.c
index 51a3dfe97f0e..5999b2da3a01 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dccg.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dccg.c
@@ -50,26 +50,28 @@ void dccg2_update_dpp_dto(struct dccg *dccg, int dpp_inst, int req_dppclk)
if (dccg->ref_dppclk && req_dppclk) {
int ref_dppclk = dccg->ref_dppclk;
+ int modulo, phase;
- ASSERT(req_dppclk <= ref_dppclk);
- /* need to clamp to 8 bits */
- if (ref_dppclk > 0xff) {
- int divider = (ref_dppclk + 0xfe) / 0xff;
+ // phase / modulo = dpp pipe clk / dpp global clk
+ modulo = 0xff; // use FF at the end
+ phase = ((modulo * req_dppclk) + ref_dppclk - 1) / ref_dppclk;
- ref_dppclk /= divider;
- req_dppclk = (req_dppclk + divider - 1) / divider;
- if (req_dppclk > ref_dppclk)
- req_dppclk = ref_dppclk;
+ if (phase > 0xff) {
+ ASSERT(false);
+ phase = 0xff;
}
+
REG_SET_2(DPPCLK_DTO_PARAM[dpp_inst], 0,
- DPPCLK0_DTO_PHASE, req_dppclk,
- DPPCLK0_DTO_MODULO, ref_dppclk);
+ DPPCLK0_DTO_PHASE, phase,
+ DPPCLK0_DTO_MODULO, modulo);
REG_UPDATE(DPPCLK_DTO_CTRL,
DPPCLK_DTO_ENABLE[dpp_inst], 1);
} else {
REG_UPDATE(DPPCLK_DTO_CTRL,
DPPCLK_DTO_ENABLE[dpp_inst], 0);
}
+
+ dccg->pipe_dppclk_khz[dpp_inst] = req_dppclk;
}
void dccg2_get_dccg_ref_freq(struct dccg *dccg,
@@ -94,34 +96,49 @@ void dccg2_get_dccg_ref_freq(struct dccg *dccg,
return;
}
-void dccg2_init(struct dccg *dccg)
+void dccg2_set_fifo_errdet_ovr_en(struct dccg *dccg,
+ bool en)
{
struct dcn_dccg *dccg_dcn = TO_DCN_DCCG(dccg);
- // Fallthrough intentional to program all available dpp_dto's
- switch (dccg_dcn->base.ctx->dc->res_pool->pipe_count) {
- case 6:
- REG_UPDATE(DPPCLK_DTO_CTRL, DPPCLK_DTO_DB_EN[5], 1);
- case 5:
- REG_UPDATE(DPPCLK_DTO_CTRL, DPPCLK_DTO_DB_EN[4], 1);
- case 4:
- REG_UPDATE(DPPCLK_DTO_CTRL, DPPCLK_DTO_DB_EN[3], 1);
- case 3:
- REG_UPDATE(DPPCLK_DTO_CTRL, DPPCLK_DTO_DB_EN[2], 1);
- case 2:
- REG_UPDATE(DPPCLK_DTO_CTRL, DPPCLK_DTO_DB_EN[1], 1);
- case 1:
- REG_UPDATE(DPPCLK_DTO_CTRL, DPPCLK_DTO_DB_EN[0], 1);
- break;
- default:
- ASSERT(false);
- break;
- }
+ REG_UPDATE(DISPCLK_FREQ_CHANGE_CNTL,
+ DCCG_FIFO_ERRDET_OVR_EN, en ? 1 : 0);
+}
+
+void dccg2_otg_add_pixel(struct dccg *dccg,
+ uint32_t otg_inst)
+{
+ struct dcn_dccg *dccg_dcn = TO_DCN_DCCG(dccg);
+
+ REG_UPDATE_2(OTG_PIXEL_RATE_CNTL[otg_inst],
+ OTG_ADD_PIXEL[otg_inst], 0,
+ OTG_DROP_PIXEL[otg_inst], 0);
+ REG_UPDATE(OTG_PIXEL_RATE_CNTL[otg_inst],
+ OTG_ADD_PIXEL[otg_inst], 1);
+}
+
+void dccg2_otg_drop_pixel(struct dccg *dccg,
+ uint32_t otg_inst)
+{
+ struct dcn_dccg *dccg_dcn = TO_DCN_DCCG(dccg);
+
+ REG_UPDATE_2(OTG_PIXEL_RATE_CNTL[otg_inst],
+ OTG_ADD_PIXEL[otg_inst], 0,
+ OTG_DROP_PIXEL[otg_inst], 0);
+ REG_UPDATE(OTG_PIXEL_RATE_CNTL[otg_inst],
+ OTG_DROP_PIXEL[otg_inst], 1);
+}
+
+void dccg2_init(struct dccg *dccg)
+{
}
static const struct dccg_funcs dccg2_funcs = {
.update_dpp_dto = dccg2_update_dpp_dto,
.get_dccg_ref_freq = dccg2_get_dccg_ref_freq,
+ .set_fifo_errdet_ovr_en = dccg2_set_fifo_errdet_ovr_en,
+ .otg_add_pixel = dccg2_otg_add_pixel,
+ .otg_drop_pixel = dccg2_otg_drop_pixel,
.dccg_init = dccg2_init
};
@@ -131,7 +148,7 @@ struct dccg *dccg2_create(
const struct dccg_shift *dccg_shift,
const struct dccg_mask *dccg_mask)
{
- struct dcn_dccg *dccg_dcn = kzalloc(sizeof(*dccg_dcn), GFP_KERNEL);
+ struct dcn_dccg *dccg_dcn = kzalloc(sizeof(*dccg_dcn), GFP_ATOMIC);
struct dccg *base;
if (dccg_dcn == NULL) {
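
A minimal standalone sketch of the phase/modulo arithmetic that the reworked dccg2_update_dpp_dto() above programs into DPPCLK_DTO_PARAM, assuming a fixed modulo of 0xff as in the hunk; the helper name and the example clock values are illustrative only, not part of the patch.

#include <stdio.h>

/*
 * Illustrative helper (not from the patch): compute the DPPCLK DTO phase for a
 * fixed modulo of 0xff, mirroring dccg2_update_dpp_dto() above.  The DTO ratio
 * is phase / modulo = req_dppclk / ref_dppclk, with the phase rounded up and
 * clamped to the 8-bit register field.
 */
static int dpp_dto_phase(int ref_dppclk, int req_dppclk)
{
	const int modulo = 0xff;
	int phase = (modulo * req_dppclk + ref_dppclk - 1) / ref_dppclk;

	if (phase > 0xff)	/* req > ref would overflow the register field */
		phase = 0xff;
	return phase;
}

int main(void)
{
	/* Example clocks in kHz (illustrative values only). */
	printf("phase = 0x%x\n", dpp_dto_phase(1200000, 600000));	/* 0x80 */
	printf("phase = 0x%x\n", dpp_dto_phase(1200000, 1200000));	/* 0xff */
	return 0;
}

With req_dppclk equal to ref_dppclk the phase saturates at the modulo (ratio 1.0); any request above the reference trips the ASSERT and clamp added in the hunk.
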
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dccg.h b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dccg.h
index 2205cb0204e7..b3c9a9724efd 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dccg.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dccg.h
@@ -34,12 +34,19 @@
DCCG_SRII(DTO_PARAM, DPPCLK, 1),\
DCCG_SRII(DTO_PARAM, DPPCLK, 2),\
DCCG_SRII(DTO_PARAM, DPPCLK, 3),\
- SR(REFCLK_CNTL)
+ SR(REFCLK_CNTL),\
+ DCCG_SRII(PIXEL_RATE_CNTL, OTG, 0),\
+ DCCG_SRII(PIXEL_RATE_CNTL, OTG, 1),\
+ SR(DISPCLK_FREQ_CHANGE_CNTL)
#define DCCG_REG_LIST_DCN2() \
DCCG_COMMON_REG_LIST_DCN_BASE(),\
DCCG_SRII(DTO_PARAM, DPPCLK, 4),\
- DCCG_SRII(DTO_PARAM, DPPCLK, 5)
+ DCCG_SRII(DTO_PARAM, DPPCLK, 5),\
+ DCCG_SRII(PIXEL_RATE_CNTL, OTG, 2),\
+ DCCG_SRII(PIXEL_RATE_CNTL, OTG, 3),\
+ DCCG_SRII(PIXEL_RATE_CNTL, OTG, 4),\
+ DCCG_SRII(PIXEL_RATE_CNTL, OTG, 5)
#define DCCG_SF(reg_name, field_name, post_fix)\
.field_name = reg_name ## __ ## field_name ## post_fix
@@ -47,6 +54,9 @@
#define DCCG_SFI(reg_name, field_name, field_prefix, inst, post_fix)\
.field_prefix ## _ ## field_name[inst] = reg_name ## __ ## field_prefix ## inst ## _ ## field_name ## post_fix
+#define DCCG_SFII(block, reg_name, field_prefix, field_name, inst, post_fix)\
+ .field_prefix ## _ ## field_name[inst] = block ## inst ## _ ## reg_name ## __ ## field_prefix ## inst ## _ ## field_name ## post_fix
+
#define DCCG_COMMON_MASK_SH_LIST_DCN_COMMON_BASE(mask_sh) \
DCCG_SFI(DPPCLK_DTO_CTRL, DTO_ENABLE, DPPCLK, 0, mask_sh),\
DCCG_SFI(DPPCLK_DTO_CTRL, DTO_DB_EN, DPPCLK, 0, mask_sh),\
@@ -59,14 +69,49 @@
DCCG_SF(DPPCLK0_DTO_PARAM, DPPCLK0_DTO_PHASE, mask_sh),\
DCCG_SF(DPPCLK0_DTO_PARAM, DPPCLK0_DTO_MODULO, mask_sh),\
DCCG_SF(REFCLK_CNTL, REFCLK_CLOCK_EN, mask_sh),\
- DCCG_SF(REFCLK_CNTL, REFCLK_SRC_SEL, mask_sh)
+ DCCG_SF(REFCLK_CNTL, REFCLK_SRC_SEL, mask_sh),\
+ DCCG_SF(DISPCLK_FREQ_CHANGE_CNTL, DISPCLK_STEP_DELAY, mask_sh),\
+ DCCG_SF(DISPCLK_FREQ_CHANGE_CNTL, DISPCLK_STEP_SIZE, mask_sh),\
+ DCCG_SF(DISPCLK_FREQ_CHANGE_CNTL, DISPCLK_FREQ_RAMP_DONE, mask_sh),\
+ DCCG_SF(DISPCLK_FREQ_CHANGE_CNTL, DISPCLK_MAX_ERRDET_CYCLES, mask_sh),\
+ DCCG_SF(DISPCLK_FREQ_CHANGE_CNTL, DCCG_FIFO_ERRDET_RESET, mask_sh),\
+ DCCG_SF(DISPCLK_FREQ_CHANGE_CNTL, DCCG_FIFO_ERRDET_STATE, mask_sh),\
+ DCCG_SF(DISPCLK_FREQ_CHANGE_CNTL, DCCG_FIFO_ERRDET_OVR_EN, mask_sh),\
+ DCCG_SF(DISPCLK_FREQ_CHANGE_CNTL, DISPCLK_CHG_FWD_CORR_DISABLE, mask_sh),\
+ DCCG_SFII(OTG, PIXEL_RATE_CNTL, OTG, ADD_PIXEL, 0, mask_sh),\
+ DCCG_SFII(OTG, PIXEL_RATE_CNTL, OTG, ADD_PIXEL, 1, mask_sh),\
+ DCCG_SFII(OTG, PIXEL_RATE_CNTL, OTG, DROP_PIXEL, 0, mask_sh),\
+ DCCG_SFII(OTG, PIXEL_RATE_CNTL, OTG, DROP_PIXEL, 1, mask_sh)
+
+
+
#define DCCG_MASK_SH_LIST_DCN2(mask_sh) \
DCCG_COMMON_MASK_SH_LIST_DCN_COMMON_BASE(mask_sh),\
DCCG_SFI(DPPCLK_DTO_CTRL, DTO_ENABLE, DPPCLK, 4, mask_sh),\
DCCG_SFI(DPPCLK_DTO_CTRL, DTO_DB_EN, DPPCLK, 4, mask_sh),\
DCCG_SFI(DPPCLK_DTO_CTRL, DTO_ENABLE, DPPCLK, 5, mask_sh),\
- DCCG_SFI(DPPCLK_DTO_CTRL, DTO_DB_EN, DPPCLK, 5, mask_sh)
+ DCCG_SFI(DPPCLK_DTO_CTRL, DTO_DB_EN, DPPCLK, 5, mask_sh),\
+ DCCG_SFII(OTG, PIXEL_RATE_CNTL, OTG, ADD_PIXEL, 2, mask_sh),\
+ DCCG_SFII(OTG, PIXEL_RATE_CNTL, OTG, ADD_PIXEL, 3, mask_sh),\
+ DCCG_SFII(OTG, PIXEL_RATE_CNTL, OTG, ADD_PIXEL, 4, mask_sh),\
+ DCCG_SFII(OTG, PIXEL_RATE_CNTL, OTG, ADD_PIXEL, 5, mask_sh),\
+ DCCG_SFII(OTG, PIXEL_RATE_CNTL, OTG, DROP_PIXEL, 2, mask_sh),\
+ DCCG_SFII(OTG, PIXEL_RATE_CNTL, OTG, DROP_PIXEL, 3, mask_sh),\
+ DCCG_SFII(OTG, PIXEL_RATE_CNTL, OTG, DROP_PIXEL, 4, mask_sh),\
+ DCCG_SFII(OTG, PIXEL_RATE_CNTL, OTG, DROP_PIXEL, 5, mask_sh)
+
+#define DCCG_MASK_SH_LIST_DCN2_1(mask_sh) \
+ DCCG_COMMON_MASK_SH_LIST_DCN_COMMON_BASE(mask_sh),\
+ DCCG_SFI(DPPCLK_DTO_CTRL, DTO_ENABLE, DPPCLK, 4, mask_sh),\
+ DCCG_SFI(DPPCLK_DTO_CTRL, DTO_DB_EN, DPPCLK, 4, mask_sh),\
+ DCCG_SFI(DPPCLK_DTO_CTRL, DTO_ENABLE, DPPCLK, 5, mask_sh),\
+ DCCG_SFI(DPPCLK_DTO_CTRL, DTO_DB_EN, DPPCLK, 5, mask_sh),\
+ DCCG_SFII(OTG, PIXEL_RATE_CNTL, OTG, ADD_PIXEL, 2, mask_sh),\
+ DCCG_SFII(OTG, PIXEL_RATE_CNTL, OTG, ADD_PIXEL, 3, mask_sh),\
+ DCCG_SFII(OTG, PIXEL_RATE_CNTL, OTG, DROP_PIXEL, 2, mask_sh),\
+ DCCG_SFII(OTG, PIXEL_RATE_CNTL, OTG, DROP_PIXEL, 3, mask_sh)
+
#define DCCG_REG_FIELD_LIST(type) \
type DPPCLK0_DTO_PHASE;\
@@ -74,20 +119,135 @@
type DPPCLK_DTO_ENABLE[6];\
type DPPCLK_DTO_DB_EN[6];\
type REFCLK_CLOCK_EN;\
- type REFCLK_SRC_SEL;
+ type REFCLK_SRC_SEL;\
+ type DISPCLK_STEP_DELAY;\
+ type DISPCLK_STEP_SIZE;\
+ type DISPCLK_FREQ_RAMP_DONE;\
+ type DISPCLK_MAX_ERRDET_CYCLES;\
+ type DCCG_FIFO_ERRDET_RESET;\
+ type DCCG_FIFO_ERRDET_STATE;\
+ type DCCG_FIFO_ERRDET_OVR_EN;\
+ type DISPCLK_CHG_FWD_CORR_DISABLE;\
+ type DISPCLK_FREQ_CHANGE_CNTL;\
+ type OTG_ADD_PIXEL[MAX_PIPES];\
+ type OTG_DROP_PIXEL[MAX_PIPES];
+
+#define DCCG3_REG_FIELD_LIST(type) \
+ type PHYASYMCLK_FORCE_EN;\
+ type PHYASYMCLK_FORCE_SRC_SEL;\
+ type PHYBSYMCLK_FORCE_EN;\
+ type PHYBSYMCLK_FORCE_SRC_SEL;\
+ type PHYCSYMCLK_FORCE_EN;\
+ type PHYCSYMCLK_FORCE_SRC_SEL;
+
+#define DCCG31_REG_FIELD_LIST(type) \
+ type PHYDSYMCLK_FORCE_EN;\
+ type PHYDSYMCLK_FORCE_SRC_SEL;\
+ type PHYESYMCLK_FORCE_EN;\
+ type PHYESYMCLK_FORCE_SRC_SEL;\
+ type DPSTREAMCLK_PIPE0_EN;\
+ type DPSTREAMCLK_PIPE1_EN;\
+ type DPSTREAMCLK_PIPE2_EN;\
+ type DPSTREAMCLK_PIPE3_EN;\
+ type HDMISTREAMCLK0_SRC_SEL;\
+ type HDMISTREAMCLK0_DTO_FORCE_DIS;\
+ type SYMCLK32_SE0_SRC_SEL;\
+ type SYMCLK32_SE1_SRC_SEL;\
+ type SYMCLK32_SE2_SRC_SEL;\
+ type SYMCLK32_SE3_SRC_SEL;\
+ type SYMCLK32_SE0_EN;\
+ type SYMCLK32_SE1_EN;\
+ type SYMCLK32_SE2_EN;\
+ type SYMCLK32_SE3_EN;\
+ type SYMCLK32_LE0_SRC_SEL;\
+ type SYMCLK32_LE1_SRC_SEL;\
+ type SYMCLK32_LE0_EN;\
+ type SYMCLK32_LE1_EN;\
+ type DTBCLK_DTO_ENABLE[MAX_PIPES];\
+ type DTBCLKDTO_ENABLE_STATUS[MAX_PIPES];\
+ type PIPE_DTO_SRC_SEL[MAX_PIPES];\
+ type DTBCLK_DTO_DIV[MAX_PIPES];\
+ type DCCG_AUDIO_DTO_SEL;\
+ type DCCG_AUDIO_DTO0_SOURCE_SEL;\
+ type DENTIST_DISPCLK_CHG_MODE;\
+ type DSCCLK0_DTO_PHASE;\
+ type DSCCLK0_DTO_MODULO;\
+ type DSCCLK1_DTO_PHASE;\
+ type DSCCLK1_DTO_MODULO;\
+ type DSCCLK2_DTO_PHASE;\
+ type DSCCLK2_DTO_MODULO;\
+ type DSCCLK0_DTO_ENABLE;\
+ type DSCCLK1_DTO_ENABLE;\
+ type DSCCLK2_DTO_ENABLE;\
+ type SYMCLK32_ROOT_SE0_GATE_DISABLE;\
+ type SYMCLK32_ROOT_SE1_GATE_DISABLE;\
+ type SYMCLK32_ROOT_SE2_GATE_DISABLE;\
+ type SYMCLK32_ROOT_SE3_GATE_DISABLE;\
+ type SYMCLK32_SE0_GATE_DISABLE;\
+ type SYMCLK32_SE1_GATE_DISABLE;\
+ type SYMCLK32_SE2_GATE_DISABLE;\
+ type SYMCLK32_SE3_GATE_DISABLE;\
+ type SYMCLK32_ROOT_LE0_GATE_DISABLE;\
+ type SYMCLK32_ROOT_LE1_GATE_DISABLE;\
+ type SYMCLK32_LE0_GATE_DISABLE;\
+ type SYMCLK32_LE1_GATE_DISABLE;\
+ type DPSTREAMCLK_ROOT_GATE_DISABLE;\
+ type DPSTREAMCLK_GATE_DISABLE;\
+ type HDMISTREAMCLK0_DTO_PHASE;\
+ type HDMISTREAMCLK0_DTO_MODULO;\
+ type HDMICHARCLK0_GATE_DISABLE;\
+ type HDMICHARCLK0_ROOT_GATE_DISABLE; \
+ type PHYASYMCLK_GATE_DISABLE; \
+ type PHYBSYMCLK_GATE_DISABLE; \
+ type PHYCSYMCLK_GATE_DISABLE; \
+ type PHYDSYMCLK_GATE_DISABLE; \
+ type PHYESYMCLK_GATE_DISABLE;
struct dccg_shift {
DCCG_REG_FIELD_LIST(uint8_t)
+ DCCG3_REG_FIELD_LIST(uint8_t)
+ DCCG31_REG_FIELD_LIST(uint8_t)
};
struct dccg_mask {
DCCG_REG_FIELD_LIST(uint32_t)
+ DCCG3_REG_FIELD_LIST(uint32_t)
+ DCCG31_REG_FIELD_LIST(uint32_t)
};
struct dccg_registers {
uint32_t DPPCLK_DTO_CTRL;
uint32_t DPPCLK_DTO_PARAM[6];
uint32_t REFCLK_CNTL;
+ uint32_t DISPCLK_FREQ_CHANGE_CNTL;
+ uint32_t OTG_PIXEL_RATE_CNTL[MAX_PIPES];
+ uint32_t HDMICHARCLK_CLOCK_CNTL[6];
+ uint32_t PHYASYMCLK_CLOCK_CNTL;
+ uint32_t PHYBSYMCLK_CLOCK_CNTL;
+ uint32_t PHYCSYMCLK_CLOCK_CNTL;
+ uint32_t PHYDSYMCLK_CLOCK_CNTL;
+ uint32_t PHYESYMCLK_CLOCK_CNTL;
+ uint32_t DTBCLK_DTO_MODULO[MAX_PIPES];
+ uint32_t DTBCLK_DTO_PHASE[MAX_PIPES];
+ uint32_t DCCG_AUDIO_DTBCLK_DTO_MODULO;
+ uint32_t DCCG_AUDIO_DTBCLK_DTO_PHASE;
+ uint32_t DCCG_AUDIO_DTO_SOURCE;
+ uint32_t DPSTREAMCLK_CNTL;
+ uint32_t HDMISTREAMCLK_CNTL;
+ uint32_t SYMCLK32_SE_CNTL;
+ uint32_t SYMCLK32_LE_CNTL;
+ uint32_t DENTIST_DISPCLK_CNTL;
+ uint32_t DSCCLK_DTO_CTRL;
+ uint32_t DSCCLK0_DTO_PARAM;
+ uint32_t DSCCLK1_DTO_PARAM;
+ uint32_t DSCCLK2_DTO_PARAM;
+ uint32_t DPSTREAMCLK_ROOT_GATE_DISABLE;
+ uint32_t DPSTREAMCLK_GATE_DISABLE;
+ uint32_t DCCG_GATE_DISABLE_CNTL2;
+ uint32_t DCCG_GATE_DISABLE_CNTL3;
+ uint32_t HDMISTREAMCLK0_DTO_PARAM;
+ uint32_t DCCG_GATE_DISABLE_CNTL4;
+
};
struct dcn_dccg {
@@ -103,6 +263,14 @@ void dccg2_get_dccg_ref_freq(struct dccg *dccg,
unsigned int xtalin_freq_inKhz,
unsigned int *dccg_ref_freq_inKhz);
+void dccg2_set_fifo_errdet_ovr_en(struct dccg *dccg,
+ bool en);
+void dccg2_otg_add_pixel(struct dccg *dccg,
+ uint32_t otg_inst);
+void dccg2_otg_drop_pixel(struct dccg *dccg,
+ uint32_t otg_inst);
+
+
void dccg2_init(struct dccg *dccg);
struct dccg *dccg2_create(
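
The new DCCG_SFII() initializer above builds per-instance shift/mask entries purely by token pasting. A sketch of one expansion, assuming the usual __SHIFT/_MASK post-fix convention used by the DCCG_MASK_SH_LIST_* callers; the expanded identifier comes from the generated register headers and is shown for illustration only.

/*
 * Illustrative expansion only (not from the patch):
 *
 *   DCCG_SFII(OTG, PIXEL_RATE_CNTL, OTG, ADD_PIXEL, 0, __SHIFT)
 *
 * expands to
 *
 *   .OTG_ADD_PIXEL[0] = OTG0_PIXEL_RATE_CNTL__OTG0_ADD_PIXEL__SHIFT
 *
 * i.e. the instance number is pasted into both the register name
 * (OTG0_PIXEL_RATE_CNTL) and the field name (OTG0_ADD_PIXEL), while the
 * array index keeps the entries addressable as OTG_ADD_PIXEL[inst].
 */
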
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dpp.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dpp.c
index 9bc5dd23d297..970b65efeac1 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dpp.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dpp.c
@@ -72,6 +72,21 @@ void dpp20_read_state(struct dpp *dpp_base,
}
}
+void dpp2_power_on_obuf(
+ struct dpp *dpp_base,
+ bool power_on)
+{
+ struct dcn20_dpp *dpp = TO_DCN20_DPP(dpp_base);
+
+ REG_UPDATE(CM_MEM_PWR_CTRL, SHARED_MEM_PWR_DIS, power_on == true ? 1:0);
+
+ REG_UPDATE(OBUF_MEM_PWR_CTRL,
+ OBUF_MEM_PWR_FORCE, power_on == true ? 0:1);
+
+ REG_UPDATE(DSCL_MEM_PWR_CTRL,
+ LUT_MEM_PWR_FORCE, power_on == true ? 0:1);
+}
+
void dpp2_dummy_program_input_lut(
struct dpp *dpp_base,
const struct dc_gamma *gamma)
@@ -89,7 +104,7 @@ static void dpp2_cnv_setup (
uint32_t pixel_format = 0;
uint32_t alpha_en = 1;
enum dc_color_space color_space = COLOR_SPACE_SRGB;
- enum dcn10_input_csc_select select = INPUT_CSC_SELECT_BYPASS;
+ enum dcn20_input_csc_select select = DCN2_ICSC_SELECT_BYPASS;
bool force_disable_cursor = false;
struct out_csc_color_matrix tbl_entry;
uint32_t is_2bit = 0;
@@ -130,28 +145,29 @@ static void dpp2_cnv_setup (
force_disable_cursor = false;
pixel_format = 65;
color_space = COLOR_SPACE_YCBCR709;
- select = INPUT_CSC_SELECT_ICSC;
+ select = DCN2_ICSC_SELECT_ICSC_A;
break;
case SURFACE_PIXEL_FORMAT_VIDEO_420_YCrCb:
force_disable_cursor = true;
pixel_format = 64;
color_space = COLOR_SPACE_YCBCR709;
- select = INPUT_CSC_SELECT_ICSC;
+ select = DCN2_ICSC_SELECT_ICSC_A;
break;
case SURFACE_PIXEL_FORMAT_VIDEO_420_10bpc_YCbCr:
force_disable_cursor = true;
pixel_format = 67;
color_space = COLOR_SPACE_YCBCR709;
- select = INPUT_CSC_SELECT_ICSC;
+ select = DCN2_ICSC_SELECT_ICSC_A;
break;
case SURFACE_PIXEL_FORMAT_VIDEO_420_10bpc_YCrCb:
force_disable_cursor = true;
pixel_format = 66;
color_space = COLOR_SPACE_YCBCR709;
- select = INPUT_CSC_SELECT_ICSC;
+ select = DCN2_ICSC_SELECT_ICSC_A;
break;
case SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616:
- pixel_format = 22;
+ case SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616:
+ pixel_format = 26; /* ARGB16161616_UNORM */
break;
case SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616F:
pixel_format = 24;
@@ -162,31 +178,35 @@ static void dpp2_cnv_setup (
case SURFACE_PIXEL_FORMAT_VIDEO_AYCrCb8888:
pixel_format = 12;
color_space = COLOR_SPACE_YCBCR709;
- select = INPUT_CSC_SELECT_ICSC;
+ select = DCN2_ICSC_SELECT_ICSC_A;
break;
case SURFACE_PIXEL_FORMAT_GRPH_RGB111110_FIX:
pixel_format = 112;
+ alpha_en = 0;
break;
case SURFACE_PIXEL_FORMAT_GRPH_BGR101111_FIX:
pixel_format = 113;
+ alpha_en = 0;
break;
case SURFACE_PIXEL_FORMAT_VIDEO_ACrYCb2101010:
pixel_format = 114;
color_space = COLOR_SPACE_YCBCR709;
- select = INPUT_CSC_SELECT_ICSC;
+ select = DCN2_ICSC_SELECT_ICSC_A;
is_2bit = 1;
break;
case SURFACE_PIXEL_FORMAT_VIDEO_CrYCbA1010102:
pixel_format = 115;
color_space = COLOR_SPACE_YCBCR709;
- select = INPUT_CSC_SELECT_ICSC;
+ select = DCN2_ICSC_SELECT_ICSC_A;
is_2bit = 1;
break;
case SURFACE_PIXEL_FORMAT_GRPH_RGB111110_FLOAT:
pixel_format = 118;
+ alpha_en = 0;
break;
case SURFACE_PIXEL_FORMAT_GRPH_BGR101111_FLOAT:
pixel_format = 119;
+ alpha_en = 0;
break;
default:
break;
@@ -212,13 +232,13 @@ static void dpp2_cnv_setup (
tbl_entry.color_space = input_color_space;
if (color_space >= COLOR_SPACE_YCBCR601)
- select = INPUT_CSC_SELECT_ICSC;
+ select = DCN2_ICSC_SELECT_ICSC_A;
else
- select = INPUT_CSC_SELECT_BYPASS;
+ select = DCN2_ICSC_SELECT_BYPASS;
- dpp1_program_input_csc(dpp_base, color_space, select, &tbl_entry);
+ dpp2_program_input_csc(dpp_base, color_space, select, &tbl_entry);
} else
- dpp1_program_input_csc(dpp_base, color_space, select, NULL);
+ dpp2_program_input_csc(dpp_base, color_space, select, NULL);
if (force_disable_cursor) {
REG_UPDATE(CURSOR_CONTROL,
@@ -227,23 +247,10 @@ static void dpp2_cnv_setup (
CUR0_ENABLE, 0);
}
+ dpp2_power_on_obuf(dpp_base, true);
}
-void dpp2_cnv_set_bias_scale(
- struct dpp *dpp_base,
- struct dc_bias_and_scale *bias_and_scale)
-{
- struct dcn20_dpp *dpp = TO_DCN20_DPP(dpp_base);
-
- REG_UPDATE(FCNV_FP_BIAS_R, FCNV_FP_BIAS_R, bias_and_scale->bias_red);
- REG_UPDATE(FCNV_FP_BIAS_G, FCNV_FP_BIAS_G, bias_and_scale->bias_green);
- REG_UPDATE(FCNV_FP_BIAS_B, FCNV_FP_BIAS_B, bias_and_scale->bias_blue);
- REG_UPDATE(FCNV_FP_SCALE_R, FCNV_FP_SCALE_R, bias_and_scale->scale_red);
- REG_UPDATE(FCNV_FP_SCALE_G, FCNV_FP_SCALE_G, bias_and_scale->scale_green);
- REG_UPDATE(FCNV_FP_SCALE_B, FCNV_FP_SCALE_B, bias_and_scale->scale_blue);
-}
-
/*compute the maximum number of lines that we can fit in the line buffer*/
void dscl2_calc_lb_num_partitions(
const struct scaler_data *scl_data,
@@ -326,14 +333,18 @@ void dpp2_cnv_set_alpha_keyer(
void dpp2_set_cursor_attributes(
struct dpp *dpp_base,
- enum dc_cursor_color_format color_format)
+ struct dc_cursor_attributes *cursor_attributes)
{
+ enum dc_cursor_color_format color_format = cursor_attributes->color_format;
struct dcn20_dpp *dpp = TO_DCN20_DPP(dpp_base);
int cur_rom_en = 0;
if (color_format == CURSOR_MODE_COLOR_PRE_MULTIPLIED_ALPHA ||
- color_format == CURSOR_MODE_COLOR_UN_PRE_MULTIPLIED_ALPHA)
- cur_rom_en = 1;
+ color_format == CURSOR_MODE_COLOR_UN_PRE_MULTIPLIED_ALPHA) {
+ if (cursor_attributes->attribute_flags.bits.ENABLE_CURSOR_DEGAMMA) {
+ cur_rom_en = 1;
+ }
+ }
REG_UPDATE_3(CURSOR0_CONTROL,
CUR0_MODE, color_format,
@@ -349,91 +360,6 @@ void dpp2_set_cursor_attributes(
}
}
-#define IDENTITY_RATIO(ratio) (dc_fixpt_u3d19(ratio) == (1 << 19))
-
-bool dpp2_get_optimal_number_of_taps(
- struct dpp *dpp,
- struct scaler_data *scl_data,
- const struct scaling_taps *in_taps)
-{
- uint32_t pixel_width;
-
- if (scl_data->viewport.width > scl_data->recout.width)
- pixel_width = scl_data->recout.width;
- else
- pixel_width = scl_data->viewport.width;
-
- /* Some ASICs does not support FP16 scaling, so we reject modes require this*/
- if (scl_data->viewport.width != scl_data->h_active &&
- scl_data->viewport.height != scl_data->v_active &&
- dpp->caps->dscl_data_proc_format == DSCL_DATA_PRCESSING_FIXED_FORMAT &&
- scl_data->format == PIXEL_FORMAT_FP16)
- return false;
-
- if (scl_data->viewport.width > scl_data->h_active &&
- dpp->ctx->dc->debug.max_downscale_src_width != 0 &&
- scl_data->viewport.width > dpp->ctx->dc->debug.max_downscale_src_width)
- return false;
-
- /* TODO: add lb check */
-
- /* No support for programming ratio of 8, drop to 7.99999.. */
- if (scl_data->ratios.horz.value == (8ll << 32))
- scl_data->ratios.horz.value--;
- if (scl_data->ratios.vert.value == (8ll << 32))
- scl_data->ratios.vert.value--;
- if (scl_data->ratios.horz_c.value == (8ll << 32))
- scl_data->ratios.horz_c.value--;
- if (scl_data->ratios.vert_c.value == (8ll << 32))
- scl_data->ratios.vert_c.value--;
-
- /* Set default taps if none are provided */
- if (in_taps->h_taps == 0) {
- if (dc_fixpt_ceil(scl_data->ratios.horz) > 4)
- scl_data->taps.h_taps = 8;
- else
- scl_data->taps.h_taps = 4;
- } else
- scl_data->taps.h_taps = in_taps->h_taps;
- if (in_taps->v_taps == 0) {
- if (dc_fixpt_ceil(scl_data->ratios.vert) > 4)
- scl_data->taps.v_taps = 8;
- else
- scl_data->taps.v_taps = 4;
- } else
- scl_data->taps.v_taps = in_taps->v_taps;
- if (in_taps->v_taps_c == 0) {
- if (dc_fixpt_ceil(scl_data->ratios.vert_c) > 4)
- scl_data->taps.v_taps_c = 4;
- else
- scl_data->taps.v_taps_c = 2;
- } else
- scl_data->taps.v_taps_c = in_taps->v_taps_c;
- if (in_taps->h_taps_c == 0) {
- if (dc_fixpt_ceil(scl_data->ratios.horz_c) > 4)
- scl_data->taps.h_taps_c = 4;
- else
- scl_data->taps.h_taps_c = 2;
- } else if ((in_taps->h_taps_c % 2) != 0 && in_taps->h_taps_c != 1)
- /* Only 1 and even h_taps_c are supported by hw */
- scl_data->taps.h_taps_c = in_taps->h_taps_c - 1;
- else
- scl_data->taps.h_taps_c = in_taps->h_taps_c;
-
- if (!dpp->ctx->dc->debug.always_scale) {
- if (IDENTITY_RATIO(scl_data->ratios.horz))
- scl_data->taps.h_taps = 1;
- if (IDENTITY_RATIO(scl_data->ratios.vert))
- scl_data->taps.v_taps = 1;
- if (IDENTITY_RATIO(scl_data->ratios.horz_c))
- scl_data->taps.h_taps_c = 1;
- if (IDENTITY_RATIO(scl_data->ratios.vert_c))
- scl_data->taps.v_taps_c = 1;
- }
-
- return true;
-}
-
void oppn20_dummy_program_regamma_pwl(
struct dpp *dpp,
const struct pwl_params *params,
@@ -444,8 +370,8 @@ static struct dpp_funcs dcn20_dpp_funcs = {
.dpp_read_state = dpp20_read_state,
.dpp_reset = dpp_reset,
.dpp_set_scaler = dpp1_dscl_set_scaler_manual_scale,
- .dpp_get_optimal_number_of_taps = dpp2_get_optimal_number_of_taps,
- .dpp_set_gamut_remap = dpp1_cm_set_gamut_remap,
+ .dpp_get_optimal_number_of_taps = dpp1_get_optimal_number_of_taps,
+ .dpp_set_gamut_remap = dpp2_cm_set_gamut_remap,
.dpp_set_csc_adjustment = NULL,
.dpp_set_csc_default = NULL,
.dpp_program_regamma_pwl = oppn20_dummy_program_regamma_pwl,
@@ -492,7 +418,8 @@ bool dpp2_construct(
dpp->lb_pixel_depth_supported =
LB_PIXEL_DEPTH_18BPP |
LB_PIXEL_DEPTH_24BPP |
- LB_PIXEL_DEPTH_30BPP;
+ LB_PIXEL_DEPTH_30BPP |
+ LB_PIXEL_DEPTH_36BPP;
dpp->lb_bits_per_entry = LB_BITS_PER_ENTRY;
dpp->lb_memory_size = LB_TOTAL_NUMBER_OF_ENTRIES; /*0x1404*/
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dpp.h b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dpp.h
index 59b67ed57c19..e735363d0051 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dpp.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dpp.h
@@ -30,16 +30,20 @@
#define TO_DCN20_DPP(dpp)\
container_of(dpp, struct dcn20_dpp, base)
-#define TF_REG_LIST_DCN20(id) \
- TF_REG_LIST_DCN(id), \
+#define TF_REG_LIST_DCN20_COMMON_UPDATED(id) \
SRI(CM_BLNDGAM_LUT_WRITE_EN_MASK, CM, id), \
+ SRI(CM_BLNDGAM_RAMB_SLOPE_CNTL_B, CM, id), \
+ SRI(CM_BLNDGAM_RAMB_SLOPE_CNTL_G, CM, id), \
+ SRI(CM_BLNDGAM_RAMB_SLOPE_CNTL_R, CM, id), \
+ SRI(CM_BLNDGAM_RAMA_SLOPE_CNTL_B, CM, id), \
+ SRI(CM_BLNDGAM_RAMA_SLOPE_CNTL_G, CM, id), \
+ SRI(CM_BLNDGAM_RAMA_SLOPE_CNTL_R, CM, id)
+
+#define TF_REG_LIST_DCN20_COMMON(id) \
SRI(CM_BLNDGAM_CONTROL, CM, id), \
SRI(CM_BLNDGAM_RAMB_START_CNTL_B, CM, id), \
SRI(CM_BLNDGAM_RAMB_START_CNTL_G, CM, id), \
SRI(CM_BLNDGAM_RAMB_START_CNTL_R, CM, id), \
- SRI(CM_BLNDGAM_RAMB_SLOPE_CNTL_B, CM, id), \
- SRI(CM_BLNDGAM_RAMB_SLOPE_CNTL_G, CM, id), \
- SRI(CM_BLNDGAM_RAMB_SLOPE_CNTL_R, CM, id), \
SRI(CM_BLNDGAM_RAMB_END_CNTL1_B, CM, id), \
SRI(CM_BLNDGAM_RAMB_END_CNTL2_B, CM, id), \
SRI(CM_BLNDGAM_RAMB_END_CNTL1_G, CM, id), \
@@ -66,9 +70,6 @@
SRI(CM_BLNDGAM_RAMA_START_CNTL_B, CM, id), \
SRI(CM_BLNDGAM_RAMA_START_CNTL_G, CM, id), \
SRI(CM_BLNDGAM_RAMA_START_CNTL_R, CM, id), \
- SRI(CM_BLNDGAM_RAMA_SLOPE_CNTL_B, CM, id), \
- SRI(CM_BLNDGAM_RAMA_SLOPE_CNTL_G, CM, id), \
- SRI(CM_BLNDGAM_RAMA_SLOPE_CNTL_R, CM, id), \
SRI(CM_BLNDGAM_RAMA_END_CNTL1_B, CM, id), \
SRI(CM_BLNDGAM_RAMA_END_CNTL2_B, CM, id), \
SRI(CM_BLNDGAM_RAMA_END_CNTL1_G, CM, id), \
@@ -147,7 +148,22 @@
SRI(CM_SHAPER_RAMA_REGION_28_29, CM, id), \
SRI(CM_SHAPER_RAMA_REGION_30_31, CM, id), \
SRI(CM_SHAPER_RAMA_REGION_32_33, CM, id), \
- SRI(CM_SHAPER_LUT_INDEX, CM, id), \
+ SRI(CM_SHAPER_LUT_INDEX, CM, id)
+
+#define TF_REG_LIST_DCN20_COMMON_APPEND(id) \
+ SRI(CM_GAMUT_REMAP_B_C11_C12, CM, id),\
+ SRI(CM_GAMUT_REMAP_B_C13_C14, CM, id),\
+ SRI(CM_GAMUT_REMAP_B_C21_C22, CM, id),\
+ SRI(CM_GAMUT_REMAP_B_C23_C24, CM, id),\
+ SRI(CM_GAMUT_REMAP_B_C31_C32, CM, id),\
+ SRI(CM_GAMUT_REMAP_B_C33_C34, CM, id),\
+ SRI(CM_ICSC_B_C11_C12, CM, id), \
+ SRI(CM_ICSC_B_C33_C34, CM, id)
+
+#define TF_REG_LIST_DCN20(id) \
+ TF_REG_LIST_DCN(id), \
+ TF_REG_LIST_DCN20_COMMON(id), \
+ TF_REG_LIST_DCN20_COMMON_UPDATED(id), \
SRI(CURSOR_CONTROL, CURSOR0_, id), \
SRI(ALPHA_2BIT_LUT, CNVC_CFG, id), \
SRI(FCNV_FP_BIAS_R, CNVC_CFG, id), \
@@ -162,29 +178,45 @@
SRI(COLOR_KEYER_GREEN, CNVC_CFG, id), \
SRI(COLOR_KEYER_BLUE, CNVC_CFG, id), \
SRI(CM_SHAPER_LUT_DATA, CM, id), \
- SRI(CURSOR_CONTROL, CURSOR0_, id)
+ SRI(CURSOR_CONTROL, CURSOR0_, id),\
+ SRI(OBUF_MEM_PWR_CTRL, DSCL, id),\
+ SRI(DSCL_MEM_PWR_CTRL, DSCL, id)
-#define TF_REG_LIST_SH_MASK_DCN20(mask_sh)\
- TF_REG_LIST_SH_MASK_DCN(mask_sh), \
+
+#define TF_REG_LIST_SH_MASK_DCN20_UPDATED(mask_sh)\
+ TF_SF(CM0_CM_BLNDGAM_RAMB_SLOPE_CNTL_B, CM_BLNDGAM_RAMB_EXP_REGION_LINEAR_SLOPE_B, mask_sh), \
+ TF_SF(CM0_CM_BLNDGAM_RAMB_SLOPE_CNTL_G, CM_BLNDGAM_RAMB_EXP_REGION_LINEAR_SLOPE_G, mask_sh), \
+ TF_SF(CM0_CM_BLNDGAM_RAMB_SLOPE_CNTL_R, CM_BLNDGAM_RAMB_EXP_REGION_LINEAR_SLOPE_R, mask_sh), \
+ TF_SF(CM0_CM_BLNDGAM_RAMB_END_CNTL1_B, CM_BLNDGAM_RAMB_EXP_REGION_END_B, mask_sh), \
+ TF_SF(CM0_CM_BLNDGAM_RAMB_END_CNTL1_G, CM_BLNDGAM_RAMB_EXP_REGION_END_G, mask_sh), \
+ TF_SF(CM0_CM_BLNDGAM_RAMB_END_CNTL1_R, CM_BLNDGAM_RAMB_EXP_REGION_END_R, mask_sh), \
+ TF_SF(CM0_CM_BLNDGAM_RAMA_SLOPE_CNTL_B, CM_BLNDGAM_RAMA_EXP_REGION_LINEAR_SLOPE_B, mask_sh), \
+ TF_SF(CM0_CM_BLNDGAM_RAMA_SLOPE_CNTL_G, CM_BLNDGAM_RAMA_EXP_REGION_LINEAR_SLOPE_G, mask_sh), \
+ TF_SF(CM0_CM_BLNDGAM_RAMA_SLOPE_CNTL_R, CM_BLNDGAM_RAMA_EXP_REGION_LINEAR_SLOPE_R, mask_sh), \
+ TF_SF(CM0_CM_BLNDGAM_RAMA_END_CNTL1_B, CM_BLNDGAM_RAMA_EXP_REGION_END_B, mask_sh), \
+ TF_SF(CM0_CM_BLNDGAM_RAMA_END_CNTL1_G, CM_BLNDGAM_RAMA_EXP_REGION_END_G, mask_sh), \
+ TF_SF(CM0_CM_BLNDGAM_RAMA_END_CNTL1_R, CM_BLNDGAM_RAMA_EXP_REGION_END_R, mask_sh), \
+ TF_SF(CM0_CM_BLNDGAM_RAMA_END_CNTL2_B, CM_BLNDGAM_RAMA_EXP_REGION_END_BASE_B, mask_sh), \
+ TF_SF(CM0_CM_BLNDGAM_RAMA_END_CNTL2_G, CM_BLNDGAM_RAMA_EXP_REGION_END_BASE_G, mask_sh), \
+ TF_SF(CM0_CM_BLNDGAM_RAMA_END_CNTL2_R, CM_BLNDGAM_RAMA_EXP_REGION_END_BASE_R, mask_sh), \
TF_SF(CM0_CM_BLNDGAM_CONTROL, CM_BLNDGAM_LUT_MODE, mask_sh), \
+ TF_SF(CM0_CM_BLNDGAM_LUT_WRITE_EN_MASK, CM_BLNDGAM_LUT_WRITE_EN_MASK, mask_sh), \
+ TF_SF(CM0_CM_BLNDGAM_LUT_WRITE_EN_MASK, CM_BLNDGAM_LUT_WRITE_SEL, mask_sh), \
+ TF_SF(CM0_CM_BLNDGAM_LUT_WRITE_EN_MASK, CM_BLNDGAM_CONFIG_STATUS, mask_sh), \
+ TF_SF(CM0_CM_SHAPER_CONTROL, CM_SHAPER_LUT_MODE, mask_sh)
+
+
+#define TF_REG_LIST_SH_MASK_DCN20_COMMON(mask_sh)\
+ TF_SF(CM0_CM_3DLUT_MODE, CM_3DLUT_MODE, mask_sh), \
TF_SF(CM0_CM_BLNDGAM_RAMB_START_CNTL_B, CM_BLNDGAM_RAMB_EXP_REGION_START_B, mask_sh), \
TF_SF(CM0_CM_BLNDGAM_RAMB_START_CNTL_B, CM_BLNDGAM_RAMB_EXP_REGION_START_SEGMENT_B, mask_sh), \
TF_SF(CM0_CM_BLNDGAM_RAMB_START_CNTL_G, CM_BLNDGAM_RAMB_EXP_REGION_START_G, mask_sh), \
TF_SF(CM0_CM_BLNDGAM_RAMB_START_CNTL_G, CM_BLNDGAM_RAMB_EXP_REGION_START_SEGMENT_G, mask_sh), \
TF_SF(CM0_CM_BLNDGAM_RAMB_START_CNTL_R, CM_BLNDGAM_RAMB_EXP_REGION_START_R, mask_sh), \
TF_SF(CM0_CM_BLNDGAM_RAMB_START_CNTL_R, CM_BLNDGAM_RAMB_EXP_REGION_START_SEGMENT_R, mask_sh), \
- TF_SF(CM0_CM_BLNDGAM_RAMB_SLOPE_CNTL_B, CM_BLNDGAM_RAMB_EXP_REGION_LINEAR_SLOPE_B, mask_sh), \
- TF_SF(CM0_CM_BLNDGAM_RAMB_SLOPE_CNTL_G, CM_BLNDGAM_RAMB_EXP_REGION_LINEAR_SLOPE_G, mask_sh), \
- TF_SF(CM0_CM_BLNDGAM_RAMB_SLOPE_CNTL_R, CM_BLNDGAM_RAMB_EXP_REGION_LINEAR_SLOPE_R, mask_sh), \
- TF_SF(CM0_CM_BLNDGAM_RAMB_END_CNTL1_B, CM_BLNDGAM_RAMB_EXP_REGION_END_B, mask_sh), \
TF_SF(CM0_CM_BLNDGAM_RAMB_END_CNTL2_B, CM_BLNDGAM_RAMB_EXP_REGION_END_SLOPE_B, mask_sh), \
- TF_SF(CM0_CM_BLNDGAM_RAMB_END_CNTL2_B, CM_BLNDGAM_RAMB_EXP_REGION_END_BASE_B, mask_sh), \
- TF_SF(CM0_CM_BLNDGAM_RAMB_END_CNTL1_G, CM_BLNDGAM_RAMB_EXP_REGION_END_G, mask_sh), \
TF_SF(CM0_CM_BLNDGAM_RAMB_END_CNTL2_G, CM_BLNDGAM_RAMB_EXP_REGION_END_SLOPE_G, mask_sh), \
- TF_SF(CM0_CM_BLNDGAM_RAMB_END_CNTL2_G, CM_BLNDGAM_RAMB_EXP_REGION_END_BASE_G, mask_sh), \
- TF_SF(CM0_CM_BLNDGAM_RAMB_END_CNTL1_R, CM_BLNDGAM_RAMB_EXP_REGION_END_R, mask_sh), \
TF_SF(CM0_CM_BLNDGAM_RAMB_END_CNTL2_R, CM_BLNDGAM_RAMB_EXP_REGION_END_SLOPE_R, mask_sh), \
- TF_SF(CM0_CM_BLNDGAM_RAMB_END_CNTL2_R, CM_BLNDGAM_RAMB_EXP_REGION_END_BASE_R, mask_sh), \
TF_SF(CM0_CM_BLNDGAM_RAMB_REGION_0_1, CM_BLNDGAM_RAMB_EXP_REGION0_LUT_OFFSET, mask_sh), \
TF_SF(CM0_CM_BLNDGAM_RAMB_REGION_0_1, CM_BLNDGAM_RAMB_EXP_REGION0_NUM_SEGMENTS, mask_sh), \
TF_SF(CM0_CM_BLNDGAM_RAMB_REGION_0_1, CM_BLNDGAM_RAMB_EXP_REGION1_LUT_OFFSET, mask_sh), \
@@ -259,18 +291,9 @@
TF_SF(CM0_CM_BLNDGAM_RAMA_START_CNTL_G, CM_BLNDGAM_RAMA_EXP_REGION_START_SEGMENT_G, mask_sh), \
TF_SF(CM0_CM_BLNDGAM_RAMA_START_CNTL_R, CM_BLNDGAM_RAMA_EXP_REGION_START_R, mask_sh), \
TF_SF(CM0_CM_BLNDGAM_RAMA_START_CNTL_R, CM_BLNDGAM_RAMA_EXP_REGION_START_SEGMENT_R, mask_sh), \
- TF_SF(CM0_CM_BLNDGAM_RAMA_SLOPE_CNTL_B, CM_BLNDGAM_RAMA_EXP_REGION_LINEAR_SLOPE_B, mask_sh), \
- TF_SF(CM0_CM_BLNDGAM_RAMA_SLOPE_CNTL_G, CM_BLNDGAM_RAMA_EXP_REGION_LINEAR_SLOPE_G, mask_sh), \
- TF_SF(CM0_CM_BLNDGAM_RAMA_SLOPE_CNTL_R, CM_BLNDGAM_RAMA_EXP_REGION_LINEAR_SLOPE_R, mask_sh), \
- TF_SF(CM0_CM_BLNDGAM_RAMA_END_CNTL1_B, CM_BLNDGAM_RAMA_EXP_REGION_END_B, mask_sh), \
TF_SF(CM0_CM_BLNDGAM_RAMA_END_CNTL2_B, CM_BLNDGAM_RAMA_EXP_REGION_END_SLOPE_B, mask_sh), \
- TF_SF(CM0_CM_BLNDGAM_RAMA_END_CNTL2_B, CM_BLNDGAM_RAMA_EXP_REGION_END_BASE_B, mask_sh), \
- TF_SF(CM0_CM_BLNDGAM_RAMA_END_CNTL1_G, CM_BLNDGAM_RAMA_EXP_REGION_END_G, mask_sh), \
TF_SF(CM0_CM_BLNDGAM_RAMA_END_CNTL2_G, CM_BLNDGAM_RAMA_EXP_REGION_END_SLOPE_G, mask_sh), \
- TF_SF(CM0_CM_BLNDGAM_RAMA_END_CNTL2_G, CM_BLNDGAM_RAMA_EXP_REGION_END_BASE_G, mask_sh), \
- TF_SF(CM0_CM_BLNDGAM_RAMA_END_CNTL1_R, CM_BLNDGAM_RAMA_EXP_REGION_END_R, mask_sh), \
TF_SF(CM0_CM_BLNDGAM_RAMA_END_CNTL2_R, CM_BLNDGAM_RAMA_EXP_REGION_END_SLOPE_R, mask_sh), \
- TF_SF(CM0_CM_BLNDGAM_RAMA_END_CNTL2_R, CM_BLNDGAM_RAMA_EXP_REGION_END_BASE_R, mask_sh), \
TF_SF(CM0_CM_BLNDGAM_RAMA_REGION_0_1, CM_BLNDGAM_RAMA_EXP_REGION0_LUT_OFFSET, mask_sh), \
TF_SF(CM0_CM_BLNDGAM_RAMA_REGION_0_1, CM_BLNDGAM_RAMA_EXP_REGION0_NUM_SEGMENTS, mask_sh), \
TF_SF(CM0_CM_BLNDGAM_RAMA_REGION_0_1, CM_BLNDGAM_RAMA_EXP_REGION1_LUT_OFFSET, mask_sh), \
@@ -339,9 +362,6 @@
TF_SF(CM0_CM_BLNDGAM_RAMA_REGION_32_33, CM_BLNDGAM_RAMA_EXP_REGION32_NUM_SEGMENTS, mask_sh), \
TF_SF(CM0_CM_BLNDGAM_RAMA_REGION_32_33, CM_BLNDGAM_RAMA_EXP_REGION33_LUT_OFFSET, mask_sh), \
TF_SF(CM0_CM_BLNDGAM_RAMA_REGION_32_33, CM_BLNDGAM_RAMA_EXP_REGION33_NUM_SEGMENTS, mask_sh), \
- TF_SF(CM0_CM_BLNDGAM_LUT_WRITE_EN_MASK, CM_BLNDGAM_LUT_WRITE_EN_MASK, mask_sh), \
- TF_SF(CM0_CM_BLNDGAM_LUT_WRITE_EN_MASK, CM_BLNDGAM_LUT_WRITE_SEL, mask_sh), \
- TF_SF(CM0_CM_BLNDGAM_LUT_WRITE_EN_MASK, CM_BLNDGAM_CONFIG_STATUS, mask_sh), \
TF_SF(CM0_CM_BLNDGAM_LUT_INDEX, CM_BLNDGAM_LUT_INDEX, mask_sh), \
TF_SF(CM0_CM_BLNDGAM_LUT_DATA, CM_BLNDGAM_LUT_DATA, mask_sh), \
TF_SF(CM0_CM_MEM_PWR_CTRL, BLNDGAM_MEM_PWR_FORCE, mask_sh), \
@@ -354,7 +374,6 @@
TF_SF(CM0_CM_3DLUT_READ_WRITE_CONTROL, CM_3DLUT_WRITE_EN_MASK, mask_sh), \
TF_SF(CM0_CM_3DLUT_READ_WRITE_CONTROL, CM_3DLUT_RAM_SEL, mask_sh), \
TF_SF(CM0_CM_3DLUT_READ_WRITE_CONTROL, CM_3DLUT_30BIT_EN, mask_sh), \
- TF_SF(CM0_CM_3DLUT_READ_WRITE_CONTROL, CM_3DLUT_CONFIG_STATUS, mask_sh), \
TF_SF(CM0_CM_3DLUT_READ_WRITE_CONTROL, CM_3DLUT_READ_SEL, mask_sh), \
TF_SF(CM0_CM_SHAPER_CONTROL, CM_SHAPER_LUT_MODE, mask_sh), \
TF_SF(CM0_CM_SHAPER_RAMB_START_CNTL_B, CM_SHAPER_RAMB_EXP_REGION_START_B, mask_sh), \
@@ -519,9 +538,14 @@
TF_SF(CM0_CM_SHAPER_RAMA_REGION_32_33, CM_SHAPER_RAMA_EXP_REGION33_NUM_SEGMENTS, mask_sh), \
TF_SF(CM0_CM_SHAPER_LUT_WRITE_EN_MASK, CM_SHAPER_LUT_WRITE_EN_MASK, mask_sh), \
TF_SF(CM0_CM_SHAPER_LUT_WRITE_EN_MASK, CM_SHAPER_LUT_WRITE_SEL, mask_sh), \
- TF_SF(CM0_CM_SHAPER_LUT_WRITE_EN_MASK, CM_SHAPER_CONFIG_STATUS, mask_sh), \
TF_SF(CM0_CM_SHAPER_LUT_INDEX, CM_SHAPER_LUT_INDEX, mask_sh), \
- TF_SF(CM0_CM_SHAPER_LUT_DATA, CM_SHAPER_LUT_DATA, mask_sh), \
+ TF_SF(CM0_CM_SHAPER_LUT_DATA, CM_SHAPER_LUT_DATA, mask_sh)
+
+
+#define TF_REG_LIST_SH_MASK_DCN20(mask_sh)\
+ TF_REG_LIST_SH_MASK_DCN(mask_sh), \
+ TF_REG_LIST_SH_MASK_DCN20_COMMON(mask_sh), \
+ TF_REG_LIST_SH_MASK_DCN20_UPDATED(mask_sh), \
TF_SF(CM0_CM_DGAM_LUT_WRITE_EN_MASK, CM_DGAM_CONFIG_STATUS, mask_sh), \
TF_SF(CM0_CM_CONTROL, CM_BYPASS, mask_sh), \
TF_SF(CURSOR0_0_CURSOR_CONTROL, CURSOR_MODE, mask_sh), \
@@ -554,11 +578,33 @@
TF_SF(CNVC_CFG0_COLOR_KEYER_BLUE, COLOR_KEYER_BLUE_HIGH, mask_sh), \
TF_SF(CNVC_CUR0_CURSOR0_CONTROL, CUR0_PIX_INV_MODE, mask_sh), \
TF_SF(CNVC_CUR0_CURSOR0_CONTROL, CUR0_PIXEL_ALPHA_MOD_EN, mask_sh), \
- TF_SF(CNVC_CUR0_CURSOR0_CONTROL, CUR0_ROM_EN, mask_sh)
+ TF_SF(CNVC_CUR0_CURSOR0_CONTROL, CUR0_ROM_EN, mask_sh),\
+ TF_SF(DSCL0_OBUF_MEM_PWR_CTRL, OBUF_MEM_PWR_FORCE, mask_sh),\
+ TF_SF(DSCL0_DSCL_MEM_PWR_CTRL, LUT_MEM_PWR_FORCE, mask_sh)
+
+/* DPP CM debug status register:
+ *
+ * Status index including current ICSC, Gamut Remap Mode is 9
+ * ICSC Mode: [4..3]
+ * Gamut Remap Mode: [10..9]
+ */
+#define CM_TEST_DEBUG_DATA_STATUS_IDX 9
+
+#define TF_DEBUG_REG_LIST_SH_DCN20 \
+ TF_DEBUG_REG_LIST_SH_DCN10, \
+ .CM_TEST_DEBUG_DATA_ICSC_MODE = 3, \
+ .CM_TEST_DEBUG_DATA_GAMUT_REMAP_MODE = 9
+
+#define TF_DEBUG_REG_LIST_MASK_DCN20 \
+ TF_DEBUG_REG_LIST_MASK_DCN10, \
+ .CM_TEST_DEBUG_DATA_ICSC_MODE = 0x18, \
+ .CM_TEST_DEBUG_DATA_GAMUT_REMAP_MODE = 0x600
#define TF_REG_FIELD_LIST_DCN2_0(type) \
TF_REG_FIELD_LIST(type) \
type CM_BLNDGAM_LUT_DATA; \
+ type CM_TEST_DEBUG_DATA_ICSC_MODE; \
+ type CM_TEST_DEBUG_DATA_GAMUT_REMAP_MODE; \
type FORMAT_CNV16; \
type CNVC_BYPASS_MSB_ALIGN; \
type CLAMP_POSITIVE; \
@@ -585,7 +631,9 @@
type COLOR_KEYER_BLUE_HIGH; \
type CUR0_PIX_INV_MODE; \
type CUR0_PIXEL_ALPHA_MOD_EN; \
- type CUR0_ROM_EN
+ type CUR0_ROM_EN;\
+ type OBUF_MEM_PWR_FORCE
+
struct dcn2_dpp_shift {
TF_REG_FIELD_LIST_DCN2_0(uint8_t);
@@ -609,10 +657,22 @@ struct dcn2_dpp_mask {
uint32_t COLOR_KEYER_ALPHA; \
uint32_t COLOR_KEYER_RED; \
uint32_t COLOR_KEYER_GREEN; \
- uint32_t COLOR_KEYER_BLUE
+ uint32_t COLOR_KEYER_BLUE; \
+ uint32_t OBUF_MEM_PWR_CTRL
+
+#define DPP_DCN2_REG_VARIABLE_LIST_CM_APPEND \
+ uint32_t CM_GAMUT_REMAP_B_C11_C12; \
+ uint32_t CM_GAMUT_REMAP_B_C13_C14; \
+ uint32_t CM_GAMUT_REMAP_B_C21_C22; \
+ uint32_t CM_GAMUT_REMAP_B_C23_C24; \
+ uint32_t CM_GAMUT_REMAP_B_C31_C32; \
+ uint32_t CM_GAMUT_REMAP_B_C33_C34; \
+ uint32_t CM_ICSC_B_C11_C12; \
+ uint32_t CM_ICSC_B_C33_C34
struct dcn2_dpp_registers {
DPP_DCN2_REG_VARIABLE_LIST;
+ DPP_DCN2_REG_VARIABLE_LIST_CM_APPEND;
};
struct dcn20_dpp {
@@ -634,6 +694,18 @@ struct dcn20_dpp {
struct pwl_params pwl_data;
};
+enum dcn20_input_csc_select {
+ DCN2_ICSC_SELECT_BYPASS = 0,
+ DCN2_ICSC_SELECT_ICSC_A = 1,
+ DCN2_ICSC_SELECT_ICSC_B = 2
+};
+
+enum dcn20_gamut_remap_select {
+ DCN2_GAMUT_REMAP_BYPASS = 0,
+ DCN2_GAMUT_REMAP_COEF_A = 1,
+ DCN2_GAMUT_REMAP_COEF_B = 2
+};
+
void dpp20_read_state(struct dpp *dpp_base,
struct dcn_dpp_state *s);
@@ -645,6 +717,16 @@ void dpp2_set_degamma(
struct dpp *dpp_base,
enum ipp_degamma_mode mode);
+void dpp2_cm_set_gamut_remap(
+ struct dpp *dpp_base,
+ const struct dpp_grph_csc_adjustment *adjust);
+
+void dpp2_program_input_csc(
+ struct dpp *dpp_base,
+ enum dc_color_space color_space,
+ enum dcn20_input_csc_select input_select,
+ const struct out_csc_color_matrix *tbl_entry);
+
bool dpp20_program_blnd_lut(
struct dpp *dpp_base, const struct pwl_params *params);
@@ -668,7 +750,7 @@ void dscl2_calc_lb_num_partitions(
void dpp2_set_cursor_attributes(
struct dpp *dpp_base,
- enum dc_cursor_color_format color_format);
+ struct dc_cursor_attributes *cursor_attributes);
void dpp2_dummy_program_input_lut(
struct dpp *dpp_base,
@@ -683,11 +765,6 @@ void dpp2_set_hdr_multiplier(
struct dpp *dpp_base,
uint32_t multiplier);
-bool dpp2_get_optimal_number_of_taps(
- struct dpp *dpp,
- struct scaler_data *scl_data,
- const struct scaling_taps *in_taps);
-
bool dpp2_construct(struct dcn20_dpp *dpp2,
struct dc_context *ctx,
uint32_t inst,
@@ -695,4 +772,7 @@ bool dpp2_construct(struct dcn20_dpp *dpp2,
const struct dcn2_dpp_shift *tf_shift,
const struct dcn2_dpp_mask *tf_mask);
+void dpp2_power_on_obuf(
+ struct dpp *dpp_base,
+ bool power_on);
#endif /* __DC_HWSS_DCN20_H__ */
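
The new CM_TEST_DEBUG_DATA_* shift/mask values defined above (shift 3 / mask 0x18 for the ICSC mode, shift 9 / mask 0x600 for the gamut-remap mode, both read at status index 9) decode the CM debug status word. A minimal sketch of that decode with a hypothetical readback value; the helper and the value are illustrative, not part of the patch.

#include <stdio.h>

/*
 * Plain shift/mask field read: the operation IX_REG_GET() performs on the
 * indexed CM_TEST_DEBUG_DATA word after selecting status index 9.
 */
static unsigned int get_field(unsigned int reg, unsigned int mask,
			      unsigned int shift)
{
	return (reg & mask) >> shift;
}

int main(void)
{
	unsigned int debug_data = 0x208;	/* hypothetical readback value */

	/* ICSC mode: bits [4:3]; gamut remap mode: bits [10:9]. */
	printf("icsc mode  = %u\n", get_field(debug_data, 0x18, 3));	/* 1 */
	printf("remap mode = %u\n", get_field(debug_data, 0x600, 9));	/* 1 */
	return 0;
}

The decoded values are what dpp2_program_input_csc() and program_gamut_remap() in dcn20_dpp_cm.c consult to decide which coefficient bank to program next.
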
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dpp_cm.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dpp_cm.c
index e28b8e7bedf5..2feb051a2002 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dpp_cm.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dpp_cm.c
@@ -36,6 +36,9 @@
#define REG(reg)\
dpp->tf_regs->reg
+#define IND_REG(index) \
+ (index)
+
#define CTX \
dpp->base.ctx
@@ -44,15 +47,17 @@
dpp->tf_shift->field_name, dpp->tf_mask->field_name
-
-
-
static void dpp2_enable_cm_block(
struct dpp *dpp_base)
{
struct dcn20_dpp *dpp = TO_DCN20_DPP(dpp_base);
- REG_UPDATE(CM_CONTROL, CM_BYPASS, 0);
+ unsigned int cm_bypass_mode = 0;
+ //Temp, put CM in bypass mode
+ if (dpp_base->ctx->dc->debug.cm_in_bypass)
+ cm_bypass_mode = 1;
+
+ REG_UPDATE(CM_CONTROL, CM_BYPASS, cm_bypass_mode);
}
@@ -144,12 +149,164 @@ void dpp2_set_degamma(
case IPP_DEGAMMA_MODE_HW_xvYCC:
REG_UPDATE(CM_DGAM_CONTROL, CM_DGAM_LUT_MODE, 2);
break;
+ case IPP_DEGAMMA_MODE_USER_PWL:
+ REG_UPDATE(CM_DGAM_CONTROL, CM_DGAM_LUT_MODE, 3);
+ break;
default:
BREAK_TO_DEBUGGER();
break;
}
}
+static void program_gamut_remap(
+ struct dcn20_dpp *dpp,
+ const uint16_t *regval,
+ enum dcn20_gamut_remap_select select)
+{
+ uint32_t cur_select = 0;
+ struct color_matrices_reg gam_regs;
+
+ if (regval == NULL || select == DCN2_GAMUT_REMAP_BYPASS) {
+ REG_SET(CM_GAMUT_REMAP_CONTROL, 0,
+ CM_GAMUT_REMAP_MODE, 0);
+ return;
+ }
+
+ /* determine which gamut_remap coefficients (A or B) we are using
+ * currently. select the alternate set to double buffer
+ * the update so gamut_remap is updated on frame boundary
+ */
+ IX_REG_GET(CM_TEST_DEBUG_INDEX, CM_TEST_DEBUG_DATA,
+ CM_TEST_DEBUG_DATA_STATUS_IDX,
+ CM_TEST_DEBUG_DATA_GAMUT_REMAP_MODE, &cur_select);
+
+ /* value stored in dbg reg will be 1 greater than mode we want */
+ if (cur_select != DCN2_GAMUT_REMAP_COEF_A)
+ select = DCN2_GAMUT_REMAP_COEF_A;
+ else
+ select = DCN2_GAMUT_REMAP_COEF_B;
+
+ gam_regs.shifts.csc_c11 = dpp->tf_shift->CM_GAMUT_REMAP_C11;
+ gam_regs.masks.csc_c11 = dpp->tf_mask->CM_GAMUT_REMAP_C11;
+ gam_regs.shifts.csc_c12 = dpp->tf_shift->CM_GAMUT_REMAP_C12;
+ gam_regs.masks.csc_c12 = dpp->tf_mask->CM_GAMUT_REMAP_C12;
+
+ if (select == DCN2_GAMUT_REMAP_COEF_A) {
+ gam_regs.csc_c11_c12 = REG(CM_GAMUT_REMAP_C11_C12);
+ gam_regs.csc_c33_c34 = REG(CM_GAMUT_REMAP_C33_C34);
+ } else {
+ gam_regs.csc_c11_c12 = REG(CM_GAMUT_REMAP_B_C11_C12);
+ gam_regs.csc_c33_c34 = REG(CM_GAMUT_REMAP_B_C33_C34);
+ }
+
+ cm_helper_program_color_matrices(
+ dpp->base.ctx,
+ regval,
+ &gam_regs);
+
+ REG_SET(
+ CM_GAMUT_REMAP_CONTROL, 0,
+ CM_GAMUT_REMAP_MODE, select);
+
+}
+
+void dpp2_cm_set_gamut_remap(
+ struct dpp *dpp_base,
+ const struct dpp_grph_csc_adjustment *adjust)
+{
+ struct dcn20_dpp *dpp = TO_DCN20_DPP(dpp_base);
+ int i = 0;
+
+ if (adjust->gamut_adjust_type != GRAPHICS_GAMUT_ADJUST_TYPE_SW)
+ /* Bypass if type is bypass or hw */
+ program_gamut_remap(dpp, NULL, DCN2_GAMUT_REMAP_BYPASS);
+ else {
+ struct fixed31_32 arr_matrix[12];
+ uint16_t arr_reg_val[12];
+
+ for (i = 0; i < 12; i++)
+ arr_matrix[i] = adjust->temperature_matrix[i];
+
+ convert_float_matrix(
+ arr_reg_val, arr_matrix, 12);
+
+ program_gamut_remap(dpp, arr_reg_val, DCN2_GAMUT_REMAP_COEF_A);
+ }
+}
+
+void dpp2_program_input_csc(
+ struct dpp *dpp_base,
+ enum dc_color_space color_space,
+ enum dcn20_input_csc_select input_select,
+ const struct out_csc_color_matrix *tbl_entry)
+{
+ struct dcn20_dpp *dpp = TO_DCN20_DPP(dpp_base);
+ int i;
+ int arr_size = sizeof(dpp_input_csc_matrix)/sizeof(struct dpp_input_csc_matrix);
+ const uint16_t *regval = NULL;
+ uint32_t cur_select = 0;
+ enum dcn20_input_csc_select select;
+ struct color_matrices_reg icsc_regs;
+
+ if (input_select == DCN2_ICSC_SELECT_BYPASS) {
+ REG_SET(CM_ICSC_CONTROL, 0, CM_ICSC_MODE, 0);
+ return;
+ }
+
+ if (tbl_entry == NULL) {
+ for (i = 0; i < arr_size; i++)
+ if (dpp_input_csc_matrix[i].color_space == color_space) {
+ regval = dpp_input_csc_matrix[i].regval;
+ break;
+ }
+
+ if (regval == NULL) {
+ BREAK_TO_DEBUGGER();
+ return;
+ }
+ } else {
+ regval = tbl_entry->regval;
+ }
+
+ /* determine which CSC coefficients (A or B) we are using
+ * currently. select the alternate set to double buffer
+ * the CSC update so CSC is updated on frame boundary
+ */
+ IX_REG_GET(CM_TEST_DEBUG_INDEX, CM_TEST_DEBUG_DATA,
+ CM_TEST_DEBUG_DATA_STATUS_IDX,
+ CM_TEST_DEBUG_DATA_ICSC_MODE, &cur_select);
+
+ if (cur_select != DCN2_ICSC_SELECT_ICSC_A)
+ select = DCN2_ICSC_SELECT_ICSC_A;
+ else
+ select = DCN2_ICSC_SELECT_ICSC_B;
+
+ icsc_regs.shifts.csc_c11 = dpp->tf_shift->CM_ICSC_C11;
+ icsc_regs.masks.csc_c11 = dpp->tf_mask->CM_ICSC_C11;
+ icsc_regs.shifts.csc_c12 = dpp->tf_shift->CM_ICSC_C12;
+ icsc_regs.masks.csc_c12 = dpp->tf_mask->CM_ICSC_C12;
+
+ if (select == DCN2_ICSC_SELECT_ICSC_A) {
+
+ icsc_regs.csc_c11_c12 = REG(CM_ICSC_C11_C12);
+ icsc_regs.csc_c33_c34 = REG(CM_ICSC_C33_C34);
+
+ } else {
+
+ icsc_regs.csc_c11_c12 = REG(CM_ICSC_B_C11_C12);
+ icsc_regs.csc_c33_c34 = REG(CM_ICSC_B_C33_C34);
+
+ }
+
+ cm_helper_program_color_matrices(
+ dpp->base.ctx,
+ regval,
+ &icsc_regs);
+
+ REG_SET(CM_ICSC_CONTROL, 0,
+ CM_ICSC_MODE, select);
+}
+
static void dpp20_power_on_blnd_lut(
struct dpp *dpp_base,
bool power_on)
@@ -325,7 +482,7 @@ bool dpp20_program_blnd_lut(
next_mode = LUT_RAM_A;
dpp20_power_on_blnd_lut(dpp_base, true);
- dpp20_configure_blnd_lut(dpp_base, next_mode == LUT_RAM_A ? true:false);
+ dpp20_configure_blnd_lut(dpp_base, next_mode == LUT_RAM_A);
if (next_mode == LUT_RAM_A)
dpp20_program_blnd_luta_settings(dpp_base, params);
@@ -736,7 +893,7 @@ bool dpp20_program_shaper(
else
next_mode = LUT_RAM_A;
- dpp20_configure_shaper_lut(dpp_base, next_mode == LUT_RAM_A ? true:false);
+ dpp20_configure_shaper_lut(dpp_base, next_mode == LUT_RAM_A);
if (next_mode == LUT_RAM_A)
dpp20_program_shaper_luta_settings(dpp_base, params);
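
Both dpp2_program_input_csc() and program_gamut_remap() above use the same double-buffering idiom: read back which coefficient bank (A or B) is currently in use and program the other one, so the new matrix latches on a frame boundary (for the gamut remap, per the comment in the hunk, the debug value is one greater than the mode). A minimal sketch of just that selection step; the names are illustrative only.

#include <stdio.h>

/* Coefficient banks as encoded in the new DCN2 select enums (A = 1, B = 2). */
enum coef_bank {
	COEF_BYPASS = 0,
	COEF_A = 1,
	COEF_B = 2,
};

/*
 * Pick the bank that is not currently reported active, mirroring the
 * cur_select checks in dpp2_program_input_csc()/program_gamut_remap(),
 * so the newly written matrix only takes effect on the next frame.
 */
static enum coef_bank pick_inactive_bank(unsigned int cur_select)
{
	return (cur_select != COEF_A) ? COEF_A : COEF_B;
}

int main(void)
{
	printf("hw on A -> program bank %d (B)\n", pick_inactive_bank(COEF_A));
	printf("hw on B -> program bank %d (A)\n", pick_inactive_bank(COEF_B));
	return 0;
}
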
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dsc.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dsc.c
index ffd0014ec3b5..136a9dc062bc 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dsc.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dsc.c
@@ -23,13 +23,14 @@
*
*/
-#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT
+#include <drm/display/drm_dsc_helper.h>
+
#include "reg_helper.h"
#include "dcn20_dsc.h"
#include "dsc/dscc_types.h"
static void dsc_log_pps(struct display_stream_compressor *dsc, struct drm_dsc_config *pps);
-static bool dsc_prepare_config(struct display_stream_compressor *dsc, const struct dsc_config *dsc_cfg,
+static bool dsc_prepare_config(const struct dsc_config *dsc_cfg, struct dsc_reg_values *dsc_reg_vals,
struct dsc_optc_config *dsc_optc_cfg);
static void dsc_init_reg_values(struct dsc_reg_values *reg_vals);
static void dsc_update_from_dsc_parameters(struct dsc_reg_values *reg_vals, const struct dsc_parameters *dsc_params);
@@ -42,17 +43,21 @@ static void dsc2_get_enc_caps(struct dsc_enc_caps *dsc_enc_caps, int pixel_clock
static void dsc2_read_state(struct display_stream_compressor *dsc, struct dcn_dsc_state *s);
static bool dsc2_validate_stream(struct display_stream_compressor *dsc, const struct dsc_config *dsc_cfg);
static void dsc2_set_config(struct display_stream_compressor *dsc, const struct dsc_config *dsc_cfg,
- struct dsc_optc_config *dsc_optc_cfg, uint8_t *dsc_packed_pps);
+ struct dsc_optc_config *dsc_optc_cfg);
+static bool dsc2_get_packed_pps(struct display_stream_compressor *dsc, const struct dsc_config *dsc_cfg, uint8_t *dsc_packed_pps);
static void dsc2_enable(struct display_stream_compressor *dsc, int opp_pipe);
static void dsc2_disable(struct display_stream_compressor *dsc);
+static void dsc2_disconnect(struct display_stream_compressor *dsc);
const struct dsc_funcs dcn20_dsc_funcs = {
.dsc_get_enc_caps = dsc2_get_enc_caps,
.dsc_read_state = dsc2_read_state,
.dsc_validate_stream = dsc2_validate_stream,
.dsc_set_config = dsc2_set_config,
+ .dsc_get_packed_pps = dsc2_get_packed_pps,
.dsc_enable = dsc2_enable,
.dsc_disable = dsc2_disable,
+ .dsc_disconnect = dsc2_disconnect,
};
/* Macro definitios for REG_SET macros*/
@@ -117,7 +122,7 @@ static void dsc2_get_enc_caps(struct dsc_enc_caps *dsc_enc_caps, int pixel_clock
dsc_enc_caps->color_formats.bits.RGB = 1;
dsc_enc_caps->color_formats.bits.YCBCR_444 = 1;
dsc_enc_caps->color_formats.bits.YCBCR_SIMPLE_422 = 1;
- dsc_enc_caps->color_formats.bits.YCBCR_NATIVE_422 = 1;
+ dsc_enc_caps->color_formats.bits.YCBCR_NATIVE_422 = 0;
dsc_enc_caps->color_formats.bits.YCBCR_NATIVE_420 = 1;
dsc_enc_caps->color_depth.bits.COLOR_DEPTH_8_BPC = 1;
@@ -155,53 +160,93 @@ static void dsc2_read_state(struct display_stream_compressor *dsc, struct dcn_ds
REG_GET(DSC_TOP_CONTROL, DSC_CLOCK_EN, &s->dsc_clock_en);
REG_GET(DSCC_PPS_CONFIG3, SLICE_WIDTH, &s->dsc_slice_width);
- REG_GET(DSCC_PPS_CONFIG1, BITS_PER_PIXEL, &s->dsc_bytes_per_pixel);
+ REG_GET(DSCC_PPS_CONFIG1, BITS_PER_PIXEL, &s->dsc_bits_per_pixel);
+ REG_GET(DSCC_PPS_CONFIG3, SLICE_HEIGHT, &s->dsc_slice_height);
+ REG_GET(DSCC_PPS_CONFIG1, CHUNK_SIZE, &s->dsc_chunk_size);
+ REG_GET(DSCC_PPS_CONFIG2, PIC_WIDTH, &s->dsc_pic_width);
+ REG_GET(DSCC_PPS_CONFIG2, PIC_HEIGHT, &s->dsc_pic_height);
+ REG_GET(DSCC_PPS_CONFIG7, SLICE_BPG_OFFSET, &s->dsc_slice_bpg_offset);
+ REG_GET_2(DSCRM_DSC_FORWARD_CONFIG, DSCRM_DSC_FORWARD_EN, &s->dsc_fw_en,
+ DSCRM_DSC_OPP_PIPE_SOURCE, &s->dsc_opp_source);
}
static bool dsc2_validate_stream(struct display_stream_compressor *dsc, const struct dsc_config *dsc_cfg)
{
struct dsc_optc_config dsc_optc_cfg;
+ struct dcn20_dsc *dsc20 = TO_DCN20_DSC(dsc);
- if (dsc_cfg->pic_width > TO_DCN20_DSC(dsc)->max_image_width)
+ if (dsc_cfg->pic_width > dsc20->max_image_width)
return false;
- return dsc_prepare_config(dsc, dsc_cfg, &dsc_optc_cfg);
+ return dsc_prepare_config(dsc_cfg, &dsc20->reg_vals, &dsc_optc_cfg);
}
-static void dsc_config_log(struct display_stream_compressor *dsc,
- const struct dsc_config *config)
+static void dsc_config_log(struct display_stream_compressor *dsc, const struct dsc_config *config)
{
- DC_LOG_DSC("Setting DSC Config at DSC inst %d", dsc->inst);
- DC_LOG_DSC("\n\tnum_slices_h %d\n\tnum_slices_v %d\n\tbits_per_pixel %d\n\tcolor_depth %d",
- config->dc_dsc_cfg.num_slices_h,
- config->dc_dsc_cfg.num_slices_v,
+ DC_LOG_DSC("\tnum_slices_h %d", config->dc_dsc_cfg.num_slices_h);
+ DC_LOG_DSC("\tnum_slices_v %d", config->dc_dsc_cfg.num_slices_v);
+ DC_LOG_DSC("\tbits_per_pixel %d (%d.%04d)",
config->dc_dsc_cfg.bits_per_pixel,
- config->color_depth);
+ config->dc_dsc_cfg.bits_per_pixel / 16,
+ ((config->dc_dsc_cfg.bits_per_pixel % 16) * 10000) / 16);
+ DC_LOG_DSC("\tcolor_depth %d", config->color_depth);
}
static void dsc2_set_config(struct display_stream_compressor *dsc, const struct dsc_config *dsc_cfg,
- struct dsc_optc_config *dsc_optc_cfg, uint8_t *dsc_packed_pps)
+ struct dsc_optc_config *dsc_optc_cfg)
{
bool is_config_ok;
struct dcn20_dsc *dsc20 = TO_DCN20_DSC(dsc);
+ DC_LOG_DSC(" ");
+ DC_LOG_DSC("Setting DSC Config at DSC inst %d", dsc->inst);
dsc_config_log(dsc, dsc_cfg);
- is_config_ok = dsc_prepare_config(dsc, dsc_cfg, dsc_optc_cfg);
+ is_config_ok = dsc_prepare_config(dsc_cfg, &dsc20->reg_vals, dsc_optc_cfg);
ASSERT(is_config_ok);
- drm_dsc_pps_payload_pack((struct drm_dsc_picture_parameter_set *)dsc_packed_pps, &dsc20->reg_vals.pps);
+ DC_LOG_DSC("programming DSC Picture Parameter Set (PPS):");
dsc_log_pps(dsc, &dsc20->reg_vals.pps);
dsc_write_to_registers(dsc, &dsc20->reg_vals);
}
+static bool dsc2_get_packed_pps(struct display_stream_compressor *dsc, const struct dsc_config *dsc_cfg, uint8_t *dsc_packed_pps)
+{
+ bool is_config_ok;
+ struct dsc_reg_values dsc_reg_vals;
+ struct dsc_optc_config dsc_optc_cfg;
+
+ memset(&dsc_reg_vals, 0, sizeof(dsc_reg_vals));
+ memset(&dsc_optc_cfg, 0, sizeof(dsc_optc_cfg));
+
+ DC_LOG_DSC("Getting packed DSC PPS for DSC Config:");
+ dsc_config_log(dsc, dsc_cfg);
+ DC_LOG_DSC("DSC Picture Parameter Set (PPS):");
+ is_config_ok = dsc_prepare_config(dsc_cfg, &dsc_reg_vals, &dsc_optc_cfg);
+ ASSERT(is_config_ok);
+ drm_dsc_pps_payload_pack((struct drm_dsc_picture_parameter_set *)dsc_packed_pps, &dsc_reg_vals.pps);
+ dsc_log_pps(dsc, &dsc_reg_vals.pps);
+
+ return is_config_ok;
+}
+
+
static void dsc2_enable(struct display_stream_compressor *dsc, int opp_pipe)
{
struct dcn20_dsc *dsc20 = TO_DCN20_DSC(dsc);
+ int dsc_clock_en;
+ int dsc_fw_config;
+ int enabled_opp_pipe;
- /* TODO Check if DSC alreay in use? */
- DC_LOG_DSC("enable DSC at opp pipe %d", opp_pipe);
+ DC_LOG_DSC("enable DSC %d at opp pipe %d", dsc->inst, opp_pipe);
+
+ REG_GET(DSC_TOP_CONTROL, DSC_CLOCK_EN, &dsc_clock_en);
+ REG_GET_2(DSCRM_DSC_FORWARD_CONFIG, DSCRM_DSC_FORWARD_EN, &dsc_fw_config, DSCRM_DSC_OPP_PIPE_SOURCE, &enabled_opp_pipe);
+ if ((dsc_clock_en || dsc_fw_config) && enabled_opp_pipe != opp_pipe) {
+ DC_LOG_DSC("ERROR: DSC %d at opp pipe %d already enabled!", dsc->inst, enabled_opp_pipe);
+ ASSERT(0);
+ }
REG_UPDATE(DSC_TOP_CONTROL,
DSC_CLOCK_EN, 1);
@@ -215,8 +260,18 @@ static void dsc2_enable(struct display_stream_compressor *dsc, int opp_pipe)
static void dsc2_disable(struct display_stream_compressor *dsc)
{
struct dcn20_dsc *dsc20 = TO_DCN20_DSC(dsc);
+ int dsc_clock_en;
+ int dsc_fw_config;
+ int enabled_opp_pipe;
- DC_LOG_DSC("disable DSC");
+ DC_LOG_DSC("disable DSC %d", dsc->inst);
+
+ REG_GET(DSC_TOP_CONTROL, DSC_CLOCK_EN, &dsc_clock_en);
+ REG_GET_2(DSCRM_DSC_FORWARD_CONFIG, DSCRM_DSC_FORWARD_EN, &dsc_fw_config, DSCRM_DSC_OPP_PIPE_SOURCE, &enabled_opp_pipe);
+ if (!dsc_clock_en || !dsc_fw_config) {
+ DC_LOG_DSC("ERROR: DSC %d at opp pipe %d already disabled!", dsc->inst, enabled_opp_pipe);
+ ASSERT(0);
+ }
REG_UPDATE(DSCRM_DSC_FORWARD_CONFIG,
DSCRM_DSC_FORWARD_EN, 0);
@@ -225,6 +280,15 @@ static void dsc2_disable(struct display_stream_compressor *dsc)
DSC_CLOCK_EN, 0);
}
+static void dsc2_disconnect(struct display_stream_compressor *dsc)
+{
+ struct dcn20_dsc *dsc20 = TO_DCN20_DSC(dsc);
+
+ DC_LOG_DSC("disconnect DSC %d", dsc->inst);
+
+ REG_UPDATE(DSCRM_DSC_FORWARD_CONFIG,
+ DSCRM_DSC_FORWARD_EN, 0);
+}
/* This module's internal functions */
static void dsc_log_pps(struct display_stream_compressor *dsc, struct drm_dsc_config *pps)
@@ -232,7 +296,6 @@ static void dsc_log_pps(struct display_stream_compressor *dsc, struct drm_dsc_co
int i;
int bits_per_pixel = pps->bits_per_pixel;
- DC_LOG_DSC("programming DSC Picture Parameter Set (PPS):");
DC_LOG_DSC("\tdsc_version_major %d", pps->dsc_version_major);
DC_LOG_DSC("\tdsc_version_minor %d", pps->dsc_version_minor);
DC_LOG_DSC("\tbits_per_component %d", pps->bits_per_component);
@@ -282,13 +345,11 @@ static void dsc_log_pps(struct display_stream_compressor *dsc, struct drm_dsc_co
}
}
-static bool dsc_prepare_config(struct display_stream_compressor *dsc, const struct dsc_config *dsc_cfg,
+static bool dsc_prepare_config(const struct dsc_config *dsc_cfg, struct dsc_reg_values *dsc_reg_vals,
struct dsc_optc_config *dsc_optc_cfg)
{
struct dsc_parameters dsc_params;
- struct dcn20_dsc *dsc20 = TO_DCN20_DSC(dsc);
-
/* Validate input parameters */
ASSERT(dsc_cfg->dc_dsc_cfg.num_slices_h);
ASSERT(dsc_cfg->dc_dsc_cfg.num_slices_v);
@@ -302,7 +363,7 @@ static bool dsc_prepare_config(struct display_stream_compressor *dsc, const stru
dsc_cfg->dc_dsc_cfg.linebuf_depth == 0)));
ASSERT(96 <= dsc_cfg->dc_dsc_cfg.bits_per_pixel && dsc_cfg->dc_dsc_cfg.bits_per_pixel <= 0x3ff); // 6.0 <= bits_per_pixel <= 63.9375
- if (!dsc_cfg->dc_dsc_cfg.num_slices_v || !dsc_cfg->dc_dsc_cfg.num_slices_v ||
+ if (!dsc_cfg->dc_dsc_cfg.num_slices_v || !dsc_cfg->dc_dsc_cfg.num_slices_h ||
!(dsc_cfg->dc_dsc_cfg.version_minor == 1 || dsc_cfg->dc_dsc_cfg.version_minor == 2) ||
!dsc_cfg->pic_width || !dsc_cfg->pic_height ||
!((dsc_cfg->dc_dsc_cfg.version_minor == 1 && // v1.1 line buffer depth range:
@@ -315,54 +376,55 @@ static bool dsc_prepare_config(struct display_stream_compressor *dsc, const stru
return false;
}
- dsc_init_reg_values(&dsc20->reg_vals);
+ dsc_init_reg_values(dsc_reg_vals);
/* Copy input config */
- dsc20->reg_vals.pixel_format = dsc_dc_pixel_encoding_to_dsc_pixel_format(dsc_cfg->pixel_encoding, dsc_cfg->dc_dsc_cfg.ycbcr422_simple);
- dsc20->reg_vals.num_slices_h = dsc_cfg->dc_dsc_cfg.num_slices_h;
- dsc20->reg_vals.num_slices_v = dsc_cfg->dc_dsc_cfg.num_slices_v;
- dsc20->reg_vals.pps.dsc_version_minor = dsc_cfg->dc_dsc_cfg.version_minor;
- dsc20->reg_vals.pps.pic_width = dsc_cfg->pic_width;
- dsc20->reg_vals.pps.pic_height = dsc_cfg->pic_height;
- dsc20->reg_vals.pps.bits_per_component = dsc_dc_color_depth_to_dsc_bits_per_comp(dsc_cfg->color_depth);
- dsc20->reg_vals.pps.block_pred_enable = dsc_cfg->dc_dsc_cfg.block_pred_enable;
- dsc20->reg_vals.pps.line_buf_depth = dsc_cfg->dc_dsc_cfg.linebuf_depth;
- dsc20->reg_vals.alternate_ich_encoding_en = dsc20->reg_vals.pps.dsc_version_minor == 1 ? 0 : 1;
+ dsc_reg_vals->pixel_format = dsc_dc_pixel_encoding_to_dsc_pixel_format(dsc_cfg->pixel_encoding, dsc_cfg->dc_dsc_cfg.ycbcr422_simple);
+ dsc_reg_vals->num_slices_h = dsc_cfg->dc_dsc_cfg.num_slices_h;
+ dsc_reg_vals->num_slices_v = dsc_cfg->dc_dsc_cfg.num_slices_v;
+ dsc_reg_vals->pps.dsc_version_minor = dsc_cfg->dc_dsc_cfg.version_minor;
+ dsc_reg_vals->pps.pic_width = dsc_cfg->pic_width;
+ dsc_reg_vals->pps.pic_height = dsc_cfg->pic_height;
+ dsc_reg_vals->pps.bits_per_component = dsc_dc_color_depth_to_dsc_bits_per_comp(dsc_cfg->color_depth);
+ dsc_reg_vals->pps.block_pred_enable = dsc_cfg->dc_dsc_cfg.block_pred_enable;
+ dsc_reg_vals->pps.line_buf_depth = dsc_cfg->dc_dsc_cfg.linebuf_depth;
+ dsc_reg_vals->alternate_ich_encoding_en = dsc_reg_vals->pps.dsc_version_minor == 1 ? 0 : 1;
+ dsc_reg_vals->ich_reset_at_eol = (dsc_cfg->is_odm || dsc_reg_vals->num_slices_h > 1) ? 0xF : 0;
// TODO: in addition to validating slice height (pic height must be divisible by slice height),
// see what happens when the same condition doesn't apply for slice_width/pic_width.
- dsc20->reg_vals.pps.slice_width = dsc_cfg->pic_width / dsc_cfg->dc_dsc_cfg.num_slices_h;
- dsc20->reg_vals.pps.slice_height = dsc_cfg->pic_height / dsc_cfg->dc_dsc_cfg.num_slices_v;
+ dsc_reg_vals->pps.slice_width = dsc_cfg->pic_width / dsc_cfg->dc_dsc_cfg.num_slices_h;
+ dsc_reg_vals->pps.slice_height = dsc_cfg->pic_height / dsc_cfg->dc_dsc_cfg.num_slices_v;
- ASSERT(dsc20->reg_vals.pps.slice_height * dsc_cfg->dc_dsc_cfg.num_slices_v == dsc_cfg->pic_height);
- if (!(dsc20->reg_vals.pps.slice_height * dsc_cfg->dc_dsc_cfg.num_slices_v == dsc_cfg->pic_height)) {
+ ASSERT(dsc_reg_vals->pps.slice_height * dsc_cfg->dc_dsc_cfg.num_slices_v == dsc_cfg->pic_height);
+ if (!(dsc_reg_vals->pps.slice_height * dsc_cfg->dc_dsc_cfg.num_slices_v == dsc_cfg->pic_height)) {
dm_output_to_console("%s: pix height %d not divisible by num_slices_v %d\n\n", __func__, dsc_cfg->pic_height, dsc_cfg->dc_dsc_cfg.num_slices_v);
return false;
}
- dsc20->reg_vals.bpp_x32 = dsc_cfg->dc_dsc_cfg.bits_per_pixel << 1;
- if (dsc20->reg_vals.pixel_format == DSC_PIXFMT_NATIVE_YCBCR420 || dsc20->reg_vals.pixel_format == DSC_PIXFMT_NATIVE_YCBCR422)
- dsc20->reg_vals.pps.bits_per_pixel = dsc20->reg_vals.bpp_x32;
+ dsc_reg_vals->bpp_x32 = dsc_cfg->dc_dsc_cfg.bits_per_pixel << 1;
+ if (dsc_reg_vals->pixel_format == DSC_PIXFMT_NATIVE_YCBCR420 || dsc_reg_vals->pixel_format == DSC_PIXFMT_NATIVE_YCBCR422)
+ dsc_reg_vals->pps.bits_per_pixel = dsc_reg_vals->bpp_x32;
else
- dsc20->reg_vals.pps.bits_per_pixel = dsc20->reg_vals.bpp_x32 >> 1;
+ dsc_reg_vals->pps.bits_per_pixel = dsc_reg_vals->bpp_x32 >> 1;
- dsc20->reg_vals.pps.convert_rgb = dsc20->reg_vals.pixel_format == DSC_PIXFMT_RGB ? 1 : 0;
- dsc20->reg_vals.pps.native_422 = (dsc20->reg_vals.pixel_format == DSC_PIXFMT_NATIVE_YCBCR422);
- dsc20->reg_vals.pps.native_420 = (dsc20->reg_vals.pixel_format == DSC_PIXFMT_NATIVE_YCBCR420);
- dsc20->reg_vals.pps.simple_422 = (dsc20->reg_vals.pixel_format == DSC_PIXFMT_SIMPLE_YCBCR422);
+ dsc_reg_vals->pps.convert_rgb = dsc_reg_vals->pixel_format == DSC_PIXFMT_RGB ? 1 : 0;
+ dsc_reg_vals->pps.native_422 = (dsc_reg_vals->pixel_format == DSC_PIXFMT_NATIVE_YCBCR422);
+ dsc_reg_vals->pps.native_420 = (dsc_reg_vals->pixel_format == DSC_PIXFMT_NATIVE_YCBCR420);
+ dsc_reg_vals->pps.simple_422 = (dsc_reg_vals->pixel_format == DSC_PIXFMT_SIMPLE_YCBCR422);
- if (dscc_compute_dsc_parameters(&dsc20->reg_vals.pps, &dsc_params)) {
+ if (dscc_compute_dsc_parameters(&dsc_reg_vals->pps, &dsc_params)) {
dm_output_to_console("%s: DSC config failed\n", __func__);
return false;
}
- dsc_update_from_dsc_parameters(&dsc20->reg_vals, &dsc_params);
+ dsc_update_from_dsc_parameters(dsc_reg_vals, &dsc_params);
dsc_optc_cfg->bytes_per_pixel = dsc_params.bytes_per_pixel;
- dsc_optc_cfg->slice_width = dsc20->reg_vals.pps.slice_width;
- dsc_optc_cfg->is_pixel_format_444 = dsc20->reg_vals.pixel_format == DSC_PIXFMT_RGB ||
- dsc20->reg_vals.pixel_format == DSC_PIXFMT_YCBCR444 ||
- dsc20->reg_vals.pixel_format == DSC_PIXFMT_SIMPLE_YCBCR422;
+ dsc_optc_cfg->slice_width = dsc_reg_vals->pps.slice_width;
+ dsc_optc_cfg->is_pixel_format_444 = dsc_reg_vals->pixel_format == DSC_PIXFMT_RGB ||
+ dsc_reg_vals->pixel_format == DSC_PIXFMT_YCBCR444 ||
+ dsc_reg_vals->pixel_format == DSC_PIXFMT_SIMPLE_YCBCR422;
return true;
}
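
Note on the bits_per_pixel range asserted above: dc_dsc_cfg.bits_per_pixel is stored in 1/16-bpp units, so 96 corresponds to 6.0 bpp and 0x3ff to 63.9375 bpp, and bpp_x32 is simply that value shifted into 1/32-bpp units. A minimal standalone sketch of the same range check (the helper name and signature are illustrative, not part of the driver):

    #include <stdbool.h>
    #include <stdint.h>

    /* Validate a DSC target rate expressed in 1/16-bpp units, mirroring the
     * 96 (6.0 bpp) .. 0x3ff (63.9375 bpp) check in dsc_prepare_config().
     */
    static bool dsc_bpp_x16_in_range(uint32_t bpp_x16)
    {
            return bpp_x16 >= 96 && bpp_x16 <= 0x3ff;
    }

    /* Example: 8.0 bpp -> 8 * 16 = 128, which passes; bpp_x32 would be 256. */
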
@@ -427,6 +489,8 @@ static void dsc_init_reg_values(struct dsc_reg_values *reg_vals)
{
int i;
+ memset(reg_vals, 0, sizeof(struct dsc_reg_values));
+
/* Non-PPS values */
reg_vals->dsc_clock_enable = 1;
reg_vals->dsc_clock_gating_disable = 0;
@@ -436,7 +500,7 @@ static void dsc_init_reg_values(struct dsc_reg_values *reg_vals)
reg_vals->ich_reset_at_eol = 0;
reg_vals->alternate_ich_encoding_en = 0;
reg_vals->rc_buffer_model_size = 0;
- reg_vals->disable_ich = 0;
+ /*reg_vals->disable_ich = 0;*/
reg_vals->dsc_dbg_en = 0;
for (i = 0; i < 4; i++)
@@ -488,7 +552,6 @@ static void dsc_update_from_dsc_parameters(struct dsc_reg_values *reg_vals, cons
reg_vals->pps.rc_buf_thresh[i] = reg_vals->pps.rc_buf_thresh[i] >> 6;
reg_vals->rc_buffer_model_size = dsc_params->rc_buffer_model_size;
- reg_vals->ich_reset_at_eol = reg_vals->num_slices_h == 1 ? 0 : 0xf;
}
static void dsc_write_to_registers(struct display_stream_compressor *dsc, const struct dsc_reg_values *reg_vals)
@@ -518,9 +581,11 @@ static void dsc_write_to_registers(struct display_stream_compressor *dsc, const
ALTERNATE_ICH_ENCODING_EN, reg_vals->alternate_ich_encoding_en,
NUMBER_OF_SLICES_IN_VERTICAL_DIRECTION, reg_vals->num_slices_v - 1);
- REG_SET_2(DSCC_CONFIG1, 0,
+ REG_SET(DSCC_CONFIG1, 0,
+ DSCC_RATE_CONTROL_BUFFER_MODEL_SIZE, reg_vals->rc_buffer_model_size);
+ /*REG_SET_2(DSCC_CONFIG1, 0,
DSCC_RATE_CONTROL_BUFFER_MODEL_SIZE, reg_vals->rc_buffer_model_size,
- DSCC_DISABLE_ICH, reg_vals->disable_ich);
+ DSCC_DISABLE_ICH, reg_vals->disable_ich);*/
REG_SET_4(DSCC_INTERRUPT_CONTROL_STATUS, 0,
DSCC_RATE_CONTROL_BUFFER_MODEL0_OVERFLOW_OCCURRED_INT_EN, reg_vals->rc_buffer_model_overflow_int_en[0],
@@ -672,23 +737,5 @@ static void dsc_write_to_registers(struct display_stream_compressor *dsc, const
RANGE_MAX_QP14, reg_vals->pps.rc_range_params[14].range_max_qp,
RANGE_BPG_OFFSET14, reg_vals->pps.rc_range_params[14].range_bpg_offset);
- if (IS_FPGA_MAXIMUS_DC(dsc20->base.ctx->dce_environment)) {
- /* It's safe to do this as long as debug bus is not being used in DAL Diag environment.
- *
- * This is because DSCC_PPS_CONFIG4.INITIAL_DEC_DELAY is a read-only register field (because it's a decoder
- * value not required by DSC encoder). However, since decoding fails when this value is missing from PPS, it's
- * required to communicate this value to the PPS header. When testing on FPGA, the values for PPS header are
- * being read from Diag register dump. The register below is used in place of a scratch register to make
- * 'initial_dec_delay' available.
- */
-
- temp_int = reg_vals->pps.initial_dec_delay;
- REG_SET_4(DSCC_TEST_DEBUG_BUS_ROTATE, 0,
- DSCC_TEST_DEBUG_BUS0_ROTATE, temp_int & 0x1f,
- DSCC_TEST_DEBUG_BUS1_ROTATE, temp_int >> 5 & 0x1f,
- DSCC_TEST_DEBUG_BUS2_ROTATE, temp_int >> 10 & 0x1f,
- DSCC_TEST_DEBUG_BUS3_ROTATE, temp_int >> 15 & 0x1);
- }
}
-#endif
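
One behavioral detail of this refactor worth calling out: the ICH end-of-line reset value moved from dsc_update_from_dsc_parameters() into dsc_prepare_config() and now also triggers for ODM combine, not just for multiple horizontal slices. A sketch of the resulting selection (standalone and simplified; the meaning of the 4-bit value follows the ICH_RESET_AT_END_OF_LINE field definition):

    #include <stdbool.h>
    #include <stdint.h>

    /* Mirrors the new ich_reset_at_eol computation in dsc_prepare_config():
     * 0xF (reset at end of line) when ODM combine is used or there is more
     * than one horizontal slice, 0 otherwise.
     */
    static uint32_t dsc_ich_reset_at_eol(bool is_odm, int num_slices_h)
    {
            return (is_odm || num_slices_h > 1) ? 0xF : 0;
    }
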
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dsc.h b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dsc.h
index 168865a16288..c21ecedc4692 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dsc.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dsc.h
@@ -21,13 +21,12 @@
* Authors: AMD
*
*/
-#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT
#ifndef __DCN20_DSC_H__
#define __DCN20_DSC_H__
#include "dsc.h"
#include "dsc/dscc_types.h"
-#include <drm/drm_dsc.h>
+#include <drm/display/drm_dsc.h>
#define TO_DCN20_DSC(dsc)\
container_of(dsc, struct dcn20_dsc, base)
@@ -79,7 +78,6 @@
SRI(DSCC_RATE_CONTROL_BUFFER1_MAX_FULLNESS_LEVEL, DSCC, id),\
SRI(DSCC_RATE_CONTROL_BUFFER2_MAX_FULLNESS_LEVEL, DSCC, id),\
SRI(DSCC_RATE_CONTROL_BUFFER3_MAX_FULLNESS_LEVEL, DSCC, id),\
- SRI(DSCC_TEST_DEBUG_BUS_ROTATE, DSCC, id),\
SRI(DSCCIF_CONFIG0, DSCCIF, id),\
SRI(DSCCIF_CONFIG1, DSCCIF, id),\
SRI(DSCRM_DSC_FORWARD_CONFIG, DSCRM, id)
@@ -97,13 +95,12 @@
DSC_SF(DSC_TOP0_DSC_TOP_CONTROL, DSC_DISPCLK_R_GATE_DIS, mask_sh), \
DSC_SF(DSC_TOP0_DSC_TOP_CONTROL, DSC_DSCCLK_R_GATE_DIS, mask_sh), \
DSC_SF(DSC_TOP0_DSC_DEBUG_CONTROL, DSC_DBG_EN, mask_sh), \
- DSC_SF(DSC_TOP0_DSC_DEBUG_CONTROL, DSC_TEST_CLOCK_MUX_SEL, mask_sh), \
DSC_SF(DSCC0_DSCC_CONFIG0, ICH_RESET_AT_END_OF_LINE, mask_sh), \
DSC_SF(DSCC0_DSCC_CONFIG0, NUMBER_OF_SLICES_PER_LINE, mask_sh), \
DSC_SF(DSCC0_DSCC_CONFIG0, ALTERNATE_ICH_ENCODING_EN, mask_sh), \
DSC_SF(DSCC0_DSCC_CONFIG0, NUMBER_OF_SLICES_IN_VERTICAL_DIRECTION, mask_sh), \
DSC_SF(DSCC0_DSCC_CONFIG1, DSCC_RATE_CONTROL_BUFFER_MODEL_SIZE, mask_sh), \
- DSC_SF(DSCC0_DSCC_CONFIG1, DSCC_DISABLE_ICH, mask_sh), \
+ /*DSC_SF(DSCC0_DSCC_CONFIG1, DSCC_DISABLE_ICH, mask_sh),*/ \
DSC_SF(DSCC0_DSCC_STATUS, DSCC_DOUBLE_BUFFER_REG_UPDATE_PENDING, mask_sh), \
DSC_SF(DSCC0_DSCC_INTERRUPT_CONTROL_STATUS, DSCC_RATE_BUFFER0_OVERFLOW_OCCURRED, mask_sh), \
DSC_SF(DSCC0_DSCC_INTERRUPT_CONTROL_STATUS, DSCC_RATE_BUFFER1_OVERFLOW_OCCURRED, mask_sh), \
@@ -250,10 +247,6 @@
DSC_SF(DSCC0_DSCC_RATE_CONTROL_BUFFER1_MAX_FULLNESS_LEVEL, DSCC_RATE_CONTROL_BUFFER1_MAX_FULLNESS_LEVEL, mask_sh), \
DSC_SF(DSCC0_DSCC_RATE_CONTROL_BUFFER2_MAX_FULLNESS_LEVEL, DSCC_RATE_CONTROL_BUFFER2_MAX_FULLNESS_LEVEL, mask_sh), \
DSC_SF(DSCC0_DSCC_RATE_CONTROL_BUFFER3_MAX_FULLNESS_LEVEL, DSCC_RATE_CONTROL_BUFFER3_MAX_FULLNESS_LEVEL, mask_sh), \
- DSC_SF(DSCC0_DSCC_TEST_DEBUG_BUS_ROTATE, DSCC_TEST_DEBUG_BUS0_ROTATE, mask_sh), \
- DSC_SF(DSCC0_DSCC_TEST_DEBUG_BUS_ROTATE, DSCC_TEST_DEBUG_BUS1_ROTATE, mask_sh), \
- DSC_SF(DSCC0_DSCC_TEST_DEBUG_BUS_ROTATE, DSCC_TEST_DEBUG_BUS2_ROTATE, mask_sh), \
- DSC_SF(DSCC0_DSCC_TEST_DEBUG_BUS_ROTATE, DSCC_TEST_DEBUG_BUS3_ROTATE, mask_sh), \
DSC_SF(DSCCIF0_DSCCIF_CONFIG0, INPUT_INTERFACE_UNDERFLOW_RECOVERY_EN, mask_sh), \
DSC_SF(DSCCIF0_DSCCIF_CONFIG0, INPUT_INTERFACE_UNDERFLOW_OCCURRED_INT_EN, mask_sh), \
DSC_SF(DSCCIF0_DSCCIF_CONFIG0, INPUT_INTERFACE_UNDERFLOW_OCCURRED_STATUS, mask_sh), \
@@ -278,7 +271,7 @@
type ALTERNATE_ICH_ENCODING_EN; \
type NUMBER_OF_SLICES_IN_VERTICAL_DIRECTION; \
type DSCC_RATE_CONTROL_BUFFER_MODEL_SIZE; \
- type DSCC_DISABLE_ICH; \
+ /*type DSCC_DISABLE_ICH;*/ \
type DSCC_DOUBLE_BUFFER_REG_UPDATE_PENDING; \
type DSCC_RATE_BUFFER0_OVERFLOW_OCCURRED; \
type DSCC_RATE_BUFFER1_OVERFLOW_OCCURRED; \
@@ -428,10 +421,6 @@
type DSCC_UPDATE_PENDING_STATUS; \
type DSCC_UPDATE_TAKEN_STATUS; \
type DSCC_UPDATE_TAKEN_ACK; \
- type DSCC_TEST_DEBUG_BUS0_ROTATE; \
- type DSCC_TEST_DEBUG_BUS1_ROTATE; \
- type DSCC_TEST_DEBUG_BUS2_ROTATE; \
- type DSCC_TEST_DEBUG_BUS3_ROTATE; \
type DSCC_RATE_BUFFER0_FULLNESS_LEVEL; \
type DSCC_RATE_BUFFER1_FULLNESS_LEVEL; \
type DSCC_RATE_BUFFER2_FULLNESS_LEVEL; \
@@ -504,7 +493,6 @@ struct dcn20_dsc_registers {
uint32_t DSCC_RATE_CONTROL_BUFFER1_MAX_FULLNESS_LEVEL;
uint32_t DSCC_RATE_CONTROL_BUFFER2_MAX_FULLNESS_LEVEL;
uint32_t DSCC_RATE_CONTROL_BUFFER3_MAX_FULLNESS_LEVEL;
- uint32_t DSCC_TEST_DEBUG_BUS_ROTATE;
uint32_t DSCCIF_CONFIG0;
uint32_t DSCCIF_CONFIG1;
uint32_t DSCRM_DSC_FORWARD_CONFIG;
@@ -572,4 +560,3 @@ void dsc2_construct(struct dcn20_dsc *dsc,
#endif
-#endif
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dwb_scl.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dwb_scl.c
index cd8bc92ce3ba..994fb732a7cb 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dwb_scl.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dwb_scl.c
@@ -527,7 +527,7 @@ static const uint16_t filter_12tap_16p_183[108] = {
0, 84, 16328, 16032, 416, 1944, 1944, 416, 16032, 16328, 84, 0,
};
-const uint16_t *wbscl_get_filter_3tap_16p(struct fixed31_32 ratio)
+static const uint16_t *wbscl_get_filter_3tap_16p(struct fixed31_32 ratio)
{
if (ratio.value < dc_fixpt_one.value)
return filter_3tap_16p_upscale;
@@ -539,7 +539,7 @@ const uint16_t *wbscl_get_filter_3tap_16p(struct fixed31_32 ratio)
return filter_3tap_16p_183;
}
-const uint16_t *wbscl_get_filter_4tap_16p(struct fixed31_32 ratio)
+static const uint16_t *wbscl_get_filter_4tap_16p(struct fixed31_32 ratio)
{
if (ratio.value < dc_fixpt_one.value)
return filter_4tap_16p_upscale;
@@ -722,7 +722,6 @@ bool dwb_program_horz_scalar(struct dcn20_dwbc *dwbc20,
struct scaling_taps num_taps)
{
uint32_t h_ratio_luma = 1;
- uint32_t h_ratio_chroma = 1;
uint32_t h_taps_luma = num_taps.h_taps;
uint32_t h_taps_chroma = num_taps.h_taps_c;
int32_t h_init_phase_luma = 0;
@@ -747,7 +746,6 @@ bool dwb_program_horz_scalar(struct dcn20_dwbc *dwbc20,
h_ratio_luma = -1;
else
h_ratio_luma = dc_fixpt_u3d19(tmp_h_ratio_luma) << 5;
- h_ratio_chroma = h_ratio_luma * 2;
/*Program ratio*/
REG_UPDATE(WBSCL_HORZ_FILTER_SCALE_RATIO, WBSCL_H_SCALE_RATIO, h_ratio_luma);
@@ -803,7 +801,6 @@ bool dwb_program_vert_scalar(struct dcn20_dwbc *dwbc20,
enum dwb_subsample_position subsample_position)
{
uint32_t v_ratio_luma = 1;
- uint32_t v_ratio_chroma = 1;
uint32_t v_taps_luma = num_taps.v_taps;
uint32_t v_taps_chroma = num_taps.v_taps_c;
int32_t v_init_phase_luma = 0;
@@ -827,7 +824,6 @@ bool dwb_program_vert_scalar(struct dcn20_dwbc *dwbc20,
v_ratio_luma = -1;
else
v_ratio_luma = dc_fixpt_u3d19(tmp_v_ratio_luma) << 5;
- v_ratio_chroma = v_ratio_luma * 2;
/*Program ratio*/
REG_UPDATE(WBSCL_VERT_FILTER_SCALE_RATIO, WBSCL_V_SCALE_RATIO, v_ratio_luma);
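
For context, the writeback scaler picks a coefficient table by comparing the fixed-point scale ratio against 1.0 (dc_fixpt_one) and, when downscaling, against further thresholds that fall outside this hunk. A self-contained sketch of that selection shape; the fixed-point layout and the cut-off value are stand-ins for the driver's internals, not its actual constants:

    #include <stdint.h>

    /* Simplified 32.32 fixed-point ratio; FIXPT_ONE represents exactly 1.0. */
    typedef int64_t fixpt64_t;
    #define FIXPT_ONE ((fixpt64_t)1 << 32)

    enum wb_filter_bank { WB_FILTER_UPSCALE, WB_FILTER_MID, WB_FILTER_183 };

    /* Pick a filter bank from the scale ratio: upscale coefficients below 1.0,
     * otherwise one of the downscale banks (threshold is a placeholder).
     */
    static enum wb_filter_bank wbscl_pick_bank(fixpt64_t ratio)
    {
            if (ratio < FIXPT_ONE)
                    return WB_FILTER_UPSCALE;
            if (ratio < (FIXPT_ONE * 183) / 100)
                    return WB_FILTER_MID;
            return WB_FILTER_183;
    }
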
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubbub.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubbub.c
index ece6e136437b..aacb1fb5c73e 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubbub.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubbub.c
@@ -26,6 +26,7 @@
#include "dcn20_hubbub.h"
#include "reg_helper.h"
+#include "clk_mgr.h"
#define REG(reg)\
hubbub1->regs->reg
@@ -152,9 +153,12 @@ bool hubbub2_dcc_support_pixel_format(
case SURFACE_PIXEL_FORMAT_GRPH_BGR101111_FIX:
case SURFACE_PIXEL_FORMAT_GRPH_RGB111110_FLOAT:
case SURFACE_PIXEL_FORMAT_GRPH_BGR101111_FLOAT:
+ case SURFACE_PIXEL_FORMAT_GRPH_RGBE:
+ case SURFACE_PIXEL_FORMAT_GRPH_RGBE_ALPHA:
*bytes_per_element = 4;
return true;
case SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616:
+ case SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616:
case SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616F:
case SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616F:
*bytes_per_element = 8;
@@ -185,14 +189,13 @@ static void hubbub2_get_blk256_size(unsigned int *blk256_width, unsigned int *bl
}
static void hubbub2_det_request_size(
+ unsigned int detile_buf_size,
unsigned int height,
unsigned int width,
unsigned int bpe,
bool *req128_horz_wc,
bool *req128_vert_wc)
{
- unsigned int detile_buf_size = 164 * 1024; /* 164KB for DCN1.0 */
-
unsigned int blk256_height = 0;
unsigned int blk256_width = 0;
unsigned int swath_bytes_horz_wc, swath_bytes_vert_wc;
@@ -235,7 +238,8 @@ bool hubbub2_get_dcc_compression_cap(struct hubbub *hubbub,
&segment_order_horz, &segment_order_vert))
return false;
- hubbub2_det_request_size(input->surface_size.height, input->surface_size.width,
+ hubbub2_det_request_size(TO_DCN20_HUBBUB(hubbub)->detile_buf_size,
+ input->surface_size.height, input->surface_size.width,
bpe, &req128_horz_wc, &req128_vert_wc);
if (!req128_horz_wc && !req128_vert_wc) {
@@ -292,6 +296,9 @@ bool hubbub2_get_dcc_compression_cap(struct hubbub *hubbub,
output->grph.rgb.max_compressed_blk_size = 64;
output->grph.rgb.independent_64b_blks = true;
break;
+ default:
+ ASSERT(false);
+ break;
}
output->capable = true;
output->const_color_support = true;
@@ -335,8 +342,12 @@ static enum dcn_hubbub_page_table_block_size page_table_block_size_to_hw(unsigne
case 65536:
block_size = DCN_PAGE_TABLE_BLOCK_SIZE_64KB;
break;
+ case 32768:
+ block_size = DCN_PAGE_TABLE_BLOCK_SIZE_32KB;
+ break;
default:
ASSERT(false);
+ block_size = page_table_block_size;
break;
}
@@ -366,25 +377,29 @@ int hubbub2_init_dchub_sys_ctx(struct hubbub *hubbub,
struct dcn_vmid_page_table_config phys_config;
REG_SET(DCN_VM_FB_LOCATION_BASE, 0,
- FB_BASE, pa_config->system_aperture.fb_base);
+ FB_BASE, pa_config->system_aperture.fb_base >> 24);
REG_SET(DCN_VM_FB_LOCATION_TOP, 0,
- FB_TOP, pa_config->system_aperture.fb_top);
+ FB_TOP, pa_config->system_aperture.fb_top >> 24);
REG_SET(DCN_VM_FB_OFFSET, 0,
- FB_OFFSET, pa_config->system_aperture.fb_offset);
+ FB_OFFSET, pa_config->system_aperture.fb_offset >> 24);
REG_SET(DCN_VM_AGP_BOT, 0,
- AGP_BOT, pa_config->system_aperture.agp_bot);
+ AGP_BOT, pa_config->system_aperture.agp_bot >> 24);
REG_SET(DCN_VM_AGP_TOP, 0,
- AGP_TOP, pa_config->system_aperture.agp_top);
+ AGP_TOP, pa_config->system_aperture.agp_top >> 24);
REG_SET(DCN_VM_AGP_BASE, 0,
- AGP_BASE, pa_config->system_aperture.agp_base);
+ AGP_BASE, pa_config->system_aperture.agp_base >> 24);
+
+ REG_SET(DCN_VM_PROTECTION_FAULT_DEFAULT_ADDR_MSB, 0,
+ DCN_VM_PROTECTION_FAULT_DEFAULT_ADDR_MSB, (pa_config->page_table_default_page_addr >> 44) & 0xF);
+ REG_SET(DCN_VM_PROTECTION_FAULT_DEFAULT_ADDR_LSB, 0,
+ DCN_VM_PROTECTION_FAULT_DEFAULT_ADDR_LSB, (pa_config->page_table_default_page_addr >> 12) & 0xFFFFFFFF);
if (pa_config->gart_config.page_table_start_addr != pa_config->gart_config.page_table_end_addr) {
- phys_config.depth = 1;
- phys_config.block_size = 4096;
phys_config.page_table_start_addr = pa_config->gart_config.page_table_start_addr >> 12;
phys_config.page_table_end_addr = pa_config->gart_config.page_table_end_addr >> 12;
phys_config.page_table_base_addr = pa_config->gart_config.page_table_base_addr;
-
+ phys_config.depth = 0;
+ phys_config.block_size = 0;
// Init VMID 0 based on PA config
dcn20_vmid_setup(&hubbub1->vmid[0], &phys_config);
}
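
The address programming above consistently strips low-order bits before writing: the FB/AGP location registers take the 24 MSBs (bits [47:24]) of a 48-bit address, the GART page-table start/end addresses are written as 4 KB page numbers (>> 12), and the protection-fault default page address is split into a 4-bit MSB field (bits [47:44]) and a 32-bit LSB field (bits [43:12]). A standalone sketch of those conversions (helper names are illustrative):

    #include <stdint.h>

    /* 24 MSBs of a 48-bit address, as written to the FB and AGP location fields */
    static inline uint32_t addr_to_fb_agp_field(uint64_t addr)
    {
            return (uint32_t)(addr >> 24);
    }

    /* 4 KB page number, as written to the page-table start/end fields */
    static inline uint64_t addr_to_page_number(uint64_t addr)
    {
            return addr >> 12;
    }

    /* Split of the fault default page address into the MSB/LSB register pair */
    static inline void addr_to_fault_default_fields(uint64_t addr,
                                                    uint32_t *msb, uint32_t *lsb)
    {
            *msb = (uint32_t)((addr >> 44) & 0xF);         /* bits [47:44] */
            *lsb = (uint32_t)((addr >> 12) & 0xFFFFFFFF);  /* bits [43:12] */
    }
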
@@ -397,54 +412,67 @@ void hubbub2_update_dchub(struct hubbub *hubbub,
{
struct dcn20_hubbub *hubbub1 = TO_DCN20_HUBBUB(hubbub);
- if (REG(DCHUBBUB_SDPIF_FB_TOP) == 0) {
- ASSERT(false);
- /*should not come here*/
+ if (REG(DCN_VM_FB_LOCATION_TOP) == 0)
return;
- }
- /* TODO: port code from dal2 */
+
switch (dh_data->fb_mode) {
case FRAME_BUFFER_MODE_ZFB_ONLY:
/*For ZFB case need to put DCHUB FB BASE and TOP upside down to indicate ZFB mode*/
- REG_UPDATE(DCHUBBUB_SDPIF_FB_TOP,
- SDPIF_FB_TOP, 0);
-
- REG_UPDATE(DCHUBBUB_SDPIF_FB_BASE,
- SDPIF_FB_BASE, 0x0FFFF);
-
- REG_UPDATE(DCHUBBUB_SDPIF_AGP_BASE,
- SDPIF_AGP_BASE, dh_data->zfb_phys_addr_base >> 22);
-
- REG_UPDATE(DCHUBBUB_SDPIF_AGP_BOT,
- SDPIF_AGP_BOT, dh_data->zfb_mc_base_addr >> 22);
-
- REG_UPDATE(DCHUBBUB_SDPIF_AGP_TOP,
- SDPIF_AGP_TOP, (dh_data->zfb_mc_base_addr +
- dh_data->zfb_size_in_byte - 1) >> 22);
+ REG_UPDATE(DCN_VM_FB_LOCATION_TOP,
+ FB_TOP, 0);
+
+ REG_UPDATE(DCN_VM_FB_LOCATION_BASE,
+ FB_BASE, 0xFFFFFF);
+
+ /*This field defines the 24 MSBs, bits [47:24] of the 48 bit AGP Base*/
+ REG_UPDATE(DCN_VM_AGP_BASE,
+ AGP_BASE, dh_data->zfb_phys_addr_base >> 24);
+
+ /*This field defines the bottom range of the AGP aperture and represents the 24*/
+ /*MSBs, bits [47:24] of the 48 address bits*/
+ REG_UPDATE(DCN_VM_AGP_BOT,
+ AGP_BOT, dh_data->zfb_mc_base_addr >> 24);
+
+ /*This field defines the top range of the AGP aperture and represents the 24*/
+ /*MSBs, bits [47:24] of the 48 address bits*/
+ REG_UPDATE(DCN_VM_AGP_TOP,
+ AGP_TOP, (dh_data->zfb_mc_base_addr +
+ dh_data->zfb_size_in_byte - 1) >> 24);
break;
case FRAME_BUFFER_MODE_MIXED_ZFB_AND_LOCAL:
/*Should not touch FB LOCATION (done by VBIOS on AsicInit table)*/
- REG_UPDATE(DCHUBBUB_SDPIF_AGP_BASE,
- SDPIF_AGP_BASE, dh_data->zfb_phys_addr_base >> 22);
+ /*This field defines the 24 MSBs, bits [47:24] of the 48 bit AGP Base*/
+ REG_UPDATE(DCN_VM_AGP_BASE,
+ AGP_BASE, dh_data->zfb_phys_addr_base >> 24);
- REG_UPDATE(DCHUBBUB_SDPIF_AGP_BOT,
- SDPIF_AGP_BOT, dh_data->zfb_mc_base_addr >> 22);
+ /*This field defines the bottom range of the AGP aperture and represents the 24*/
+ /*MSBs, bits [47:24] of the 48 address bits*/
+ REG_UPDATE(DCN_VM_AGP_BOT,
+ AGP_BOT, dh_data->zfb_mc_base_addr >> 24);
- REG_UPDATE(DCHUBBUB_SDPIF_AGP_TOP,
- SDPIF_AGP_TOP, (dh_data->zfb_mc_base_addr +
- dh_data->zfb_size_in_byte - 1) >> 22);
+ /*This field defines the top range of the AGP aperture and represents the 24*/
+ /*MSBs, bits [47:24] of the 48 address bits*/
+ REG_UPDATE(DCN_VM_AGP_TOP,
+ AGP_TOP, (dh_data->zfb_mc_base_addr +
+ dh_data->zfb_size_in_byte - 1) >> 24);
break;
case FRAME_BUFFER_MODE_LOCAL_ONLY:
- /*Should not touch FB LOCATION (done by VBIOS on AsicInit table)*/
- REG_UPDATE(DCHUBBUB_SDPIF_AGP_BASE,
- SDPIF_AGP_BASE, 0);
+ /*Should not touch FB LOCATION (should be done by VBIOS)*/
- REG_UPDATE(DCHUBBUB_SDPIF_AGP_BOT,
- SDPIF_AGP_BOT, 0X03FFFF);
+ /*This field defines the 24 MSBs, bits [47:24] of the 48 bit AGP Base*/
+ REG_UPDATE(DCN_VM_AGP_BASE,
+ AGP_BASE, 0);
- REG_UPDATE(DCHUBBUB_SDPIF_AGP_TOP,
- SDPIF_AGP_TOP, 0);
+ /*This field defines the bottom range of the AGP aperture and represents the 24*/
+ /*MSBs, bits [47:24] of the 48 address bits*/
+ REG_UPDATE(DCN_VM_AGP_BOT,
+ AGP_BOT, 0xFFFFFF);
+
+ /*This field defines the top range of the AGP aperture and represents the 24*/
+ /*MSBs, bits [47:24] of the 48 address bits*/
+ REG_UPDATE(DCN_VM_AGP_TOP,
+ AGP_TOP, 0);
break;
default:
break;
@@ -540,26 +568,61 @@ void hubbub2_get_dchub_ref_freq(struct hubbub *hubbub,
}
}
-static void hubbub2_program_watermarks(
+static bool hubbub2_program_watermarks(
struct hubbub *hubbub,
struct dcn_watermark_set *watermarks,
unsigned int refclk_mhz,
bool safe_to_lower)
{
struct dcn20_hubbub *hubbub1 = TO_DCN20_HUBBUB(hubbub);
+ bool wm_pending = false;
/*
* Need to clamp to max of the register values (i.e. no wrap)
* for dcn1, all wm registers are 21-bit wide
*/
- hubbub1_program_urgent_watermarks(hubbub, watermarks, refclk_mhz, safe_to_lower);
- hubbub1_program_stutter_watermarks(hubbub, watermarks, refclk_mhz, safe_to_lower);
+ if (hubbub1_program_urgent_watermarks(hubbub, watermarks, refclk_mhz, safe_to_lower))
+ wm_pending = true;
+
+ if (hubbub1_program_stutter_watermarks(hubbub, watermarks, refclk_mhz, safe_to_lower))
+ wm_pending = true;
+
+ /*
+ * There's a special case when going from p-state supported to p-state unsupported:
+ * here we are going to LOWER watermarks to go to the dummy p-state only, but this has
+ * to be done in prepare_bandwidth, not optimize
+ */
+ if (hubbub1->base.ctx->dc->clk_mgr->clks.prev_p_state_change_support == true &&
+ hubbub1->base.ctx->dc->clk_mgr->clks.p_state_change_support == false)
+ safe_to_lower = true;
+
hubbub1_program_pstate_watermarks(hubbub, watermarks, refclk_mhz, safe_to_lower);
REG_SET(DCHUBBUB_ARB_SAT_LEVEL, 0,
DCHUBBUB_ARB_SAT_LEVEL, 60 * refclk_mhz);
REG_UPDATE(DCHUBBUB_ARB_DF_REQ_OUTSTAND, DCHUBBUB_ARB_MIN_REQ_OUTSTAND, 180);
- hubbub1_allow_self_refresh_control(hubbub, !hubbub->ctx->dc->debug.disable_stutter);
+ hubbub->funcs->allow_self_refresh_control(hubbub, !hubbub->ctx->dc->debug.disable_stutter);
+ return wm_pending;
+}
+
+void hubbub2_read_state(struct hubbub *hubbub, struct dcn_hubbub_state *hubbub_state)
+{
+ struct dcn20_hubbub *hubbub1 = TO_DCN20_HUBBUB(hubbub);
+
+ if (REG(DCN_VM_FAULT_ADDR_MSB))
+ hubbub_state->vm_fault_addr_msb = REG_READ(DCN_VM_FAULT_ADDR_MSB);
+
+ if (REG(DCN_VM_FAULT_ADDR_LSB))
+ hubbub_state->vm_fault_addr_lsb = REG_READ(DCN_VM_FAULT_ADDR_LSB);
+
+ if (REG(DCN_VM_FAULT_CNTL))
+ REG_GET(DCN_VM_FAULT_CNTL, DCN_VM_ERROR_STATUS_MODE, &hubbub_state->vm_error_mode);
+
+ if (REG(DCN_VM_FAULT_STATUS)) {
+ REG_GET(DCN_VM_FAULT_STATUS, DCN_VM_ERROR_STATUS, &hubbub_state->vm_error_status);
+ REG_GET(DCN_VM_FAULT_STATUS, DCN_VM_ERROR_VMID, &hubbub_state->vm_error_vmid);
+ REG_GET(DCN_VM_FAULT_STATUS, DCN_VM_ERROR_PIPE, &hubbub_state->vm_error_pipe);
+ }
}
static const struct hubbub_funcs hubbub2_funcs = {
@@ -572,6 +635,9 @@ static const struct hubbub_funcs hubbub2_funcs = {
.wm_read_state = hubbub2_wm_read_state,
.get_dchub_ref_freq = hubbub2_get_dchub_ref_freq,
.program_watermarks = hubbub2_program_watermarks,
+ .is_allow_self_refresh_enabled = hubbub1_is_allow_self_refresh_enabled,
+ .allow_self_refresh_control = hubbub1_allow_self_refresh_control,
+ .hubbub_read_state = hubbub2_read_state,
};
void hubbub2_construct(struct dcn20_hubbub *hubbub,
@@ -589,4 +655,5 @@ void hubbub2_construct(struct dcn20_hubbub *hubbub,
hubbub->masks = hubbub_mask;
hubbub->debug_test_index_pstate = 0xB;
+ hubbub->detile_buf_size = 164 * 1024; /* 164KB for DCN2.0 */
}
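
Two behavioral notes on hubbub2_program_watermarks() as reworked above: it now aggregates a wm_pending result from the per-class helpers so callers know some values still need to be applied later, and it forces safe_to_lower when p-state support is being dropped so the dummy p-state watermark can be lowered during prepare_bandwidth. A minimal sketch of that pattern; the stub helpers stand in for the hubbub1_program_*_watermarks() calls:

    #include <stdbool.h>

    /* Illustrative stubs: in the driver each helper returns true when a new
     * watermark could not be applied yet under the safe_to_lower policy.
     */
    static bool program_urgent_wm(bool safe_to_lower)  { (void)safe_to_lower; return false; }
    static bool program_stutter_wm(bool safe_to_lower) { (void)safe_to_lower; return false; }

    static bool program_watermarks(bool safe_to_lower,
                                   bool prev_p_state_support,
                                   bool p_state_support)
    {
            bool wm_pending = false;

            if (program_urgent_wm(safe_to_lower))
                    wm_pending = true;
            if (program_stutter_wm(safe_to_lower))
                    wm_pending = true;

            /* Dropping p-state support: the p-state watermark must be lowered
             * now (prepare_bandwidth), so force safe_to_lower for that step.
             */
            if (prev_p_state_support && !p_state_support)
                    safe_to_lower = true;

            /* ... p-state watermarks would be programmed here ... */
            (void)safe_to_lower;

            return wm_pending;
    }
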
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubbub.h b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubbub.h
index a7b6ca26a9ad..2f6146bf1d32 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubbub.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubbub.h
@@ -32,17 +32,27 @@
#define TO_DCN20_HUBBUB(hubbub)\
container_of(hubbub, struct dcn20_hubbub, base)
-#define HUBBUB_REG_LIST_DCN20(id)\
+#define HUBBUB_REG_LIST_DCN20_COMMON()\
HUBBUB_REG_LIST_DCN_COMMON(), \
- HUBBUB_VM_REG_LIST(), \
- HUBBUB_SR_WATERMARK_REG_LIST(), \
SR(DCHUBBUB_CRC_CTRL), \
SR(DCN_VM_FB_LOCATION_BASE),\
SR(DCN_VM_FB_LOCATION_TOP),\
SR(DCN_VM_FB_OFFSET),\
SR(DCN_VM_AGP_BOT),\
SR(DCN_VM_AGP_TOP),\
- SR(DCN_VM_AGP_BASE)
+ SR(DCN_VM_AGP_BASE),\
+ SR(DCN_VM_FAULT_ADDR_MSB), \
+ SR(DCN_VM_FAULT_ADDR_LSB), \
+ SR(DCN_VM_FAULT_CNTL), \
+ SR(DCN_VM_FAULT_STATUS)
+
+#define HUBBUB_REG_LIST_DCN20(id)\
+ HUBBUB_REG_LIST_DCN20_COMMON(), \
+ HUBBUB_SR_WATERMARK_REG_LIST(), \
+ HUBBUB_VM_REG_LIST(),\
+ SR(DCN_VM_PROTECTION_FAULT_DEFAULT_ADDR_MSB),\
+ SR(DCN_VM_PROTECTION_FAULT_DEFAULT_ADDR_LSB)
+
#define HUBBUB_MASK_SH_LIST_DCN20(mask_sh)\
HUBBUB_MASK_SH_LIST_DCN_COMMON(mask_sh), \
@@ -53,7 +63,21 @@
HUBBUB_SF(DCN_VM_FB_OFFSET, FB_OFFSET, mask_sh), \
HUBBUB_SF(DCN_VM_AGP_BOT, AGP_BOT, mask_sh), \
HUBBUB_SF(DCN_VM_AGP_TOP, AGP_TOP, mask_sh), \
- HUBBUB_SF(DCN_VM_AGP_BASE, AGP_BASE, mask_sh)
+ HUBBUB_SF(DCN_VM_AGP_BASE, AGP_BASE, mask_sh), \
+ HUBBUB_SF(DCN_VM_PROTECTION_FAULT_DEFAULT_ADDR_MSB, DCN_VM_PROTECTION_FAULT_DEFAULT_ADDR_MSB, mask_sh), \
+ HUBBUB_SF(DCN_VM_PROTECTION_FAULT_DEFAULT_ADDR_LSB, DCN_VM_PROTECTION_FAULT_DEFAULT_ADDR_LSB, mask_sh), \
+ HUBBUB_SF(DCN_VM_FAULT_ADDR_MSB, DCN_VM_FAULT_ADDR_MSB, mask_sh), \
+ HUBBUB_SF(DCN_VM_FAULT_ADDR_LSB, DCN_VM_FAULT_ADDR_LSB, mask_sh), \
+ HUBBUB_SF(DCN_VM_FAULT_CNTL, DCN_VM_ERROR_STATUS_CLEAR, mask_sh), \
+ HUBBUB_SF(DCN_VM_FAULT_CNTL, DCN_VM_ERROR_STATUS_MODE, mask_sh), \
+ HUBBUB_SF(DCN_VM_FAULT_CNTL, DCN_VM_ERROR_INTERRUPT_ENABLE, mask_sh), \
+ HUBBUB_SF(DCN_VM_FAULT_CNTL, DCN_VM_RANGE_FAULT_DISABLE, mask_sh), \
+ HUBBUB_SF(DCN_VM_FAULT_CNTL, DCN_VM_PRQ_FAULT_DISABLE, mask_sh), \
+ HUBBUB_SF(DCN_VM_FAULT_STATUS, DCN_VM_ERROR_STATUS, mask_sh), \
+ HUBBUB_SF(DCN_VM_FAULT_STATUS, DCN_VM_ERROR_VMID, mask_sh), \
+ HUBBUB_SF(DCN_VM_FAULT_STATUS, DCN_VM_ERROR_TABLE_LEVEL, mask_sh), \
+ HUBBUB_SF(DCN_VM_FAULT_STATUS, DCN_VM_ERROR_PIPE, mask_sh), \
+ HUBBUB_SF(DCN_VM_FAULT_STATUS, DCN_VM_ERROR_INTERRUPT_STATUS, mask_sh)
struct dcn20_hubbub {
struct hubbub base;
@@ -62,7 +86,16 @@ struct dcn20_hubbub {
const struct dcn_hubbub_mask *masks;
unsigned int debug_test_index_pstate;
struct dcn_watermark_set watermarks;
+ int num_vmid;
struct dcn20_vmid vmid[16];
+ unsigned int detile_buf_size;
+ unsigned int crb_size_segs;
+ unsigned int compbuf_size_segments;
+ unsigned int pixel_chunk_size;
+ unsigned int det0_size;
+ unsigned int det1_size;
+ unsigned int det2_size;
+ unsigned int det3_size;
};
void hubbub2_construct(struct dcn20_hubbub *hubbub,
@@ -104,4 +137,7 @@ void hubbub2_get_dchub_ref_freq(struct hubbub *hubbub,
void hubbub2_wm_read_state(struct hubbub *hubbub,
struct dcn_hubbub_wm *wm);
+void hubbub2_read_state(struct hubbub *hubbub,
+ struct dcn_hubbub_state *hubbub_state);
+
#endif
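
As with the other DCN blocks, the DCN 2.0 hubbub embeds the generic struct hubbub as "base" and recovers the wrapper with a container_of cast (TO_DCN20_HUBBUB above); that is how per-generation state such as detile_buf_size is attached. A generic, self-contained sketch of that embedding pattern (types simplified, names invented for illustration):

    #include <stddef.h>

    struct base_obj { int common_state; };

    struct derived_obj {
            struct base_obj base;      /* embedded generic object */
            unsigned int extra_state;  /* generation-specific state */
    };

    /* container_of-style recovery of the wrapper from a pointer to 'base' */
    #define to_derived(ptr) \
            ((struct derived_obj *)((char *)(ptr) - offsetof(struct derived_obj, base)))
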
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubp.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubp.c
index d3f7dd374d50..a665af19f201 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubp.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubp.c
@@ -1,5 +1,5 @@
/*
- * Copyright 2012-17 Advanced Micro Devices, Inc.
+ * Copyright 2012-2021 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
@@ -30,6 +30,8 @@
#include "reg_helper.h"
#include "basics/conversion.h"
+#define DC_LOGGER_INIT(logger)
+
#define REG(reg)\
hubp2->hubp_regs->reg
@@ -40,81 +42,6 @@
#define FN(reg_name, field_name) \
hubp2->hubp_shift->field_name, hubp2->hubp_mask->field_name
-void hubp2_update_dchub(
- struct hubp *hubp,
- struct dchub_init_data *dh_data)
-{
- struct dcn20_hubp *hubp2 = TO_DCN20_HUBP(hubp);
- if (REG(DCN_VM_FB_LOCATION_TOP) == 0)
- return;
-
- switch (dh_data->fb_mode) {
- case FRAME_BUFFER_MODE_ZFB_ONLY:
- /*For ZFB case need to put DCHUB FB BASE and TOP upside down to indicate ZFB mode*/
- REG_UPDATE(DCN_VM_FB_LOCATION_TOP,
- FB_TOP, 0);
-
- REG_UPDATE(DCN_VM_FB_LOCATION_BASE,
- FB_BASE, 0xFFFFFF);
-
- /*This field defines the 24 MSBs, bits [47:24] of the 48 bit AGP Base*/
- REG_UPDATE(DCN_VM_AGP_BASE,
- AGP_BASE, dh_data->zfb_phys_addr_base >> 24);
-
- /*This field defines the bottom range of the AGP aperture and represents the 24*/
- /*MSBs, bits [47:24] of the 48 address bits*/
- REG_UPDATE(DCN_VM_AGP_BOT,
- AGP_BOT, dh_data->zfb_mc_base_addr >> 24);
-
- /*This field defines the top range of the AGP aperture and represents the 24*/
- /*MSBs, bits [47:24] of the 48 address bits*/
- REG_UPDATE(DCN_VM_AGP_TOP,
- AGP_TOP, (dh_data->zfb_mc_base_addr +
- dh_data->zfb_size_in_byte - 1) >> 24);
- break;
- case FRAME_BUFFER_MODE_MIXED_ZFB_AND_LOCAL:
- /*Should not touch FB LOCATION (done by VBIOS on AsicInit table)*/
-
- /*This field defines the 24 MSBs, bits [47:24] of the 48 bit AGP Base*/
- REG_UPDATE(DCN_VM_AGP_BASE,
- AGP_BASE, dh_data->zfb_phys_addr_base >> 24);
-
- /*This field defines the bottom range of the AGP aperture and represents the 24*/
- /*MSBs, bits [47:24] of the 48 address bits*/
- REG_UPDATE(DCN_VM_AGP_BOT,
- AGP_BOT, dh_data->zfb_mc_base_addr >> 24);
-
- /*This field defines the top range of the AGP aperture and represents the 24*/
- /*MSBs, bits [47:24] of the 48 address bits*/
- REG_UPDATE(DCN_VM_AGP_TOP,
- AGP_TOP, (dh_data->zfb_mc_base_addr +
- dh_data->zfb_size_in_byte - 1) >> 24);
- break;
- case FRAME_BUFFER_MODE_LOCAL_ONLY:
- /*Should not touch FB LOCATION (should be done by VBIOS)*/
-
- /*This field defines the 24 MSBs, bits [47:24] of the 48 bit AGP Base*/
- REG_UPDATE(DCN_VM_AGP_BASE,
- AGP_BASE, 0);
-
- /*This field defines the bottom range of the AGP aperture and represents the 24*/
- /*MSBs, bits [47:24] of the 48 address bits*/
- REG_UPDATE(DCN_VM_AGP_BOT,
- AGP_BOT, 0xFFFFFF);
-
- /*This field defines the top range of the AGP aperture and represents the 24*/
- /*MSBs, bits [47:24] of the 48 address bits*/
- REG_UPDATE(DCN_VM_AGP_TOP,
- AGP_TOP, 0);
- break;
- default:
- break;
- }
-
- dh_data->dchub_initialzied = true;
- dh_data->dchub_info_valid = false;
-}
-
void hubp2_set_vm_system_aperture_settings(struct hubp *hubp,
struct vm_system_aperture_param *apt)
{
@@ -156,7 +83,85 @@ void hubp2_program_deadline(
{
struct dcn20_hubp *hubp2 = TO_DCN20_HUBP(hubp);
- hubp1_program_deadline(hubp, dlg_attr, ttu_attr);
+ /* DLG - Per hubp */
+ REG_SET_2(BLANK_OFFSET_0, 0,
+ REFCYC_H_BLANK_END, dlg_attr->refcyc_h_blank_end,
+ DLG_V_BLANK_END, dlg_attr->dlg_vblank_end);
+
+ REG_SET(BLANK_OFFSET_1, 0,
+ MIN_DST_Y_NEXT_START, dlg_attr->min_dst_y_next_start);
+
+ REG_SET(DST_DIMENSIONS, 0,
+ REFCYC_PER_HTOTAL, dlg_attr->refcyc_per_htotal);
+
+ REG_SET_2(DST_AFTER_SCALER, 0,
+ REFCYC_X_AFTER_SCALER, dlg_attr->refcyc_x_after_scaler,
+ DST_Y_AFTER_SCALER, dlg_attr->dst_y_after_scaler);
+
+ REG_SET(REF_FREQ_TO_PIX_FREQ, 0,
+ REF_FREQ_TO_PIX_FREQ, dlg_attr->ref_freq_to_pix_freq);
+
+ /* DLG - Per luma/chroma */
+ REG_SET(VBLANK_PARAMETERS_1, 0,
+ REFCYC_PER_PTE_GROUP_VBLANK_L, dlg_attr->refcyc_per_pte_group_vblank_l);
+
+ if (REG(NOM_PARAMETERS_0))
+ REG_SET(NOM_PARAMETERS_0, 0,
+ DST_Y_PER_PTE_ROW_NOM_L, dlg_attr->dst_y_per_pte_row_nom_l);
+
+ if (REG(NOM_PARAMETERS_1))
+ REG_SET(NOM_PARAMETERS_1, 0,
+ REFCYC_PER_PTE_GROUP_NOM_L, dlg_attr->refcyc_per_pte_group_nom_l);
+
+ REG_SET(NOM_PARAMETERS_4, 0,
+ DST_Y_PER_META_ROW_NOM_L, dlg_attr->dst_y_per_meta_row_nom_l);
+
+ REG_SET(NOM_PARAMETERS_5, 0,
+ REFCYC_PER_META_CHUNK_NOM_L, dlg_attr->refcyc_per_meta_chunk_nom_l);
+
+ REG_SET_2(PER_LINE_DELIVERY, 0,
+ REFCYC_PER_LINE_DELIVERY_L, dlg_attr->refcyc_per_line_delivery_l,
+ REFCYC_PER_LINE_DELIVERY_C, dlg_attr->refcyc_per_line_delivery_c);
+
+ REG_SET(VBLANK_PARAMETERS_2, 0,
+ REFCYC_PER_PTE_GROUP_VBLANK_C, dlg_attr->refcyc_per_pte_group_vblank_c);
+
+ if (REG(NOM_PARAMETERS_2))
+ REG_SET(NOM_PARAMETERS_2, 0,
+ DST_Y_PER_PTE_ROW_NOM_C, dlg_attr->dst_y_per_pte_row_nom_c);
+
+ if (REG(NOM_PARAMETERS_3))
+ REG_SET(NOM_PARAMETERS_3, 0,
+ REFCYC_PER_PTE_GROUP_NOM_C, dlg_attr->refcyc_per_pte_group_nom_c);
+
+ REG_SET(NOM_PARAMETERS_6, 0,
+ DST_Y_PER_META_ROW_NOM_C, dlg_attr->dst_y_per_meta_row_nom_c);
+
+ REG_SET(NOM_PARAMETERS_7, 0,
+ REFCYC_PER_META_CHUNK_NOM_C, dlg_attr->refcyc_per_meta_chunk_nom_c);
+
+ /* TTU - per hubp */
+ REG_SET_2(DCN_TTU_QOS_WM, 0,
+ QoS_LEVEL_LOW_WM, ttu_attr->qos_level_low_wm,
+ QoS_LEVEL_HIGH_WM, ttu_attr->qos_level_high_wm);
+
+ /* TTU - per luma/chroma */
+ /* Assumed surf0 is luma and 1 is chroma */
+
+ REG_SET_3(DCN_SURF0_TTU_CNTL0, 0,
+ REFCYC_PER_REQ_DELIVERY, ttu_attr->refcyc_per_req_delivery_l,
+ QoS_LEVEL_FIXED, ttu_attr->qos_level_fixed_l,
+ QoS_RAMP_DISABLE, ttu_attr->qos_ramp_disable_l);
+
+ REG_SET_3(DCN_SURF1_TTU_CNTL0, 0,
+ REFCYC_PER_REQ_DELIVERY, ttu_attr->refcyc_per_req_delivery_c,
+ QoS_LEVEL_FIXED, ttu_attr->qos_level_fixed_c,
+ QoS_RAMP_DISABLE, ttu_attr->qos_ramp_disable_c);
+
+ REG_SET_3(DCN_CUR0_TTU_CNTL0, 0,
+ REFCYC_PER_REQ_DELIVERY, ttu_attr->refcyc_per_req_delivery_cur0,
+ QoS_LEVEL_FIXED, ttu_attr->qos_level_fixed_cur0,
+ QoS_RAMP_DISABLE, ttu_attr->qos_ramp_disable_cur0);
REG_SET(FLIP_PARAMETERS_1, 0,
REFCYC_PER_PTE_GROUP_FLIP_L, dlg_attr->refcyc_per_pte_group_flip_l);
@@ -176,14 +181,49 @@ void hubp2_vready_at_or_After_vsync(struct hubp *hubp,
else
Set HUBP_VREADY_AT_OR_AFTER_VSYNC = 0
*/
- if ((pipe_dest->vstartup_start - (pipe_dest->vready_offset+pipe_dest->vupdate_width
- + pipe_dest->vupdate_offset) / pipe_dest->htotal) <= pipe_dest->vblank_end) {
- value = 1;
- } else
- value = 0;
+ if (pipe_dest->htotal != 0) {
+ if ((pipe_dest->vstartup_start - (pipe_dest->vready_offset+pipe_dest->vupdate_width
+ + pipe_dest->vupdate_offset) / pipe_dest->htotal) <= pipe_dest->vblank_end) {
+ value = 1;
+ } else
+ value = 0;
+ }
+
REG_UPDATE(DCHUBP_CNTL, HUBP_VREADY_AT_OR_AFTER_VSYNC, value);
}
+static void hubp2_program_requestor(struct hubp *hubp,
+ struct _vcs_dpi_display_rq_regs_st *rq_regs)
+{
+ struct dcn20_hubp *hubp2 = TO_DCN20_HUBP(hubp);
+
+ REG_UPDATE(HUBPRET_CONTROL,
+ DET_BUF_PLANE1_BASE_ADDRESS, rq_regs->plane1_base_address);
+ REG_SET_4(DCN_EXPANSION_MODE, 0,
+ DRQ_EXPANSION_MODE, rq_regs->drq_expansion_mode,
+ PRQ_EXPANSION_MODE, rq_regs->prq_expansion_mode,
+ MRQ_EXPANSION_MODE, rq_regs->mrq_expansion_mode,
+ CRQ_EXPANSION_MODE, rq_regs->crq_expansion_mode);
+ REG_SET_8(DCHUBP_REQ_SIZE_CONFIG, 0,
+ CHUNK_SIZE, rq_regs->rq_regs_l.chunk_size,
+ MIN_CHUNK_SIZE, rq_regs->rq_regs_l.min_chunk_size,
+ META_CHUNK_SIZE, rq_regs->rq_regs_l.meta_chunk_size,
+ MIN_META_CHUNK_SIZE, rq_regs->rq_regs_l.min_meta_chunk_size,
+ DPTE_GROUP_SIZE, rq_regs->rq_regs_l.dpte_group_size,
+ MPTE_GROUP_SIZE, rq_regs->rq_regs_l.mpte_group_size,
+ SWATH_HEIGHT, rq_regs->rq_regs_l.swath_height,
+ PTE_ROW_HEIGHT_LINEAR, rq_regs->rq_regs_l.pte_row_height_linear);
+ REG_SET_8(DCHUBP_REQ_SIZE_CONFIG_C, 0,
+ CHUNK_SIZE_C, rq_regs->rq_regs_c.chunk_size,
+ MIN_CHUNK_SIZE_C, rq_regs->rq_regs_c.min_chunk_size,
+ META_CHUNK_SIZE_C, rq_regs->rq_regs_c.meta_chunk_size,
+ MIN_META_CHUNK_SIZE_C, rq_regs->rq_regs_c.min_meta_chunk_size,
+ DPTE_GROUP_SIZE_C, rq_regs->rq_regs_c.dpte_group_size,
+ MPTE_GROUP_SIZE_C, rq_regs->rq_regs_c.mpte_group_size,
+ SWATH_HEIGHT_C, rq_regs->rq_regs_c.swath_height,
+ PTE_ROW_HEIGHT_LINEAR_C, rq_regs->rq_regs_c.pte_row_height_linear);
+}
+
static void hubp2_setup(
struct hubp *hubp,
struct _vcs_dpi_display_dlg_regs_st *dlg_attr,
@@ -196,7 +236,7 @@ static void hubp2_setup(
*/
hubp2_vready_at_or_After_vsync(hubp, pipe_dest);
- hubp1_program_requestor(hubp, rq_regs);
+ hubp2_program_requestor(hubp, rq_regs);
hubp2_program_deadline(hubp, dlg_attr, ttu_attr);
}
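
The vready-at-or-after-vsync decision above now guards against a zero htotal before dividing; the vready/vupdate offsets are scaled into lines by that division and compared against the blank interval. A standalone restatement of the decision, keeping the driver's behavior of leaving the flag untouched when htotal is 0:

    #include <stdbool.h>

    /* Mirrors the condition programmed into HUBP_VREADY_AT_OR_AFTER_VSYNC. */
    static bool vready_at_or_after_vsync(int vstartup_start, int vready_offset,
                                         int vupdate_width, int vupdate_offset,
                                         int htotal, int vblank_end)
    {
            if (htotal == 0)
                    return false; /* driver keeps the previous register value */

            return (vstartup_start -
                    (vready_offset + vupdate_width + vupdate_offset) / htotal)
                    <= vblank_end;
    }
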
@@ -283,11 +323,219 @@ static void hubp2_program_tiling(
PIPE_ALIGNED, 0);
}
+void hubp2_program_size(
+ struct hubp *hubp,
+ enum surface_pixel_format format,
+ const struct plane_size *plane_size,
+ struct dc_plane_dcc_param *dcc)
+{
+ struct dcn20_hubp *hubp2 = TO_DCN20_HUBP(hubp);
+ uint32_t pitch, meta_pitch, pitch_c, meta_pitch_c;
+ bool use_pitch_c = false;
+
+ /* Program data and meta surface pitch (calculation from addrlib)
+ * 444 or 420 luma
+ */
+ use_pitch_c = format >= SURFACE_PIXEL_FORMAT_VIDEO_BEGIN
+ && format < SURFACE_PIXEL_FORMAT_SUBSAMPLE_END;
+ use_pitch_c = use_pitch_c
+ || (format == SURFACE_PIXEL_FORMAT_GRPH_RGBE_ALPHA);
+ if (use_pitch_c) {
+ ASSERT(plane_size->chroma_pitch != 0);
+ /* Chroma pitch zero can cause system hang! */
+
+ pitch = plane_size->surface_pitch - 1;
+ meta_pitch = dcc->meta_pitch - 1;
+ pitch_c = plane_size->chroma_pitch - 1;
+ meta_pitch_c = dcc->meta_pitch_c - 1;
+ } else {
+ pitch = plane_size->surface_pitch - 1;
+ meta_pitch = dcc->meta_pitch - 1;
+ pitch_c = 0;
+ meta_pitch_c = 0;
+ }
+
+ if (!dcc->enable) {
+ meta_pitch = 0;
+ meta_pitch_c = 0;
+ }
+
+ REG_UPDATE_2(DCSURF_SURFACE_PITCH,
+ PITCH, pitch, META_PITCH, meta_pitch);
+
+ use_pitch_c = format >= SURFACE_PIXEL_FORMAT_VIDEO_BEGIN;
+ use_pitch_c = use_pitch_c
+ || (format == SURFACE_PIXEL_FORMAT_GRPH_RGBE_ALPHA);
+ if (use_pitch_c)
+ REG_UPDATE_2(DCSURF_SURFACE_PITCH_C,
+ PITCH_C, pitch_c, META_PITCH_C, meta_pitch_c);
+}
+
+void hubp2_program_rotation(
+ struct hubp *hubp,
+ enum dc_rotation_angle rotation,
+ bool horizontal_mirror)
+{
+ struct dcn20_hubp *hubp2 = TO_DCN20_HUBP(hubp);
+ uint32_t mirror;
+
+
+ if (horizontal_mirror)
+ mirror = 1;
+ else
+ mirror = 0;
+
+ /* Program rotation angle and horz mirror - no mirror */
+ if (rotation == ROTATION_ANGLE_0)
+ REG_UPDATE_2(DCSURF_SURFACE_CONFIG,
+ ROTATION_ANGLE, 0,
+ H_MIRROR_EN, mirror);
+ else if (rotation == ROTATION_ANGLE_90)
+ REG_UPDATE_2(DCSURF_SURFACE_CONFIG,
+ ROTATION_ANGLE, 1,
+ H_MIRROR_EN, mirror);
+ else if (rotation == ROTATION_ANGLE_180)
+ REG_UPDATE_2(DCSURF_SURFACE_CONFIG,
+ ROTATION_ANGLE, 2,
+ H_MIRROR_EN, mirror);
+ else if (rotation == ROTATION_ANGLE_270)
+ REG_UPDATE_2(DCSURF_SURFACE_CONFIG,
+ ROTATION_ANGLE, 3,
+ H_MIRROR_EN, mirror);
+}
+
+void hubp2_dcc_control(struct hubp *hubp, bool enable,
+ enum hubp_ind_block_size independent_64b_blks)
+{
+ uint32_t dcc_en = enable ? 1 : 0;
+ uint32_t dcc_ind_64b_blk = independent_64b_blks ? 1 : 0;
+ struct dcn20_hubp *hubp2 = TO_DCN20_HUBP(hubp);
+
+ REG_UPDATE_4(DCSURF_SURFACE_CONTROL,
+ PRIMARY_SURFACE_DCC_EN, dcc_en,
+ PRIMARY_SURFACE_DCC_IND_64B_BLK, dcc_ind_64b_blk,
+ SECONDARY_SURFACE_DCC_EN, dcc_en,
+ SECONDARY_SURFACE_DCC_IND_64B_BLK, dcc_ind_64b_blk);
+}
+
+void hubp2_program_pixel_format(
+ struct hubp *hubp,
+ enum surface_pixel_format format)
+{
+ struct dcn20_hubp *hubp2 = TO_DCN20_HUBP(hubp);
+ uint32_t red_bar = 3;
+ uint32_t blue_bar = 2;
+
+ /* swap for ABGR format */
+ if (format == SURFACE_PIXEL_FORMAT_GRPH_ABGR8888
+ || format == SURFACE_PIXEL_FORMAT_GRPH_ABGR2101010
+ || format == SURFACE_PIXEL_FORMAT_GRPH_ABGR2101010_XR_BIAS
+ || format == SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616
+ || format == SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616F) {
+ red_bar = 2;
+ blue_bar = 3;
+ }
+
+ REG_UPDATE_2(HUBPRET_CONTROL,
+ CROSSBAR_SRC_CB_B, blue_bar,
+ CROSSBAR_SRC_CR_R, red_bar);
+
+ /* Mapping is same as ipp programming (cnvc) */
+
+ switch (format) {
+ case SURFACE_PIXEL_FORMAT_GRPH_ARGB1555:
+ REG_UPDATE(DCSURF_SURFACE_CONFIG,
+ SURFACE_PIXEL_FORMAT, 1);
+ break;
+ case SURFACE_PIXEL_FORMAT_GRPH_RGB565:
+ REG_UPDATE(DCSURF_SURFACE_CONFIG,
+ SURFACE_PIXEL_FORMAT, 3);
+ break;
+ case SURFACE_PIXEL_FORMAT_GRPH_ARGB8888:
+ case SURFACE_PIXEL_FORMAT_GRPH_ABGR8888:
+ REG_UPDATE(DCSURF_SURFACE_CONFIG,
+ SURFACE_PIXEL_FORMAT, 8);
+ break;
+ case SURFACE_PIXEL_FORMAT_GRPH_ARGB2101010:
+ case SURFACE_PIXEL_FORMAT_GRPH_ABGR2101010:
+ case SURFACE_PIXEL_FORMAT_GRPH_ABGR2101010_XR_BIAS:
+ REG_UPDATE(DCSURF_SURFACE_CONFIG,
+ SURFACE_PIXEL_FORMAT, 10);
+ break;
+ case SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616:
+ case SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616: /*we use crossbar already*/
+ REG_UPDATE(DCSURF_SURFACE_CONFIG,
+ SURFACE_PIXEL_FORMAT, 26); /* ARGB16161616_UNORM */
+ break;
+ case SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616F:
+ case SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616F:/*we use crossbar already*/
+ REG_UPDATE(DCSURF_SURFACE_CONFIG,
+ SURFACE_PIXEL_FORMAT, 24);
+ break;
+
+ case SURFACE_PIXEL_FORMAT_VIDEO_420_YCbCr:
+ REG_UPDATE(DCSURF_SURFACE_CONFIG,
+ SURFACE_PIXEL_FORMAT, 65);
+ break;
+ case SURFACE_PIXEL_FORMAT_VIDEO_420_YCrCb:
+ REG_UPDATE(DCSURF_SURFACE_CONFIG,
+ SURFACE_PIXEL_FORMAT, 64);
+ break;
+ case SURFACE_PIXEL_FORMAT_VIDEO_420_10bpc_YCbCr:
+ REG_UPDATE(DCSURF_SURFACE_CONFIG,
+ SURFACE_PIXEL_FORMAT, 67);
+ break;
+ case SURFACE_PIXEL_FORMAT_VIDEO_420_10bpc_YCrCb:
+ REG_UPDATE(DCSURF_SURFACE_CONFIG,
+ SURFACE_PIXEL_FORMAT, 66);
+ break;
+ case SURFACE_PIXEL_FORMAT_VIDEO_AYCrCb8888:
+ REG_UPDATE(DCSURF_SURFACE_CONFIG,
+ SURFACE_PIXEL_FORMAT, 12);
+ break;
+ case SURFACE_PIXEL_FORMAT_GRPH_RGB111110_FIX:
+ REG_UPDATE(DCSURF_SURFACE_CONFIG,
+ SURFACE_PIXEL_FORMAT, 112);
+ break;
+ case SURFACE_PIXEL_FORMAT_GRPH_BGR101111_FIX:
+ REG_UPDATE(DCSURF_SURFACE_CONFIG,
+ SURFACE_PIXEL_FORMAT, 113);
+ break;
+ case SURFACE_PIXEL_FORMAT_VIDEO_ACrYCb2101010:
+ REG_UPDATE(DCSURF_SURFACE_CONFIG,
+ SURFACE_PIXEL_FORMAT, 114);
+ break;
+ case SURFACE_PIXEL_FORMAT_GRPH_RGB111110_FLOAT:
+ REG_UPDATE(DCSURF_SURFACE_CONFIG,
+ SURFACE_PIXEL_FORMAT, 118);
+ break;
+ case SURFACE_PIXEL_FORMAT_GRPH_BGR101111_FLOAT:
+ REG_UPDATE(DCSURF_SURFACE_CONFIG,
+ SURFACE_PIXEL_FORMAT, 119);
+ break;
+ case SURFACE_PIXEL_FORMAT_GRPH_RGBE:
+ REG_UPDATE_2(DCSURF_SURFACE_CONFIG,
+ SURFACE_PIXEL_FORMAT, 116,
+ ALPHA_PLANE_EN, 0);
+ break;
+ case SURFACE_PIXEL_FORMAT_GRPH_RGBE_ALPHA:
+ REG_UPDATE_2(DCSURF_SURFACE_CONFIG,
+ SURFACE_PIXEL_FORMAT, 116,
+ ALPHA_PLANE_EN, 1);
+ break;
+ default:
+ BREAK_TO_DEBUGGER();
+ break;
+ }
+
+ /* don't see the need to program the xbar in DCN 1.0 */
+}
+
void hubp2_program_surface_config(
struct hubp *hubp,
enum surface_pixel_format format,
union dc_tiling_info *tiling_info,
- union plane_size *plane_size,
+ struct plane_size *plane_size,
enum dc_rotation_angle rotation,
struct dc_plane_dcc_param *dcc,
bool horizontal_mirror,
@@ -295,11 +543,11 @@ void hubp2_program_surface_config(
{
struct dcn20_hubp *hubp2 = TO_DCN20_HUBP(hubp);
- hubp1_dcc_control(hubp, dcc->enable, dcc->grph.independent_64b_blks);
+ hubp2_dcc_control(hubp, dcc->enable, dcc->independent_64b_blks);
hubp2_program_tiling(hubp2, tiling_info, format);
- hubp1_program_size(hubp, format, plane_size, dcc);
- hubp1_program_rotation(hubp, rotation, horizontal_mirror);
- hubp1_program_pixel_format(hubp, format);
+ hubp2_program_size(hubp, format, plane_size, dcc);
+ hubp2_program_rotation(hubp, rotation, horizontal_mirror);
+ hubp2_program_pixel_format(hubp, format);
}
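
hubp2_program_size() above follows the usual convention of writing pitches minus one, programs the chroma pitch only for video (and RGBE_ALPHA) formats, and zeroes the meta pitches when DCC is disabled; the ASSERT exists because a zero chroma pitch on a chroma-bearing format can hang the system. A condensed sketch of that derivation (struct and helper names are illustrative):

    #include <stdbool.h>
    #include <stdint.h>

    struct pitch_fields {
            uint32_t pitch, meta_pitch;      /* register values are pitch - 1 */
            uint32_t pitch_c, meta_pitch_c;  /* only used when has_chroma */
    };

    static struct pitch_fields derive_pitch_fields(uint32_t surface_pitch,
                                                   uint32_t chroma_pitch,
                                                   uint32_t meta_pitch,
                                                   uint32_t meta_pitch_c,
                                                   bool has_chroma,
                                                   bool dcc_enable)
    {
            struct pitch_fields f = {
                    .pitch        = surface_pitch - 1,
                    .meta_pitch   = dcc_enable ? meta_pitch - 1 : 0,
                    .pitch_c      = has_chroma ? chroma_pitch - 1 : 0,
                    .meta_pitch_c = (has_chroma && dcc_enable) ? meta_pitch_c - 1 : 0,
            };
            return f;
    }
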
enum cursor_lines_per_chunk hubp2_get_lines_per_chunk(
@@ -459,17 +707,6 @@ bool hubp2_program_surface_flip_and_addr(
REG_UPDATE(VMID_SETTINGS_0,
VMID, address->vmid);
- if (address->type == PLN_ADDR_TYPE_GRPH_STEREO) {
- REG_UPDATE(DCSURF_FLIP_CONTROL, SURFACE_FLIP_MODE_FOR_STEREOSYNC, 0x1);
- REG_UPDATE(DCSURF_FLIP_CONTROL, SURFACE_FLIP_IN_STEREOSYNC, 0x1);
-
- } else {
- // turn off stereo if not in stereo
- REG_UPDATE(DCSURF_FLIP_CONTROL, SURFACE_FLIP_MODE_FOR_STEREOSYNC, 0x0);
- REG_UPDATE(DCSURF_FLIP_CONTROL, SURFACE_FLIP_IN_STEREOSYNC, 0x0);
- }
-
-
/* HW automatically latch rest of address register on write to
* DCSURF_PRIMARY_SURFACE_ADDRESS if SURFACE_UPDATE_LOCK is not used
@@ -652,30 +889,733 @@ void hubp2_set_flip_control_surface_gsl(struct hubp *hubp, bool enable)
REG_UPDATE(DCSURF_FLIP_CONTROL2, SURFACE_GSL_ENABLE, enable ? 1 : 0);
}
+bool hubp2_is_flip_pending(struct hubp *hubp)
+{
+ uint32_t flip_pending = 0;
+ struct dcn20_hubp *hubp2 = TO_DCN20_HUBP(hubp);
+ struct dc_plane_address earliest_inuse_address;
+
+ if (hubp && hubp->power_gated)
+ return false;
+
+ REG_GET(DCSURF_FLIP_CONTROL,
+ SURFACE_FLIP_PENDING, &flip_pending);
+
+ REG_GET(DCSURF_SURFACE_EARLIEST_INUSE,
+ SURFACE_EARLIEST_INUSE_ADDRESS, &earliest_inuse_address.grph.addr.low_part);
+
+ REG_GET(DCSURF_SURFACE_EARLIEST_INUSE_HIGH,
+ SURFACE_EARLIEST_INUSE_ADDRESS_HIGH, &earliest_inuse_address.grph.addr.high_part);
+
+ if (flip_pending)
+ return true;
+
+ if (earliest_inuse_address.grph.addr.quad_part != hubp->request_address.grph.addr.quad_part)
+ return true;
+
+ return false;
+}
+
+void hubp2_set_blank(struct hubp *hubp, bool blank)
+{
+ hubp2_set_blank_regs(hubp, blank);
+
+ if (blank) {
+ hubp->mpcc_id = 0xf;
+ hubp->opp_id = OPP_ID_INVALID;
+ }
+}
+
+void hubp2_set_blank_regs(struct hubp *hubp, bool blank)
+{
+ struct dcn20_hubp *hubp2 = TO_DCN20_HUBP(hubp);
+ uint32_t blank_en = blank ? 1 : 0;
+
+ if (blank) {
+ uint32_t reg_val = REG_READ(DCHUBP_CNTL);
+
+ if (reg_val) {
+ /* init sequence workaround: in case HUBP is
+ * power gated, this wait would timeout.
+ *
+ * if reg_val reads back as 0 it means HUBP is
+ * power gated, so the wait is skipped
+ */
+ REG_WAIT(DCHUBP_CNTL,
+ HUBP_NO_OUTSTANDING_REQ, 1,
+ 1, 100000);
+ }
+ }
+
+ REG_UPDATE_2(DCHUBP_CNTL,
+ HUBP_BLANK_EN, blank_en,
+ HUBP_TTU_DISABLE, 0);
+}
+
+void hubp2_cursor_set_position(
+ struct hubp *hubp,
+ const struct dc_cursor_position *pos,
+ const struct dc_cursor_mi_param *param)
+{
+ struct dcn20_hubp *hubp2 = TO_DCN20_HUBP(hubp);
+ int src_x_offset = pos->x - pos->x_hotspot - param->viewport.x;
+ int src_y_offset = pos->y - pos->y_hotspot - param->viewport.y;
+ int x_hotspot = pos->x_hotspot;
+ int y_hotspot = pos->y_hotspot;
+ int cursor_height = (int)hubp->curs_attr.height;
+ int cursor_width = (int)hubp->curs_attr.width;
+ uint32_t dst_x_offset;
+ uint32_t cur_en = pos->enable ? 1 : 0;
+
+ /*
+ * Guard against cursor_set_position() being called with invalid
+ * attributes
+ *
+ * TODO: Look at combining cursor_set_position() and
+ * cursor_set_attributes() into cursor_update()
+ */
+ if (hubp->curs_attr.address.quad_part == 0)
+ return;
+
+ // Rotated cursor width/height and hotspot tweaks for offset calculation
+ if (param->rotation == ROTATION_ANGLE_90 || param->rotation == ROTATION_ANGLE_270) {
+ swap(cursor_height, cursor_width);
+ if (param->rotation == ROTATION_ANGLE_90) {
+ src_x_offset = pos->x - pos->y_hotspot - param->viewport.x;
+ src_y_offset = pos->y - pos->x_hotspot - param->viewport.y;
+ }
+ } else if (param->rotation == ROTATION_ANGLE_180) {
+ src_x_offset = pos->x - param->viewport.x;
+ src_y_offset = pos->y - param->viewport.y;
+ }
+
+ if (param->mirror) {
+ x_hotspot = param->viewport.width - x_hotspot;
+ src_x_offset = param->viewport.x + param->viewport.width - src_x_offset;
+ }
+
+ dst_x_offset = (src_x_offset >= 0) ? src_x_offset : 0;
+ dst_x_offset *= param->ref_clk_khz;
+ dst_x_offset /= param->pixel_clk_khz;
+
+ ASSERT(param->h_scale_ratio.value);
+
+ if (param->h_scale_ratio.value)
+ dst_x_offset = dc_fixpt_floor(dc_fixpt_div(
+ dc_fixpt_from_int(dst_x_offset),
+ param->h_scale_ratio));
+
+ if (src_x_offset >= (int)param->viewport.width)
+ cur_en = 0; /* not visible beyond right edge*/
+
+ if (src_x_offset + cursor_width <= 0)
+ cur_en = 0; /* not visible beyond left edge*/
+
+ if (src_y_offset >= (int)param->viewport.height)
+ cur_en = 0; /* not visible beyond bottom edge*/
+
+ if (src_y_offset + cursor_height <= 0)
+ cur_en = 0; /* not visible beyond top edge*/
+
+ if (cur_en && REG_READ(CURSOR_SURFACE_ADDRESS) == 0)
+ hubp->funcs->set_cursor_attributes(hubp, &hubp->curs_attr);
+
+ REG_UPDATE(CURSOR_CONTROL,
+ CURSOR_ENABLE, cur_en);
+
+ REG_SET_2(CURSOR_POSITION, 0,
+ CURSOR_X_POSITION, pos->x,
+ CURSOR_Y_POSITION, pos->y);
+
+ REG_SET_2(CURSOR_HOT_SPOT, 0,
+ CURSOR_HOT_SPOT_X, x_hotspot,
+ CURSOR_HOT_SPOT_Y, y_hotspot);
+
+ REG_SET(CURSOR_DST_OFFSET, 0,
+ CURSOR_DST_X_OFFSET, dst_x_offset);
+ /* TODO Handle surface pixel formats other than 4:4:4 */
+}
+
+void hubp2_clk_cntl(struct hubp *hubp, bool enable)
+{
+ struct dcn20_hubp *hubp2 = TO_DCN20_HUBP(hubp);
+ uint32_t clk_enable = enable ? 1 : 0;
+
+ REG_UPDATE(HUBP_CLK_CNTL, HUBP_CLOCK_ENABLE, clk_enable);
+}
+
+void hubp2_vtg_sel(struct hubp *hubp, uint32_t otg_inst)
+{
+ struct dcn20_hubp *hubp2 = TO_DCN20_HUBP(hubp);
+
+ REG_UPDATE(DCHUBP_CNTL, HUBP_VTG_SEL, otg_inst);
+}
+
+void hubp2_clear_underflow(struct hubp *hubp)
+{
+ struct dcn20_hubp *hubp2 = TO_DCN20_HUBP(hubp);
+
+ REG_UPDATE(DCHUBP_CNTL, HUBP_UNDERFLOW_CLEAR, 1);
+}
+
+void hubp2_read_state_common(struct hubp *hubp)
+{
+ struct dcn20_hubp *hubp2 = TO_DCN20_HUBP(hubp);
+ struct dcn_hubp_state *s = &hubp2->state;
+ struct _vcs_dpi_display_dlg_regs_st *dlg_attr = &s->dlg_attr;
+ struct _vcs_dpi_display_ttu_regs_st *ttu_attr = &s->ttu_attr;
+ struct _vcs_dpi_display_rq_regs_st *rq_regs = &s->rq_regs;
+
+ /* Requester */
+ REG_GET(HUBPRET_CONTROL,
+ DET_BUF_PLANE1_BASE_ADDRESS, &rq_regs->plane1_base_address);
+ REG_GET_4(DCN_EXPANSION_MODE,
+ DRQ_EXPANSION_MODE, &rq_regs->drq_expansion_mode,
+ PRQ_EXPANSION_MODE, &rq_regs->prq_expansion_mode,
+ MRQ_EXPANSION_MODE, &rq_regs->mrq_expansion_mode,
+ CRQ_EXPANSION_MODE, &rq_regs->crq_expansion_mode);
+
+ REG_GET(DCN_VM_SYSTEM_APERTURE_HIGH_ADDR,
+ MC_VM_SYSTEM_APERTURE_HIGH_ADDR, &rq_regs->aperture_high_addr);
+
+ REG_GET(DCN_VM_SYSTEM_APERTURE_LOW_ADDR,
+ MC_VM_SYSTEM_APERTURE_LOW_ADDR, &rq_regs->aperture_low_addr);
+
+ /* DLG - Per hubp */
+ REG_GET_2(BLANK_OFFSET_0,
+ REFCYC_H_BLANK_END, &dlg_attr->refcyc_h_blank_end,
+ DLG_V_BLANK_END, &dlg_attr->dlg_vblank_end);
+
+ REG_GET(BLANK_OFFSET_1,
+ MIN_DST_Y_NEXT_START, &dlg_attr->min_dst_y_next_start);
+
+ REG_GET(DST_DIMENSIONS,
+ REFCYC_PER_HTOTAL, &dlg_attr->refcyc_per_htotal);
+
+ REG_GET_2(DST_AFTER_SCALER,
+ REFCYC_X_AFTER_SCALER, &dlg_attr->refcyc_x_after_scaler,
+ DST_Y_AFTER_SCALER, &dlg_attr->dst_y_after_scaler);
+
+ if (REG(PREFETCH_SETTINS))
+ REG_GET_2(PREFETCH_SETTINS,
+ DST_Y_PREFETCH, &dlg_attr->dst_y_prefetch,
+ VRATIO_PREFETCH, &dlg_attr->vratio_prefetch);
+ else
+ REG_GET_2(PREFETCH_SETTINGS,
+ DST_Y_PREFETCH, &dlg_attr->dst_y_prefetch,
+ VRATIO_PREFETCH, &dlg_attr->vratio_prefetch);
+
+ REG_GET_2(VBLANK_PARAMETERS_0,
+ DST_Y_PER_VM_VBLANK, &dlg_attr->dst_y_per_vm_vblank,
+ DST_Y_PER_ROW_VBLANK, &dlg_attr->dst_y_per_row_vblank);
+
+ REG_GET(REF_FREQ_TO_PIX_FREQ,
+ REF_FREQ_TO_PIX_FREQ, &dlg_attr->ref_freq_to_pix_freq);
+
+ /* DLG - Per luma/chroma */
+ REG_GET(VBLANK_PARAMETERS_1,
+ REFCYC_PER_PTE_GROUP_VBLANK_L, &dlg_attr->refcyc_per_pte_group_vblank_l);
+
+ REG_GET(VBLANK_PARAMETERS_3,
+ REFCYC_PER_META_CHUNK_VBLANK_L, &dlg_attr->refcyc_per_meta_chunk_vblank_l);
+
+ if (REG(NOM_PARAMETERS_0))
+ REG_GET(NOM_PARAMETERS_0,
+ DST_Y_PER_PTE_ROW_NOM_L, &dlg_attr->dst_y_per_pte_row_nom_l);
+
+ if (REG(NOM_PARAMETERS_1))
+ REG_GET(NOM_PARAMETERS_1,
+ REFCYC_PER_PTE_GROUP_NOM_L, &dlg_attr->refcyc_per_pte_group_nom_l);
+
+ REG_GET(NOM_PARAMETERS_4,
+ DST_Y_PER_META_ROW_NOM_L, &dlg_attr->dst_y_per_meta_row_nom_l);
+
+ REG_GET(NOM_PARAMETERS_5,
+ REFCYC_PER_META_CHUNK_NOM_L, &dlg_attr->refcyc_per_meta_chunk_nom_l);
+
+ REG_GET_2(PER_LINE_DELIVERY_PRE,
+ REFCYC_PER_LINE_DELIVERY_PRE_L, &dlg_attr->refcyc_per_line_delivery_pre_l,
+ REFCYC_PER_LINE_DELIVERY_PRE_C, &dlg_attr->refcyc_per_line_delivery_pre_c);
+
+ REG_GET_2(PER_LINE_DELIVERY,
+ REFCYC_PER_LINE_DELIVERY_L, &dlg_attr->refcyc_per_line_delivery_l,
+ REFCYC_PER_LINE_DELIVERY_C, &dlg_attr->refcyc_per_line_delivery_c);
+
+ if (REG(PREFETCH_SETTINS_C))
+ REG_GET(PREFETCH_SETTINS_C,
+ VRATIO_PREFETCH_C, &dlg_attr->vratio_prefetch_c);
+ else
+ REG_GET(PREFETCH_SETTINGS_C,
+ VRATIO_PREFETCH_C, &dlg_attr->vratio_prefetch_c);
+
+ REG_GET(VBLANK_PARAMETERS_2,
+ REFCYC_PER_PTE_GROUP_VBLANK_C, &dlg_attr->refcyc_per_pte_group_vblank_c);
+
+ REG_GET(VBLANK_PARAMETERS_4,
+ REFCYC_PER_META_CHUNK_VBLANK_C, &dlg_attr->refcyc_per_meta_chunk_vblank_c);
+
+ if (REG(NOM_PARAMETERS_2))
+ REG_GET(NOM_PARAMETERS_2,
+ DST_Y_PER_PTE_ROW_NOM_C, &dlg_attr->dst_y_per_pte_row_nom_c);
+
+ if (REG(NOM_PARAMETERS_3))
+ REG_GET(NOM_PARAMETERS_3,
+ REFCYC_PER_PTE_GROUP_NOM_C, &dlg_attr->refcyc_per_pte_group_nom_c);
+
+ REG_GET(NOM_PARAMETERS_6,
+ DST_Y_PER_META_ROW_NOM_C, &dlg_attr->dst_y_per_meta_row_nom_c);
+
+ REG_GET(NOM_PARAMETERS_7,
+ REFCYC_PER_META_CHUNK_NOM_C, &dlg_attr->refcyc_per_meta_chunk_nom_c);
+
+ /* TTU - per hubp */
+ REG_GET_2(DCN_TTU_QOS_WM,
+ QoS_LEVEL_LOW_WM, &ttu_attr->qos_level_low_wm,
+ QoS_LEVEL_HIGH_WM, &ttu_attr->qos_level_high_wm);
+
+ REG_GET_2(DCN_GLOBAL_TTU_CNTL,
+ MIN_TTU_VBLANK, &ttu_attr->min_ttu_vblank,
+ QoS_LEVEL_FLIP, &ttu_attr->qos_level_flip);
+
+ /* TTU - per luma/chroma */
+ /* Assumed surf0 is luma and 1 is chroma */
+
+ REG_GET_3(DCN_SURF0_TTU_CNTL0,
+ REFCYC_PER_REQ_DELIVERY, &ttu_attr->refcyc_per_req_delivery_l,
+ QoS_LEVEL_FIXED, &ttu_attr->qos_level_fixed_l,
+ QoS_RAMP_DISABLE, &ttu_attr->qos_ramp_disable_l);
+
+ REG_GET(DCN_SURF0_TTU_CNTL1,
+ REFCYC_PER_REQ_DELIVERY_PRE,
+ &ttu_attr->refcyc_per_req_delivery_pre_l);
+
+ REG_GET_3(DCN_SURF1_TTU_CNTL0,
+ REFCYC_PER_REQ_DELIVERY, &ttu_attr->refcyc_per_req_delivery_c,
+ QoS_LEVEL_FIXED, &ttu_attr->qos_level_fixed_c,
+ QoS_RAMP_DISABLE, &ttu_attr->qos_ramp_disable_c);
+
+ REG_GET(DCN_SURF1_TTU_CNTL1,
+ REFCYC_PER_REQ_DELIVERY_PRE,
+ &ttu_attr->refcyc_per_req_delivery_pre_c);
+
+ /* Rest of hubp */
+ REG_GET(DCSURF_SURFACE_CONFIG,
+ SURFACE_PIXEL_FORMAT, &s->pixel_format);
+
+ REG_GET(DCSURF_SURFACE_EARLIEST_INUSE_HIGH,
+ SURFACE_EARLIEST_INUSE_ADDRESS_HIGH, &s->inuse_addr_hi);
+
+ REG_GET(DCSURF_SURFACE_EARLIEST_INUSE,
+ SURFACE_EARLIEST_INUSE_ADDRESS, &s->inuse_addr_lo);
+
+ REG_GET_2(DCSURF_PRI_VIEWPORT_DIMENSION,
+ PRI_VIEWPORT_WIDTH, &s->viewport_width,
+ PRI_VIEWPORT_HEIGHT, &s->viewport_height);
+
+ REG_GET_2(DCSURF_SURFACE_CONFIG,
+ ROTATION_ANGLE, &s->rotation_angle,
+ H_MIRROR_EN, &s->h_mirror_en);
+
+ REG_GET(DCSURF_TILING_CONFIG,
+ SW_MODE, &s->sw_mode);
+
+ REG_GET(DCSURF_SURFACE_CONTROL,
+ PRIMARY_SURFACE_DCC_EN, &s->dcc_en);
+
+ REG_GET_3(DCHUBP_CNTL,
+ HUBP_BLANK_EN, &s->blank_en,
+ HUBP_TTU_DISABLE, &s->ttu_disable,
+ HUBP_UNDERFLOW_STATUS, &s->underflow_status);
+
+ REG_GET(HUBP_CLK_CNTL,
+ HUBP_CLOCK_ENABLE, &s->clock_en);
+
+ REG_GET(DCN_GLOBAL_TTU_CNTL,
+ MIN_TTU_VBLANK, &s->min_ttu_vblank);
+
+ REG_GET_2(DCN_TTU_QOS_WM,
+ QoS_LEVEL_LOW_WM, &s->qos_level_low_wm,
+ QoS_LEVEL_HIGH_WM, &s->qos_level_high_wm);
+
+ REG_GET(DCSURF_PRIMARY_SURFACE_ADDRESS,
+ PRIMARY_SURFACE_ADDRESS, &s->primary_surface_addr_lo);
+
+ REG_GET(DCSURF_PRIMARY_SURFACE_ADDRESS_HIGH,
+ PRIMARY_SURFACE_ADDRESS, &s->primary_surface_addr_hi);
+
+ REG_GET(DCSURF_PRIMARY_META_SURFACE_ADDRESS,
+ PRIMARY_META_SURFACE_ADDRESS, &s->primary_meta_addr_lo);
+
+ REG_GET(DCSURF_PRIMARY_META_SURFACE_ADDRESS_HIGH,
+ PRIMARY_META_SURFACE_ADDRESS, &s->primary_meta_addr_hi);
+}
+
+void hubp2_read_state(struct hubp *hubp)
+{
+ struct dcn20_hubp *hubp2 = TO_DCN20_HUBP(hubp);
+ struct dcn_hubp_state *s = &hubp2->state;
+ struct _vcs_dpi_display_rq_regs_st *rq_regs = &s->rq_regs;
+
+ hubp2_read_state_common(hubp);
+
+ REG_GET_8(DCHUBP_REQ_SIZE_CONFIG,
+ CHUNK_SIZE, &rq_regs->rq_regs_l.chunk_size,
+ MIN_CHUNK_SIZE, &rq_regs->rq_regs_l.min_chunk_size,
+ META_CHUNK_SIZE, &rq_regs->rq_regs_l.meta_chunk_size,
+ MIN_META_CHUNK_SIZE, &rq_regs->rq_regs_l.min_meta_chunk_size,
+ DPTE_GROUP_SIZE, &rq_regs->rq_regs_l.dpte_group_size,
+ MPTE_GROUP_SIZE, &rq_regs->rq_regs_l.mpte_group_size,
+ SWATH_HEIGHT, &rq_regs->rq_regs_l.swath_height,
+ PTE_ROW_HEIGHT_LINEAR, &rq_regs->rq_regs_l.pte_row_height_linear);
+
+ REG_GET_8(DCHUBP_REQ_SIZE_CONFIG_C,
+ CHUNK_SIZE_C, &rq_regs->rq_regs_c.chunk_size,
+ MIN_CHUNK_SIZE_C, &rq_regs->rq_regs_c.min_chunk_size,
+ META_CHUNK_SIZE_C, &rq_regs->rq_regs_c.meta_chunk_size,
+ MIN_META_CHUNK_SIZE_C, &rq_regs->rq_regs_c.min_meta_chunk_size,
+ DPTE_GROUP_SIZE_C, &rq_regs->rq_regs_c.dpte_group_size,
+ MPTE_GROUP_SIZE_C, &rq_regs->rq_regs_c.mpte_group_size,
+ SWATH_HEIGHT_C, &rq_regs->rq_regs_c.swath_height,
+ PTE_ROW_HEIGHT_LINEAR_C, &rq_regs->rq_regs_c.pte_row_height_linear);
+
+}
+
+static void hubp2_validate_dml_output(struct hubp *hubp,
+ struct dc_context *ctx,
+ struct _vcs_dpi_display_rq_regs_st *dml_rq_regs,
+ struct _vcs_dpi_display_dlg_regs_st *dml_dlg_attr,
+ struct _vcs_dpi_display_ttu_regs_st *dml_ttu_attr)
+{
+ struct dcn20_hubp *hubp2 = TO_DCN20_HUBP(hubp);
+ struct _vcs_dpi_display_rq_regs_st rq_regs = {0};
+ struct _vcs_dpi_display_dlg_regs_st dlg_attr = {0};
+ struct _vcs_dpi_display_ttu_regs_st ttu_attr = {0};
+ DC_LOGGER_INIT(ctx->logger);
+ DC_LOG_DEBUG("DML Validation | Running Validation");
+
+ /* Requestor Regs */
+ REG_GET(HUBPRET_CONTROL,
+ DET_BUF_PLANE1_BASE_ADDRESS, &rq_regs.plane1_base_address);
+ REG_GET_4(DCN_EXPANSION_MODE,
+ DRQ_EXPANSION_MODE, &rq_regs.drq_expansion_mode,
+ PRQ_EXPANSION_MODE, &rq_regs.prq_expansion_mode,
+ MRQ_EXPANSION_MODE, &rq_regs.mrq_expansion_mode,
+ CRQ_EXPANSION_MODE, &rq_regs.crq_expansion_mode);
+ REG_GET_8(DCHUBP_REQ_SIZE_CONFIG,
+ CHUNK_SIZE, &rq_regs.rq_regs_l.chunk_size,
+ MIN_CHUNK_SIZE, &rq_regs.rq_regs_l.min_chunk_size,
+ META_CHUNK_SIZE, &rq_regs.rq_regs_l.meta_chunk_size,
+ MIN_META_CHUNK_SIZE, &rq_regs.rq_regs_l.min_meta_chunk_size,
+ DPTE_GROUP_SIZE, &rq_regs.rq_regs_l.dpte_group_size,
+ MPTE_GROUP_SIZE, &rq_regs.rq_regs_l.mpte_group_size,
+ SWATH_HEIGHT, &rq_regs.rq_regs_l.swath_height,
+ PTE_ROW_HEIGHT_LINEAR, &rq_regs.rq_regs_l.pte_row_height_linear);
+ REG_GET_8(DCHUBP_REQ_SIZE_CONFIG_C,
+ CHUNK_SIZE_C, &rq_regs.rq_regs_c.chunk_size,
+ MIN_CHUNK_SIZE_C, &rq_regs.rq_regs_c.min_chunk_size,
+ META_CHUNK_SIZE_C, &rq_regs.rq_regs_c.meta_chunk_size,
+ MIN_META_CHUNK_SIZE_C, &rq_regs.rq_regs_c.min_meta_chunk_size,
+ DPTE_GROUP_SIZE_C, &rq_regs.rq_regs_c.dpte_group_size,
+ MPTE_GROUP_SIZE_C, &rq_regs.rq_regs_c.mpte_group_size,
+ SWATH_HEIGHT_C, &rq_regs.rq_regs_c.swath_height,
+ PTE_ROW_HEIGHT_LINEAR_C, &rq_regs.rq_regs_c.pte_row_height_linear);
+
+ if (rq_regs.plane1_base_address != dml_rq_regs->plane1_base_address)
+ DC_LOG_DEBUG("DML Validation | HUBPRET_CONTROL:DET_BUF_PLANE1_BASE_ADDRESS - Expected: %u Actual: %u\n",
+ dml_rq_regs->plane1_base_address, rq_regs.plane1_base_address);
+ if (rq_regs.drq_expansion_mode != dml_rq_regs->drq_expansion_mode)
+ DC_LOG_DEBUG("DML Validation | DCN_EXPANSION_MODE:DRQ_EXPANSION_MODE - Expected: %u Actual: %u\n",
+ dml_rq_regs->drq_expansion_mode, rq_regs.drq_expansion_mode);
+ if (rq_regs.prq_expansion_mode != dml_rq_regs->prq_expansion_mode)
+		DC_LOG_DEBUG("DML Validation | DCN_EXPANSION_MODE:PRQ_EXPANSION_MODE - Expected: %u Actual: %u\n",
+ dml_rq_regs->prq_expansion_mode, rq_regs.prq_expansion_mode);
+ if (rq_regs.mrq_expansion_mode != dml_rq_regs->mrq_expansion_mode)
+		DC_LOG_DEBUG("DML Validation | DCN_EXPANSION_MODE:MRQ_EXPANSION_MODE - Expected: %u Actual: %u\n",
+ dml_rq_regs->mrq_expansion_mode, rq_regs.mrq_expansion_mode);
+ if (rq_regs.crq_expansion_mode != dml_rq_regs->crq_expansion_mode)
+ DC_LOG_DEBUG("DML Validation | DCN_EXPANSION_MODE:CRQ_EXPANSION_MODE - Expected: %u Actual: %u\n",
+ dml_rq_regs->crq_expansion_mode, rq_regs.crq_expansion_mode);
+
+ if (rq_regs.rq_regs_l.chunk_size != dml_rq_regs->rq_regs_l.chunk_size)
+		DC_LOG_DEBUG("DML Validation | DCHUBP_REQ_SIZE_CONFIG:CHUNK_SIZE - Expected: %u Actual: %u\n",
+ dml_rq_regs->rq_regs_l.chunk_size, rq_regs.rq_regs_l.chunk_size);
+ if (rq_regs.rq_regs_l.min_chunk_size != dml_rq_regs->rq_regs_l.min_chunk_size)
+		DC_LOG_DEBUG("DML Validation | DCHUBP_REQ_SIZE_CONFIG:MIN_CHUNK_SIZE - Expected: %u Actual: %u\n",
+ dml_rq_regs->rq_regs_l.min_chunk_size, rq_regs.rq_regs_l.min_chunk_size);
+ if (rq_regs.rq_regs_l.meta_chunk_size != dml_rq_regs->rq_regs_l.meta_chunk_size)
+		DC_LOG_DEBUG("DML Validation | DCHUBP_REQ_SIZE_CONFIG:META_CHUNK_SIZE - Expected: %u Actual: %u\n",
+ dml_rq_regs->rq_regs_l.meta_chunk_size, rq_regs.rq_regs_l.meta_chunk_size);
+ if (rq_regs.rq_regs_l.min_meta_chunk_size != dml_rq_regs->rq_regs_l.min_meta_chunk_size)
+		DC_LOG_DEBUG("DML Validation | DCHUBP_REQ_SIZE_CONFIG:MIN_META_CHUNK_SIZE - Expected: %u Actual: %u\n",
+ dml_rq_regs->rq_regs_l.min_meta_chunk_size, rq_regs.rq_regs_l.min_meta_chunk_size);
+ if (rq_regs.rq_regs_l.dpte_group_size != dml_rq_regs->rq_regs_l.dpte_group_size)
+		DC_LOG_DEBUG("DML Validation | DCHUBP_REQ_SIZE_CONFIG:DPTE_GROUP_SIZE - Expected: %u Actual: %u\n",
+ dml_rq_regs->rq_regs_l.dpte_group_size, rq_regs.rq_regs_l.dpte_group_size);
+ if (rq_regs.rq_regs_l.mpte_group_size != dml_rq_regs->rq_regs_l.mpte_group_size)
+		DC_LOG_DEBUG("DML Validation | DCHUBP_REQ_SIZE_CONFIG:MPTE_GROUP_SIZE - Expected: %u Actual: %u\n",
+ dml_rq_regs->rq_regs_l.mpte_group_size, rq_regs.rq_regs_l.mpte_group_size);
+ if (rq_regs.rq_regs_l.swath_height != dml_rq_regs->rq_regs_l.swath_height)
+		DC_LOG_DEBUG("DML Validation | DCHUBP_REQ_SIZE_CONFIG:SWATH_HEIGHT - Expected: %u Actual: %u\n",
+ dml_rq_regs->rq_regs_l.swath_height, rq_regs.rq_regs_l.swath_height);
+ if (rq_regs.rq_regs_l.pte_row_height_linear != dml_rq_regs->rq_regs_l.pte_row_height_linear)
+		DC_LOG_DEBUG("DML Validation | DCHUBP_REQ_SIZE_CONFIG:PTE_ROW_HEIGHT_LINEAR - Expected: %u Actual: %u\n",
+ dml_rq_regs->rq_regs_l.pte_row_height_linear, rq_regs.rq_regs_l.pte_row_height_linear);
+
+ if (rq_regs.rq_regs_c.chunk_size != dml_rq_regs->rq_regs_c.chunk_size)
+		DC_LOG_DEBUG("DML Validation | DCHUBP_REQ_SIZE_CONFIG_C:CHUNK_SIZE_C - Expected: %u Actual: %u\n",
+ dml_rq_regs->rq_regs_c.chunk_size, rq_regs.rq_regs_c.chunk_size);
+ if (rq_regs.rq_regs_c.min_chunk_size != dml_rq_regs->rq_regs_c.min_chunk_size)
+		DC_LOG_DEBUG("DML Validation | DCHUBP_REQ_SIZE_CONFIG_C:MIN_CHUNK_SIZE_C - Expected: %u Actual: %u\n",
+ dml_rq_regs->rq_regs_c.min_chunk_size, rq_regs.rq_regs_c.min_chunk_size);
+ if (rq_regs.rq_regs_c.meta_chunk_size != dml_rq_regs->rq_regs_c.meta_chunk_size)
+		DC_LOG_DEBUG("DML Validation | DCHUBP_REQ_SIZE_CONFIG_C:META_CHUNK_SIZE_C - Expected: %u Actual: %u\n",
+ dml_rq_regs->rq_regs_c.meta_chunk_size, rq_regs.rq_regs_c.meta_chunk_size);
+ if (rq_regs.rq_regs_c.min_meta_chunk_size != dml_rq_regs->rq_regs_c.min_meta_chunk_size)
+		DC_LOG_DEBUG("DML Validation | DCHUBP_REQ_SIZE_CONFIG_C:MIN_META_CHUNK_SIZE_C - Expected: %u Actual: %u\n",
+ dml_rq_regs->rq_regs_c.min_meta_chunk_size, rq_regs.rq_regs_c.min_meta_chunk_size);
+ if (rq_regs.rq_regs_c.dpte_group_size != dml_rq_regs->rq_regs_c.dpte_group_size)
+		DC_LOG_DEBUG("DML Validation | DCHUBP_REQ_SIZE_CONFIG_C:DPTE_GROUP_SIZE_C - Expected: %u Actual: %u\n",
+ dml_rq_regs->rq_regs_c.dpte_group_size, rq_regs.rq_regs_c.dpte_group_size);
+ if (rq_regs.rq_regs_c.mpte_group_size != dml_rq_regs->rq_regs_c.mpte_group_size)
+		DC_LOG_DEBUG("DML Validation | DCHUBP_REQ_SIZE_CONFIG_C:MPTE_GROUP_SIZE_C - Expected: %u Actual: %u\n",
+ dml_rq_regs->rq_regs_c.mpte_group_size, rq_regs.rq_regs_c.mpte_group_size);
+ if (rq_regs.rq_regs_c.swath_height != dml_rq_regs->rq_regs_c.swath_height)
+		DC_LOG_DEBUG("DML Validation | DCHUBP_REQ_SIZE_CONFIG_C:SWATH_HEIGHT_C - Expected: %u Actual: %u\n",
+ dml_rq_regs->rq_regs_c.swath_height, rq_regs.rq_regs_c.swath_height);
+ if (rq_regs.rq_regs_c.pte_row_height_linear != dml_rq_regs->rq_regs_c.pte_row_height_linear)
+		DC_LOG_DEBUG("DML Validation | DCHUBP_REQ_SIZE_CONFIG_C:PTE_ROW_HEIGHT_LINEAR_C - Expected: %u Actual: %u\n",
+ dml_rq_regs->rq_regs_c.pte_row_height_linear, rq_regs.rq_regs_c.pte_row_height_linear);
+
+ /* DLG - Per hubp */
+ REG_GET_2(BLANK_OFFSET_0,
+ REFCYC_H_BLANK_END, &dlg_attr.refcyc_h_blank_end,
+ DLG_V_BLANK_END, &dlg_attr.dlg_vblank_end);
+ REG_GET(BLANK_OFFSET_1,
+ MIN_DST_Y_NEXT_START, &dlg_attr.min_dst_y_next_start);
+ REG_GET(DST_DIMENSIONS,
+ REFCYC_PER_HTOTAL, &dlg_attr.refcyc_per_htotal);
+ REG_GET_2(DST_AFTER_SCALER,
+ REFCYC_X_AFTER_SCALER, &dlg_attr.refcyc_x_after_scaler,
+ DST_Y_AFTER_SCALER, &dlg_attr.dst_y_after_scaler);
+ REG_GET(REF_FREQ_TO_PIX_FREQ,
+ REF_FREQ_TO_PIX_FREQ, &dlg_attr.ref_freq_to_pix_freq);
+
+ if (dlg_attr.refcyc_h_blank_end != dml_dlg_attr->refcyc_h_blank_end)
+ DC_LOG_DEBUG("DML Validation | BLANK_OFFSET_0:REFCYC_H_BLANK_END - Expected: %u Actual: %u\n",
+ dml_dlg_attr->refcyc_h_blank_end, dlg_attr.refcyc_h_blank_end);
+ if (dlg_attr.dlg_vblank_end != dml_dlg_attr->dlg_vblank_end)
+ DC_LOG_DEBUG("DML Validation | BLANK_OFFSET_0:DLG_V_BLANK_END - Expected: %u Actual: %u\n",
+ dml_dlg_attr->dlg_vblank_end, dlg_attr.dlg_vblank_end);
+ if (dlg_attr.min_dst_y_next_start != dml_dlg_attr->min_dst_y_next_start)
+ DC_LOG_DEBUG("DML Validation | BLANK_OFFSET_1:MIN_DST_Y_NEXT_START - Expected: %u Actual: %u\n",
+ dml_dlg_attr->min_dst_y_next_start, dlg_attr.min_dst_y_next_start);
+ if (dlg_attr.refcyc_per_htotal != dml_dlg_attr->refcyc_per_htotal)
+ DC_LOG_DEBUG("DML Validation | DST_DIMENSIONS:REFCYC_PER_HTOTAL - Expected: %u Actual: %u\n",
+ dml_dlg_attr->refcyc_per_htotal, dlg_attr.refcyc_per_htotal);
+ if (dlg_attr.refcyc_x_after_scaler != dml_dlg_attr->refcyc_x_after_scaler)
+ DC_LOG_DEBUG("DML Validation | DST_AFTER_SCALER:REFCYC_X_AFTER_SCALER - Expected: %u Actual: %u\n",
+ dml_dlg_attr->refcyc_x_after_scaler, dlg_attr.refcyc_x_after_scaler);
+ if (dlg_attr.dst_y_after_scaler != dml_dlg_attr->dst_y_after_scaler)
+ DC_LOG_DEBUG("DML Validation | DST_AFTER_SCALER:DST_Y_AFTER_SCALER - Expected: %u Actual: %u\n",
+ dml_dlg_attr->dst_y_after_scaler, dlg_attr.dst_y_after_scaler);
+ if (dlg_attr.ref_freq_to_pix_freq != dml_dlg_attr->ref_freq_to_pix_freq)
+ DC_LOG_DEBUG("DML Validation | REF_FREQ_TO_PIX_FREQ:REF_FREQ_TO_PIX_FREQ - Expected: %u Actual: %u\n",
+ dml_dlg_attr->ref_freq_to_pix_freq, dlg_attr.ref_freq_to_pix_freq);
+
+ /* DLG - Per luma/chroma */
+ REG_GET(VBLANK_PARAMETERS_1,
+ REFCYC_PER_PTE_GROUP_VBLANK_L, &dlg_attr.refcyc_per_pte_group_vblank_l);
+ if (REG(NOM_PARAMETERS_0))
+ REG_GET(NOM_PARAMETERS_0,
+ DST_Y_PER_PTE_ROW_NOM_L, &dlg_attr.dst_y_per_pte_row_nom_l);
+ if (REG(NOM_PARAMETERS_1))
+ REG_GET(NOM_PARAMETERS_1,
+ REFCYC_PER_PTE_GROUP_NOM_L, &dlg_attr.refcyc_per_pte_group_nom_l);
+ REG_GET(NOM_PARAMETERS_4,
+ DST_Y_PER_META_ROW_NOM_L, &dlg_attr.dst_y_per_meta_row_nom_l);
+ REG_GET(NOM_PARAMETERS_5,
+ REFCYC_PER_META_CHUNK_NOM_L, &dlg_attr.refcyc_per_meta_chunk_nom_l);
+ REG_GET_2(PER_LINE_DELIVERY,
+ REFCYC_PER_LINE_DELIVERY_L, &dlg_attr.refcyc_per_line_delivery_l,
+ REFCYC_PER_LINE_DELIVERY_C, &dlg_attr.refcyc_per_line_delivery_c);
+ REG_GET_2(PER_LINE_DELIVERY_PRE,
+ REFCYC_PER_LINE_DELIVERY_PRE_L, &dlg_attr.refcyc_per_line_delivery_pre_l,
+ REFCYC_PER_LINE_DELIVERY_PRE_C, &dlg_attr.refcyc_per_line_delivery_pre_c);
+ REG_GET(VBLANK_PARAMETERS_2,
+ REFCYC_PER_PTE_GROUP_VBLANK_C, &dlg_attr.refcyc_per_pte_group_vblank_c);
+ if (REG(NOM_PARAMETERS_2))
+ REG_GET(NOM_PARAMETERS_2,
+ DST_Y_PER_PTE_ROW_NOM_C, &dlg_attr.dst_y_per_pte_row_nom_c);
+ if (REG(NOM_PARAMETERS_3))
+ REG_GET(NOM_PARAMETERS_3,
+ REFCYC_PER_PTE_GROUP_NOM_C, &dlg_attr.refcyc_per_pte_group_nom_c);
+ REG_GET(NOM_PARAMETERS_6,
+ DST_Y_PER_META_ROW_NOM_C, &dlg_attr.dst_y_per_meta_row_nom_c);
+ REG_GET(NOM_PARAMETERS_7,
+ REFCYC_PER_META_CHUNK_NOM_C, &dlg_attr.refcyc_per_meta_chunk_nom_c);
+ REG_GET(VBLANK_PARAMETERS_3,
+ REFCYC_PER_META_CHUNK_VBLANK_L, &dlg_attr.refcyc_per_meta_chunk_vblank_l);
+ REG_GET(VBLANK_PARAMETERS_4,
+ REFCYC_PER_META_CHUNK_VBLANK_C, &dlg_attr.refcyc_per_meta_chunk_vblank_c);
+
+ if (dlg_attr.refcyc_per_pte_group_vblank_l != dml_dlg_attr->refcyc_per_pte_group_vblank_l)
+ DC_LOG_DEBUG("DML Validation | VBLANK_PARAMETERS_1:REFCYC_PER_PTE_GROUP_VBLANK_L - Expected: %u Actual: %u\n",
+ dml_dlg_attr->refcyc_per_pte_group_vblank_l, dlg_attr.refcyc_per_pte_group_vblank_l);
+ if (dlg_attr.dst_y_per_pte_row_nom_l != dml_dlg_attr->dst_y_per_pte_row_nom_l)
+ DC_LOG_DEBUG("DML Validation | NOM_PARAMETERS_0:DST_Y_PER_PTE_ROW_NOM_L - Expected: %u Actual: %u\n",
+ dml_dlg_attr->dst_y_per_pte_row_nom_l, dlg_attr.dst_y_per_pte_row_nom_l);
+ if (dlg_attr.refcyc_per_pte_group_nom_l != dml_dlg_attr->refcyc_per_pte_group_nom_l)
+ DC_LOG_DEBUG("DML Validation | NOM_PARAMETERS_1:REFCYC_PER_PTE_GROUP_NOM_L - Expected: %u Actual: %u\n",
+ dml_dlg_attr->refcyc_per_pte_group_nom_l, dlg_attr.refcyc_per_pte_group_nom_l);
+ if (dlg_attr.dst_y_per_meta_row_nom_l != dml_dlg_attr->dst_y_per_meta_row_nom_l)
+ DC_LOG_DEBUG("DML Validation | NOM_PARAMETERS_4:DST_Y_PER_META_ROW_NOM_L - Expected: %u Actual: %u\n",
+ dml_dlg_attr->dst_y_per_meta_row_nom_l, dlg_attr.dst_y_per_meta_row_nom_l);
+ if (dlg_attr.refcyc_per_meta_chunk_nom_l != dml_dlg_attr->refcyc_per_meta_chunk_nom_l)
+ DC_LOG_DEBUG("DML Validation | NOM_PARAMETERS_5:REFCYC_PER_META_CHUNK_NOM_L - Expected: %u Actual: %u\n",
+ dml_dlg_attr->refcyc_per_meta_chunk_nom_l, dlg_attr.refcyc_per_meta_chunk_nom_l);
+ if (dlg_attr.refcyc_per_line_delivery_l != dml_dlg_attr->refcyc_per_line_delivery_l)
+ DC_LOG_DEBUG("DML Validation | PER_LINE_DELIVERY:REFCYC_PER_LINE_DELIVERY_L - Expected: %u Actual: %u\n",
+ dml_dlg_attr->refcyc_per_line_delivery_l, dlg_attr.refcyc_per_line_delivery_l);
+ if (dlg_attr.refcyc_per_line_delivery_c != dml_dlg_attr->refcyc_per_line_delivery_c)
+ DC_LOG_DEBUG("DML Validation | PER_LINE_DELIVERY:REFCYC_PER_LINE_DELIVERY_C - Expected: %u Actual: %u\n",
+ dml_dlg_attr->refcyc_per_line_delivery_c, dlg_attr.refcyc_per_line_delivery_c);
+ if (dlg_attr.refcyc_per_pte_group_vblank_c != dml_dlg_attr->refcyc_per_pte_group_vblank_c)
+ DC_LOG_DEBUG("DML Validation | VBLANK_PARAMETERS_2:REFCYC_PER_PTE_GROUP_VBLANK_C - Expected: %u Actual: %u\n",
+ dml_dlg_attr->refcyc_per_pte_group_vblank_c, dlg_attr.refcyc_per_pte_group_vblank_c);
+ if (dlg_attr.dst_y_per_pte_row_nom_c != dml_dlg_attr->dst_y_per_pte_row_nom_c)
+ DC_LOG_DEBUG("DML Validation | NOM_PARAMETERS_2:DST_Y_PER_PTE_ROW_NOM_C - Expected: %u Actual: %u\n",
+ dml_dlg_attr->dst_y_per_pte_row_nom_c, dlg_attr.dst_y_per_pte_row_nom_c);
+ if (dlg_attr.refcyc_per_pte_group_nom_c != dml_dlg_attr->refcyc_per_pte_group_nom_c)
+ DC_LOG_DEBUG("DML Validation | NOM_PARAMETERS_3:REFCYC_PER_PTE_GROUP_NOM_C - Expected: %u Actual: %u\n",
+ dml_dlg_attr->refcyc_per_pte_group_nom_c, dlg_attr.refcyc_per_pte_group_nom_c);
+ if (dlg_attr.dst_y_per_meta_row_nom_c != dml_dlg_attr->dst_y_per_meta_row_nom_c)
+ DC_LOG_DEBUG("DML Validation | NOM_PARAMETERS_6:DST_Y_PER_META_ROW_NOM_C - Expected: %u Actual: %u\n",
+ dml_dlg_attr->dst_y_per_meta_row_nom_c, dlg_attr.dst_y_per_meta_row_nom_c);
+ if (dlg_attr.refcyc_per_meta_chunk_nom_c != dml_dlg_attr->refcyc_per_meta_chunk_nom_c)
+ DC_LOG_DEBUG("DML Validation | NOM_PARAMETERS_7:REFCYC_PER_META_CHUNK_NOM_C - Expected: %u Actual: %u\n",
+ dml_dlg_attr->refcyc_per_meta_chunk_nom_c, dlg_attr.refcyc_per_meta_chunk_nom_c);
+ if (dlg_attr.refcyc_per_line_delivery_pre_l != dml_dlg_attr->refcyc_per_line_delivery_pre_l)
+ DC_LOG_DEBUG("DML Validation | PER_LINE_DELIVERY_PRE:REFCYC_PER_LINE_DELIVERY_PRE_L - Expected: %u Actual: %u\n",
+ dml_dlg_attr->refcyc_per_line_delivery_pre_l, dlg_attr.refcyc_per_line_delivery_pre_l);
+ if (dlg_attr.refcyc_per_line_delivery_pre_c != dml_dlg_attr->refcyc_per_line_delivery_pre_c)
+ DC_LOG_DEBUG("DML Validation | PER_LINE_DELIVERY_PRE:REFCYC_PER_LINE_DELIVERY_PRE_C - Expected: %u Actual: %u\n",
+ dml_dlg_attr->refcyc_per_line_delivery_pre_c, dlg_attr.refcyc_per_line_delivery_pre_c);
+ if (dlg_attr.refcyc_per_meta_chunk_vblank_l != dml_dlg_attr->refcyc_per_meta_chunk_vblank_l)
+ DC_LOG_DEBUG("DML Validation | VBLANK_PARAMETERS_3:REFCYC_PER_META_CHUNK_VBLANK_L - Expected: %u Actual: %u\n",
+ dml_dlg_attr->refcyc_per_meta_chunk_vblank_l, dlg_attr.refcyc_per_meta_chunk_vblank_l);
+ if (dlg_attr.refcyc_per_meta_chunk_vblank_c != dml_dlg_attr->refcyc_per_meta_chunk_vblank_c)
+ DC_LOG_DEBUG("DML Validation | VBLANK_PARAMETERS_4:REFCYC_PER_META_CHUNK_VBLANK_C - Expected: %u Actual: %u\n",
+ dml_dlg_attr->refcyc_per_meta_chunk_vblank_c, dlg_attr.refcyc_per_meta_chunk_vblank_c);
+
+ /* TTU - per hubp */
+ REG_GET_2(DCN_TTU_QOS_WM,
+ QoS_LEVEL_LOW_WM, &ttu_attr.qos_level_low_wm,
+ QoS_LEVEL_HIGH_WM, &ttu_attr.qos_level_high_wm);
+
+ if (ttu_attr.qos_level_low_wm != dml_ttu_attr->qos_level_low_wm)
+ DC_LOG_DEBUG("DML Validation | DCN_TTU_QOS_WM:QoS_LEVEL_LOW_WM - Expected: %u Actual: %u\n",
+ dml_ttu_attr->qos_level_low_wm, ttu_attr.qos_level_low_wm);
+ if (ttu_attr.qos_level_high_wm != dml_ttu_attr->qos_level_high_wm)
+ DC_LOG_DEBUG("DML Validation | DCN_TTU_QOS_WM:QoS_LEVEL_HIGH_WM - Expected: %u Actual: %u\n",
+ dml_ttu_attr->qos_level_high_wm, ttu_attr.qos_level_high_wm);
+
+ /* TTU - per luma/chroma */
+ /* Assumed surf0 is luma and 1 is chroma */
+ REG_GET_3(DCN_SURF0_TTU_CNTL0,
+ REFCYC_PER_REQ_DELIVERY, &ttu_attr.refcyc_per_req_delivery_l,
+ QoS_LEVEL_FIXED, &ttu_attr.qos_level_fixed_l,
+ QoS_RAMP_DISABLE, &ttu_attr.qos_ramp_disable_l);
+ REG_GET_3(DCN_SURF1_TTU_CNTL0,
+ REFCYC_PER_REQ_DELIVERY, &ttu_attr.refcyc_per_req_delivery_c,
+ QoS_LEVEL_FIXED, &ttu_attr.qos_level_fixed_c,
+ QoS_RAMP_DISABLE, &ttu_attr.qos_ramp_disable_c);
+ REG_GET_3(DCN_CUR0_TTU_CNTL0,
+ REFCYC_PER_REQ_DELIVERY, &ttu_attr.refcyc_per_req_delivery_cur0,
+ QoS_LEVEL_FIXED, &ttu_attr.qos_level_fixed_cur0,
+ QoS_RAMP_DISABLE, &ttu_attr.qos_ramp_disable_cur0);
+ REG_GET(FLIP_PARAMETERS_1,
+ REFCYC_PER_PTE_GROUP_FLIP_L, &dlg_attr.refcyc_per_pte_group_flip_l);
+ REG_GET(DCN_CUR0_TTU_CNTL1,
+ REFCYC_PER_REQ_DELIVERY_PRE, &ttu_attr.refcyc_per_req_delivery_pre_cur0);
+ REG_GET(DCN_CUR1_TTU_CNTL1,
+ REFCYC_PER_REQ_DELIVERY_PRE, &ttu_attr.refcyc_per_req_delivery_pre_cur1);
+ REG_GET(DCN_SURF0_TTU_CNTL1,
+ REFCYC_PER_REQ_DELIVERY_PRE, &ttu_attr.refcyc_per_req_delivery_pre_l);
+ REG_GET(DCN_SURF1_TTU_CNTL1,
+ REFCYC_PER_REQ_DELIVERY_PRE, &ttu_attr.refcyc_per_req_delivery_pre_c);
+
+ if (ttu_attr.refcyc_per_req_delivery_l != dml_ttu_attr->refcyc_per_req_delivery_l)
+ DC_LOG_DEBUG("DML Validation | DCN_SURF0_TTU_CNTL0:REFCYC_PER_REQ_DELIVERY - Expected: %u Actual: %u\n",
+ dml_ttu_attr->refcyc_per_req_delivery_l, ttu_attr.refcyc_per_req_delivery_l);
+ if (ttu_attr.qos_level_fixed_l != dml_ttu_attr->qos_level_fixed_l)
+ DC_LOG_DEBUG("DML Validation | DCN_SURF0_TTU_CNTL0:QoS_LEVEL_FIXED - Expected: %u Actual: %u\n",
+ dml_ttu_attr->qos_level_fixed_l, ttu_attr.qos_level_fixed_l);
+ if (ttu_attr.qos_ramp_disable_l != dml_ttu_attr->qos_ramp_disable_l)
+ DC_LOG_DEBUG("DML Validation | DCN_SURF0_TTU_CNTL0:QoS_RAMP_DISABLE - Expected: %u Actual: %u\n",
+ dml_ttu_attr->qos_ramp_disable_l, ttu_attr.qos_ramp_disable_l);
+ if (ttu_attr.refcyc_per_req_delivery_c != dml_ttu_attr->refcyc_per_req_delivery_c)
+ DC_LOG_DEBUG("DML Validation | DCN_SURF1_TTU_CNTL0:REFCYC_PER_REQ_DELIVERY - Expected: %u Actual: %u\n",
+ dml_ttu_attr->refcyc_per_req_delivery_c, ttu_attr.refcyc_per_req_delivery_c);
+ if (ttu_attr.qos_level_fixed_c != dml_ttu_attr->qos_level_fixed_c)
+ DC_LOG_DEBUG("DML Validation | DCN_SURF1_TTU_CNTL0:QoS_LEVEL_FIXED - Expected: %u Actual: %u\n",
+ dml_ttu_attr->qos_level_fixed_c, ttu_attr.qos_level_fixed_c);
+ if (ttu_attr.qos_ramp_disable_c != dml_ttu_attr->qos_ramp_disable_c)
+ DC_LOG_DEBUG("DML Validation | DCN_SURF1_TTU_CNTL0:QoS_RAMP_DISABLE - Expected: %u Actual: %u\n",
+ dml_ttu_attr->qos_ramp_disable_c, ttu_attr.qos_ramp_disable_c);
+ if (ttu_attr.refcyc_per_req_delivery_cur0 != dml_ttu_attr->refcyc_per_req_delivery_cur0)
+ DC_LOG_DEBUG("DML Validation | DCN_CUR0_TTU_CNTL0:REFCYC_PER_REQ_DELIVERY - Expected: %u Actual: %u\n",
+ dml_ttu_attr->refcyc_per_req_delivery_cur0, ttu_attr.refcyc_per_req_delivery_cur0);
+ if (ttu_attr.qos_level_fixed_cur0 != dml_ttu_attr->qos_level_fixed_cur0)
+ DC_LOG_DEBUG("DML Validation | DCN_CUR0_TTU_CNTL0:QoS_LEVEL_FIXED - Expected: %u Actual: %u\n",
+ dml_ttu_attr->qos_level_fixed_cur0, ttu_attr.qos_level_fixed_cur0);
+ if (ttu_attr.qos_ramp_disable_cur0 != dml_ttu_attr->qos_ramp_disable_cur0)
+ DC_LOG_DEBUG("DML Validation | DCN_CUR0_TTU_CNTL0:QoS_RAMP_DISABLE - Expected: %u Actual: %u\n",
+ dml_ttu_attr->qos_ramp_disable_cur0, ttu_attr.qos_ramp_disable_cur0);
+ if (dlg_attr.refcyc_per_pte_group_flip_l != dml_dlg_attr->refcyc_per_pte_group_flip_l)
+ DC_LOG_DEBUG("DML Validation | FLIP_PARAMETERS_1:REFCYC_PER_PTE_GROUP_FLIP_L - Expected: %u Actual: %u\n",
+ dml_dlg_attr->refcyc_per_pte_group_flip_l, dlg_attr.refcyc_per_pte_group_flip_l);
+ if (ttu_attr.refcyc_per_req_delivery_pre_cur0 != dml_ttu_attr->refcyc_per_req_delivery_pre_cur0)
+ DC_LOG_DEBUG("DML Validation | DCN_CUR0_TTU_CNTL1:REFCYC_PER_REQ_DELIVERY_PRE - Expected: %u Actual: %u\n",
+ dml_ttu_attr->refcyc_per_req_delivery_pre_cur0, ttu_attr.refcyc_per_req_delivery_pre_cur0);
+ if (ttu_attr.refcyc_per_req_delivery_pre_cur1 != dml_ttu_attr->refcyc_per_req_delivery_pre_cur1)
+ DC_LOG_DEBUG("DML Validation | DCN_CUR1_TTU_CNTL1:REFCYC_PER_REQ_DELIVERY_PRE - Expected: %u Actual: %u\n",
+ dml_ttu_attr->refcyc_per_req_delivery_pre_cur1, ttu_attr.refcyc_per_req_delivery_pre_cur1);
+ if (ttu_attr.refcyc_per_req_delivery_pre_l != dml_ttu_attr->refcyc_per_req_delivery_pre_l)
+ DC_LOG_DEBUG("DML Validation | DCN_SURF0_TTU_CNTL1:REFCYC_PER_REQ_DELIVERY_PRE - Expected: %u Actual: %u\n",
+ dml_ttu_attr->refcyc_per_req_delivery_pre_l, ttu_attr.refcyc_per_req_delivery_pre_l);
+ if (ttu_attr.refcyc_per_req_delivery_pre_c != dml_ttu_attr->refcyc_per_req_delivery_pre_c)
+ DC_LOG_DEBUG("DML Validation | DCN_SURF1_TTU_CNTL1:REFCYC_PER_REQ_DELIVERY_PRE - Expected: %u Actual: %u\n",
+ dml_ttu_attr->refcyc_per_req_delivery_pre_c, ttu_attr.refcyc_per_req_delivery_pre_c);
+}
+
static struct hubp_funcs dcn20_hubp_funcs = {
.hubp_enable_tripleBuffer = hubp2_enable_triplebuffer,
.hubp_is_triplebuffer_enabled = hubp2_is_triplebuffer_enabled,
.hubp_program_surface_flip_and_addr = hubp2_program_surface_flip_and_addr,
.hubp_program_surface_config = hubp2_program_surface_config,
- .hubp_is_flip_pending = hubp1_is_flip_pending,
+ .hubp_is_flip_pending = hubp2_is_flip_pending,
.hubp_setup = hubp2_setup,
.hubp_setup_interdependent = hubp2_setup_interdependent,
.hubp_set_vm_system_aperture_settings = hubp2_set_vm_system_aperture_settings,
- .set_blank = hubp1_set_blank,
- .dcc_control = hubp1_dcc_control,
- .hubp_update_dchub = hubp2_update_dchub,
+ .set_blank = hubp2_set_blank,
+ .set_blank_regs = hubp2_set_blank_regs,
+ .dcc_control = hubp2_dcc_control,
.mem_program_viewport = min_set_viewport,
.set_cursor_attributes = hubp2_cursor_set_attributes,
- .set_cursor_position = hubp1_cursor_set_position,
- .hubp_clk_cntl = hubp1_clk_cntl,
- .hubp_vtg_sel = hubp1_vtg_sel,
+ .set_cursor_position = hubp2_cursor_set_position,
+ .hubp_clk_cntl = hubp2_clk_cntl,
+ .hubp_vtg_sel = hubp2_vtg_sel,
.dmdata_set_attributes = hubp2_dmdata_set_attributes,
.dmdata_load = hubp2_dmdata_load,
.dmdata_status_done = hubp2_dmdata_status_done,
- .hubp_read_state = hubp1_read_state,
- .hubp_clear_underflow = hubp1_clear_underflow,
+ .hubp_read_state = hubp2_read_state,
+ .hubp_clear_underflow = hubp2_clear_underflow,
.hubp_set_flip_control_surface_gsl = hubp2_set_flip_control_surface_gsl,
.hubp_init = hubp1_init,
+ .validate_dml_output = hubp2_validate_dml_output,
+ .hubp_in_blank = hubp1_in_blank,
+ .hubp_soft_reset = hubp1_soft_reset,
+ .hubp_set_flip_int = hubp1_set_flip_int,
};
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubp.h b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubp.h
index d5acc348be22..9204c3ef323b 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubp.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubp.h
@@ -38,12 +38,6 @@
SRI(PREFETCH_SETTINGS_C, HUBPREQ, id),\
SRI(DCN_VM_SYSTEM_APERTURE_LOW_ADDR, HUBPREQ, id),\
SRI(DCN_VM_SYSTEM_APERTURE_HIGH_ADDR, HUBPREQ, id),\
- SR(DCN_VM_FB_LOCATION_TOP),\
- SR(DCN_VM_FB_LOCATION_BASE),\
- SR(DCN_VM_FB_OFFSET),\
- SR(DCN_VM_AGP_BASE),\
- SR(DCN_VM_AGP_BOT),\
- SR(DCN_VM_AGP_TOP),\
SRI(CURSOR_SETTINGS, HUBPREQ, id), \
SRI(CURSOR_SURFACE_ADDRESS_HIGH, CURSOR0_, id), \
SRI(CURSOR_SURFACE_ADDRESS, CURSOR0_, id), \
@@ -72,8 +66,8 @@
SR(DCN_VM_SYSTEM_APERTURE_DEFAULT_ADDR_MSB),\
SR(DCN_VM_SYSTEM_APERTURE_DEFAULT_ADDR_LSB)
-#define HUBP_MASK_SH_LIST_DCN2_COMMON(mask_sh)\
- HUBP_MASK_SH_LIST_DCN(mask_sh),\
+#define HUBP_MASK_SH_LIST_DCN2_SHARE_COMMON(mask_sh)\
+ HUBP_MASK_SH_LIST_DCN_SHARE_COMMON(mask_sh),\
HUBP_MASK_SH_LIST_DCN_VM(mask_sh),\
HUBP_SF(HUBP0_DCSURF_SURFACE_CONFIG, ROTATION_ANGLE, mask_sh),\
HUBP_SF(HUBP0_DCSURF_SURFACE_CONFIG, H_MIRROR_EN, mask_sh),\
@@ -82,12 +76,6 @@
HUBP_SF(HUBPREQ0_PREFETCH_SETTINGS_C, VRATIO_PREFETCH_C, mask_sh),\
HUBP_SF(HUBPREQ0_DCN_VM_SYSTEM_APERTURE_LOW_ADDR, MC_VM_SYSTEM_APERTURE_LOW_ADDR, mask_sh),\
HUBP_SF(HUBPREQ0_DCN_VM_SYSTEM_APERTURE_HIGH_ADDR, MC_VM_SYSTEM_APERTURE_HIGH_ADDR, mask_sh),\
- HUBP_SF(DCN_VM_FB_LOCATION_TOP, FB_TOP, mask_sh),\
- HUBP_SF(DCN_VM_FB_LOCATION_BASE, FB_BASE, mask_sh),\
- HUBP_SF(DCN_VM_FB_OFFSET, FB_OFFSET, mask_sh),\
- HUBP_SF(DCN_VM_AGP_BASE, AGP_BASE, mask_sh),\
- HUBP_SF(DCN_VM_AGP_BOT, AGP_BOT, mask_sh),\
- HUBP_SF(DCN_VM_AGP_TOP, AGP_TOP, mask_sh),\
HUBP_SF(HUBPREQ0_CURSOR_SETTINGS, CURSOR0_DST_Y_OFFSET, mask_sh), \
HUBP_SF(HUBPREQ0_CURSOR_SETTINGS, CURSOR0_CHUNK_HDL_ADJUST, mask_sh), \
HUBP_SF(CURSOR0_0_CURSOR_SURFACE_ADDRESS_HIGH, CURSOR_SURFACE_ADDRESS_HIGH, mask_sh), \
@@ -127,13 +115,21 @@
HUBP_SF(HUBPREQ0_DCSURF_FLIP_CONTROL2, SURFACE_TRIPLE_BUFFER_ENABLE, mask_sh),\
HUBP_SF(HUBPREQ0_VMID_SETTINGS_0, VMID, mask_sh)
+/*DCN2.x and DCN1.x*/
+#define HUBP_MASK_SH_LIST_DCN2_COMMON(mask_sh)\
+ HUBP_MASK_SH_LIST_DCN2_SHARE_COMMON(mask_sh),\
+ HUBP_SF(HUBP0_DCSURF_TILING_CONFIG, RB_ALIGNED, mask_sh),\
+ HUBP_SF(HUBP0_DCHUBP_REQ_SIZE_CONFIG, MPTE_GROUP_SIZE, mask_sh),\
+ HUBP_SF(HUBP0_DCHUBP_REQ_SIZE_CONFIG_C, MPTE_GROUP_SIZE_C, mask_sh)
+
+/*DCN2.0 specific*/
#define HUBP_MASK_SH_LIST_DCN20(mask_sh)\
HUBP_MASK_SH_LIST_DCN2_COMMON(mask_sh),\
HUBP_SF(DCN_VM_SYSTEM_APERTURE_DEFAULT_ADDR_MSB, DCN_VM_SYSTEM_APERTURE_DEFAULT_SYSTEM, mask_sh),\
HUBP_SF(DCN_VM_SYSTEM_APERTURE_DEFAULT_ADDR_MSB, DCN_VM_SYSTEM_APERTURE_DEFAULT_ADDR_MSB, mask_sh),\
HUBP_SF(DCN_VM_SYSTEM_APERTURE_DEFAULT_ADDR_LSB, DCN_VM_SYSTEM_APERTURE_DEFAULT_ADDR_LSB, mask_sh)
-
+/*DCN2.x */
#define DCN2_HUBP_REG_COMMON_VARIABLE_LIST \
HUBP_COMMON_REG_VARIABLE_LIST; \
uint32_t DMDATA_ADDRESS_HIGH; \
@@ -149,14 +145,24 @@
uint32_t FLIP_PARAMETERS_2;\
uint32_t DCN_CUR1_TTU_CNTL0;\
uint32_t DCN_CUR1_TTU_CNTL1;\
- uint32_t VMID_SETTINGS_0;\
+ uint32_t VMID_SETTINGS_0
+
+
+#define DCN21_HUBP_REG_COMMON_VARIABLE_LIST \
+ DCN2_HUBP_REG_COMMON_VARIABLE_LIST; \
uint32_t FLIP_PARAMETERS_3;\
uint32_t FLIP_PARAMETERS_4;\
+ uint32_t FLIP_PARAMETERS_5;\
+ uint32_t FLIP_PARAMETERS_6;\
uint32_t VBLANK_PARAMETERS_5;\
uint32_t VBLANK_PARAMETERS_6
+#define DCN30_HUBP_REG_COMMON_VARIABLE_LIST \
+ DCN21_HUBP_REG_COMMON_VARIABLE_LIST;\
+ uint32_t DCN_DMDATA_VM_CNTL
+
#define DCN2_HUBP_REG_FIELD_VARIABLE_LIST(type) \
- DCN_HUBP_REG_FIELD_LIST(type); \
+ DCN_HUBP_REG_FIELD_BASE_LIST(type); \
type DMDATA_ADDRESS_HIGH;\
type DMDATA_MODE;\
type DMDATA_UPDATED;\
@@ -180,17 +186,52 @@
type SURFACE_TRIPLE_BUFFER_ENABLE;\
type VMID
+#define DCN21_HUBP_REG_FIELD_VARIABLE_LIST(type) \
+ DCN2_HUBP_REG_FIELD_VARIABLE_LIST(type);\
+ type REFCYC_PER_VM_GROUP_FLIP;\
+ type REFCYC_PER_VM_REQ_FLIP;\
+ type REFCYC_PER_VM_GROUP_VBLANK;\
+ type REFCYC_PER_VM_REQ_VBLANK;\
+ type REFCYC_PER_PTE_GROUP_FLIP_C; \
+ type REFCYC_PER_META_CHUNK_FLIP_C; \
+ type VM_GROUP_SIZE
+
+#define DCN30_HUBP_REG_FIELD_VARIABLE_LIST(type) \
+ DCN21_HUBP_REG_FIELD_VARIABLE_LIST(type);\
+ type PRIMARY_SURFACE_DCC_IND_BLK;\
+ type SECONDARY_SURFACE_DCC_IND_BLK;\
+ type PRIMARY_SURFACE_DCC_IND_BLK_C;\
+ type SECONDARY_SURFACE_DCC_IND_BLK_C;\
+ type ALPHA_PLANE_EN;\
+ type REFCYC_PER_VM_DMDATA;\
+ type DMDATA_VM_FAULT_STATUS;\
+ type DMDATA_VM_FAULT_STATUS_CLEAR; \
+ type DMDATA_VM_UNDERFLOW_STATUS;\
+ type DMDATA_VM_LATE_STATUS;\
+ type DMDATA_VM_UNDERFLOW_STATUS_CLEAR; \
+ type DMDATA_VM_DONE; \
+ type CROSSBAR_SRC_Y_G; \
+ type CROSSBAR_SRC_ALPHA; \
+ type PACK_3TO2_ELEMENT_DISABLE; \
+ type ROW_TTU_MODE; \
+ type NUM_PKRS
+
+#define DCN31_HUBP_REG_FIELD_VARIABLE_LIST(type) \
+ DCN30_HUBP_REG_FIELD_VARIABLE_LIST(type);\
+ type HUBP_UNBOUNDED_REQ_MODE;\
+ type CURSOR_REQ_MODE;\
+ type HUBP_SOFT_RESET
struct dcn_hubp2_registers {
- DCN2_HUBP_REG_COMMON_VARIABLE_LIST;
+ DCN30_HUBP_REG_COMMON_VARIABLE_LIST;
};
struct dcn_hubp2_shift {
- DCN2_HUBP_REG_FIELD_VARIABLE_LIST(uint8_t);
+ DCN31_HUBP_REG_FIELD_VARIABLE_LIST(uint8_t);
};
struct dcn_hubp2_mask {
- DCN2_HUBP_REG_FIELD_VARIABLE_LIST(uint32_t);
+ DCN31_HUBP_REG_FIELD_VARIABLE_LIST(uint32_t);
};
struct dcn20_hubp {
@@ -217,10 +258,6 @@ void hubp2_setup_interdependent(
void hubp2_vready_at_or_After_vsync(struct hubp *hubp,
struct _vcs_dpi_display_pipe_dest_params_st *pipe_dest);
-void hubp2_update_dchub(
- struct hubp *hubp,
- struct dchub_init_data *dh_data);
-
void hubp2_cursor_set_attributes(
struct hubp *hubp,
const struct dc_cursor_attributes *attr);
@@ -262,16 +299,54 @@ bool hubp2_program_surface_flip_and_addr(
const struct dc_plane_address *address,
bool flip_immediate);
+void hubp2_dcc_control(struct hubp *hubp, bool enable,
+ enum hubp_ind_block_size independent_64b_blks);
+
+void hubp2_program_size(
+ struct hubp *hubp,
+ enum surface_pixel_format format,
+ const struct plane_size *plane_size,
+ struct dc_plane_dcc_param *dcc);
+
+void hubp2_program_rotation(
+ struct hubp *hubp,
+ enum dc_rotation_angle rotation,
+ bool horizontal_mirror);
+
+void hubp2_program_pixel_format(
+ struct hubp *hubp,
+ enum surface_pixel_format format);
+
void hubp2_program_surface_config(
struct hubp *hubp,
enum surface_pixel_format format,
union dc_tiling_info *tiling_info,
- union plane_size *plane_size,
+ struct plane_size *plane_size,
enum dc_rotation_angle rotation,
struct dc_plane_dcc_param *dcc,
bool horizontal_mirror,
unsigned int compat_level);
+bool hubp2_is_flip_pending(struct hubp *hubp);
+
+void hubp2_set_blank(struct hubp *hubp, bool blank);
+void hubp2_set_blank_regs(struct hubp *hubp, bool blank);
+
+void hubp2_cursor_set_position(
+ struct hubp *hubp,
+ const struct dc_cursor_position *pos,
+ const struct dc_cursor_mi_param *param);
+
+void hubp2_clk_cntl(struct hubp *hubp, bool enable);
+
+void hubp2_vtg_sel(struct hubp *hubp, uint32_t otg_inst);
+
+void hubp2_clear_underflow(struct hubp *hubp);
+
+void hubp2_read_state_common(struct hubp *hubp);
+
+void hubp2_read_state(struct hubp *hubp);
+
#endif /* __DC_MEM_INPUT_DCN20_H__ */
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c
index 0b84a322b8a2..ec6aa8d8b251 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c
@@ -25,17 +25,15 @@
#include <linux/delay.h>
#include "dm_services.h"
+#include "basics/dc_common.h"
#include "dm_helpers.h"
#include "core_types.h"
#include "resource.h"
-#include "dcn20/dcn20_resource.h"
-#include "dce110/dce110_hw_sequencer.h"
-#include "dcn10/dcn10_hw_sequencer.h"
+#include "dcn20_resource.h"
#include "dcn20_hwseq.h"
#include "dce/dce_hwseq.h"
-#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT
-#include "dcn20/dcn20_dsc.h"
-#endif
+#include "dcn20_dsc.h"
+#include "dcn20_optc.h"
#include "abm.h"
#include "clk_mgr.h"
#include "dmcu.h"
@@ -45,13 +43,19 @@
#include "ipp.h"
#include "mpc.h"
#include "mcif_wb.h"
+#include "dchubbub.h"
#include "reg_helper.h"
#include "dcn10/dcn10_cm_common.h"
-#include "dcn10/dcn10_hubbub.h"
-#include "dcn10/dcn10_optc.h"
#include "dc_link_dp.h"
#include "vm_helper.h"
#include "dccg.h"
+#include "dc_dmub_srv.h"
+#include "dce/dmub_hw_lock_mgr.h"
+#include "hw_sequencer.h"
+#include "inc/link_dpcd.h"
+#include "dpcd_defs.h"
+#include "inc/link_enc_cfg.h"
+#include "link_hwss.h"
#define DC_LOGGER_INIT(logger)
@@ -64,58 +68,166 @@
#define FN(reg_name, field_name) \
hws->shifts->field_name, hws->masks->field_name
-static void bios_golden_init(struct dc *dc)
+static int find_free_gsl_group(const struct dc *dc)
{
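+	/* Groups are handed out as 1-based indices; 0 means no group is free. */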
- struct dc_bios *bp = dc->ctx->dc_bios;
- int i;
+ if (dc->res_pool->gsl_groups.gsl_0 == 0)
+ return 1;
+ if (dc->res_pool->gsl_groups.gsl_1 == 0)
+ return 2;
+ if (dc->res_pool->gsl_groups.gsl_2 == 0)
+ return 3;
- /* initialize dcn global */
- bp->funcs->enable_disp_power_gating(bp,
- CONTROLLER_ID_D0, ASIC_PIPE_INIT);
+ return 0;
+}
- for (i = 0; i < dc->res_pool->pipe_count; i++) {
- /* initialize dcn per pipe */
- bp->funcs->enable_disp_power_gating(bp,
- CONTROLLER_ID_D0 + i, ASIC_PIPE_DISABLE);
+/* NOTE: This is not a generic setup_gsl function (hence the suffix as_lock)
+ * This is only used to lock pipes in pipe splitting case with immediate flip
+ * Ordinary MPC/OTG locks suppress VUPDATE, which does not help with immediate
+ * flips, so we would get tearing with FreeSync since multiple pipes cannot be
+ * flipped atomically.
+ * We use GSL for this:
+ * - immediate flip: find the first available GSL group if none is assigned yet,
+ *   program GSL with that group, set the current OTG as master,
+ *   and always use 0x4 = AND of flip_ready from all pipes
+ * - vsync flip: disable GSL if used
+ *
+ * Groups in stream_res are stored as +1 from HW registers, i.e.
+ * gsl_0 <=> pipe_ctx->stream_res.gsl_group == 1
+ * Using a magic value like -1 would require tracking all inits/resets
+ */
+static void dcn20_setup_gsl_group_as_lock(
+ const struct dc *dc,
+ struct pipe_ctx *pipe_ctx,
+ bool enable)
+{
+ struct gsl_params gsl;
+ int group_idx;
+
+ memset(&gsl, 0, sizeof(struct gsl_params));
+
+ if (enable) {
+		/* return if a group is already assigned; a switch back to vsync
+		 * flip unassigns the group, so it cannot be a stale "left over"
+		 */
+ if (pipe_ctx->stream_res.gsl_group > 0)
+ return;
+
+ group_idx = find_free_gsl_group(dc);
+ ASSERT(group_idx != 0);
+ pipe_ctx->stream_res.gsl_group = group_idx;
+
+ /* set gsl group reg field and mark resource used */
+ switch (group_idx) {
+ case 1:
+ gsl.gsl0_en = 1;
+ dc->res_pool->gsl_groups.gsl_0 = 1;
+ break;
+ case 2:
+ gsl.gsl1_en = 1;
+ dc->res_pool->gsl_groups.gsl_1 = 1;
+ break;
+ case 3:
+ gsl.gsl2_en = 1;
+ dc->res_pool->gsl_groups.gsl_2 = 1;
+ break;
+ default:
+ BREAK_TO_DEBUGGER();
+ return; // invalid case
+ }
+ gsl.gsl_master_en = 1;
+ } else {
+ group_idx = pipe_ctx->stream_res.gsl_group;
+ if (group_idx == 0)
+ return; // if not in use, just return
+
+ pipe_ctx->stream_res.gsl_group = 0;
+
+ /* unset gsl group reg field and mark resource free */
+ switch (group_idx) {
+ case 1:
+ gsl.gsl0_en = 0;
+ dc->res_pool->gsl_groups.gsl_0 = 0;
+ break;
+ case 2:
+ gsl.gsl1_en = 0;
+ dc->res_pool->gsl_groups.gsl_1 = 0;
+ break;
+ case 3:
+ gsl.gsl2_en = 0;
+ dc->res_pool->gsl_groups.gsl_2 = 0;
+ break;
+ default:
+ BREAK_TO_DEBUGGER();
+ return;
+ }
+ gsl.gsl_master_en = 0;
}
+
+ /* at this point we want to program whether it's to enable or disable */
+ if (pipe_ctx->stream_res.tg->funcs->set_gsl != NULL &&
+ pipe_ctx->stream_res.tg->funcs->set_gsl_source_select != NULL) {
+ pipe_ctx->stream_res.tg->funcs->set_gsl(
+ pipe_ctx->stream_res.tg,
+ &gsl);
+
+ pipe_ctx->stream_res.tg->funcs->set_gsl_source_select(
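+		/* 0x4 = AND of flip_ready from all pipes (see comment above); 0 disables the source. */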
+ pipe_ctx->stream_res.tg, group_idx, enable ? 4 : 0);
+ } else
+ BREAK_TO_DEBUGGER();
}
-static void enable_power_gating_plane(
+void dcn20_set_flip_control_gsl(
+ struct pipe_ctx *pipe_ctx,
+ bool flip_immediate)
+{
+ if (pipe_ctx && pipe_ctx->plane_res.hubp->funcs->hubp_set_flip_control_surface_gsl)
+ pipe_ctx->plane_res.hubp->funcs->hubp_set_flip_control_surface_gsl(
+ pipe_ctx->plane_res.hubp, flip_immediate);
+}
+
+void dcn20_enable_power_gating_plane(
struct dce_hwseq *hws,
bool enable)
{
- bool force_on = 1; /* disable power gating */
+ bool force_on = true; /* disable power gating */
if (enable)
- force_on = 0;
+ force_on = false;
/* DCHUBP0/1/2/3/4/5 */
REG_UPDATE(DOMAIN0_PG_CONFIG, DOMAIN0_POWER_FORCEON, force_on);
REG_UPDATE(DOMAIN2_PG_CONFIG, DOMAIN2_POWER_FORCEON, force_on);
REG_UPDATE(DOMAIN4_PG_CONFIG, DOMAIN4_POWER_FORCEON, force_on);
REG_UPDATE(DOMAIN6_PG_CONFIG, DOMAIN6_POWER_FORCEON, force_on);
- REG_UPDATE(DOMAIN8_PG_CONFIG, DOMAIN8_POWER_FORCEON, force_on);
- /*Do not power gate DCHUB5, should be left at HW default, power on permanently*/
- /*REG_UPDATE(DOMAIN10_PG_CONFIG, DOMAIN10_POWER_FORCEON, force_on);*/
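+	/* Some of these power domains may not be defined for every ASIC sharing
+	 * this sequencer, so only touch registers that exist.
+	 */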
+ if (REG(DOMAIN8_PG_CONFIG))
+ REG_UPDATE(DOMAIN8_PG_CONFIG, DOMAIN8_POWER_FORCEON, force_on);
+ if (REG(DOMAIN10_PG_CONFIG))
+ REG_UPDATE(DOMAIN10_PG_CONFIG, DOMAIN8_POWER_FORCEON, force_on);
/* DPP0/1/2/3/4/5 */
REG_UPDATE(DOMAIN1_PG_CONFIG, DOMAIN1_POWER_FORCEON, force_on);
REG_UPDATE(DOMAIN3_PG_CONFIG, DOMAIN3_POWER_FORCEON, force_on);
REG_UPDATE(DOMAIN5_PG_CONFIG, DOMAIN5_POWER_FORCEON, force_on);
REG_UPDATE(DOMAIN7_PG_CONFIG, DOMAIN7_POWER_FORCEON, force_on);
- REG_UPDATE(DOMAIN9_PG_CONFIG, DOMAIN9_POWER_FORCEON, force_on);
- /*Do not power gate DPP5, should be left at HW default, power on permanently*/
- /*REG_UPDATE(DOMAIN11_PG_CONFIG, DOMAIN11_POWER_FORCEON, force_on);*/
+ if (REG(DOMAIN9_PG_CONFIG))
+ REG_UPDATE(DOMAIN9_PG_CONFIG, DOMAIN9_POWER_FORCEON, force_on);
+ if (REG(DOMAIN11_PG_CONFIG))
+ REG_UPDATE(DOMAIN11_PG_CONFIG, DOMAIN9_POWER_FORCEON, force_on);
+ /* DCS0/1/2/3/4/5 */
REG_UPDATE(DOMAIN16_PG_CONFIG, DOMAIN16_POWER_FORCEON, force_on);
REG_UPDATE(DOMAIN17_PG_CONFIG, DOMAIN17_POWER_FORCEON, force_on);
REG_UPDATE(DOMAIN18_PG_CONFIG, DOMAIN18_POWER_FORCEON, force_on);
- REG_UPDATE(DOMAIN19_PG_CONFIG, DOMAIN19_POWER_FORCEON, force_on);
- REG_UPDATE(DOMAIN20_PG_CONFIG, DOMAIN20_POWER_FORCEON, force_on);
- REG_UPDATE(DOMAIN21_PG_CONFIG, DOMAIN21_POWER_FORCEON, force_on);
+ if (REG(DOMAIN19_PG_CONFIG))
+ REG_UPDATE(DOMAIN19_PG_CONFIG, DOMAIN19_POWER_FORCEON, force_on);
+ if (REG(DOMAIN20_PG_CONFIG))
+ REG_UPDATE(DOMAIN20_PG_CONFIG, DOMAIN20_POWER_FORCEON, force_on);
+ if (REG(DOMAIN21_PG_CONFIG))
+ REG_UPDATE(DOMAIN21_PG_CONFIG, DOMAIN21_POWER_FORCEON, force_on);
}
-static void dcn20_dccg_init(struct dce_hwseq *hws)
+void dcn20_dccg_init(struct dce_hwseq *hws)
{
/*
* set MICROSECOND_TIME_BASE_DIV
@@ -136,10 +248,10 @@ static void dcn20_dccg_init(struct dce_hwseq *hws)
REG_WRITE(MILLISECOND_TIME_BASE_DIV, 0x1186a0);
/* This value is dependent on the hardware pipeline delay so set once per SOC */
- REG_WRITE(DISPCLK_FREQ_CHANGE_CNTL, 0x801003c);
+ REG_WRITE(DISPCLK_FREQ_CHANGE_CNTL, 0xe01003c);
}
-static void disable_vga(
+void dcn20_disable_vga(
struct dce_hwseq *hws)
{
REG_WRITE(D1VGA_CONTROL, 0);
@@ -150,23 +262,24 @@ static void disable_vga(
REG_WRITE(D6VGA_CONTROL, 0);
}
-void dcn20_program_tripleBuffer(
+void dcn20_program_triple_buffer(
const struct dc *dc,
struct pipe_ctx *pipe_ctx,
- bool enableTripleBuffer)
+ bool enable_triple_buffer)
{
if (pipe_ctx->plane_res.hubp && pipe_ctx->plane_res.hubp->funcs) {
pipe_ctx->plane_res.hubp->funcs->hubp_enable_tripleBuffer(
pipe_ctx->plane_res.hubp,
- enableTripleBuffer);
+ enable_triple_buffer);
}
}
/* Blank pixel data during initialization */
-static void dcn20_init_blank(
+void dcn20_init_blank(
struct dc *dc,
struct timing_generator *tg)
{
+ struct dce_hwseq *hws = dc->hwseq;
enum dc_color_space color_space;
struct tg_color black_color = {0};
struct output_pixel_processor *opp = NULL;
@@ -185,38 +298,49 @@ static void dcn20_init_blank(
/* get the OPTC source */
tg->funcs->get_optc_source(tg, &num_opps, &opp_id_src0, &opp_id_src1);
- ASSERT(opp_id_src0 < dc->res_pool->res_cap->num_opp);
+
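+	/* Bail out rather than just asserting so an out-of-range OPP index is never dereferenced. */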
+ if (opp_id_src0 >= dc->res_pool->res_cap->num_opp) {
+ ASSERT(false);
+ return;
+ }
opp = dc->res_pool->opps[opp_id_src0];
if (num_opps == 2) {
otg_active_width = otg_active_width / 2;
- ASSERT(opp_id_src1 < dc->res_pool->res_cap->num_opp);
+
+ if (opp_id_src1 >= dc->res_pool->res_cap->num_opp) {
+ ASSERT(false);
+ return;
+ }
bottom_opp = dc->res_pool->opps[opp_id_src1];
}
opp->funcs->opp_set_disp_pattern_generator(
opp,
CONTROLLER_DP_TEST_PATTERN_SOLID_COLOR,
+ CONTROLLER_DP_COLOR_SPACE_UDEFINED,
COLOR_DEPTH_UNDEFINED,
&black_color,
otg_active_width,
- otg_active_height);
+ otg_active_height,
+ 0);
if (num_opps == 2) {
bottom_opp->funcs->opp_set_disp_pattern_generator(
bottom_opp,
CONTROLLER_DP_TEST_PATTERN_SOLID_COLOR,
+ CONTROLLER_DP_COLOR_SPACE_UDEFINED,
COLOR_DEPTH_UNDEFINED,
&black_color,
otg_active_width,
- otg_active_height);
+ otg_active_height,
+ 0);
}
- dcn20_hwss_wait_for_blank_complete(opp);
+ hws->funcs.wait_for_blank_complete(opp);
}
-#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT
-static void dcn20_dsc_pg_control(
+void dcn20_dsc_pg_control(
struct dce_hwseq *hws,
unsigned int dsc_inst,
bool power_on)
@@ -292,9 +416,8 @@ static void dcn20_dsc_pg_control(
if (org_ip_request_cntl == 0)
REG_SET(DC_IP_REQUEST_CNTL, 0, IP_REQUEST_EN, 0);
}
-#endif
-static void dcn20_dpp_pg_control(
+void dcn20_dpp_pg_control(
struct dce_hwseq *hws,
unsigned int dpp_inst,
bool power_on)
@@ -368,7 +491,7 @@ static void dcn20_dpp_pg_control(
}
-static void dcn20_hubp_pg_control(
+void dcn20_hubp_pg_control(
struct dce_hwseq *hws,
unsigned int hubp_inst,
bool power_on)
@@ -442,34 +565,12 @@ static void dcn20_hubp_pg_control(
}
-
-static void dcn20_plane_atomic_power_down(struct dc *dc, struct pipe_ctx *pipe_ctx)
-{
- struct dce_hwseq *hws = dc->hwseq;
- struct dpp *dpp = pipe_ctx->plane_res.dpp;
-
- DC_LOGGER_INIT(dc->ctx->logger);
-
- if (REG(DC_IP_REQUEST_CNTL)) {
- REG_SET(DC_IP_REQUEST_CNTL, 0,
- IP_REQUEST_EN, 1);
- dcn20_dpp_pg_control(hws, dpp->inst, false);
- dcn20_hubp_pg_control(hws, pipe_ctx->plane_res.hubp->inst, false);
- dpp->funcs->dpp_reset(dpp);
- REG_SET(DC_IP_REQUEST_CNTL, 0,
- IP_REQUEST_EN, 0);
- DC_LOG_DEBUG(
- "Power gated front end %d\n", pipe_ctx->pipe_idx);
- }
-}
-
-
-
/* disable HW used by plane.
* note: cannot disable until disconnect is complete
*/
-static void dcn20_plane_atomic_disable(struct dc *dc, struct pipe_ctx *pipe_ctx)
+void dcn20_plane_atomic_disable(struct dc *dc, struct pipe_ctx *pipe_ctx)
{
+ struct dce_hwseq *hws = dc->hwseq;
struct hubp *hubp = pipe_ctx->plane_res.hubp;
struct dpp *dpp = pipe_ctx->plane_res.dpp;
@@ -488,9 +589,10 @@ static void dcn20_plane_atomic_disable(struct dc *dc, struct pipe_ctx *pipe_ctx)
dpp->funcs->dpp_dppclk_control(dpp, false, false);
hubp->power_gated = true;
- dc->optimized_required = false; /* We're powering off, no need to optimize */
- dcn20_plane_atomic_power_down(dc, pipe_ctx);
+ hws->funcs.plane_atomic_power_down(dc,
+ pipe_ctx->plane_res.dpp,
+ pipe_ctx->plane_res.hubp);
pipe_ctx->stream = NULL;
memset(&pipe_ctx->stream_res, 0, sizeof(pipe_ctx->stream_res));
@@ -514,197 +616,32 @@ void dcn20_disable_plane(struct dc *dc, struct pipe_ctx *pipe_ctx)
pipe_ctx->pipe_idx);
}
-static void dcn20_init_hw(struct dc *dc)
+void dcn20_disable_pixel_data(struct dc *dc, struct pipe_ctx *pipe_ctx, bool blank)
{
- int i, j;
- struct abm *abm = dc->res_pool->abm;
- struct dmcu *dmcu = dc->res_pool->dmcu;
- struct dce_hwseq *hws = dc->hwseq;
- struct dc_bios *dcb = dc->ctx->dc_bios;
- struct resource_pool *res_pool = dc->res_pool;
- struct dc_state *context = dc->current_state;
- struct dc_firmware_info fw_info = { { 0 } };
-
- if (dc->clk_mgr && dc->clk_mgr->funcs->init_clocks)
- dc->clk_mgr->funcs->init_clocks(dc->clk_mgr);
-
- // Initialize the dccg
- if (res_pool->dccg->funcs->dccg_init)
- res_pool->dccg->funcs->dccg_init(res_pool->dccg);
-
- //Enable ability to power gate / don't force power on permanently
- enable_power_gating_plane(dc->hwseq, true);
-
- if (IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment)) {
- REG_WRITE(RBBMIF_TIMEOUT_DIS, 0xFFFFFFFF);
- REG_WRITE(RBBMIF_TIMEOUT_DIS_2, 0xFFFFFFFF);
-
- dcn20_dccg_init(hws);
-
- REG_UPDATE(DCHUBBUB_GLOBAL_TIMER_CNTL, DCHUBBUB_GLOBAL_TIMER_REFDIV, 2);
- REG_UPDATE(DCHUBBUB_GLOBAL_TIMER_CNTL, DCHUBBUB_GLOBAL_TIMER_ENABLE, 1);
- REG_WRITE(REFCLK_CNTL, 0);
- } else {
- if (!dcb->funcs->is_accelerated_mode(dcb)) {
- bios_golden_init(dc);
- if (dc->ctx->dc_bios->funcs->get_firmware_info(
- dc->ctx->dc_bios, &fw_info) == BP_RESULT_OK) {
- res_pool->ref_clocks.xtalin_clock_inKhz = fw_info.pll_info.crystal_frequency;
-
- if (!IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment)) {
- if (res_pool->dccg && res_pool->hubbub) {
-
- (res_pool->dccg->funcs->get_dccg_ref_freq)(res_pool->dccg,
- fw_info.pll_info.crystal_frequency,
- &res_pool->ref_clocks.dccg_ref_clock_inKhz);
-
- (res_pool->hubbub->funcs->get_dchub_ref_freq)(res_pool->hubbub,
- res_pool->ref_clocks.dccg_ref_clock_inKhz,
- &res_pool->ref_clocks.dchub_ref_clock_inKhz);
- } else {
- // Not all ASICs have DCCG sw component
- res_pool->ref_clocks.dccg_ref_clock_inKhz =
- res_pool->ref_clocks.xtalin_clock_inKhz;
- res_pool->ref_clocks.dchub_ref_clock_inKhz =
- res_pool->ref_clocks.xtalin_clock_inKhz;
- }
- }
- } else
- ASSERT_CRITICAL(false);
- disable_vga(dc->hwseq);
- }
-
- for (i = 0; i < dc->link_count; i++) {
- /* Power up AND update implementation according to the
- * required signal (which may be different from the
- * default signal on connector).
- */
- struct dc_link *link = dc->links[i];
-
- link->link_enc->funcs->hw_init(link->link_enc);
- }
- }
-
- /* Blank pixel data with OPP DPG */
- for (i = 0; i < dc->res_pool->timing_generator_count; i++) {
- struct timing_generator *tg = dc->res_pool->timing_generators[i];
-
- if (tg->funcs->is_tg_enabled(tg)) {
- dcn20_init_blank(dc, tg);
- }
- }
-
- for (i = 0; i < res_pool->timing_generator_count; i++) {
- struct timing_generator *tg = dc->res_pool->timing_generators[i];
-
- if (tg->funcs->is_tg_enabled(tg))
- tg->funcs->lock(tg);
- }
-
- for (i = 0; i < dc->res_pool->pipe_count; i++) {
- struct dpp *dpp = res_pool->dpps[i];
-
- dpp->funcs->dpp_reset(dpp);
- }
-
- /* Reset all MPCC muxes */
- res_pool->mpc->funcs->mpc_init(res_pool->mpc);
-
- /* initialize OPP mpc_tree parameter */
- for (i = 0; i < dc->res_pool->res_cap->num_opp; i++) {
- res_pool->opps[i]->mpc_tree_params.opp_id = res_pool->opps[i]->inst;
- res_pool->opps[i]->mpc_tree_params.opp_list = NULL;
- for (j = 0; j < MAX_PIPES; j++)
- res_pool->opps[i]->mpcc_disconnect_pending[j] = false;
- }
-
- for (i = 0; i < dc->res_pool->pipe_count; i++) {
- struct timing_generator *tg = dc->res_pool->timing_generators[i];
- struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
- struct hubp *hubp = dc->res_pool->hubps[i];
- struct dpp *dpp = dc->res_pool->dpps[i];
-
- pipe_ctx->stream_res.tg = tg;
- pipe_ctx->pipe_idx = i;
-
- pipe_ctx->plane_res.hubp = hubp;
- pipe_ctx->plane_res.dpp = dpp;
- pipe_ctx->plane_res.mpcc_inst = dpp->inst;
- hubp->mpcc_id = dpp->inst;
- hubp->opp_id = OPP_ID_INVALID;
- hubp->power_gated = false;
- pipe_ctx->stream_res.opp = NULL;
-
- hubp->funcs->hubp_init(hubp);
-
- //dc->res_pool->opps[i]->mpc_tree_params.opp_id = dc->res_pool->opps[i]->inst;
- //dc->res_pool->opps[i]->mpc_tree_params.opp_list = NULL;
- dc->res_pool->opps[i]->mpcc_disconnect_pending[pipe_ctx->plane_res.mpcc_inst] = true;
- pipe_ctx->stream_res.opp = dc->res_pool->opps[i];
- /*to do*/
- hwss1_plane_atomic_disconnect(dc, pipe_ctx);
- }
-
- /* initialize DWB pointer to MCIF_WB */
- for (i = 0; i < res_pool->res_cap->num_dwb; i++)
- res_pool->dwbc[i]->mcif = res_pool->mcif_wb[i];
-
- for (i = 0; i < dc->res_pool->timing_generator_count; i++) {
- struct timing_generator *tg = dc->res_pool->timing_generators[i];
-
- if (tg->funcs->is_tg_enabled(tg))
- tg->funcs->unlock(tg);
- }
-
- for (i = 0; i < dc->res_pool->pipe_count; i++) {
- struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
-
- dc->hwss.disable_plane(dc, pipe_ctx);
-
- pipe_ctx->stream_res.tg = NULL;
- pipe_ctx->plane_res.hubp = NULL;
- }
-
- for (i = 0; i < dc->res_pool->timing_generator_count; i++) {
- struct timing_generator *tg = dc->res_pool->timing_generators[i];
-
- tg->funcs->tg_init(tg);
- }
-
- /* end of FPGA. Below if real ASIC */
- if (IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment))
- return;
-
-
- for (i = 0; i < res_pool->audio_count; i++) {
- struct audio *audio = res_pool->audios[i];
-
- audio->funcs->hw_init(audio);
- }
-
- if (abm != NULL) {
- abm->funcs->init_backlight(abm);
- abm->funcs->abm_init(abm);
- }
-
- if (dmcu != NULL)
- dmcu->funcs->dmcu_init(dmcu);
+ dcn20_blank_pixel_data(dc, pipe_ctx, blank);
+}
- if (abm != NULL && dmcu != NULL)
- abm->dmcu_is_running = dmcu->funcs->is_dmcu_initialized(dmcu);
+static int calc_mpc_flow_ctrl_cnt(const struct dc_stream_state *stream,
+ int opp_cnt)
+{
+ bool hblank_halved = optc2_is_two_pixels_per_containter(&stream->timing);
+ int flow_ctrl_cnt;
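+
+	/*
+	 * flow_ctrl_cnt is the horizontal blanking width (h_total minus the
+	 * addressable and border pixels), halved when hblank is halved by
+	 * two pixels per container or ODM, and halved again for 4:1 ODM combine.
+	 */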
- /* power AFMT HDMI memory TODO: may move to dis/en output save power*/
- REG_WRITE(DIO_MEM_PWR_CTRL, 0);
+ if (opp_cnt >= 2)
+ hblank_halved = true;
- if (!dc->debug.disable_clock_gate) {
- /* enable all DCN clock gating */
- REG_WRITE(DCCG_GATE_DISABLE_CNTL, 0);
+ flow_ctrl_cnt = stream->timing.h_total - stream->timing.h_addressable -
+ stream->timing.h_border_left -
+ stream->timing.h_border_right;
- REG_WRITE(DCCG_GATE_DISABLE_CNTL2, 0);
+ if (hblank_halved)
+ flow_ctrl_cnt /= 2;
- REG_UPDATE(DCFCLK_CNTL, DCFCLK_GATE_DIS, 0);
- }
+ /* ODM combine 4:1 case */
+ if (opp_cnt == 4)
+ flow_ctrl_cnt /= 2;
+ return flow_ctrl_cnt;
}
enum dc_status dcn20_enable_stream_timing(
@@ -712,14 +649,18 @@ enum dc_status dcn20_enable_stream_timing(
struct dc_state *context,
struct dc *dc)
{
+ struct dce_hwseq *hws = dc->hwseq;
struct dc_stream_state *stream = pipe_ctx->stream;
struct drr_params params = {0};
unsigned int event_triggers = 0;
-
-
-#if defined(CONFIG_DRM_AMD_DC_DCN2_0)
- struct pipe_ctx *odm_pipe = dc_res_get_odm_bottom_pipe(pipe_ctx);
-#endif
+ struct pipe_ctx *odm_pipe;
+ int opp_cnt = 1;
+ int opp_inst[MAX_PIPES] = { pipe_ctx->stream_res.opp->inst };
+ bool interlace = stream->timing.flags.INTERLACE;
+ int i;
+ struct mpc_dwb_flow_control flow_control;
+ struct mpc *mpc = dc->res_pool->mpc;
+ bool rate_control_2x_pclk = (interlace || optc2_is_two_pixels_per_containter(&stream->timing));
/* by upper caller loop, pipe0 is parent pipe and be called first.
* back end is set up by for pipe0. Other children pipe share back end
@@ -730,12 +671,17 @@ enum dc_status dcn20_enable_stream_timing(
/* TODO check if timing_changed, disable stream if timing changed */
- if (odm_pipe)
+ for (odm_pipe = pipe_ctx->next_odm_pipe; odm_pipe; odm_pipe = odm_pipe->next_odm_pipe) {
+ opp_inst[opp_cnt] = odm_pipe->stream_res.opp->inst;
+ opp_cnt++;
+ }
+
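+	/* opp_inst[] now holds this pipe's OPP followed by one entry per ODM pipe. */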
+ if (opp_cnt > 1)
pipe_ctx->stream_res.tg->funcs->set_odm_combine(
pipe_ctx->stream_res.tg,
- odm_pipe->stream_res.opp->inst,
- pipe_ctx->stream->timing.h_addressable/2,
- pipe_ctx->stream->timing.pixel_encoding);
+ opp_inst, opp_cnt,
+ &pipe_ctx->stream->timing);
+
/* HW program guide assume display already disable
* by unplug sequence. OTG assume stop.
*/
@@ -749,6 +695,9 @@ enum dc_status dcn20_enable_stream_timing(
return DC_ERROR_UNEXPECTED;
}
+ if (dc->hwseq->funcs.PLAT_58856_wa && (!dc_is_dp_signal(stream->signal)))
+ dc->hwseq->funcs.PLAT_58856_wa(context, pipe_ctx);
+
pipe_ctx->stream_res.tg->funcs->program_timing(
pipe_ctx->stream_res.tg,
&stream->timing,
@@ -759,11 +708,21 @@ enum dc_status dcn20_enable_stream_timing(
pipe_ctx->stream->signal,
true);
- if (pipe_ctx->stream_res.tg->funcs->setup_global_lock)
- pipe_ctx->stream_res.tg->funcs->setup_global_lock(
- pipe_ctx->stream_res.tg);
+ rate_control_2x_pclk = rate_control_2x_pclk || opp_cnt > 1;
+ flow_control.flow_ctrl_mode = 0;
+ flow_control.flow_ctrl_cnt0 = 0x80;
+ flow_control.flow_ctrl_cnt1 = calc_mpc_flow_ctrl_cnt(stream, opp_cnt);
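+	/* Program MPC output rate control for every OPP feeding this OTG. */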
+ if (mpc->funcs->set_out_rate_control) {
+ for (i = 0; i < opp_cnt; ++i) {
+ mpc->funcs->set_out_rate_control(
+ mpc, opp_inst[i],
+ true,
+ rate_control_2x_pclk,
+ &flow_control);
+ }
+ }
- if (odm_pipe)
+ for (odm_pipe = pipe_ctx->next_odm_pipe; odm_pipe; odm_pipe = odm_pipe->next_odm_pipe)
odm_pipe->stream_res.opp->funcs->opp_pipe_clock_control(
odm_pipe->stream_res.opp,
true);
@@ -772,7 +731,7 @@ enum dc_status dcn20_enable_stream_timing(
pipe_ctx->stream_res.opp,
true);
- dc->hwss.blank_pixel_data(dc, pipe_ctx, true);
+ hws->funcs.blank_pixel_data(dc, pipe_ctx, true);
/* VTG is within DCHUB command block. DCFCLK is always on */
if (false == pipe_ctx->stream_res.tg->funcs->enable_crtc(pipe_ctx->stream_res.tg)) {
@@ -780,10 +739,12 @@ enum dc_status dcn20_enable_stream_timing(
return DC_ERROR_UNEXPECTED;
}
- dcn20_hwss_wait_for_blank_complete(pipe_ctx->stream_res.opp);
+ hws->funcs.wait_for_blank_complete(pipe_ctx->stream_res.opp);
params.vertical_total_min = stream->adjust.v_total_min;
params.vertical_total_max = stream->adjust.v_total_max;
+ params.vertical_total_mid = stream->adjust.v_total_mid;
+ params.vertical_total_mid_frame_num = stream->adjust.v_total_mid_frame_num;
if (pipe_ctx->stream_res.tg->funcs->set_drr)
pipe_ctx->stream_res.tg->funcs->set_drr(
pipe_ctx->stream_res.tg, &params);
@@ -791,9 +752,13 @@ enum dc_status dcn20_enable_stream_timing(
// DRR should set trigger event to monitor surface update event
if (stream->adjust.v_total_min != 0 && stream->adjust.v_total_max != 0)
event_triggers = 0x80;
+	/* Event triggers and the number of frames are initialized for DRR here,
+	 * but can later be updated for PSR use. Note that DRR trigger events are
+	 * generated regardless of whether the frame-count threshold is met.
+	 */
if (pipe_ctx->stream_res.tg->funcs->set_static_screen_control)
pipe_ctx->stream_res.tg->funcs->set_static_screen_control(
- pipe_ctx->stream_res.tg, event_triggers);
+ pipe_ctx->stream_res.tg, event_triggers, 2);
/* TODO program crtc source select for non-virtual signal*/
/* TODO program FMT */
@@ -814,6 +779,10 @@ void dcn20_program_output_csc(struct dc *dc,
{
struct mpc *mpc = dc->res_pool->mpc;
enum mpc_output_csc_mode ocsc_mode = MPC_OUTPUT_CSC_COEF_A;
+ int mpcc_id = pipe_ctx->plane_res.hubp->inst;
+
+ if (mpc->funcs->power_on_mpc_mem_pwr)
+ mpc->funcs->power_on_mpc_mem_pwr(mpc, mpcc_id, true);
if (pipe_ctx->stream->csc_color_matrix.enable_adjustment == true) {
if (mpc->funcs->set_output_csc != NULL)
@@ -830,7 +799,7 @@ void dcn20_program_output_csc(struct dc *dc,
}
}
-bool dcn20_set_output_transfer_func(struct pipe_ctx *pipe_ctx,
+bool dcn20_set_output_transfer_func(struct dc *dc, struct pipe_ctx *pipe_ctx,
const struct dc_stream_state *stream)
{
int mpcc_id = pipe_ctx->plane_res.hubp->inst;
@@ -843,7 +812,9 @@ bool dcn20_set_output_transfer_func(struct pipe_ctx *pipe_ctx,
* if programming for all pipes is required then remove condition
* pipe_ctx->top_pipe == NULL ,but then fix the diagnostic.
*/
- if ((pipe_ctx->top_pipe == NULL || dc_res_is_odm_head_pipe(pipe_ctx))
+ if (mpc->funcs->power_on_mpc_mem_pwr)
+ mpc->funcs->power_on_mpc_mem_pwr(mpc, mpcc_id, true);
+ if (pipe_ctx->top_pipe == NULL
&& mpc->funcs->set_output_gamma && stream->out_transfer_func) {
if (stream->out_transfer_func->type == TF_TYPE_HWPWL)
params = &stream->out_transfer_func->pwl;
@@ -867,7 +838,7 @@ bool dcn20_set_output_transfer_func(struct pipe_ctx *pipe_ctx,
return true;
}
-static bool dcn20_set_blend_lut(
+bool dcn20_set_blend_lut(
struct pipe_ctx *pipe_ctx, const struct dc_plane_state *plane_state)
{
struct dpp *dpp_base = pipe_ctx->plane_res.dpp;
@@ -889,7 +860,7 @@ static bool dcn20_set_blend_lut(
return result;
}
-static bool dcn20_set_shaper_3dlut(
+bool dcn20_set_shaper_3dlut(
struct pipe_ctx *pipe_ctx, const struct dc_plane_state *plane_state)
{
struct dpp *dpp_base = pipe_ctx->plane_res.dpp;
@@ -909,26 +880,20 @@ static bool dcn20_set_shaper_3dlut(
result = dpp_base->funcs->dpp_program_shaper_lut(dpp_base, shaper_lut);
if (plane_state->lut3d_func &&
- plane_state->lut3d_func->initialized == true)
+ plane_state->lut3d_func->state.bits.initialized == 1)
result = dpp_base->funcs->dpp_program_3dlut(dpp_base,
&plane_state->lut3d_func->lut_3d);
else
result = dpp_base->funcs->dpp_program_3dlut(dpp_base, NULL);
- if (plane_state->lut3d_func &&
- plane_state->lut3d_func->initialized == true &&
- plane_state->lut3d_func->hdr_multiplier != 0)
- dpp_base->funcs->dpp_set_hdr_multiplier(dpp_base,
- plane_state->lut3d_func->hdr_multiplier);
- else
- dpp_base->funcs->dpp_set_hdr_multiplier(dpp_base, 0x1f000);
-
return result;
}
-bool dcn20_set_input_transfer_func(struct pipe_ctx *pipe_ctx,
- const struct dc_plane_state *plane_state)
+bool dcn20_set_input_transfer_func(struct dc *dc,
+ struct pipe_ctx *pipe_ctx,
+ const struct dc_plane_state *plane_state)
{
+ struct dce_hwseq *hws = dc->hwseq;
struct dpp *dpp_base = pipe_ctx->plane_res.dpp;
const struct dc_transfer_func *tf = NULL;
bool result = true;
@@ -937,8 +902,8 @@ bool dcn20_set_input_transfer_func(struct pipe_ctx *pipe_ctx,
if (dpp_base == NULL || plane_state == NULL)
return false;
- dcn20_set_shaper_3dlut(pipe_ctx, plane_state);
- dcn20_set_blend_lut(pipe_ctx, plane_state);
+ hws->funcs.set_shaper_3dlut(pipe_ctx, plane_state);
+ hws->funcs.set_blend_lut(pipe_ctx, plane_state);
if (plane_state->in_transfer_func)
tf = plane_state->in_transfer_func;
@@ -983,6 +948,11 @@ bool dcn20_set_input_transfer_func(struct pipe_ctx *pipe_ctx,
IPP_DEGAMMA_MODE_BYPASS);
break;
case TRANSFER_FUNCTION_PQ:
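+			/* Translate the PQ curve into a user PWL for the degamma block. */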
+ dpp_base->funcs->dpp_set_degamma(dpp_base, IPP_DEGAMMA_MODE_USER_PWL);
+ cm_helper_translate_curve_to_degamma_hw_format(tf, &dpp_base->degamma_params);
+ dpp_base->funcs->dpp_program_degamma_pwl(dpp_base, &dpp_base->degamma_params);
+ result = true;
+ break;
default:
result = false;
break;
@@ -1003,16 +973,22 @@ bool dcn20_set_input_transfer_func(struct pipe_ctx *pipe_ctx,
return result;
}
-static void dcn20_update_odm(struct dc *dc, struct dc_state *context, struct pipe_ctx *pipe_ctx)
+void dcn20_update_odm(struct dc *dc, struct dc_state *context, struct pipe_ctx *pipe_ctx)
{
- struct pipe_ctx *combine_pipe = dc_res_get_odm_bottom_pipe(pipe_ctx);
+ struct pipe_ctx *odm_pipe;
+ int opp_cnt = 1;
+ int opp_inst[MAX_PIPES] = { pipe_ctx->stream_res.opp->inst };
+
+ for (odm_pipe = pipe_ctx->next_odm_pipe; odm_pipe; odm_pipe = odm_pipe->next_odm_pipe) {
+ opp_inst[opp_cnt] = odm_pipe->stream_res.opp->inst;
+ opp_cnt++;
+ }
- if (combine_pipe)
+ if (opp_cnt > 1)
pipe_ctx->stream_res.tg->funcs->set_odm_combine(
pipe_ctx->stream_res.tg,
- combine_pipe->stream_res.opp->inst,
- pipe_ctx->plane_res.scl_data.h_active,
- pipe_ctx->stream->timing.pixel_encoding);
+ opp_inst, opp_cnt,
+ &pipe_ctx->stream->timing);
else
pipe_ctx->stream_res.tg->funcs->set_odm_bypass(
pipe_ctx->stream_res.tg, &pipe_ctx->stream->timing);
@@ -1028,49 +1004,61 @@ void dcn20_blank_pixel_data(
struct dc_stream_state *stream = pipe_ctx->stream;
enum dc_color_space color_space = stream->output_color_space;
enum controller_dp_test_pattern test_pattern = CONTROLLER_DP_TEST_PATTERN_SOLID_COLOR;
- struct pipe_ctx *bot_odm_pipe = dc_res_get_odm_bottom_pipe(pipe_ctx);
+ enum controller_dp_color_space test_pattern_color_space = CONTROLLER_DP_COLOR_SPACE_UDEFINED;
+ struct pipe_ctx *odm_pipe;
+ int odm_cnt = 1;
int width = stream->timing.h_addressable + stream->timing.h_border_left + stream->timing.h_border_right;
int height = stream->timing.v_addressable + stream->timing.v_border_bottom + stream->timing.v_border_top;
+ if (stream->link->test_pattern_enabled)
+ return;
+
/* get opp dpg blank color */
color_space_to_black_color(dc, color_space, &black_color);
- if (bot_odm_pipe)
- width = width / 2;
+ for (odm_pipe = pipe_ctx->next_odm_pipe; odm_pipe; odm_pipe = odm_pipe->next_odm_pipe)
+ odm_cnt++;
+
+ width = width / odm_cnt;
if (blank) {
- if (stream_res->abm)
- stream_res->abm->funcs->set_abm_immediate_disable(stream_res->abm);
+ dc->hwss.set_abm_immediate_disable(pipe_ctx);
- if (dc->debug.visual_confirm != VISUAL_CONFIRM_DISABLE)
+ if (dc->debug.visual_confirm != VISUAL_CONFIRM_DISABLE) {
test_pattern = CONTROLLER_DP_TEST_PATTERN_COLORSQUARES;
+ test_pattern_color_space = CONTROLLER_DP_COLOR_SPACE_RGB;
+ }
} else {
test_pattern = CONTROLLER_DP_TEST_PATTERN_VIDEOMODE;
}
- stream_res->opp->funcs->opp_set_disp_pattern_generator(
- stream_res->opp,
+ dc->hwss.set_disp_pattern_generator(dc,
+ pipe_ctx,
test_pattern,
+ test_pattern_color_space,
stream->timing.display_color_depth,
&black_color,
width,
- height);
+ height,
+ 0);
- if (bot_odm_pipe) {
- bot_odm_pipe->stream_res.opp->funcs->opp_set_disp_pattern_generator(
- bot_odm_pipe->stream_res.opp,
- dc->debug.visual_confirm != VISUAL_CONFIRM_DISABLE ?
+ for (odm_pipe = pipe_ctx->next_odm_pipe; odm_pipe; odm_pipe = odm_pipe->next_odm_pipe) {
+ dc->hwss.set_disp_pattern_generator(dc,
+ odm_pipe,
+ dc->debug.visual_confirm != VISUAL_CONFIRM_DISABLE && blank ?
CONTROLLER_DP_TEST_PATTERN_COLORRAMP : test_pattern,
+ test_pattern_color_space,
stream->timing.display_color_depth,
&black_color,
width,
- height);
+ height,
+ 0);
}
if (!blank)
if (stream_res->abm) {
- stream_res->abm->funcs->set_pipe(stream_res->abm, stream_res->tg->inst + 1);
+ dc->hwss.set_pipe(pipe_ctx);
stream_res->abm->funcs->set_abm_level(stream_res->abm, stream->abm_level);
}
}
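The blanking path above divides the addressable width evenly across however many ODM segments are chained, replacing the old hard-coded halving for a single bottom ODM pipe. A tiny sketch of that split, assuming the driver's even-division convention:

/* Width seen by each OPP's display pattern generator when the timing is
 * driven through odm_cnt ODM segments. Assumes an even split; odd
 * remainders are not handled in this sketch. */
static int per_odm_width(int h_addressable, int h_border_left,
			 int h_border_right, int odm_cnt)
{
	int width = h_addressable + h_border_left + h_border_right;

	return odm_cnt > 0 ? width / odm_cnt : width;
}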
@@ -1084,8 +1072,13 @@ static void dcn20_power_on_plane(
if (REG(DC_IP_REQUEST_CNTL)) {
REG_SET(DC_IP_REQUEST_CNTL, 0,
IP_REQUEST_EN, 1);
- dcn20_dpp_pg_control(hws, pipe_ctx->plane_res.dpp->inst, true);
- dcn20_hubp_pg_control(hws, pipe_ctx->plane_res.hubp->inst, true);
+
+ if (hws->funcs.dpp_pg_control)
+ hws->funcs.dpp_pg_control(hws, pipe_ctx->plane_res.dpp->inst, true);
+
+ if (hws->funcs.hubp_pg_control)
+ hws->funcs.hubp_pg_control(hws, pipe_ctx->plane_res.hubp->inst, true);
+
REG_SET(DC_IP_REQUEST_CNTL, 0,
IP_REQUEST_EN, 0);
DC_LOG_DEBUG(
@@ -1093,10 +1086,8 @@ static void dcn20_power_on_plane(
}
}
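The DPP/HUBP power-gate calls become optional function pointers here, but the register bracket is unchanged: raise IP_REQUEST_EN, toggle the per-block power gates, then drop IP_REQUEST_EN. A standalone sketch of that bracket using callbacks; the struct and callback names are illustrative, and REG_SET(DC_IP_REQUEST_CNTL, ...) is represented only by a stand-in setter.

#include <stdbool.h>
#include <stddef.h>

struct pg_ops_stub {
	void (*dpp_pg_control)(int inst, bool power_on);	/* may be NULL */
	void (*hubp_pg_control)(int inst, bool power_on);	/* may be NULL */
};

struct pg_regs_stub {
	void (*set_ip_request_en)(int value);	/* stand-in for REG_SET(DC_IP_REQUEST_CNTL, ...) */
};

static void power_on_plane_sketch(const struct pg_regs_stub *regs,
				  const struct pg_ops_stub *ops,
				  int dpp_inst, int hubp_inst)
{
	regs->set_ip_request_en(1);		/* open the IP request window */

	if (ops->dpp_pg_control)
		ops->dpp_pg_control(dpp_inst, true);
	if (ops->hubp_pg_control)
		ops->hubp_pg_control(hubp_inst, true);

	regs->set_ip_request_en(0);		/* close it again */
}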
-void dcn20_enable_plane(
- struct dc *dc,
- struct pipe_ctx *pipe_ctx,
- struct dc_state *context)
+static void dcn20_enable_plane(struct dc *dc, struct pipe_ctx *pipe_ctx,
+ struct dc_state *context)
{
//if (dc->debug.sanity_checks) {
// dcn10_verify_allow_pstate_change_high(dc);
@@ -1106,6 +1097,9 @@ void dcn20_enable_plane(
/* enable DCFCLK current DCHUB */
pipe_ctx->plane_res.hubp->funcs->hubp_clk_cntl(pipe_ctx->plane_res.hubp, true);
+ /* initialize HUBP on power up */
+ pipe_ctx->plane_res.hubp->funcs->hubp_init(pipe_ctx->plane_res.hubp);
+
/* make sure OPP_PIPE_CLOCK_EN = 1 */
pipe_ctx->stream_res.opp->funcs->opp_pipe_clock_control(
pipe_ctx->stream_res.opp,
@@ -1153,56 +1147,444 @@ void dcn20_enable_plane(
apt.sys_default.quad_part = 0;
- apt.sys_high.quad_part = dc->vm_pa_config.system_aperture.start_addr;
- apt.sys_low.quad_part = dc->vm_pa_config.system_aperture.end_addr;
+ apt.sys_low.quad_part = dc->vm_pa_config.system_aperture.start_addr;
+ apt.sys_high.quad_part = dc->vm_pa_config.system_aperture.end_addr;
// Program system aperture settings
pipe_ctx->plane_res.hubp->funcs->hubp_set_vm_system_aperture_settings(pipe_ctx->plane_res.hubp, &apt);
}
+ if (!pipe_ctx->top_pipe
+ && pipe_ctx->plane_state
+ && pipe_ctx->plane_state->flip_int_enabled
+ && pipe_ctx->plane_res.hubp->funcs->hubp_set_flip_int)
+ pipe_ctx->plane_res.hubp->funcs->hubp_set_flip_int(pipe_ctx->plane_res.hubp);
+
// if (dc->debug.sanity_checks) {
// dcn10_verify_allow_pstate_change_high(dc);
// }
}
+void dcn20_pipe_control_lock(
+ struct dc *dc,
+ struct pipe_ctx *pipe,
+ bool lock)
+{
+ struct pipe_ctx *temp_pipe;
+ bool flip_immediate = false;
-static void dcn20_program_pipe(
- struct dc *dc,
- struct pipe_ctx *pipe_ctx,
- struct dc_state *context)
+ /* use TG master update lock to lock everything on the TG
+ * therefore only the top pipe needs to take the lock
+ */
+ if (!pipe || pipe->top_pipe)
+ return;
+
+ if (pipe->plane_state != NULL)
+ flip_immediate = pipe->plane_state->flip_immediate;
+
+ if (pipe->stream_res.gsl_group > 0) {
+ temp_pipe = pipe->bottom_pipe;
+ while (!flip_immediate && temp_pipe) {
+ if (temp_pipe->plane_state != NULL)
+ flip_immediate = temp_pipe->plane_state->flip_immediate;
+ temp_pipe = temp_pipe->bottom_pipe;
+ }
+ }
+
+ if (flip_immediate && lock) {
+ const int TIMEOUT_FOR_FLIP_PENDING = 100000;
+ int i;
+
+ temp_pipe = pipe;
+ while (temp_pipe) {
+ if (temp_pipe->plane_state && temp_pipe->plane_state->flip_immediate) {
+ for (i = 0; i < TIMEOUT_FOR_FLIP_PENDING; ++i) {
+ if (!temp_pipe->plane_res.hubp->funcs->hubp_is_flip_pending(temp_pipe->plane_res.hubp))
+ break;
+ udelay(1);
+ }
+
+ /* no reason it should take this long for immediate flips */
+ ASSERT(i != TIMEOUT_FOR_FLIP_PENDING);
+ }
+ temp_pipe = temp_pipe->bottom_pipe;
+ }
+ }
+
+ /* In flip immediate and pipe splitting case, we need to use GSL
+ * for synchronization. Only do setup on locking and on flip type change.
+ */
+ if (lock && (pipe->bottom_pipe != NULL || !flip_immediate))
+ if ((flip_immediate && pipe->stream_res.gsl_group == 0) ||
+ (!flip_immediate && pipe->stream_res.gsl_group > 0))
+ dcn20_setup_gsl_group_as_lock(dc, pipe, flip_immediate);
+
+ if (pipe->plane_state != NULL)
+ flip_immediate = pipe->plane_state->flip_immediate;
+
+ temp_pipe = pipe->bottom_pipe;
+ while (flip_immediate && temp_pipe) {
+ if (temp_pipe->plane_state != NULL)
+ flip_immediate = temp_pipe->plane_state->flip_immediate;
+ temp_pipe = temp_pipe->bottom_pipe;
+ }
+
+ if (!lock && pipe->stream_res.gsl_group > 0 && pipe->plane_state &&
+ !flip_immediate)
+ dcn20_setup_gsl_group_as_lock(dc, pipe, false);
+
+ if (pipe->stream && should_use_dmub_lock(pipe->stream->link)) {
+ union dmub_hw_lock_flags hw_locks = { 0 };
+ struct dmub_hw_lock_inst_flags inst_flags = { 0 };
+
+ hw_locks.bits.lock_pipe = 1;
+ inst_flags.otg_inst = pipe->stream_res.tg->inst;
+
+ if (pipe->plane_state != NULL)
+ hw_locks.bits.triple_buffer_lock = pipe->plane_state->triplebuffer_flips;
+
+ dmub_hw_lock_mgr_cmd(dc->ctx->dmub_srv,
+ lock,
+ &hw_locks,
+ &inst_flags);
+ } else if (pipe->plane_state != NULL && pipe->plane_state->triplebuffer_flips) {
+ if (lock)
+ pipe->stream_res.tg->funcs->triplebuffer_lock(pipe->stream_res.tg);
+ else
+ pipe->stream_res.tg->funcs->triplebuffer_unlock(pipe->stream_res.tg);
+ } else {
+ if (lock)
+ pipe->stream_res.tg->funcs->lock(pipe->stream_res.tg);
+ else
+ pipe->stream_res.tg->funcs->unlock(pipe->stream_res.tg);
+ }
+}
+
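Before taking the lock, dcn20_pipe_control_lock waits out any pending immediate flips, polling hubp_is_flip_pending once per microsecond for up to TIMEOUT_FOR_FLIP_PENDING iterations. A generic bounded-poll sketch of that wait, with userspace usleep() standing in for the kernel's udelay():

#include <stdbool.h>
#include <unistd.h>

/* Poll pending(arg) once per microsecond until it clears or the budget runs
 * out; returns true if it cleared in time. In the driver the budget is
 * TIMEOUT_FOR_FLIP_PENDING (100000) and the delay is udelay(1). */
static bool wait_flip_not_pending(bool (*pending)(void *), void *arg, int timeout_us)
{
	int i;

	for (i = 0; i < timeout_us; i++) {
		if (!pending(arg))
			return true;
		usleep(1);
	}
	return false;	/* the driver ASSERTs here: immediate flips should not take this long */
}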
+static void dcn20_detect_pipe_changes(struct pipe_ctx *old_pipe, struct pipe_ctx *new_pipe)
{
- pipe_ctx->plane_state->update_flags.bits.full_update =
- context->commit_hints.full_update_needed ? 1 : pipe_ctx->plane_state->update_flags.bits.full_update;
+ new_pipe->update_flags.raw = 0;
- if (pipe_ctx->plane_state->update_flags.bits.full_update)
- dcn20_enable_plane(dc, pipe_ctx, context);
+ /* Exit on unchanged, unused pipe */
+ if (!old_pipe->plane_state && !new_pipe->plane_state)
+ return;
+ /* Detect pipe enable/disable */
+ if (!old_pipe->plane_state && new_pipe->plane_state) {
+ new_pipe->update_flags.bits.enable = 1;
+ new_pipe->update_flags.bits.mpcc = 1;
+ new_pipe->update_flags.bits.dppclk = 1;
+ new_pipe->update_flags.bits.hubp_interdependent = 1;
+ new_pipe->update_flags.bits.hubp_rq_dlg_ttu = 1;
+ new_pipe->update_flags.bits.gamut_remap = 1;
+ new_pipe->update_flags.bits.scaler = 1;
+ new_pipe->update_flags.bits.viewport = 1;
+ new_pipe->update_flags.bits.det_size = 1;
+ if (!new_pipe->top_pipe && !new_pipe->prev_odm_pipe) {
+ new_pipe->update_flags.bits.odm = 1;
+ new_pipe->update_flags.bits.global_sync = 1;
+ }
+ return;
+ }
+ if (old_pipe->plane_state && !new_pipe->plane_state) {
+ new_pipe->update_flags.bits.disable = 1;
+ return;
+ }
- update_dchubp_dpp(dc, pipe_ctx, context);
+ /* Detect plane change */
+ if (old_pipe->plane_state != new_pipe->plane_state) {
+ new_pipe->update_flags.bits.plane_changed = true;
+ }
- set_hdr_multiplier(pipe_ctx);
+ /* Detect top pipe only changes */
+ if (!new_pipe->top_pipe && !new_pipe->prev_odm_pipe) {
+ /* Detect odm changes */
+ if ((old_pipe->next_odm_pipe && new_pipe->next_odm_pipe
+ && old_pipe->next_odm_pipe->pipe_idx != new_pipe->next_odm_pipe->pipe_idx)
+ || (!old_pipe->next_odm_pipe && new_pipe->next_odm_pipe)
+ || (old_pipe->next_odm_pipe && !new_pipe->next_odm_pipe)
+ || old_pipe->stream_res.opp != new_pipe->stream_res.opp)
+ new_pipe->update_flags.bits.odm = 1;
+
+ /* Detect global sync changes */
+ if (old_pipe->pipe_dlg_param.vready_offset != new_pipe->pipe_dlg_param.vready_offset
+ || old_pipe->pipe_dlg_param.vstartup_start != new_pipe->pipe_dlg_param.vstartup_start
+ || old_pipe->pipe_dlg_param.vupdate_offset != new_pipe->pipe_dlg_param.vupdate_offset
+ || old_pipe->pipe_dlg_param.vupdate_width != new_pipe->pipe_dlg_param.vupdate_width)
+ new_pipe->update_flags.bits.global_sync = 1;
+ }
- if (pipe_ctx->plane_state->update_flags.bits.full_update ||
- pipe_ctx->plane_state->update_flags.bits.in_transfer_func_change ||
- pipe_ctx->plane_state->update_flags.bits.gamma_change)
- dc->hwss.set_input_transfer_func(pipe_ctx, pipe_ctx->plane_state);
+ if (old_pipe->det_buffer_size_kb != new_pipe->det_buffer_size_kb)
+ new_pipe->update_flags.bits.det_size = 1;
- /* dcn10_translate_regamma_to_hw_format takes 750us to finish
- * only do gamma programming for full update.
- * TODO: This can be further optimized/cleaned up
- * Always call this for now since it does memcmp inside before
- * doing heavy calculation and programming
+ /*
+ * Detect opp / tg change, only set on change, not on enable
+ * Assume mpcc inst = pipe index; if not, this code needs to be updated
+ * since mpcc is what is affected by these. In fact, all of our sequences
+ * make this assumption at the moment, given how hubp reset is matched to
+ * the same-index mpcc reset.
+ */
+ if (old_pipe->stream_res.opp != new_pipe->stream_res.opp)
+ new_pipe->update_flags.bits.opp_changed = 1;
+ if (old_pipe->stream_res.tg != new_pipe->stream_res.tg)
+ new_pipe->update_flags.bits.tg_changed = 1;
+
+ /*
+ * Detect mpcc blending changes, only dpp inst and opp matter here,
+ * mpccs getting removed/inserted update connected ones during their own
+ * programming
*/
- if (pipe_ctx->plane_state->update_flags.bits.full_update)
- dc->hwss.set_output_transfer_func(pipe_ctx, pipe_ctx->stream);
+ if (old_pipe->plane_res.dpp != new_pipe->plane_res.dpp
+ || old_pipe->stream_res.opp != new_pipe->stream_res.opp)
+ new_pipe->update_flags.bits.mpcc = 1;
+
+ /* Detect dppclk change */
+ if (old_pipe->plane_res.bw.dppclk_khz != new_pipe->plane_res.bw.dppclk_khz)
+ new_pipe->update_flags.bits.dppclk = 1;
+
+ /* Check for scl update */
+ if (memcmp(&old_pipe->plane_res.scl_data, &new_pipe->plane_res.scl_data, sizeof(struct scaler_data)))
+ new_pipe->update_flags.bits.scaler = 1;
+ /* Check for vp update */
+ if (memcmp(&old_pipe->plane_res.scl_data.viewport, &new_pipe->plane_res.scl_data.viewport, sizeof(struct rect))
+ || memcmp(&old_pipe->plane_res.scl_data.viewport_c,
+ &new_pipe->plane_res.scl_data.viewport_c, sizeof(struct rect)))
+ new_pipe->update_flags.bits.viewport = 1;
+
+ /* Detect dlg/ttu/rq updates */
+ {
+ struct _vcs_dpi_display_dlg_regs_st old_dlg_attr = old_pipe->dlg_regs;
+ struct _vcs_dpi_display_ttu_regs_st old_ttu_attr = old_pipe->ttu_regs;
+ struct _vcs_dpi_display_dlg_regs_st *new_dlg_attr = &new_pipe->dlg_regs;
+ struct _vcs_dpi_display_ttu_regs_st *new_ttu_attr = &new_pipe->ttu_regs;
+
+ /* Detect pipe interdependent updates */
+ if (old_dlg_attr.dst_y_prefetch != new_dlg_attr->dst_y_prefetch ||
+ old_dlg_attr.vratio_prefetch != new_dlg_attr->vratio_prefetch ||
+ old_dlg_attr.vratio_prefetch_c != new_dlg_attr->vratio_prefetch_c ||
+ old_dlg_attr.dst_y_per_vm_vblank != new_dlg_attr->dst_y_per_vm_vblank ||
+ old_dlg_attr.dst_y_per_row_vblank != new_dlg_attr->dst_y_per_row_vblank ||
+ old_dlg_attr.dst_y_per_vm_flip != new_dlg_attr->dst_y_per_vm_flip ||
+ old_dlg_attr.dst_y_per_row_flip != new_dlg_attr->dst_y_per_row_flip ||
+ old_dlg_attr.refcyc_per_meta_chunk_vblank_l != new_dlg_attr->refcyc_per_meta_chunk_vblank_l ||
+ old_dlg_attr.refcyc_per_meta_chunk_vblank_c != new_dlg_attr->refcyc_per_meta_chunk_vblank_c ||
+ old_dlg_attr.refcyc_per_meta_chunk_flip_l != new_dlg_attr->refcyc_per_meta_chunk_flip_l ||
+ old_dlg_attr.refcyc_per_line_delivery_pre_l != new_dlg_attr->refcyc_per_line_delivery_pre_l ||
+ old_dlg_attr.refcyc_per_line_delivery_pre_c != new_dlg_attr->refcyc_per_line_delivery_pre_c ||
+ old_ttu_attr.refcyc_per_req_delivery_pre_l != new_ttu_attr->refcyc_per_req_delivery_pre_l ||
+ old_ttu_attr.refcyc_per_req_delivery_pre_c != new_ttu_attr->refcyc_per_req_delivery_pre_c ||
+ old_ttu_attr.refcyc_per_req_delivery_pre_cur0 != new_ttu_attr->refcyc_per_req_delivery_pre_cur0 ||
+ old_ttu_attr.refcyc_per_req_delivery_pre_cur1 != new_ttu_attr->refcyc_per_req_delivery_pre_cur1 ||
+ old_ttu_attr.min_ttu_vblank != new_ttu_attr->min_ttu_vblank ||
+ old_ttu_attr.qos_level_flip != new_ttu_attr->qos_level_flip) {
+ old_dlg_attr.dst_y_prefetch = new_dlg_attr->dst_y_prefetch;
+ old_dlg_attr.vratio_prefetch = new_dlg_attr->vratio_prefetch;
+ old_dlg_attr.vratio_prefetch_c = new_dlg_attr->vratio_prefetch_c;
+ old_dlg_attr.dst_y_per_vm_vblank = new_dlg_attr->dst_y_per_vm_vblank;
+ old_dlg_attr.dst_y_per_row_vblank = new_dlg_attr->dst_y_per_row_vblank;
+ old_dlg_attr.dst_y_per_vm_flip = new_dlg_attr->dst_y_per_vm_flip;
+ old_dlg_attr.dst_y_per_row_flip = new_dlg_attr->dst_y_per_row_flip;
+ old_dlg_attr.refcyc_per_meta_chunk_vblank_l = new_dlg_attr->refcyc_per_meta_chunk_vblank_l;
+ old_dlg_attr.refcyc_per_meta_chunk_vblank_c = new_dlg_attr->refcyc_per_meta_chunk_vblank_c;
+ old_dlg_attr.refcyc_per_meta_chunk_flip_l = new_dlg_attr->refcyc_per_meta_chunk_flip_l;
+ old_dlg_attr.refcyc_per_line_delivery_pre_l = new_dlg_attr->refcyc_per_line_delivery_pre_l;
+ old_dlg_attr.refcyc_per_line_delivery_pre_c = new_dlg_attr->refcyc_per_line_delivery_pre_c;
+ old_ttu_attr.refcyc_per_req_delivery_pre_l = new_ttu_attr->refcyc_per_req_delivery_pre_l;
+ old_ttu_attr.refcyc_per_req_delivery_pre_c = new_ttu_attr->refcyc_per_req_delivery_pre_c;
+ old_ttu_attr.refcyc_per_req_delivery_pre_cur0 = new_ttu_attr->refcyc_per_req_delivery_pre_cur0;
+ old_ttu_attr.refcyc_per_req_delivery_pre_cur1 = new_ttu_attr->refcyc_per_req_delivery_pre_cur1;
+ old_ttu_attr.min_ttu_vblank = new_ttu_attr->min_ttu_vblank;
+ old_ttu_attr.qos_level_flip = new_ttu_attr->qos_level_flip;
+ new_pipe->update_flags.bits.hubp_interdependent = 1;
+ }
+ /* Detect any other updates to ttu/rq/dlg */
+ if (memcmp(&old_dlg_attr, &new_pipe->dlg_regs, sizeof(old_dlg_attr)) ||
+ memcmp(&old_ttu_attr, &new_pipe->ttu_regs, sizeof(old_ttu_attr)) ||
+ memcmp(&old_pipe->rq_regs, &new_pipe->rq_regs, sizeof(old_pipe->rq_regs)))
+ new_pipe->update_flags.bits.hubp_rq_dlg_ttu = 1;
+ }
}
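dcn20_detect_pipe_changes packs every category of change into one bitfield union so later stages can test either individual bits or raw != 0, and it relies on memcmp to catch scaler/viewport/dlg/ttu differences. A reduced sketch of that shape; the bit list and the scaler stand-in struct are abbreviated, not the driver's full definitions:

#include <string.h>

union pipe_update_flags_stub {
	struct {
		unsigned int enable : 1;
		unsigned int disable : 1;
		unsigned int odm : 1;
		unsigned int scaler : 1;
		unsigned int viewport : 1;
		/* ...the driver has more bits (mpcc, dppclk, global_sync, ...) */
	} bits;
	unsigned int raw;	/* raw != 0  =>  something on this pipe changed */
};

struct scl_data_stub {		/* stand-in for struct scaler_data */
	int h_active, v_active;
	int taps_h, taps_v;
};

static void detect_scaler_change(const struct scl_data_stub *old_scl,
				 const struct scl_data_stub *new_scl,
				 union pipe_update_flags_stub *flags)
{
	if (memcmp(old_scl, new_scl, sizeof(*new_scl)) != 0)
		flags->bits.scaler = 1;
}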
-static void dcn20_program_all_pipe_in_tree(
+static void dcn20_update_dchubp_dpp(
+ struct dc *dc,
+ struct pipe_ctx *pipe_ctx,
+ struct dc_state *context)
+{
+ struct dce_hwseq *hws = dc->hwseq;
+ struct hubp *hubp = pipe_ctx->plane_res.hubp;
+ struct dpp *dpp = pipe_ctx->plane_res.dpp;
+ struct dc_plane_state *plane_state = pipe_ctx->plane_state;
+ bool viewport_changed = false;
+
+ if (pipe_ctx->update_flags.bits.dppclk)
+ dpp->funcs->dpp_dppclk_control(dpp, false, true);
+
+ /* TODO: Need an input parameter to tell which OTG the current DCHUB pipe is tied to.
+ * VTG is within DCHUBBUB, which is a common block shared by each pipe HUBP.
+ * VTG is a 1:1 mapping with OTG. Each pipe HUBP will select which VTG to use.
+ */
+ if (pipe_ctx->update_flags.bits.hubp_rq_dlg_ttu) {
+ hubp->funcs->hubp_vtg_sel(hubp, pipe_ctx->stream_res.tg->inst);
+
+ hubp->funcs->hubp_setup(
+ hubp,
+ &pipe_ctx->dlg_regs,
+ &pipe_ctx->ttu_regs,
+ &pipe_ctx->rq_regs,
+ &pipe_ctx->pipe_dlg_param);
+
+ if (hubp->funcs->set_unbounded_requesting)
+ hubp->funcs->set_unbounded_requesting(hubp, pipe_ctx->unbounded_req);
+ }
+ if (pipe_ctx->update_flags.bits.hubp_interdependent)
+ hubp->funcs->hubp_setup_interdependent(
+ hubp,
+ &pipe_ctx->dlg_regs,
+ &pipe_ctx->ttu_regs);
+
+ if (pipe_ctx->update_flags.bits.enable ||
+ pipe_ctx->update_flags.bits.plane_changed ||
+ plane_state->update_flags.bits.bpp_change ||
+ plane_state->update_flags.bits.input_csc_change ||
+ plane_state->update_flags.bits.color_space_change ||
+ plane_state->update_flags.bits.coeff_reduction_change) {
+ struct dc_bias_and_scale bns_params = {0};
+
+ // program the input csc
+ dpp->funcs->dpp_setup(dpp,
+ plane_state->format,
+ EXPANSION_MODE_ZERO,
+ plane_state->input_csc_color_matrix,
+ plane_state->color_space,
+ NULL);
+
+ if (dpp->funcs->dpp_program_bias_and_scale) {
+ //TODO :for CNVC set scale and bias registers if necessary
+ build_prescale_params(&bns_params, plane_state);
+ dpp->funcs->dpp_program_bias_and_scale(dpp, &bns_params);
+ }
+ }
+
+ if (pipe_ctx->update_flags.bits.mpcc
+ || pipe_ctx->update_flags.bits.plane_changed
+ || plane_state->update_flags.bits.global_alpha_change
+ || plane_state->update_flags.bits.per_pixel_alpha_change) {
+ // MPCC inst is equal to pipe index in practice
+ int mpcc_inst = hubp->inst;
+ int opp_inst;
+ int opp_count = dc->res_pool->pipe_count;
+
+ for (opp_inst = 0; opp_inst < opp_count; opp_inst++) {
+ if (dc->res_pool->opps[opp_inst]->mpcc_disconnect_pending[mpcc_inst]) {
+ dc->res_pool->mpc->funcs->wait_for_idle(dc->res_pool->mpc, mpcc_inst);
+ dc->res_pool->opps[opp_inst]->mpcc_disconnect_pending[mpcc_inst] = false;
+ break;
+ }
+ }
+ hws->funcs.update_mpcc(dc, pipe_ctx);
+ }
+
+ if (pipe_ctx->update_flags.bits.scaler ||
+ plane_state->update_flags.bits.scaling_change ||
+ plane_state->update_flags.bits.position_change ||
+ plane_state->update_flags.bits.per_pixel_alpha_change ||
+ pipe_ctx->stream->update_flags.bits.scaling) {
+ pipe_ctx->plane_res.scl_data.lb_params.alpha_en = pipe_ctx->plane_state->per_pixel_alpha;
+ ASSERT(pipe_ctx->plane_res.scl_data.lb_params.depth == LB_PIXEL_DEPTH_36BPP);
+ /* scaler configuration */
+ pipe_ctx->plane_res.dpp->funcs->dpp_set_scaler(
+ pipe_ctx->plane_res.dpp, &pipe_ctx->plane_res.scl_data);
+ }
+
+ if (pipe_ctx->update_flags.bits.viewport ||
+ (context == dc->current_state && plane_state->update_flags.bits.position_change) ||
+ (context == dc->current_state && plane_state->update_flags.bits.scaling_change) ||
+ (context == dc->current_state && pipe_ctx->stream->update_flags.bits.scaling)) {
+
+ hubp->funcs->mem_program_viewport(
+ hubp,
+ &pipe_ctx->plane_res.scl_data.viewport,
+ &pipe_ctx->plane_res.scl_data.viewport_c);
+ viewport_changed = true;
+ }
+
+ /* Any updates are handled in dc interface, just need to apply existing for plane enable */
+ if ((pipe_ctx->update_flags.bits.enable || pipe_ctx->update_flags.bits.opp_changed ||
+ pipe_ctx->update_flags.bits.scaler || viewport_changed == true) &&
+ pipe_ctx->stream->cursor_attributes.address.quad_part != 0) {
+ dc->hwss.set_cursor_position(pipe_ctx);
+ dc->hwss.set_cursor_attribute(pipe_ctx);
+
+ if (dc->hwss.set_cursor_sdr_white_level)
+ dc->hwss.set_cursor_sdr_white_level(pipe_ctx);
+ }
+
+ /* Any updates are handled in dc interface, just need
+ * to apply existing for plane enable / opp change */
+ if (pipe_ctx->update_flags.bits.enable || pipe_ctx->update_flags.bits.opp_changed
+ || pipe_ctx->stream->update_flags.bits.gamut_remap
+ || pipe_ctx->stream->update_flags.bits.out_csc) {
+ /* dpp/cm gamut remap*/
+ dc->hwss.program_gamut_remap(pipe_ctx);
+
+ /*call the dcn2 method which uses mpc csc*/
+ dc->hwss.program_output_csc(dc,
+ pipe_ctx,
+ pipe_ctx->stream->output_color_space,
+ pipe_ctx->stream->csc_color_matrix.matrix,
+ hubp->opp_id);
+ }
+
+ if (pipe_ctx->update_flags.bits.enable ||
+ pipe_ctx->update_flags.bits.plane_changed ||
+ pipe_ctx->update_flags.bits.opp_changed ||
+ plane_state->update_flags.bits.pixel_format_change ||
+ plane_state->update_flags.bits.horizontal_mirror_change ||
+ plane_state->update_flags.bits.rotation_change ||
+ plane_state->update_flags.bits.swizzle_change ||
+ plane_state->update_flags.bits.dcc_change ||
+ plane_state->update_flags.bits.bpp_change ||
+ plane_state->update_flags.bits.scaling_change ||
+ plane_state->update_flags.bits.plane_size_change) {
+ struct plane_size size = plane_state->plane_size;
+
+ size.surface_size = pipe_ctx->plane_res.scl_data.viewport;
+ hubp->funcs->hubp_program_surface_config(
+ hubp,
+ plane_state->format,
+ &plane_state->tiling_info,
+ &size,
+ plane_state->rotation,
+ &plane_state->dcc,
+ plane_state->horizontal_mirror,
+ 0);
+ hubp->power_gated = false;
+ }
+
+ if (pipe_ctx->update_flags.bits.enable ||
+ pipe_ctx->update_flags.bits.plane_changed ||
+ plane_state->update_flags.bits.addr_update)
+ hws->funcs.update_plane_addr(dc, pipe_ctx);
+
+
+
+ if (pipe_ctx->update_flags.bits.enable)
+ hubp->funcs->set_blank(hubp, false);
+}
+
+
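Within dcn20_update_dchubp_dpp above, the mpcc path first scans every OPP for a pending disconnect of this pipe's MPCC, waits for the MPC to go idle, and clears the flag before re-blending. A simplified sketch of that scan, with stand-in types; wait_for_idle stands in for mpc->funcs->wait_for_idle():

#include <stdbool.h>

#define SCAN_MAX_PIPES 6	/* illustrative; the driver uses dc->res_pool->pipe_count */

struct opp_stub {
	bool mpcc_disconnect_pending[SCAN_MAX_PIPES];
};

/* Returns the index of the OPP that still had this MPCC marked as
 * disconnect-pending (after waiting it out), or -1 if none did. */
static int clear_pending_mpcc_disconnect(struct opp_stub *opps, int opp_count,
					 int mpcc_inst, void (*wait_for_idle)(int))
{
	int opp_inst;

	for (opp_inst = 0; opp_inst < opp_count; opp_inst++) {
		if (opps[opp_inst].mpcc_disconnect_pending[mpcc_inst]) {
			wait_for_idle(mpcc_inst);
			opps[opp_inst].mpcc_disconnect_pending[mpcc_inst] = false;
			return opp_inst;
		}
	}
	return -1;
}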
+static void dcn20_program_pipe(
struct dc *dc,
struct pipe_ctx *pipe_ctx,
struct dc_state *context)
{
- if (pipe_ctx->top_pipe == NULL) {
- bool blank = !is_pipe_tree_visible(pipe_ctx);
+ struct dce_hwseq *hws = dc->hwseq;
+ /* Only need to unblank on top pipe */
+ if ((pipe_ctx->update_flags.bits.enable || pipe_ctx->stream->update_flags.bits.abm_level)
+ && !pipe_ctx->top_pipe && !pipe_ctx->prev_odm_pipe)
+ hws->funcs.blank_pixel_data(dc, pipe_ctx, !pipe_ctx->plane_state->visible);
+
+ /* Only update TG on top pipe */
+ if (pipe_ctx->update_flags.bits.global_sync && !pipe_ctx->top_pipe
+ && !pipe_ctx->prev_odm_pipe) {
pipe_ctx->stream_res.tg->funcs->program_global_sync(
pipe_ctx->stream_res.tg,
@@ -1211,192 +1593,261 @@ static void dcn20_program_all_pipe_in_tree(
pipe_ctx->pipe_dlg_param.vupdate_offset,
pipe_ctx->pipe_dlg_param.vupdate_width);
+ pipe_ctx->stream_res.tg->funcs->wait_for_state(pipe_ctx->stream_res.tg, CRTC_STATE_VACTIVE);
+
pipe_ctx->stream_res.tg->funcs->set_vtg_params(
- pipe_ctx->stream_res.tg, &pipe_ctx->stream->timing);
+ pipe_ctx->stream_res.tg, &pipe_ctx->stream->timing, true);
+
+ if (hws->funcs.setup_vupdate_interrupt)
+ hws->funcs.setup_vupdate_interrupt(dc, pipe_ctx);
+ }
- dc->hwss.blank_pixel_data(dc, pipe_ctx, blank);
+ if (pipe_ctx->update_flags.bits.odm)
+ hws->funcs.update_odm(dc, context, pipe_ctx);
- if (dc->hwss.update_odm)
- dc->hwss.update_odm(dc, context, pipe_ctx);
+ if (pipe_ctx->update_flags.bits.enable) {
+ dcn20_enable_plane(dc, pipe_ctx, context);
+ if (dc->res_pool->hubbub->funcs->force_wm_propagate_to_pipes)
+ dc->res_pool->hubbub->funcs->force_wm_propagate_to_pipes(dc->res_pool->hubbub);
}
- if (pipe_ctx->plane_state != NULL)
- dcn20_program_pipe(dc, pipe_ctx, context);
+ if (dc->res_pool->hubbub->funcs->program_det_size && pipe_ctx->update_flags.bits.det_size)
+ dc->res_pool->hubbub->funcs->program_det_size(
+ dc->res_pool->hubbub, pipe_ctx->plane_res.hubp->inst, pipe_ctx->det_buffer_size_kb);
- if (pipe_ctx->bottom_pipe != NULL && pipe_ctx->bottom_pipe != pipe_ctx)
- dcn20_program_all_pipe_in_tree(dc, pipe_ctx->bottom_pipe, context);
-}
+ if (pipe_ctx->update_flags.raw || pipe_ctx->plane_state->update_flags.raw || pipe_ctx->stream->update_flags.raw)
+ dcn20_update_dchubp_dpp(dc, pipe_ctx, context);
-void dcn20_pipe_control_lock_global(
- struct dc *dc,
- struct pipe_ctx *pipe,
- bool lock)
-{
- if (lock) {
- pipe->stream_res.tg->funcs->lock_doublebuffer_enable(
- pipe->stream_res.tg);
- pipe->stream_res.tg->funcs->lock(pipe->stream_res.tg);
- } else {
- pipe->stream_res.tg->funcs->unlock(pipe->stream_res.tg);
- pipe->stream_res.tg->funcs->wait_for_state(pipe->stream_res.tg,
- CRTC_STATE_VACTIVE);
- pipe->stream_res.tg->funcs->wait_for_state(pipe->stream_res.tg,
- CRTC_STATE_VBLANK);
- pipe->stream_res.tg->funcs->lock_doublebuffer_disable(
- pipe->stream_res.tg);
- }
-}
+ if (pipe_ctx->update_flags.bits.enable
+ || pipe_ctx->plane_state->update_flags.bits.hdr_mult)
+ hws->funcs.set_hdr_multiplier(pipe_ctx);
-void dcn20_pipe_control_lock(
- struct dc *dc,
- struct pipe_ctx *pipe,
- bool lock)
-{
- bool flip_immediate = false;
+ if (pipe_ctx->update_flags.bits.enable ||
+ pipe_ctx->plane_state->update_flags.bits.in_transfer_func_change ||
+ pipe_ctx->plane_state->update_flags.bits.gamma_change)
+ hws->funcs.set_input_transfer_func(dc, pipe_ctx, pipe_ctx->plane_state);
- /* use TG master update lock to lock everything on the TG
- * therefore only top pipe need to lock
+ /* dcn10_translate_regamma_to_hw_format takes 750us to finish
+ * only do gamma programming when powering on; the internal memcmp avoids
+ * redundant updates on slave planes
*/
- if (pipe->top_pipe)
- return;
-
- if (pipe->plane_state != NULL)
- flip_immediate = pipe->plane_state->flip_immediate;
+ if (pipe_ctx->update_flags.bits.enable || pipe_ctx->stream->update_flags.bits.out_tf)
+ hws->funcs.set_output_transfer_func(dc, pipe_ctx, pipe_ctx->stream);
- /* In flip immediate and pipe splitting case, we need to use GSL
- * for synchronization. Only do setup on locking and on flip type change.
+ /* If the pipe has been enabled or has a different opp, we
+ * should reprogram the fmt. This deals with cases where
+ * interaction between mpc and odm combine on different streams
+ * causes a different pipe to be chosen to odm combine with.
*/
- if (lock && pipe->bottom_pipe != NULL)
- if ((flip_immediate && pipe->stream_res.gsl_group == 0) ||
- (!flip_immediate && pipe->stream_res.gsl_group > 0))
- dcn20_setup_gsl_group_as_lock(dc, pipe, flip_immediate);
+ if (pipe_ctx->update_flags.bits.enable
+ || pipe_ctx->update_flags.bits.opp_changed) {
- if (pipe->plane_state != NULL && pipe->plane_state->triplebuffer_flips) {
- if (lock)
- pipe->stream_res.tg->funcs->triplebuffer_lock(pipe->stream_res.tg);
- else
- pipe->stream_res.tg->funcs->triplebuffer_unlock(pipe->stream_res.tg);
- } else {
- if (lock)
- pipe->stream_res.tg->funcs->lock(pipe->stream_res.tg);
- else
- pipe->stream_res.tg->funcs->unlock(pipe->stream_res.tg);
+ pipe_ctx->stream_res.opp->funcs->opp_set_dyn_expansion(
+ pipe_ctx->stream_res.opp,
+ COLOR_SPACE_YCBCR601,
+ pipe_ctx->stream->timing.display_color_depth,
+ pipe_ctx->stream->signal);
+
+ pipe_ctx->stream_res.opp->funcs->opp_program_fmt(
+ pipe_ctx->stream_res.opp,
+ &pipe_ctx->stream->bit_depth_params,
+ &pipe_ctx->stream->clamping);
}
}
-static void dcn20_apply_ctx_for_surface(
+void dcn20_program_front_end_for_ctx(
struct dc *dc,
- const struct dc_stream_state *stream,
- int num_planes,
struct dc_state *context)
{
-
int i;
- struct timing_generator *tg;
- bool removed_pipe[6] = { false };
- bool interdependent_update = false;
- struct pipe_ctx *top_pipe_to_program =
- find_top_pipe_for_stream(dc, context, stream);
+ struct dce_hwseq *hws = dc->hwseq;
DC_LOGGER_INIT(dc->ctx->logger);
- if (!top_pipe_to_program)
- return;
-
- tg = top_pipe_to_program->stream_res.tg;
+ /* Carry over GSL groups in case the context is changing. */
+ for (i = 0; i < dc->res_pool->pipe_count; i++) {
+ struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
+ struct pipe_ctx *old_pipe_ctx =
+ &dc->current_state->res_ctx.pipe_ctx[i];
- interdependent_update = top_pipe_to_program->plane_state &&
- top_pipe_to_program->plane_state->update_flags.bits.full_update;
+ if (pipe_ctx->stream == old_pipe_ctx->stream)
+ pipe_ctx->stream_res.gsl_group =
+ old_pipe_ctx->stream_res.gsl_group;
+ }
- if (interdependent_update)
- lock_all_pipes(dc, context, true);
- else
- dcn20_pipe_control_lock(dc, top_pipe_to_program, true);
+ if (dc->hwss.program_triplebuffer != NULL && dc->debug.enable_tri_buf) {
+ for (i = 0; i < dc->res_pool->pipe_count; i++) {
+ struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
- if (num_planes == 0) {
- /* OTG blank before remove all front end */
- dc->hwss.blank_pixel_data(dc, top_pipe_to_program, true);
+ if (!pipe_ctx->top_pipe && !pipe_ctx->prev_odm_pipe && pipe_ctx->plane_state) {
+ ASSERT(!pipe_ctx->plane_state->triplebuffer_flips);
+ /*turn off triple buffer for full update*/
+ dc->hwss.program_triplebuffer(
+ dc, pipe_ctx, pipe_ctx->plane_state->triplebuffer_flips);
+ }
+ }
}
- /* Disconnect unused mpcc */
- for (i = 0; i < dc->res_pool->pipe_count; i++) {
- struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
- struct pipe_ctx *old_pipe_ctx =
- &dc->current_state->res_ctx.pipe_ctx[i];
- /*
- * Powergate reused pipes that are not powergated
- * fairly hacky right now, using opp_id as indicator
- * TODO: After move dc_post to dc_update, this will
- * be removed.
- */
- if (pipe_ctx->plane_state && !old_pipe_ctx->plane_state) {
- if (old_pipe_ctx->stream_res.tg == tg &&
- old_pipe_ctx->plane_res.hubp &&
- old_pipe_ctx->plane_res.hubp->opp_id != OPP_ID_INVALID)
- dcn20_disable_plane(dc, old_pipe_ctx);
+ /* Set pipe update flags and lock pipes */
+ for (i = 0; i < dc->res_pool->pipe_count; i++)
+ dcn20_detect_pipe_changes(&dc->current_state->res_ctx.pipe_ctx[i],
+ &context->res_ctx.pipe_ctx[i]);
+
+ /* OTG blank before disabling all front ends */
+ for (i = 0; i < dc->res_pool->pipe_count; i++)
+ if (context->res_ctx.pipe_ctx[i].update_flags.bits.disable
+ && !context->res_ctx.pipe_ctx[i].top_pipe
+ && !context->res_ctx.pipe_ctx[i].prev_odm_pipe
+ && context->res_ctx.pipe_ctx[i].stream)
+ hws->funcs.blank_pixel_data(dc, &context->res_ctx.pipe_ctx[i], true);
+
+
+ /* Disconnect mpcc */
+ for (i = 0; i < dc->res_pool->pipe_count; i++)
+ if (context->res_ctx.pipe_ctx[i].update_flags.bits.disable
+ || context->res_ctx.pipe_ctx[i].update_flags.bits.opp_changed) {
+ struct hubbub *hubbub = dc->res_pool->hubbub;
+
+ if (hubbub->funcs->program_det_size && context->res_ctx.pipe_ctx[i].update_flags.bits.disable)
+ hubbub->funcs->program_det_size(hubbub, dc->current_state->res_ctx.pipe_ctx[i].plane_res.hubp->inst, 0);
+ hws->funcs.plane_atomic_disconnect(dc, &dc->current_state->res_ctx.pipe_ctx[i]);
+ DC_LOG_DC("Reset mpcc for pipe %d\n", dc->current_state->res_ctx.pipe_ctx[i].pipe_idx);
}
- if ((!pipe_ctx->plane_state ||
- pipe_ctx->stream_res.tg != old_pipe_ctx->stream_res.tg) &&
- old_pipe_ctx->plane_state &&
- old_pipe_ctx->stream_res.tg == tg) {
+ /*
+ * Program all updated pipes, order matters for mpcc setup. Start with
+ * top pipe and program all pipes that follow in order
+ */
+ for (i = 0; i < dc->res_pool->pipe_count; i++) {
+ struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
- dc->hwss.plane_atomic_disconnect(dc, old_pipe_ctx);
- removed_pipe[i] = true;
+ if (pipe->plane_state && !pipe->top_pipe) {
+ while (pipe) {
+ if (hws->funcs.program_pipe)
+ hws->funcs.program_pipe(dc, pipe, context);
+ else
+ dcn20_program_pipe(dc, pipe, context);
- DC_LOG_DC("Reset mpcc for pipe %d\n",
- old_pipe_ctx->pipe_idx);
+ pipe = pipe->bottom_pipe;
+ }
+ }
+ /* Program secondary blending tree and writeback pipes */
+ pipe = &context->res_ctx.pipe_ctx[i];
+ if (!pipe->top_pipe && !pipe->prev_odm_pipe
+ && pipe->stream && pipe->stream->num_wb_info > 0
+ && (pipe->update_flags.raw || (pipe->plane_state && pipe->plane_state->update_flags.raw)
+ || pipe->stream->update_flags.raw)
+ && hws->funcs.program_all_writeback_pipes_in_tree)
+ hws->funcs.program_all_writeback_pipes_in_tree(dc, pipe->stream, context);
+
+ /* Avoid underflow by checking the pipe line read when adding a 2nd plane. */
+ if (hws->wa.wait_hubpret_read_start_during_mpo_transition &&
+ !pipe->top_pipe &&
+ pipe->stream &&
+ pipe->plane_res.hubp->funcs->hubp_wait_pipe_read_start &&
+ dc->current_state->stream_status[0].plane_count == 1 &&
+ context->stream_status[0].plane_count > 1) {
+ pipe->plane_res.hubp->funcs->hubp_wait_pipe_read_start(pipe->plane_res.hubp);
}
}
+}
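dcn20_program_front_end_for_ctx programs each tree top first and then follows its bottom_pipe chain, so MPCC trees are always built top-down. A minimal sketch of that traversal with a stand-in pipe type (the real loop also skips pipes without a plane_state):

struct tree_pipe_stub {
	struct tree_pipe_stub *top_pipe;	/* NULL for the top of a blending tree */
	struct tree_pipe_stub *bottom_pipe;
	int idx;
};

/* Visit each blending tree top-down, as the loop above does with
 * hws->funcs.program_pipe() / dcn20_program_pipe(). */
static void program_trees_top_down(struct tree_pipe_stub *pipes, int pipe_count,
				   void (*program)(struct tree_pipe_stub *))
{
	int i;

	for (i = 0; i < pipe_count; i++) {
		struct tree_pipe_stub *pipe = &pipes[i];

		if (pipe->top_pipe)	/* not a tree top; reached via its top pipe instead */
			continue;
		for (; pipe; pipe = pipe->bottom_pipe)
			program(pipe);
	}
}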
+
+void dcn20_post_unlock_program_front_end(
+ struct dc *dc,
+ struct dc_state *context)
+{
+ int i;
+ const unsigned int TIMEOUT_FOR_PIPE_ENABLE_MS = 100;
+ struct dce_hwseq *hwseq = dc->hwseq;
- if (num_planes > 0)
- dcn20_program_all_pipe_in_tree(dc, top_pipe_to_program, context);
+ DC_LOGGER_INIT(dc->ctx->logger);
- /* Program secondary blending tree and writeback pipes */
- if ((stream->num_wb_info > 0) && (dc->hwss.program_all_writeback_pipes_in_tree))
- dc->hwss.program_all_writeback_pipes_in_tree(dc, stream, context);
+ for (i = 0; i < dc->res_pool->pipe_count; i++)
+ if (context->res_ctx.pipe_ctx[i].update_flags.bits.disable)
+ dc->hwss.disable_plane(dc, &dc->current_state->res_ctx.pipe_ctx[i]);
- if (interdependent_update)
- for (i = 0; i < dc->res_pool->pipe_count; i++) {
- struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
+ /*
+ * If we are enabling a pipe, we need to wait for pending clear as this is a critical
+ * part of the enable operation otherwise, DM may request an immediate flip which
+ * will cause HW to perform an "immediate enable" (as opposed to "vsync enable") which
+ * is unsupported on DCN.
+ */
+ for (i = 0; i < dc->res_pool->pipe_count; i++) {
+ struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
+ if (pipe->plane_state && !pipe->top_pipe && pipe->update_flags.bits.enable) {
+ struct hubp *hubp = pipe->plane_res.hubp;
+ int j = 0;
+
+ for (j = 0; j < TIMEOUT_FOR_PIPE_ENABLE_MS*1000
+ && hubp->funcs->hubp_is_flip_pending(hubp); j++)
+ mdelay(1);
+ }
+ }
- /* Skip inactive pipes and ones already updated */
- if (!pipe_ctx->stream || pipe_ctx->stream == stream ||
- !pipe_ctx->plane_state || !tg->funcs->is_tg_enabled(tg))
- continue;
+ for (i = 0; i < dc->res_pool->pipe_count; i++) {
+ struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
+ struct pipe_ctx *mpcc_pipe;
- pipe_ctx->plane_res.hubp->funcs->hubp_setup_interdependent(
- pipe_ctx->plane_res.hubp,
- &pipe_ctx->dlg_regs,
- &pipe_ctx->ttu_regs);
+ if (pipe->vtp_locked) {
+ dc->hwseq->funcs.wait_for_blank_complete(pipe->stream_res.opp);
+ pipe->plane_res.hubp->funcs->set_blank(pipe->plane_res.hubp, true);
+ pipe->vtp_locked = false;
+
+ for (mpcc_pipe = pipe->bottom_pipe; mpcc_pipe; mpcc_pipe = mpcc_pipe->bottom_pipe)
+ mpcc_pipe->plane_res.hubp->funcs->set_blank(mpcc_pipe->plane_res.hubp, true);
+
+ for (i = 0; i < dc->res_pool->pipe_count; i++)
+ if (context->res_ctx.pipe_ctx[i].update_flags.bits.disable)
+ dc->hwss.disable_plane(dc, &dc->current_state->res_ctx.pipe_ctx[i]);
}
+ }
+ /* WA to apply WM setting*/
+ if (hwseq->wa.DEGVIDCN21)
+ dc->res_pool->hubbub->funcs->apply_DEDCN21_147_wa(dc->res_pool->hubbub);
- if (interdependent_update)
- lock_all_pipes(dc, context, false);
- else
- dcn20_pipe_control_lock(dc, top_pipe_to_program, false);
- for (i = 0; i < dc->res_pool->pipe_count; i++)
- if (removed_pipe[i])
- dcn20_disable_plane(dc, &dc->current_state->res_ctx.pipe_ctx[i]);
-}
+ /* WA for stutter underflow during MPO transitions when adding 2nd plane */
+ if (hwseq->wa.disallow_self_refresh_during_multi_plane_transition) {
+
+ if (dc->current_state->stream_status[0].plane_count == 1 &&
+ context->stream_status[0].plane_count > 1) {
+
+ struct timing_generator *tg = dc->res_pool->timing_generators[0];
+ dc->res_pool->hubbub->funcs->allow_self_refresh_control(dc->res_pool->hubbub, false);
+
+ hwseq->wa_state.disallow_self_refresh_during_multi_plane_transition_applied = true;
+ hwseq->wa_state.disallow_self_refresh_during_multi_plane_transition_applied_on_frame = tg->funcs->get_frame_count(tg);
+ }
+ }
+}
void dcn20_prepare_bandwidth(
struct dc *dc,
struct dc_state *context)
{
struct hubbub *hubbub = dc->res_pool->hubbub;
+ unsigned int compbuf_size_kb = 0;
+
+ dc->clk_mgr->funcs->update_clocks(
+ dc->clk_mgr,
+ context,
+ false);
/* program dchubbub watermarks */
- hubbub->funcs->program_watermarks(hubbub,
+ dc->wm_optimized_required = hubbub->funcs->program_watermarks(hubbub,
&context->bw_ctx.bw.dcn.watermarks,
dc->res_pool->ref_clocks.dchub_ref_clock_inKhz / 1000,
false);
- dc->clk_mgr->funcs->update_clocks(
- dc->clk_mgr,
- context,
- false);
+ /* decrease compbuf size */
+ if (hubbub->funcs->program_compbuf_size) {
+ if (context->bw_ctx.dml.ip.min_comp_buffer_size_kbytes)
+ compbuf_size_kb = context->bw_ctx.dml.ip.min_comp_buffer_size_kbytes;
+ else
+ compbuf_size_kb = context->bw_ctx.bw.dcn.compbuf_size_kb;
+
+ hubbub->funcs->program_compbuf_size(hubbub, compbuf_size_kb, false);
+ }
}
void dcn20_optimize_bandwidth(
@@ -1404,6 +1855,7 @@ void dcn20_optimize_bandwidth(
struct dc_state *context)
{
struct hubbub *hubbub = dc->res_pool->hubbub;
+ int i;
/* program dchubbub watermarks */
hubbub->funcs->program_watermarks(hubbub,
@@ -1411,10 +1863,29 @@ void dcn20_optimize_bandwidth(
dc->res_pool->ref_clocks.dchub_ref_clock_inKhz / 1000,
true);
+ if (dc->clk_mgr->dc_mode_softmax_enabled)
+ if (dc->clk_mgr->clks.dramclk_khz > dc->clk_mgr->bw_params->dc_mode_softmax_memclk * 1000 &&
+ context->bw_ctx.bw.dcn.clk.dramclk_khz <= dc->clk_mgr->bw_params->dc_mode_softmax_memclk * 1000)
+ dc->clk_mgr->funcs->set_max_memclk(dc->clk_mgr, dc->clk_mgr->bw_params->dc_mode_softmax_memclk);
+
dc->clk_mgr->funcs->update_clocks(
dc->clk_mgr,
context,
true);
+ if (dc_extended_blank_supported(dc) && context->bw_ctx.bw.dcn.clk.zstate_support == DCN_ZSTATE_SUPPORT_ALLOW) {
+ for (i = 0; i < dc->res_pool->pipe_count; ++i) {
+ struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
+
+ if (pipe_ctx->stream && pipe_ctx->plane_res.hubp->funcs->program_extended_blank
+ && pipe_ctx->stream->adjust.v_total_min == pipe_ctx->stream->adjust.v_total_max
+ && pipe_ctx->stream->adjust.v_total_max > pipe_ctx->stream->timing.v_total)
+ pipe_ctx->plane_res.hubp->funcs->program_extended_blank(pipe_ctx->plane_res.hubp,
+ pipe_ctx->dlg_regs.optimized_min_dst_y_next_start);
+ }
+ }
+ /* increase compbuf size */
+ if (hubbub->funcs->program_compbuf_size)
+ hubbub->funcs->program_compbuf_size(hubbub, context->bw_ctx.bw.dcn.compbuf_size_kb, true);
}
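prepare_bandwidth only ever shrinks the compressed buffer (safe_to_increase = false) and prefers the DML-reported minimum, while optimize_bandwidth grows it back once the new state has taken effect. A small sketch of the size selection used while preparing; the helper name is illustrative, not the driver's API:

/* Compbuf size used while preparing bandwidth: prefer the DML minimum when
 * one is reported, otherwise fall back to the computed size. The driver then
 * passes safe_to_increase=false here and true only in optimize_bandwidth(). */
static unsigned int prepare_compbuf_size_kb(unsigned int dml_min_kb,
					    unsigned int computed_kb)
{
	return dml_min_kb ? dml_min_kb : computed_kb;
}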
bool dcn20_update_bandwidth(
@@ -1422,6 +1893,7 @@ bool dcn20_update_bandwidth(
struct dc_state *context)
{
int i;
+ struct dce_hwseq *hws = dc->hwseq;
/* recalculate DML parameters */
if (!dc->res_pool->funcs->validate_bandwidth(dc, context, false))
@@ -1448,9 +1920,13 @@ bool dcn20_update_bandwidth(
pipe_ctx->pipe_dlg_param.vupdate_width);
pipe_ctx->stream_res.tg->funcs->set_vtg_params(
- pipe_ctx->stream_res.tg, &pipe_ctx->stream->timing);
+ pipe_ctx->stream_res.tg, &pipe_ctx->stream->timing, false);
+
+ if (pipe_ctx->prev_odm_pipe == NULL)
+ hws->funcs.blank_pixel_data(dc, pipe_ctx, blank);
- dc->hwss.blank_pixel_data(dc, pipe_ctx, blank);
+ if (hws->funcs.setup_vupdate_interrupt)
+ hws->funcs.setup_vupdate_interrupt(dc, pipe_ctx);
}
pipe_ctx->plane_res.hubp->funcs->hubp_setup(
@@ -1464,10 +1940,10 @@ bool dcn20_update_bandwidth(
return true;
}
-static void dcn20_enable_writeback(
+void dcn20_enable_writeback(
struct dc *dc,
- const struct dc_stream_status *stream_status,
- struct dc_writeback_info *wb_info)
+ struct dc_writeback_info *wb_info,
+ struct dc_state *context)
{
struct dwbc *dwb;
struct mcif_wb *mcif_wb;
@@ -1479,12 +1955,11 @@ static void dcn20_enable_writeback(
mcif_wb = dc->res_pool->mcif_wb[wb_info->dwb_pipe_inst];
/* set the OPTC source mux */
- ASSERT(stream_status->primary_otg_inst < MAX_PIPES);
- optc = dc->res_pool->timing_generators[stream_status->primary_otg_inst];
+ optc = dc->res_pool->timing_generators[dwb->otg_inst];
optc->funcs->set_dwb_source(optc, wb_info->dwb_pipe_inst);
/* set MCIF_WB buffer and arbitration configuration */
mcif_wb->funcs->config_mcif_buf(mcif_wb, &wb_info->mcif_buf_params, wb_info->dwb_params.dest_height);
- mcif_wb->funcs->config_mcif_arb(mcif_wb, &dc->current_state->bw_ctx.bw.dcn.bw_writeback.mcif_wb_arb[wb_info->dwb_pipe_inst]);
+ mcif_wb->funcs->config_mcif_arb(mcif_wb, &context->bw_ctx.bw.dcn.bw_writeback.mcif_wb_arb[wb_info->dwb_pipe_inst]);
/* Enable MCIF_WB */
mcif_wb->funcs->enable_mcif(mcif_wb);
/* Enable DWB */
@@ -1507,7 +1982,7 @@ void dcn20_disable_writeback(
mcif_wb->funcs->disable_mcif(mcif_wb);
}
-bool dcn20_hwss_wait_for_blank_complete(
+bool dcn20_wait_for_blank_complete(
struct output_pixel_processor *opp)
{
int counter;
@@ -1536,32 +2011,34 @@ bool dcn20_dmdata_status_done(struct pipe_ctx *pipe_ctx)
return hubp->funcs->dmdata_status_done(hubp);
}
-static void dcn20_disable_stream_gating(struct dc *dc, struct pipe_ctx *pipe_ctx)
+void dcn20_disable_stream_gating(struct dc *dc, struct pipe_ctx *pipe_ctx)
{
-#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT
struct dce_hwseq *hws = dc->hwseq;
- struct pipe_ctx *bot_odm_pipe = dc_res_get_odm_bottom_pipe(pipe_ctx);
if (pipe_ctx->stream_res.dsc) {
- dcn20_dsc_pg_control(hws, pipe_ctx->stream_res.dsc->inst, true);
- if (bot_odm_pipe)
- dcn20_dsc_pg_control(hws, bot_odm_pipe->stream_res.dsc->inst, true);
+ struct pipe_ctx *odm_pipe = pipe_ctx->next_odm_pipe;
+
+ hws->funcs.dsc_pg_control(hws, pipe_ctx->stream_res.dsc->inst, true);
+ while (odm_pipe) {
+ hws->funcs.dsc_pg_control(hws, odm_pipe->stream_res.dsc->inst, true);
+ odm_pipe = odm_pipe->next_odm_pipe;
+ }
}
-#endif
}
-static void dcn20_enable_stream_gating(struct dc *dc, struct pipe_ctx *pipe_ctx)
+void dcn20_enable_stream_gating(struct dc *dc, struct pipe_ctx *pipe_ctx)
{
-#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT
struct dce_hwseq *hws = dc->hwseq;
- struct pipe_ctx *bot_odm_pipe = dc_res_get_odm_bottom_pipe(pipe_ctx);
if (pipe_ctx->stream_res.dsc) {
- dcn20_dsc_pg_control(hws, pipe_ctx->stream_res.dsc->inst, false);
- if (bot_odm_pipe)
- dcn20_dsc_pg_control(hws, bot_odm_pipe->stream_res.dsc->inst, false);
+ struct pipe_ctx *odm_pipe = pipe_ctx->next_odm_pipe;
+
+ hws->funcs.dsc_pg_control(hws, pipe_ctx->stream_res.dsc->inst, false);
+ while (odm_pipe) {
+ hws->funcs.dsc_pg_control(hws, odm_pipe->stream_res.dsc->inst, false);
+ odm_pipe = odm_pipe->next_odm_pipe;
+ }
}
-#endif
}
void dcn20_set_dmdata_attributes(struct pipe_ctx *pipe_ctx)
@@ -1584,12 +2061,7 @@ void dcn20_set_dmdata_attributes(struct pipe_ctx *pipe_ctx)
hubp->funcs->dmdata_set_attributes(hubp, &attr);
}
-void dcn20_disable_stream(struct pipe_ctx *pipe_ctx, int option)
-{
- dce110_disable_stream(pipe_ctx, option);
-}
-
-static void dcn20_init_vm_ctx(
+void dcn20_init_vm_ctx(
struct dce_hwseq *hws,
struct dc *dc,
struct dc_virtual_addr_space_config *va_config,
@@ -1611,7 +2083,7 @@ static void dcn20_init_vm_ctx(
dc->res_pool->hubbub->funcs->init_vm_ctx(dc->res_pool->hubbub, &config, vmid);
}
-static int dcn20_init_sys_ctx(struct dce_hwseq *hws, struct dc *dc, struct dc_phy_addr_space_config *pa_config)
+int dcn20_init_sys_ctx(struct dce_hwseq *hws, struct dc *dc, struct dc_phy_addr_space_config *pa_config)
{
struct dcn_hubbub_phys_addr_config config;
@@ -1624,6 +2096,7 @@ static int dcn20_init_sys_ctx(struct dce_hwseq *hws, struct dc *dc, struct dc_ph
config.gart_config.page_table_start_addr = pa_config->gart_config.page_table_start_addr;
config.gart_config.page_table_end_addr = pa_config->gart_config.page_table_end_addr;
config.gart_config.page_table_base_addr = pa_config->gart_config.page_table_base_addr;
+ config.page_table_default_page_addr = pa_config->page_table_default_page_addr;
return dc->res_pool->hubbub->funcs->init_dchub_sys_ctx(dc->res_pool->hubbub, &config);
}
@@ -1650,12 +2123,13 @@ static bool patch_address_for_sbs_tb_stereo(
plane_state->address.type = PLN_ADDR_TYPE_GRPH_STEREO;
plane_state->address.grph_stereo.right_addr =
plane_state->address.grph_stereo.left_addr;
+ plane_state->address.grph_stereo.right_meta_addr =
+ plane_state->address.grph_stereo.left_meta_addr;
}
return false;
}
-
-static void dcn20_update_plane_addr(const struct dc *dc, struct pipe_ctx *pipe_ctx)
+void dcn20_update_plane_addr(const struct dc *dc, struct pipe_ctx *pipe_ctx)
{
bool addr_patched = false;
PHYSICAL_ADDRESS_LOC addr;
@@ -1686,33 +2160,43 @@ static void dcn20_update_plane_addr(const struct dc *dc, struct pipe_ctx *pipe_c
void dcn20_unblank_stream(struct pipe_ctx *pipe_ctx,
struct dc_link_settings *link_settings)
{
- struct encoder_unblank_param params = { { 0 } };
+ struct encoder_unblank_param params = {0};
struct dc_stream_state *stream = pipe_ctx->stream;
struct dc_link *link = stream->link;
- params.odm = dc_res_get_odm_bottom_pipe(pipe_ctx);
+ struct dce_hwseq *hws = link->dc->hwseq;
+ struct pipe_ctx *odm_pipe;
+ params.opp_cnt = 1;
+ for (odm_pipe = pipe_ctx->next_odm_pipe; odm_pipe; odm_pipe = odm_pipe->next_odm_pipe) {
+ params.opp_cnt++;
+ }
/* only 3 items below are used by unblank */
params.timing = pipe_ctx->stream->timing;
params.link_settings.link_rate = link_settings->link_rate;
- if (dc_is_dp_signal(pipe_ctx->stream->signal)) {
- if (optc1_is_two_pixels_per_containter(&stream->timing) || params.odm)
+ if (is_dp_128b_132b_signal(pipe_ctx)) {
+ /* TODO - DP2.0 HW: Set ODM mode in dp hpo encoder here */
+ pipe_ctx->stream_res.hpo_dp_stream_enc->funcs->dp_unblank(
+ pipe_ctx->stream_res.hpo_dp_stream_enc,
+ pipe_ctx->stream_res.tg->inst);
+ } else if (dc_is_dp_signal(pipe_ctx->stream->signal)) {
+ if (optc2_is_two_pixels_per_containter(&stream->timing) || params.opp_cnt > 1)
params.timing.pix_clk_100hz /= 2;
pipe_ctx->stream_res.stream_enc->funcs->dp_set_odm_combine(
- pipe_ctx->stream_res.stream_enc, params.odm);
- pipe_ctx->stream_res.stream_enc->funcs->dp_unblank(pipe_ctx->stream_res.stream_enc, &params);
+ pipe_ctx->stream_res.stream_enc, params.opp_cnt > 1);
+ pipe_ctx->stream_res.stream_enc->funcs->dp_unblank(link, pipe_ctx->stream_res.stream_enc, &params);
}
if (link->local_sink && link->local_sink->sink_signal == SIGNAL_TYPE_EDP) {
- link->dc->hwss.edp_backlight_control(link, true);
+ hws->funcs.edp_backlight_control(link, true);
}
}
-void dcn20_setup_vupdate_interrupt(struct pipe_ctx *pipe_ctx)
+void dcn20_setup_vupdate_interrupt(struct dc *dc, struct pipe_ctx *pipe_ctx)
{
struct timing_generator *tg = pipe_ctx->stream_res.tg;
- int start_line = get_vupdate_offset_from_vsync(pipe_ctx);
+ int start_line = dc->hwss.get_vupdate_offset_from_vsync(pipe_ctx);
if (start_line < 0)
start_line = 0;
@@ -1727,6 +2211,7 @@ static void dcn20_reset_back_end_for_pipe(
struct dc_state *context)
{
int i;
+ struct dc_link *link;
DC_LOGGER_INIT(dc->ctx->logger);
if (pipe_ctx->stream_res.stream_enc == NULL) {
pipe_ctx->stream = NULL;
@@ -1734,13 +2219,35 @@ static void dcn20_reset_back_end_for_pipe(
}
if (!IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment)) {
- /* DPMS may already disable */
- if (!pipe_ctx->stream->dpms_off)
- core_link_disable_stream(pipe_ctx, FREE_ACQUIRED_RESOURCE);
- else if (pipe_ctx->stream_res.audio) {
- dc->hwss.disable_audio_stream(pipe_ctx, FREE_ACQUIRED_RESOURCE);
+ link = pipe_ctx->stream->link;
+ /* DPMS may already be disabled, or the dpms_off status
+ * may be incorrect due to the fastboot feature: when the
+ * system resumes from S4 with only a second screen,
+ * dpms_off would be true but VBIOS lit up the eDP, so
+ * check the link status too.
+ */
+ if (!pipe_ctx->stream->dpms_off || link->link_status.link_active)
+ core_link_disable_stream(pipe_ctx);
+ else if (pipe_ctx->stream_res.audio)
+ dc->hwss.disable_audio_stream(pipe_ctx);
+
+ /* free acquired resources */
+ if (pipe_ctx->stream_res.audio) {
+ /*disable az_endpoint*/
+ pipe_ctx->stream_res.audio->funcs->az_disable(pipe_ctx->stream_res.audio);
+
+ /*free audio*/
+ if (dc->caps.dynamic_audio == true) {
+ /*we have to dynamic arbitrate the audio endpoints*/
+ /*we free the resource, need reset is_audio_acquired*/
+ update_audio_usage(&dc->current_state->res_ctx, dc->res_pool,
+ pipe_ctx->stream_res.audio, false);
+ pipe_ctx->stream_res.audio = NULL;
+ }
}
-
+ }
+ else if (pipe_ctx->stream_res.dsc) {
+ dp_set_dsc_enable(pipe_ctx, false);
}
/* by upper caller loop, parent pipe: pipe0, will be reset last.
@@ -1748,12 +2255,19 @@ static void dcn20_reset_back_end_for_pipe(
* parent pipe.
*/
if (pipe_ctx->top_pipe == NULL) {
+
+ dc->hwss.set_abm_immediate_disable(pipe_ctx);
+
pipe_ctx->stream_res.tg->funcs->disable_crtc(pipe_ctx->stream_res.tg);
pipe_ctx->stream_res.tg->funcs->enable_optc_clock(pipe_ctx->stream_res.tg, false);
if (pipe_ctx->stream_res.tg->funcs->set_odm_bypass)
pipe_ctx->stream_res.tg->funcs->set_odm_bypass(
pipe_ctx->stream_res.tg, &pipe_ctx->stream->timing);
+
+ if (pipe_ctx->stream_res.tg->funcs->set_drr)
+ pipe_ctx->stream_res.tg->funcs->set_drr(
+ pipe_ctx->stream_res.tg, NULL);
}
for (i = 0; i < dc->res_pool->pipe_count; i++)
@@ -1768,11 +2282,12 @@ static void dcn20_reset_back_end_for_pipe(
pipe_ctx->pipe_idx, pipe_ctx->stream_res.tg->inst);
}
-static void dcn20_reset_hw_ctx_wrap(
+void dcn20_reset_hw_ctx_wrap(
struct dc *dc,
struct dc_state *context)
{
int i;
+ struct dce_hwseq *hws = dc->hwseq;
/* Reset Back End*/
for (i = dc->res_pool->pipe_count - 1; i >= 0 ; i--) {
@@ -1783,7 +2298,7 @@ static void dcn20_reset_hw_ctx_wrap(
if (!pipe_ctx_old->stream)
continue;
- if (pipe_ctx_old->top_pipe)
+ if (pipe_ctx_old->top_pipe || pipe_ctx_old->prev_odm_pipe)
continue;
if (!pipe_ctx->stream ||
@@ -1791,41 +2306,58 @@ static void dcn20_reset_hw_ctx_wrap(
struct clock_source *old_clk = pipe_ctx_old->clock_source;
dcn20_reset_back_end_for_pipe(dc, pipe_ctx_old, dc->current_state);
- if (dc->hwss.enable_stream_gating)
- dc->hwss.enable_stream_gating(dc, pipe_ctx);
+ if (hws->funcs.enable_stream_gating)
+ hws->funcs.enable_stream_gating(dc, pipe_ctx_old);
if (old_clk)
old_clk->funcs->cs_power_down(old_clk);
}
}
}
-static void dcn20_update_mpcc(struct dc *dc, struct pipe_ctx *pipe_ctx)
+void dcn20_update_visual_confirm_color(struct dc *dc, struct pipe_ctx *pipe_ctx, struct tg_color *color, int mpcc_id)
+{
+ struct mpc *mpc = dc->res_pool->mpc;
+
+ // input to MPCC is always RGB, by default leave black_color at 0
+ if (dc->debug.visual_confirm == VISUAL_CONFIRM_HDR)
+ get_hdr_visual_confirm_color(pipe_ctx, color);
+ else if (dc->debug.visual_confirm == VISUAL_CONFIRM_SURFACE)
+ get_surface_visual_confirm_color(pipe_ctx, color);
+ else if (dc->debug.visual_confirm == VISUAL_CONFIRM_MPCTREE)
+ get_mpctree_visual_confirm_color(pipe_ctx, color);
+ else if (dc->debug.visual_confirm == VISUAL_CONFIRM_SWIZZLE)
+ get_surface_tile_visual_confirm_color(pipe_ctx, color);
+
+ if (mpc->funcs->set_bg_color)
+ mpc->funcs->set_bg_color(mpc, color, mpcc_id);
+}
+
+void dcn20_update_mpcc(struct dc *dc, struct pipe_ctx *pipe_ctx)
{
struct hubp *hubp = pipe_ctx->plane_res.hubp;
- struct mpcc_blnd_cfg blnd_cfg = { {0} };
- bool per_pixel_alpha = pipe_ctx->plane_state->per_pixel_alpha && pipe_ctx->bottom_pipe;
+ struct mpcc_blnd_cfg blnd_cfg = {0};
+ bool per_pixel_alpha = pipe_ctx->plane_state->per_pixel_alpha;
int mpcc_id;
struct mpcc *new_mpcc;
struct mpc *mpc = dc->res_pool->mpc;
struct mpc_tree *mpc_tree_params = &(pipe_ctx->stream_res.opp->mpc_tree_params);
- // input to MPCC is always RGB, by default leave black_color at 0
- if (dc->debug.visual_confirm == VISUAL_CONFIRM_HDR) {
- dcn10_get_hdr_visual_confirm_color(
- pipe_ctx, &blnd_cfg.black_color);
- } else if (dc->debug.visual_confirm == VISUAL_CONFIRM_SURFACE) {
- dcn10_get_surface_visual_confirm_color(
- pipe_ctx, &blnd_cfg.black_color);
- }
-
- if (per_pixel_alpha)
- blnd_cfg.alpha_mode = MPCC_ALPHA_BLEND_MODE_PER_PIXEL_ALPHA;
- else
- blnd_cfg.alpha_mode = MPCC_ALPHA_BLEND_MODE_GLOBAL_ALPHA;
-
blnd_cfg.overlap_only = false;
blnd_cfg.global_gain = 0xff;
+ if (per_pixel_alpha) {
+ blnd_cfg.pre_multiplied_alpha = pipe_ctx->plane_state->pre_multiplied_alpha;
+ if (pipe_ctx->plane_state->global_alpha) {
+ blnd_cfg.alpha_mode = MPCC_ALPHA_BLEND_MODE_PER_PIXEL_ALPHA_COMBINED_GLOBAL_GAIN;
+ blnd_cfg.global_gain = pipe_ctx->plane_state->global_alpha_value;
+ } else {
+ blnd_cfg.alpha_mode = MPCC_ALPHA_BLEND_MODE_PER_PIXEL_ALPHA;
+ }
+ } else {
+ blnd_cfg.pre_multiplied_alpha = false;
+ blnd_cfg.alpha_mode = MPCC_ALPHA_BLEND_MODE_GLOBAL_ALPHA;
+ }
+
if (pipe_ctx->plane_state->global_alpha)
blnd_cfg.global_alpha = pipe_ctx->plane_state->global_alpha_value;
else
@@ -1836,7 +2368,10 @@ static void dcn20_update_mpcc(struct dc *dc, struct pipe_ctx *pipe_ctx)
blnd_cfg.top_gain = 0x1f000;
blnd_cfg.bottom_inside_gain = 0x1f000;
blnd_cfg.bottom_outside_gain = 0x1f000;
- blnd_cfg.pre_multiplied_alpha = per_pixel_alpha;
+
+ if (pipe_ctx->plane_state->format
+ == SURFACE_PIXEL_FORMAT_GRPH_RGBE_ALPHA)
+ blnd_cfg.pre_multiplied_alpha = false;
/*
* TODO: remove hack
@@ -1849,8 +2384,10 @@ static void dcn20_update_mpcc(struct dc *dc, struct pipe_ctx *pipe_ctx)
mpcc_id = hubp->inst;
/* If there is no full update, don't need to touch MPC tree*/
- if (!pipe_ctx->plane_state->update_flags.bits.full_update) {
+ if (!pipe_ctx->plane_state->update_flags.bits.full_update &&
+ !pipe_ctx->update_flags.bits.mpcc) {
mpc->funcs->update_blending(mpc, &blnd_cfg, mpcc_id);
+ dc->hwss.update_visual_confirm_color(dc, pipe_ctx, &blnd_cfg.black_color, mpcc_id);
return;
}
@@ -1872,162 +2409,247 @@ static void dcn20_update_mpcc(struct dc *dc, struct pipe_ctx *pipe_ctx)
NULL,
hubp->inst,
mpcc_id);
+ dc->hwss.update_visual_confirm_color(dc, pipe_ctx, &blnd_cfg.black_color, mpcc_id);
ASSERT(new_mpcc != NULL);
hubp->opp_id = pipe_ctx->stream_res.opp->inst;
hubp->mpcc_id = mpcc_id;
}
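The reworked blend setup distinguishes per-pixel alpha combined with a global gain, plain per-pixel alpha, and global-alpha-only, and it forces pre-multiplied alpha off for RGBE_ALPHA surfaces. A standalone sketch of that decision with simplified enum/struct names (the driver uses struct mpcc_blnd_cfg and the MPCC_ALPHA_BLEND_MODE_* values):

#include <stdbool.h>
#include <stdint.h>

enum blend_mode_sketch {
	BLEND_GLOBAL_ALPHA,
	BLEND_PER_PIXEL_ALPHA,
	BLEND_PER_PIXEL_ALPHA_COMBINED_GLOBAL_GAIN,
};

struct blend_cfg_sketch {
	enum blend_mode_sketch mode;
	bool pre_multiplied_alpha;
	uint8_t global_gain;	/* 0xff means "no attenuation" */
};

static struct blend_cfg_sketch pick_blend_cfg(bool per_pixel_alpha, bool pre_multiplied,
					      bool global_alpha, uint8_t global_alpha_value,
					      bool is_rgbe_alpha_format)
{
	struct blend_cfg_sketch cfg = { BLEND_GLOBAL_ALPHA, false, 0xff };

	if (per_pixel_alpha) {
		cfg.pre_multiplied_alpha = pre_multiplied;
		if (global_alpha) {
			cfg.mode = BLEND_PER_PIXEL_ALPHA_COMBINED_GLOBAL_GAIN;
			cfg.global_gain = global_alpha_value;
		} else {
			cfg.mode = BLEND_PER_PIXEL_ALPHA;
		}
	}

	if (is_rgbe_alpha_format)	/* RGBE alpha is never treated as pre-multiplied */
		cfg.pre_multiplied_alpha = false;

	return cfg;
}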
-static int find_free_gsl_group(const struct dc *dc)
+void dcn20_enable_stream(struct pipe_ctx *pipe_ctx)
{
- if (dc->res_pool->gsl_groups.gsl_0 == 0)
- return 1;
- if (dc->res_pool->gsl_groups.gsl_1 == 0)
- return 2;
- if (dc->res_pool->gsl_groups.gsl_2 == 0)
- return 3;
+ enum dc_lane_count lane_count =
+ pipe_ctx->stream->link->cur_link_settings.lane_count;
- return 0;
+ struct dc_crtc_timing *timing = &pipe_ctx->stream->timing;
+ struct dc_link *link = pipe_ctx->stream->link;
+
+ uint32_t active_total_with_borders;
+ uint32_t early_control = 0;
+ struct timing_generator *tg = pipe_ctx->stream_res.tg;
+ const struct link_hwss *link_hwss = get_link_hwss(link, &pipe_ctx->link_res);
+ struct dc *dc = pipe_ctx->stream->ctx->dc;
+
+ if (is_dp_128b_132b_signal(pipe_ctx)) {
+ if (dc->hwseq->funcs.setup_hpo_hw_control)
+ dc->hwseq->funcs.setup_hpo_hw_control(dc->hwseq, true);
+ }
+
+ link_hwss->setup_stream_encoder(pipe_ctx);
+
+ if (pipe_ctx->plane_state && pipe_ctx->plane_state->flip_immediate != 1) {
+ if (dc->hwss.program_dmdata_engine)
+ dc->hwss.program_dmdata_engine(pipe_ctx);
+ }
+
+ dc->hwss.update_info_frame(pipe_ctx);
+
+ if (dc_is_dp_signal(pipe_ctx->stream->signal))
+ dp_source_sequence_trace(link, DPCD_SOURCE_SEQ_AFTER_UPDATE_INFO_FRAME);
+
+ /* enable early control to avoid corruption on DP monitor*/
+ active_total_with_borders =
+ timing->h_addressable
+ + timing->h_border_left
+ + timing->h_border_right;
+
+ if (lane_count != 0)
+ early_control = active_total_with_borders % lane_count;
+
+ if (early_control == 0)
+ early_control = lane_count;
+
+ tg->funcs->set_early_control(tg, early_control);
+
+ /* enable audio only within mode set */
+ if (pipe_ctx->stream_res.audio != NULL) {
+ if (is_dp_128b_132b_signal(pipe_ctx))
+ pipe_ctx->stream_res.hpo_dp_stream_enc->funcs->dp_audio_enable(pipe_ctx->stream_res.hpo_dp_stream_enc);
+ else if (dc_is_dp_signal(pipe_ctx->stream->signal))
+ pipe_ctx->stream_res.stream_enc->funcs->dp_audio_enable(pipe_ctx->stream_res.stream_enc);
+ }
}
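The early-control value programmed above is the horizontal active-plus-borders total modulo the DP lane count, falling back to the lane count itself when the remainder is zero. A worked sketch:

#include <stdint.h>

/* early_control = (h_addressable + borders) % lane_count, falling back to
 * lane_count when the remainder is 0.
 * Example: 1920 active + 0 borders on 4 lanes -> 1920 % 4 == 0 -> use 4. */
static uint32_t compute_early_control(uint32_t h_addressable,
				      uint32_t h_border_left, uint32_t h_border_right,
				      uint32_t lane_count)
{
	uint32_t total = h_addressable + h_border_left + h_border_right;
	uint32_t early_control = lane_count ? total % lane_count : 0;

	return early_control ? early_control : lane_count;
}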
-/* NOTE: This is not a generic setup_gsl function (hence the suffix as_lock)
- * This is only used to lock pipes in pipe splitting case with immediate flip
- * Ordinary MPC/OTG locks suppress VUPDATE which doesn't help with immediate,
- * so we get tearing with freesync since we cannot flip multiple pipes
- * atomically.
- * We use GSL for this:
- * - immediate flip: find first available GSL group if not already assigned
- * program gsl with that group, set current OTG as master
- * and always us 0x4 = AND of flip_ready from all pipes
- * - vsync flip: disable GSL if used
- *
- * Groups in stream_res are stored as +1 from HW registers, i.e.
- * gsl_0 <=> pipe_ctx->stream_res.gsl_group == 1
- * Using a magic value like -1 would require tracking all inits/resets
- */
-void dcn20_setup_gsl_group_as_lock(
- const struct dc *dc,
- struct pipe_ctx *pipe_ctx,
- bool enable)
+void dcn20_program_dmdata_engine(struct pipe_ctx *pipe_ctx)
{
- struct gsl_params gsl;
- int group_idx;
+ struct dc_stream_state *stream = pipe_ctx->stream;
+ struct hubp *hubp = pipe_ctx->plane_res.hubp;
+ bool enable = false;
+ struct stream_encoder *stream_enc = pipe_ctx->stream_res.stream_enc;
+ enum dynamic_metadata_mode mode = dc_is_dp_signal(stream->signal)
+ ? dmdata_dp
+ : dmdata_hdmi;
+
+ /* if using dynamic meta, don't set up generic infopackets */
+ if (pipe_ctx->stream->dmdata_address.quad_part != 0) {
+ pipe_ctx->stream_res.encoder_info_frame.hdrsmd.valid = false;
+ enable = true;
+ }
- memset(&gsl, 0, sizeof(struct gsl_params));
+ if (!hubp)
+ return;
- if (enable) {
- /* return if group already assigned since GSL was set up
- * for vsync flip, we would unassign so it can't be "left over"
- */
- if (pipe_ctx->stream_res.gsl_group > 0)
- return;
+ if (!stream_enc || !stream_enc->funcs->set_dynamic_metadata)
+ return;
- group_idx = find_free_gsl_group(dc);
- ASSERT(group_idx != 0);
- pipe_ctx->stream_res.gsl_group = group_idx;
+ stream_enc->funcs->set_dynamic_metadata(stream_enc, enable,
+ hubp->inst, mode);
+}
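
The dmdata engine is only enabled when the stream actually carries a dynamic-metadata buffer, in which case the generic HDR SMD infoframe is dropped and the packet mode follows the signal type. A standalone sketch of that decision, using stub names rather than the DC structures:

/* Standalone sketch: enable/mode decision for dynamic metadata (stub names). */
#include <stdbool.h>
#include <stdio.h>

enum dmdata_mode { DMDATA_DP, DMDATA_HDMI };

struct dmdata_cfg {
	bool enable;             /* true only when the stream carries a dmdata buffer */
	bool keep_hdrsmd_packet; /* generic HDR SMD infoframe is dropped when dynamic */
	enum dmdata_mode mode;
};

static struct dmdata_cfg pick_dmdata_cfg(unsigned long long dmdata_address,
					 bool is_dp_signal)
{
	struct dmdata_cfg cfg = {
		.enable = dmdata_address != 0,
		.keep_hdrsmd_packet = dmdata_address == 0,
		.mode = is_dp_signal ? DMDATA_DP : DMDATA_HDMI,
	};
	return cfg;
}

int main(void)
{
	struct dmdata_cfg cfg = pick_dmdata_cfg(0x1000, true);

	printf("enable=%d keep_hdrsmd=%d mode=%d\n",
	       cfg.enable, cfg.keep_hdrsmd_packet, cfg.mode);
	return 0;
}
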
- /* set gsl group reg field and mark resource used */
- switch (group_idx) {
- case 1:
- gsl.gsl0_en = 1;
- dc->res_pool->gsl_groups.gsl_0 = 1;
- break;
- case 2:
- gsl.gsl1_en = 1;
- dc->res_pool->gsl_groups.gsl_1 = 1;
- break;
- case 3:
- gsl.gsl2_en = 1;
- dc->res_pool->gsl_groups.gsl_2 = 1;
- break;
- default:
- BREAK_TO_DEBUGGER();
- return; // invalid case
- }
- gsl.gsl_master_en = 1;
- } else {
- group_idx = pipe_ctx->stream_res.gsl_group;
- if (group_idx == 0)
- return; // if not in use, just return
+void dcn20_fpga_init_hw(struct dc *dc)
+{
+ int i, j;
+ struct dce_hwseq *hws = dc->hwseq;
+ struct resource_pool *res_pool = dc->res_pool;
+ struct dc_state *context = dc->current_state;
- pipe_ctx->stream_res.gsl_group = 0;
+ if (dc->clk_mgr && dc->clk_mgr->funcs->init_clocks)
+ dc->clk_mgr->funcs->init_clocks(dc->clk_mgr);
- /* unset gsl group reg field and mark resource free */
- switch (group_idx) {
- case 1:
- gsl.gsl0_en = 0;
- dc->res_pool->gsl_groups.gsl_0 = 0;
- break;
- case 2:
- gsl.gsl1_en = 0;
- dc->res_pool->gsl_groups.gsl_1 = 0;
- break;
- case 3:
- gsl.gsl2_en = 0;
- dc->res_pool->gsl_groups.gsl_2 = 0;
- break;
- default:
- BREAK_TO_DEBUGGER();
- return;
- }
- gsl.gsl_master_en = 0;
+ // Initialize the dccg
+ if (res_pool->dccg->funcs->dccg_init)
+ res_pool->dccg->funcs->dccg_init(res_pool->dccg);
+
+ //Enable ability to power gate / don't force power on permanently
+ hws->funcs.enable_power_gating_plane(hws, true);
+
+ // Specific to FPGA dccg and registers
+ REG_WRITE(RBBMIF_TIMEOUT_DIS, 0xFFFFFFFF);
+ REG_WRITE(RBBMIF_TIMEOUT_DIS_2, 0xFFFFFFFF);
+
+ hws->funcs.dccg_init(hws);
+
+ REG_UPDATE(DCHUBBUB_GLOBAL_TIMER_CNTL, DCHUBBUB_GLOBAL_TIMER_REFDIV, 2);
+ REG_UPDATE(DCHUBBUB_GLOBAL_TIMER_CNTL, DCHUBBUB_GLOBAL_TIMER_ENABLE, 1);
+ if (REG(REFCLK_CNTL))
+ REG_WRITE(REFCLK_CNTL, 0);
+ //
+
+
+ /* Blank pixel data with OPP DPG */
+ for (i = 0; i < dc->res_pool->timing_generator_count; i++) {
+ struct timing_generator *tg = dc->res_pool->timing_generators[i];
+
+ if (tg->funcs->is_tg_enabled(tg))
+ dcn20_init_blank(dc, tg);
}
- /* at this point we want to program whether it's to enable or disable */
- if (pipe_ctx->stream_res.tg->funcs->set_gsl != NULL &&
- pipe_ctx->stream_res.tg->funcs->set_gsl_source_select != NULL) {
- pipe_ctx->stream_res.tg->funcs->set_gsl(
- pipe_ctx->stream_res.tg,
- &gsl);
+ for (i = 0; i < res_pool->timing_generator_count; i++) {
+ struct timing_generator *tg = dc->res_pool->timing_generators[i];
- pipe_ctx->stream_res.tg->funcs->set_gsl_source_select(
- pipe_ctx->stream_res.tg, group_idx, enable ? 4 : 0);
- } else
- BREAK_TO_DEBUGGER();
-}
+ if (tg->funcs->is_tg_enabled(tg))
+ tg->funcs->lock(tg);
+ }
-static void dcn20_set_flip_control_gsl(
- struct pipe_ctx *pipe_ctx,
- bool flip_immediate)
+ for (i = 0; i < dc->res_pool->pipe_count; i++) {
+ struct dpp *dpp = res_pool->dpps[i];
+
+ dpp->funcs->dpp_reset(dpp);
+ }
+
+ /* Reset all MPCC muxes */
+ res_pool->mpc->funcs->mpc_init(res_pool->mpc);
+
+ /* initialize OPP mpc_tree parameter */
+ for (i = 0; i < dc->res_pool->res_cap->num_opp; i++) {
+ res_pool->opps[i]->mpc_tree_params.opp_id = res_pool->opps[i]->inst;
+ res_pool->opps[i]->mpc_tree_params.opp_list = NULL;
+ for (j = 0; j < MAX_PIPES; j++)
+ res_pool->opps[i]->mpcc_disconnect_pending[j] = false;
+ }
+
+ for (i = 0; i < dc->res_pool->pipe_count; i++) {
+ struct timing_generator *tg = dc->res_pool->timing_generators[i];
+ struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
+ struct hubp *hubp = dc->res_pool->hubps[i];
+ struct dpp *dpp = dc->res_pool->dpps[i];
+
+ pipe_ctx->stream_res.tg = tg;
+ pipe_ctx->pipe_idx = i;
+
+ pipe_ctx->plane_res.hubp = hubp;
+ pipe_ctx->plane_res.dpp = dpp;
+ pipe_ctx->plane_res.mpcc_inst = dpp->inst;
+ hubp->mpcc_id = dpp->inst;
+ hubp->opp_id = OPP_ID_INVALID;
+ hubp->power_gated = false;
+ pipe_ctx->stream_res.opp = NULL;
+
+ hubp->funcs->hubp_init(hubp);
+
+ //dc->res_pool->opps[i]->mpc_tree_params.opp_id = dc->res_pool->opps[i]->inst;
+ //dc->res_pool->opps[i]->mpc_tree_params.opp_list = NULL;
+ dc->res_pool->opps[i]->mpcc_disconnect_pending[pipe_ctx->plane_res.mpcc_inst] = true;
+ pipe_ctx->stream_res.opp = dc->res_pool->opps[i];
+ /*to do*/
+ hws->funcs.plane_atomic_disconnect(dc, pipe_ctx);
+ }
+
+ /* initialize DWB pointer to MCIF_WB */
+ for (i = 0; i < res_pool->res_cap->num_dwb; i++)
+ res_pool->dwbc[i]->mcif = res_pool->mcif_wb[i];
+
+ for (i = 0; i < dc->res_pool->timing_generator_count; i++) {
+ struct timing_generator *tg = dc->res_pool->timing_generators[i];
+
+ if (tg->funcs->is_tg_enabled(tg))
+ tg->funcs->unlock(tg);
+ }
+
+ for (i = 0; i < dc->res_pool->pipe_count; i++) {
+ struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
+
+ dc->hwss.disable_plane(dc, pipe_ctx);
+
+ pipe_ctx->stream_res.tg = NULL;
+ pipe_ctx->plane_res.hubp = NULL;
+ }
+
+ for (i = 0; i < dc->res_pool->timing_generator_count; i++) {
+ struct timing_generator *tg = dc->res_pool->timing_generators[i];
+
+ tg->funcs->tg_init(tg);
+ }
+
+ if (dc->res_pool->hubbub->funcs->init_crb)
+ dc->res_pool->hubbub->funcs->init_crb(dc->res_pool->hubbub);
+}
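
The FPGA init path above brackets the pipe reprogramming between a lock and an unlock of every enabled timing generator, so no partially updated state is latched mid-init. A standalone sketch of the bracketing pattern, with stub types in place of the DC objects:

/* Standalone sketch: lock enabled TGs, reprogram, then unlock (stub types). */
#include <stdbool.h>
#include <stdio.h>

struct fake_tg {
	bool enabled;
	bool locked;
};

static void lock_enabled(struct fake_tg *tgs, int n)
{
	for (int i = 0; i < n; i++)
		if (tgs[i].enabled)
			tgs[i].locked = true;
}

static void unlock_enabled(struct fake_tg *tgs, int n)
{
	for (int i = 0; i < n; i++)
		if (tgs[i].enabled)
			tgs[i].locked = false;
}

int main(void)
{
	struct fake_tg tgs[3] = { { true, false }, { false, false }, { true, false } };

	lock_enabled(tgs, 3);
	/* ... reprogram MPC trees / HUBPs while the outputs are locked ... */
	unlock_enabled(tgs, 3);

	for (int i = 0; i < 3; i++)
		printf("tg%d locked=%d\n", i, tgs[i].locked);
	return 0;
}
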
+#ifndef TRIM_FSFT
+bool dcn20_optimize_timing_for_fsft(struct dc *dc,
+ struct dc_crtc_timing *timing,
+ unsigned int max_input_rate_in_khz)
{
- if (pipe_ctx && pipe_ctx->plane_res.hubp->funcs->hubp_set_flip_control_surface_gsl)
- pipe_ctx->plane_res.hubp->funcs->hubp_set_flip_control_surface_gsl(
- pipe_ctx->plane_res.hubp, flip_immediate);
+ unsigned int old_v_front_porch;
+ unsigned int old_v_total;
+ unsigned int max_input_rate_in_100hz;
+ unsigned long long new_v_total;
+
+ max_input_rate_in_100hz = max_input_rate_in_khz * 10;
+ if (max_input_rate_in_100hz < timing->pix_clk_100hz)
+ return false;
+ old_v_total = timing->v_total;
+ old_v_front_porch = timing->v_front_porch;
+
+ timing->fast_transport_output_rate_100hz = timing->pix_clk_100hz;
+ timing->pix_clk_100hz = max_input_rate_in_100hz;
+
+ new_v_total = div_u64((unsigned long long)old_v_total * max_input_rate_in_100hz, timing->fast_transport_output_rate_100hz);
+
+ timing->v_total = new_v_total;
+ timing->v_front_porch = old_v_front_porch + (timing->v_total - old_v_total);
+ return true;
}
+#endif
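
A worked example of the scaling above, assuming the division uses the saved pre-fast-transport clock as the denominator: doubling the pixel clock doubles v_total, the extra lines land in the front porch, and the refresh rate is unchanged. Numbers are illustrative only:

/* Standalone sketch: fast-transport v_total scaling with illustrative numbers. */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t old_pix_clk_100hz = 1485000;   /* 148.5 MHz, 1080p60 */
	uint64_t new_pix_clk_100hz = 2970000;   /* transport runs 2x faster */
	uint64_t old_v_total = 1125;
	uint64_t old_v_front_porch = 4;

	/* scale v_total by the clock ratio; the extra lines go to the front porch */
	uint64_t new_v_total = old_v_total * new_pix_clk_100hz / old_pix_clk_100hz;
	uint64_t new_v_front_porch = old_v_front_porch + (new_v_total - old_v_total);

	/* refresh = pix_clk / (h_total * v_total); both double, so it is unchanged */
	printf("v_total %llu -> %llu, v_front_porch %llu -> %llu\n",
	       (unsigned long long)old_v_total, (unsigned long long)new_v_total,
	       (unsigned long long)old_v_front_porch, (unsigned long long)new_v_front_porch);
	return 0;
}
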
-void dcn20_hw_sequencer_construct(struct dc *dc)
+void dcn20_set_disp_pattern_generator(const struct dc *dc,
+ struct pipe_ctx *pipe_ctx,
+ enum controller_dp_test_pattern test_pattern,
+ enum controller_dp_color_space color_space,
+ enum dc_color_depth color_depth,
+ const struct tg_color *solid_color,
+ int width, int height, int offset)
{
- dcn10_hw_sequencer_construct(dc);
- dc->hwss.init_hw = dcn20_init_hw;
- dc->hwss.init_pipes = NULL;
- dc->hwss.unblank_stream = dcn20_unblank_stream;
- dc->hwss.update_plane_addr = dcn20_update_plane_addr;
- dc->hwss.disable_plane = dcn20_disable_plane,
- dc->hwss.enable_stream_timing = dcn20_enable_stream_timing;
- dc->hwss.program_triplebuffer = dcn20_program_tripleBuffer;
- dc->hwss.set_input_transfer_func = dcn20_set_input_transfer_func;
- dc->hwss.set_output_transfer_func = dcn20_set_output_transfer_func;
- dc->hwss.apply_ctx_for_surface = dcn20_apply_ctx_for_surface;
- dc->hwss.pipe_control_lock = dcn20_pipe_control_lock;
- dc->hwss.pipe_control_lock_global = dcn20_pipe_control_lock_global;
- dc->hwss.optimize_bandwidth = dcn20_optimize_bandwidth;
- dc->hwss.prepare_bandwidth = dcn20_prepare_bandwidth;
- dc->hwss.update_bandwidth = dcn20_update_bandwidth;
- dc->hwss.enable_writeback = dcn20_enable_writeback;
- dc->hwss.disable_writeback = dcn20_disable_writeback;
- dc->hwss.program_output_csc = dcn20_program_output_csc;
- dc->hwss.update_odm = dcn20_update_odm;
- dc->hwss.blank_pixel_data = dcn20_blank_pixel_data;
- dc->hwss.dmdata_status_done = dcn20_dmdata_status_done;
- dc->hwss.disable_stream = dcn20_disable_stream;
- dc->hwss.init_sys_ctx = dcn20_init_sys_ctx;
- dc->hwss.init_vm_ctx = dcn20_init_vm_ctx;
- dc->hwss.disable_stream_gating = dcn20_disable_stream_gating;
- dc->hwss.enable_stream_gating = dcn20_enable_stream_gating;
- dc->hwss.setup_vupdate_interrupt = dcn20_setup_vupdate_interrupt;
- dc->hwss.reset_hw_ctx_wrap = dcn20_reset_hw_ctx_wrap;
- dc->hwss.update_mpcc = dcn20_update_mpcc;
- dc->hwss.set_flip_control_gsl = dcn20_set_flip_control_gsl;
- dc->hwss.did_underflow_occur = dcn10_did_underflow_occur;
+ pipe_ctx->stream_res.opp->funcs->opp_set_disp_pattern_generator(pipe_ctx->stream_res.opp, test_pattern,
+ color_space, color_depth, solid_color, width, height, offset);
}
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.h b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.h
index 2b0409454073..33a36c02b2f8 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.h
@@ -26,78 +26,134 @@
#ifndef __DC_HWSS_DCN20_H__
#define __DC_HWSS_DCN20_H__
-struct dc;
+#include "hw_sequencer_private.h"
-void dcn20_hw_sequencer_construct(struct dc *dc);
-
-enum dc_status dcn20_enable_stream_timing(
- struct pipe_ctx *pipe_ctx,
- struct dc_state *context,
- struct dc *dc);
-
-void dcn20_blank_pixel_data(
+bool dcn20_set_blend_lut(
+ struct pipe_ctx *pipe_ctx, const struct dc_plane_state *plane_state);
+bool dcn20_set_shaper_3dlut(
+ struct pipe_ctx *pipe_ctx, const struct dc_plane_state *plane_state);
+void dcn20_program_front_end_for_ctx(
struct dc *dc,
- struct pipe_ctx *pipe_ctx,
- bool blank);
-
+ struct dc_state *context);
+void dcn20_post_unlock_program_front_end(
+ struct dc *dc,
+ struct dc_state *context);
+void dcn20_update_plane_addr(const struct dc *dc, struct pipe_ctx *pipe_ctx);
+void dcn20_update_mpcc(struct dc *dc, struct pipe_ctx *pipe_ctx);
+bool dcn20_set_input_transfer_func(struct dc *dc, struct pipe_ctx *pipe_ctx,
+ const struct dc_plane_state *plane_state);
+bool dcn20_set_output_transfer_func(struct dc *dc, struct pipe_ctx *pipe_ctx,
+ const struct dc_stream_state *stream);
void dcn20_program_output_csc(struct dc *dc,
struct pipe_ctx *pipe_ctx,
enum dc_color_space colorspace,
uint16_t *matrix,
int opp_id);
-
+void dcn20_enable_stream(struct pipe_ctx *pipe_ctx);
+void dcn20_unblank_stream(struct pipe_ctx *pipe_ctx,
+ struct dc_link_settings *link_settings);
+void dcn20_disable_plane(struct dc *dc, struct pipe_ctx *pipe_ctx);
+void dcn20_disable_pixel_data(
+ struct dc *dc,
+ struct pipe_ctx *pipe_ctx,
+ bool blank);
+void dcn20_blank_pixel_data(
+ struct dc *dc,
+ struct pipe_ctx *pipe_ctx,
+ bool blank);
+void dcn20_pipe_control_lock(
+ struct dc *dc,
+ struct pipe_ctx *pipe,
+ bool lock);
void dcn20_prepare_bandwidth(
struct dc *dc,
struct dc_state *context);
-
void dcn20_optimize_bandwidth(
struct dc *dc,
struct dc_state *context);
-
bool dcn20_update_bandwidth(
struct dc *dc,
struct dc_state *context);
-
+void dcn20_reset_hw_ctx_wrap(
+ struct dc *dc,
+ struct dc_state *context);
+enum dc_status dcn20_enable_stream_timing(
+ struct pipe_ctx *pipe_ctx,
+ struct dc_state *context,
+ struct dc *dc);
+void dcn20_disable_stream_gating(struct dc *dc, struct pipe_ctx *pipe_ctx);
+void dcn20_enable_stream_gating(struct dc *dc, struct pipe_ctx *pipe_ctx);
+void dcn20_setup_vupdate_interrupt(struct dc *dc, struct pipe_ctx *pipe_ctx);
+void dcn20_init_blank(
+ struct dc *dc,
+ struct timing_generator *tg);
+void dcn20_disable_vga(
+ struct dce_hwseq *hws);
+void dcn20_plane_atomic_disable(struct dc *dc, struct pipe_ctx *pipe_ctx);
+void dcn20_enable_power_gating_plane(
+ struct dce_hwseq *hws,
+ bool enable);
+void dcn20_dpp_pg_control(
+ struct dce_hwseq *hws,
+ unsigned int dpp_inst,
+ bool power_on);
+void dcn20_hubp_pg_control(
+ struct dce_hwseq *hws,
+ unsigned int hubp_inst,
+ bool power_on);
+void dcn20_program_triple_buffer(
+ const struct dc *dc,
+ struct pipe_ctx *pipe_ctx,
+ bool enable_triple_buffer);
+void dcn20_enable_writeback(
+ struct dc *dc,
+ struct dc_writeback_info *wb_info,
+ struct dc_state *context);
void dcn20_disable_writeback(
struct dc *dc,
unsigned int dwb_pipe_inst);
-
-bool dcn20_hwss_wait_for_blank_complete(
- struct output_pixel_processor *opp);
-
-bool dcn20_set_output_transfer_func(struct pipe_ctx *pipe_ctx,
- const struct dc_stream_state *stream);
-
-bool dcn20_set_input_transfer_func(struct pipe_ctx *pipe_ctx,
- const struct dc_plane_state *plane_state);
-
+void dcn20_update_odm(struct dc *dc, struct dc_state *context, struct pipe_ctx *pipe_ctx);
bool dcn20_dmdata_status_done(struct pipe_ctx *pipe_ctx);
-
+void dcn20_program_dmdata_engine(struct pipe_ctx *pipe_ctx);
void dcn20_set_dmdata_attributes(struct pipe_ctx *pipe_ctx);
+void dcn20_init_vm_ctx(
+ struct dce_hwseq *hws,
+ struct dc *dc,
+ struct dc_virtual_addr_space_config *va_config,
+ int vmid);
+void dcn20_set_flip_control_gsl(
+ struct pipe_ctx *pipe_ctx,
+ bool flip_immediate);
+void dcn20_dsc_pg_control(
+ struct dce_hwseq *hws,
+ unsigned int dsc_inst,
+ bool power_on);
+void dcn20_fpga_init_hw(struct dc *dc);
+bool dcn20_wait_for_blank_complete(
+ struct output_pixel_processor *opp);
+void dcn20_dccg_init(struct dce_hwseq *hws);
+int dcn20_init_sys_ctx(struct dce_hwseq *hws,
+ struct dc *dc,
+ struct dc_phy_addr_space_config *pa_config);
-void dcn20_disable_stream(struct pipe_ctx *pipe_ctx, int option);
+#ifndef TRIM_FSFT
+bool dcn20_optimize_timing_for_fsft(struct dc *dc,
+ struct dc_crtc_timing *timing,
+ unsigned int max_input_rate_in_khz);
+#endif
-void dcn20_program_tripleBuffer(
- const struct dc *dc,
+void dcn20_set_disp_pattern_generator(const struct dc *dc,
struct pipe_ctx *pipe_ctx,
- bool enableTripleBuffer);
+ enum controller_dp_test_pattern test_pattern,
+ enum controller_dp_color_space color_space,
+ enum dc_color_depth color_depth,
+ const struct tg_color *solid_color,
+ int width, int height, int offset);
-void dcn20_setup_vupdate_interrupt(struct pipe_ctx *pipe_ctx);
+void dcn20_update_visual_confirm_color(struct dc *dc,
+ struct pipe_ctx *pipe_ctx,
+ struct tg_color *color,
+ int mpcc_id);
-void dcn20_pipe_control_lock_global(
- struct dc *dc,
- struct pipe_ctx *pipe,
- bool lock);
-void dcn20_setup_gsl_group_as_lock(const struct dc *dc,
- struct pipe_ctx *pipe_ctx,
- bool enable);
-void dcn20_pipe_control_lock(
- struct dc *dc,
- struct pipe_ctx *pipe,
- bool lock);
-void dcn20_disable_plane(struct dc *dc, struct pipe_ctx *pipe_ctx);
-void dcn20_enable_plane(
- struct dc *dc,
- struct pipe_ctx *pipe_ctx,
- struct dc_state *context);
#endif /* __DC_HWSS_DCN20_H__ */
+
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_init.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_init.c
new file mode 100644
index 000000000000..91e4885b743e
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_init.c
@@ -0,0 +1,148 @@
+/*
+ * Copyright 2016 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#include "dce110/dce110_hw_sequencer.h"
+#include "dcn10/dcn10_hw_sequencer.h"
+#include "dcn20_hwseq.h"
+
+#include "dcn20_init.h"
+
+static const struct hw_sequencer_funcs dcn20_funcs = {
+ .program_gamut_remap = dcn10_program_gamut_remap,
+ .init_hw = dcn10_init_hw,
+ .power_down_on_boot = dcn10_power_down_on_boot,
+ .apply_ctx_to_hw = dce110_apply_ctx_to_hw,
+ .apply_ctx_for_surface = NULL,
+ .program_front_end_for_ctx = dcn20_program_front_end_for_ctx,
+ .wait_for_pending_cleared = dcn10_wait_for_pending_cleared,
+ .post_unlock_program_front_end = dcn20_post_unlock_program_front_end,
+ .update_plane_addr = dcn20_update_plane_addr,
+ .update_dchub = dcn10_update_dchub,
+ .update_pending_status = dcn10_update_pending_status,
+ .program_output_csc = dcn20_program_output_csc,
+ .enable_accelerated_mode = dce110_enable_accelerated_mode,
+ .enable_timing_synchronization = dcn10_enable_timing_synchronization,
+ .enable_vblanks_synchronization = dcn10_enable_vblanks_synchronization,
+ .enable_per_frame_crtc_position_reset = dcn10_enable_per_frame_crtc_position_reset,
+ .update_info_frame = dce110_update_info_frame,
+ .send_immediate_sdp_message = dcn10_send_immediate_sdp_message,
+ .enable_stream = dcn20_enable_stream,
+ .disable_stream = dce110_disable_stream,
+ .unblank_stream = dcn20_unblank_stream,
+ .blank_stream = dce110_blank_stream,
+ .enable_audio_stream = dce110_enable_audio_stream,
+ .disable_audio_stream = dce110_disable_audio_stream,
+ .disable_plane = dcn20_disable_plane,
+ .pipe_control_lock = dcn20_pipe_control_lock,
+ .interdependent_update_lock = dcn10_lock_all_pipes,
+ .cursor_lock = dcn10_cursor_lock,
+ .prepare_bandwidth = dcn20_prepare_bandwidth,
+ .optimize_bandwidth = dcn20_optimize_bandwidth,
+ .update_bandwidth = dcn20_update_bandwidth,
+ .set_drr = dcn10_set_drr,
+ .get_position = dcn10_get_position,
+ .set_static_screen_control = dcn10_set_static_screen_control,
+ .setup_stereo = dcn10_setup_stereo,
+ .set_avmute = dce110_set_avmute,
+ .log_hw_state = dcn10_log_hw_state,
+ .get_hw_state = dcn10_get_hw_state,
+ .clear_status_bits = dcn10_clear_status_bits,
+ .wait_for_mpcc_disconnect = dcn10_wait_for_mpcc_disconnect,
+ .edp_backlight_control = dce110_edp_backlight_control,
+ .edp_power_control = dce110_edp_power_control,
+ .edp_wait_for_hpd_ready = dce110_edp_wait_for_hpd_ready,
+ .set_cursor_position = dcn10_set_cursor_position,
+ .set_cursor_attribute = dcn10_set_cursor_attribute,
+ .set_cursor_sdr_white_level = dcn10_set_cursor_sdr_white_level,
+ .setup_periodic_interrupt = dcn10_setup_periodic_interrupt,
+ .set_clock = dcn10_set_clock,
+ .get_clock = dcn10_get_clock,
+ .program_triplebuffer = dcn20_program_triple_buffer,
+ .enable_writeback = dcn20_enable_writeback,
+ .disable_writeback = dcn20_disable_writeback,
+ .dmdata_status_done = dcn20_dmdata_status_done,
+ .program_dmdata_engine = dcn20_program_dmdata_engine,
+ .set_dmdata_attributes = dcn20_set_dmdata_attributes,
+ .init_sys_ctx = dcn20_init_sys_ctx,
+ .init_vm_ctx = dcn20_init_vm_ctx,
+ .set_flip_control_gsl = dcn20_set_flip_control_gsl,
+ .get_vupdate_offset_from_vsync = dcn10_get_vupdate_offset_from_vsync,
+ .calc_vupdate_position = dcn10_calc_vupdate_position,
+ .set_backlight_level = dce110_set_backlight_level,
+ .set_abm_immediate_disable = dce110_set_abm_immediate_disable,
+ .set_pipe = dce110_set_pipe,
+#ifndef TRIM_FSFT
+ .optimize_timing_for_fsft = dcn20_optimize_timing_for_fsft,
+#endif
+ .set_disp_pattern_generator = dcn20_set_disp_pattern_generator,
+ .get_dcc_en_bits = dcn10_get_dcc_en_bits,
+ .update_visual_confirm_color = dcn20_update_visual_confirm_color
+};
+
+static const struct hwseq_private_funcs dcn20_private_funcs = {
+ .init_pipes = dcn10_init_pipes,
+ .update_plane_addr = dcn20_update_plane_addr,
+ .plane_atomic_disconnect = dcn10_plane_atomic_disconnect,
+ .update_mpcc = dcn20_update_mpcc,
+ .set_input_transfer_func = dcn20_set_input_transfer_func,
+ .set_output_transfer_func = dcn20_set_output_transfer_func,
+ .power_down = dce110_power_down,
+ .enable_display_power_gating = dcn10_dummy_display_power_gating,
+ .blank_pixel_data = dcn20_blank_pixel_data,
+ .reset_hw_ctx_wrap = dcn20_reset_hw_ctx_wrap,
+ .enable_stream_timing = dcn20_enable_stream_timing,
+ .edp_backlight_control = dce110_edp_backlight_control,
+ .disable_stream_gating = dcn20_disable_stream_gating,
+ .enable_stream_gating = dcn20_enable_stream_gating,
+ .setup_vupdate_interrupt = dcn20_setup_vupdate_interrupt,
+ .did_underflow_occur = dcn10_did_underflow_occur,
+ .init_blank = dcn20_init_blank,
+ .disable_vga = dcn20_disable_vga,
+ .bios_golden_init = dcn10_bios_golden_init,
+ .plane_atomic_disable = dcn20_plane_atomic_disable,
+ .plane_atomic_power_down = dcn10_plane_atomic_power_down,
+ .enable_power_gating_plane = dcn20_enable_power_gating_plane,
+ .dpp_pg_control = dcn20_dpp_pg_control,
+ .hubp_pg_control = dcn20_hubp_pg_control,
+ .update_odm = dcn20_update_odm,
+ .dsc_pg_control = dcn20_dsc_pg_control,
+ .set_hdr_multiplier = dcn10_set_hdr_multiplier,
+ .verify_allow_pstate_change_high = dcn10_verify_allow_pstate_change_high,
+ .wait_for_blank_complete = dcn20_wait_for_blank_complete,
+ .dccg_init = dcn20_dccg_init,
+ .set_blend_lut = dcn20_set_blend_lut,
+ .set_shaper_3dlut = dcn20_set_shaper_3dlut,
+};
+
+void dcn20_hw_sequencer_construct(struct dc *dc)
+{
+ dc->hwss = dcn20_funcs;
+ dc->hwseq->funcs = dcn20_private_funcs;
+
+ if (IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment)) {
+ dc->hwss.init_hw = dcn20_fpga_init_hw;
+ dc->hwseq->funcs.init_pipes = NULL;
+ }
+}
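
The construct routine above follows the usual DC pattern: copy a const function-pointer table wholesale, then patch individual entries for special environments such as FPGA bring-up. A standalone sketch of that pattern with stub types:

/* Standalone sketch: whole-table copy plus targeted override (stub types). */
#include <stdio.h>

struct ops {
	void (*init_hw)(void);
};

static void asic_init_hw(void) { puts("asic init"); }
static void fpga_init_hw(void) { puts("fpga init"); }

static const struct ops default_ops = { .init_hw = asic_init_hw };

struct ctx {
	struct ops ops;
	int is_fpga;
};

static void construct(struct ctx *c)
{
	c->ops = default_ops;              /* whole-table copy, like dc->hwss = dcn20_funcs */
	if (c->is_fpga)
		c->ops.init_hw = fpga_init_hw; /* targeted override for the special environment */
}

int main(void)
{
	struct ctx c = { .is_fpga = 1 };

	construct(&c);
	c.ops.init_hw();
	return 0;
}
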
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_init.h b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_init.h
new file mode 100644
index 000000000000..12277797cd71
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_init.h
@@ -0,0 +1,33 @@
+/*
+ * Copyright 2016 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef __DC_DCN20_INIT_H__
+#define __DC_DCN20_INIT_H__
+
+struct dc;
+
+void dcn20_hw_sequencer_construct(struct dc *dc);
+
+#endif /* __DC_DCN20_INIT_H__ */
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_link_encoder.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_link_encoder.c
index f495582e9e87..2f9bfaeaba8d 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_link_encoder.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_link_encoder.c
@@ -49,6 +49,12 @@
#define IND_REG(index) \
(enc10->link_regs->index)
+#ifndef MAX
+#define MAX(X, Y) ((X) > (Y) ? (X) : (Y))
+#endif
+#ifndef MIN
+#define MIN(X, Y) ((X) < (Y) ? (X) : (Y))
+#endif
static struct mpll_cfg dcn2_mpll_cfg[] = {
// RBR
@@ -168,10 +174,8 @@ static struct mpll_cfg dcn2_mpll_cfg[] = {
void enc2_fec_set_enable(struct link_encoder *enc, bool enable)
{
struct dcn10_link_encoder *enc10 = TO_DCN10_LINK_ENC(enc);
-#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT
DC_LOG_DSC("%s FEC at link encoder inst %d",
enable ? "Enabling" : "Disabling", enc->id.enum_id);
-#endif
REG_UPDATE(DP_DPHY_CNTL, DPHY_FEC_EN, enable);
}
@@ -192,7 +196,6 @@ bool enc2_fec_is_active(struct link_encoder *enc)
return (active != 0);
}
-#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT
/* this function reads dsc related register fields to be logged later in dcn10_log_hw_state
* into a dcn_dsc_state struct.
*/
@@ -203,8 +206,8 @@ void link_enc2_read_state(struct link_encoder *enc, struct link_enc_state *s)
REG_GET(DP_DPHY_CNTL, DPHY_FEC_EN, &s->dphy_fec_en);
REG_GET(DP_DPHY_CNTL, DPHY_FEC_READY_SHADOW, &s->dphy_fec_ready_shadow);
REG_GET(DP_DPHY_CNTL, DPHY_FEC_ACTIVE_STATUS, &s->dphy_fec_active_status);
+ REG_GET(DP_LINK_CNTL, DP_LINK_TRAINING_COMPLETE, &s->dp_link_training_complete);
}
-#endif
static bool update_cfg_data(
struct dcn10_link_encoder *enc10,
@@ -263,6 +266,38 @@ void dcn20_link_encoder_enable_dp_output(
}
+void dcn20_link_encoder_get_max_link_cap(struct link_encoder *enc,
+ struct dc_link_settings *link_settings)
+{
+ struct dcn10_link_encoder *enc10 = TO_DCN10_LINK_ENC(enc);
+ uint32_t is_in_usb_c_dp4_mode = 0;
+
+ dcn10_link_encoder_get_max_link_cap(enc, link_settings);
+
+ /* in usb c dp2 mode, max lane count is 2 */
+ if (enc->funcs->is_in_alt_mode && enc->funcs->is_in_alt_mode(enc)) {
+ REG_GET(RDPCSTX_PHY_CNTL6, RDPCS_PHY_DPALT_DP4, &is_in_usb_c_dp4_mode);
+ if (!is_in_usb_c_dp4_mode)
+ link_settings->lane_count = MIN(LANE_COUNT_TWO, link_settings->lane_count);
+ }
+
+}
+
+bool dcn20_link_encoder_is_in_alt_mode(struct link_encoder *enc)
+{
+ struct dcn10_link_encoder *enc10 = TO_DCN10_LINK_ENC(enc);
+ uint32_t dp_alt_mode_disable = 0;
+ bool is_usb_c_alt_mode = false;
+
+ if (enc->features.flags.bits.DP_IS_USB_C) {
+ /* if value == 1 alt mode is disabled, otherwise it is enabled */
+ REG_GET(RDPCSTX_PHY_CNTL6, RDPCS_PHY_DPALT_DISABLE, &dp_alt_mode_disable);
+ is_usb_c_alt_mode = (dp_alt_mode_disable == 0);
+ }
+
+ return is_usb_c_alt_mode;
+}
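
The two helpers above detect USB-C DP alt mode and, when the pin assignment only provides two lanes, cap the reported lane count. A standalone sketch of the capping logic, with the register reads replaced by plain booleans:

/* Standalone sketch: lane-count capping for USB-C DP alt mode. */
#include <stdbool.h>
#include <stdio.h>

#ifndef MIN
#define MIN(X, Y) ((X) < (Y) ? (X) : (Y))
#endif

static unsigned int cap_lane_count(unsigned int requested_lanes,
				   bool in_alt_mode, bool dp4_pin_assignment)
{
	/* a 2-lane USB-C alt-mode pin assignment leaves only 2 lanes for DP */
	if (in_alt_mode && !dp4_pin_assignment)
		return MIN(2u, requested_lanes);
	return requested_lanes;
}

int main(void)
{
	printf("%u\n", cap_lane_count(4, true, false)); /* -> 2 */
	printf("%u\n", cap_lane_count(4, true, true));  /* -> 4 */
	return 0;
}
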
+
#define AUX_REG(reg)\
(enc10->aux_regs->reg)
@@ -274,7 +309,6 @@ void dcn20_link_encoder_enable_dp_output(
void enc2_hw_init(struct link_encoder *enc)
{
struct dcn10_link_encoder *enc10 = TO_DCN10_LINK_ENC(enc);
-
/*
00 - DP_AUX_DPHY_RX_DETECTION_THRESHOLD__1to2 : 1/2
01 - DP_AUX_DPHY_RX_DETECTION_THRESHOLD__3to4 : 3/4
@@ -298,9 +332,17 @@ void enc2_hw_init(struct link_encoder *enc)
AUX_RX_PHASE_DETECT_LEN, [21,20] = 0x3 default is 3
AUX_RX_DETECTION_THRESHOLD [30:28] = 1
*/
- AUX_REG_WRITE(AUX_DPHY_RX_CONTROL0, 0x103d1110);
+ if (enc->ctx->dc_bios->golden_table.dc_golden_table_ver > 0) {
+ AUX_REG_WRITE(AUX_DPHY_RX_CONTROL0, enc->ctx->dc_bios->golden_table.aux_dphy_rx_control0_val);
+
+ AUX_REG_WRITE(AUX_DPHY_TX_CONTROL, enc->ctx->dc_bios->golden_table.aux_dphy_tx_control_val);
+
+ AUX_REG_WRITE(AUX_DPHY_RX_CONTROL1, enc->ctx->dc_bios->golden_table.aux_dphy_rx_control1_val);
+ } else {
+ AUX_REG_WRITE(AUX_DPHY_RX_CONTROL0, 0x103d1110);
- AUX_REG_WRITE(AUX_DPHY_TX_CONTROL, 0x21c7a);
+ AUX_REG_WRITE(AUX_DPHY_TX_CONTROL, 0x21c7a);
+ }
//AUX_DPHY_TX_REF_CONTROL'AUX_TX_REF_DIV HW default is 0x32;
// Set AUX_TX_REF_DIV Divider to generate 2 MHz reference from refclk
@@ -315,14 +357,12 @@ void enc2_hw_init(struct link_encoder *enc)
}
static const struct link_encoder_funcs dcn20_link_enc_funcs = {
-#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT
.read_state = link_enc2_read_state,
-#endif
.validate_output_with_stream =
dcn10_link_encoder_validate_output_with_stream,
.hw_init = enc2_hw_init,
.setup = dcn10_link_encoder_setup,
- .enable_tmds_output = dcn10_link_encoder_enable_tmds_output,
+ .enable_tmds_output = dcn10_link_encoder_enable_tmds_output_with_clk_pattern_wa,
.enable_dp_output = dcn20_link_encoder_enable_dp_output,
.enable_dp_mst_output = dcn10_link_encoder_enable_dp_mst_output,
.disable_output = dcn10_link_encoder_disable_output,
@@ -341,7 +381,10 @@ static const struct link_encoder_funcs dcn20_link_enc_funcs = {
.fec_set_enable = enc2_fec_set_enable,
.fec_set_ready = enc2_fec_set_ready,
.fec_is_active = enc2_fec_is_active,
+ .get_dig_mode = dcn10_get_dig_mode,
.get_dig_frontend = dcn10_get_dig_frontend,
+ .is_in_alt_mode = dcn20_link_encoder_is_in_alt_mode,
+ .get_max_link_cap = dcn20_link_encoder_get_max_link_cap,
};
void dcn20_link_encoder_construct(
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_link_encoder.h b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_link_encoder.h
index 3736b5548a25..b2b266953d18 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_link_encoder.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_link_encoder.h
@@ -33,7 +33,143 @@
SRI(AUX_DPHY_TX_CONTROL, DP_AUX, id)
#define UNIPHY_MASK_SH_LIST(mask_sh)\
- LE_SF(UNIPHYA_CHANNEL_XBAR_CNTL, UNIPHY_LINK_ENABLE, mask_sh)
+ LE_SF(SYMCLKA_CLOCK_ENABLE, SYMCLKA_CLOCK_ENABLE, mask_sh),\
+ LE_SF(UNIPHYA_CHANNEL_XBAR_CNTL, UNIPHY_LINK_ENABLE, mask_sh),\
+ LE_SF(UNIPHYA_CHANNEL_XBAR_CNTL, UNIPHY_CHANNEL0_XBAR_SOURCE, mask_sh),\
+ LE_SF(UNIPHYA_CHANNEL_XBAR_CNTL, UNIPHY_CHANNEL1_XBAR_SOURCE, mask_sh),\
+ LE_SF(UNIPHYA_CHANNEL_XBAR_CNTL, UNIPHY_CHANNEL2_XBAR_SOURCE, mask_sh),\
+ LE_SF(UNIPHYA_CHANNEL_XBAR_CNTL, UNIPHY_CHANNEL3_XBAR_SOURCE, mask_sh)
+
+#define DPCS_MASK_SH_LIST(mask_sh)\
+ LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL3, RDPCS_PHY_DP_TX0_CLK_RDY, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL3, RDPCS_PHY_DP_TX0_DATA_EN, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL3, RDPCS_PHY_DP_TX1_CLK_RDY, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL3, RDPCS_PHY_DP_TX1_DATA_EN, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL3, RDPCS_PHY_DP_TX2_CLK_RDY, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL3, RDPCS_PHY_DP_TX2_DATA_EN, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL3, RDPCS_PHY_DP_TX3_CLK_RDY, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL3, RDPCS_PHY_DP_TX3_DATA_EN, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL4, RDPCS_PHY_DP_TX0_TERM_CTRL, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL4, RDPCS_PHY_DP_TX1_TERM_CTRL, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL4, RDPCS_PHY_DP_TX2_TERM_CTRL, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL4, RDPCS_PHY_DP_TX3_TERM_CTRL, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL11, RDPCS_PHY_DP_MPLLB_MULTIPLIER, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL5, RDPCS_PHY_DP_TX0_WIDTH, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL5, RDPCS_PHY_DP_TX0_RATE, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL5, RDPCS_PHY_DP_TX1_WIDTH, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL5, RDPCS_PHY_DP_TX1_RATE, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL6, RDPCS_PHY_DP_TX2_PSTATE, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL6, RDPCS_PHY_DP_TX3_PSTATE, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL6, RDPCS_PHY_DP_TX2_MPLL_EN, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL6, RDPCS_PHY_DP_TX3_MPLL_EN, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL7, RDPCS_PHY_DP_MPLLB_FRACN_QUOT, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL7, RDPCS_PHY_DP_MPLLB_FRACN_DEN, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL8, RDPCS_PHY_DP_MPLLB_SSC_PEAK, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL9, RDPCS_PHY_DP_MPLLB_SSC_UP_SPREAD, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL9, RDPCS_PHY_DP_MPLLB_SSC_STEPSIZE, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL10, RDPCS_PHY_DP_MPLLB_FRACN_REM, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL11, RDPCS_PHY_DP_REF_CLK_MPLLB_DIV, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL11, RDPCS_PHY_HDMI_MPLLB_HDMI_DIV, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL12, RDPCS_PHY_DP_MPLLB_SSC_EN, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL12, RDPCS_PHY_DP_MPLLB_DIV5_CLK_EN, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL12, RDPCS_PHY_DP_MPLLB_TX_CLK_DIV, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL12, RDPCS_PHY_DP_MPLLB_WORD_DIV2_EN, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL12, RDPCS_PHY_DP_MPLLB_STATE, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL13, RDPCS_PHY_DP_MPLLB_DIV_CLK_EN, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL13, RDPCS_PHY_DP_MPLLB_DIV_MULTIPLIER, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL14, RDPCS_PHY_DP_MPLLB_FRACN_EN, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL14, RDPCS_PHY_DP_MPLLB_PMIX_EN, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_CNTL, RDPCS_TX_FIFO_LANE0_EN, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_CNTL, RDPCS_TX_FIFO_LANE1_EN, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_CNTL, RDPCS_TX_FIFO_LANE2_EN, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_CNTL, RDPCS_TX_FIFO_LANE3_EN, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_CNTL, RDPCS_TX_FIFO_EN, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_CNTL, RDPCS_TX_FIFO_RD_START_DELAY, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_CLOCK_CNTL, RDPCS_EXT_REFCLK_EN, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_CLOCK_CNTL, RDPCS_SRAMCLK_BYPASS, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_CLOCK_CNTL, RDPCS_SRAMCLK_EN, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_CLOCK_CNTL, RDPCS_SRAMCLK_CLOCK_ON, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_CLOCK_CNTL, RDPCS_SYMCLK_DIV2_CLOCK_ON, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_CLOCK_CNTL, RDPCS_SYMCLK_DIV2_GATE_DIS, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_CLOCK_CNTL, RDPCS_SYMCLK_DIV2_EN, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL3, RDPCS_PHY_DP_TX0_DISABLE, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL3, RDPCS_PHY_DP_TX1_DISABLE, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL3, RDPCS_PHY_DP_TX2_DISABLE, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL3, RDPCS_PHY_DP_TX3_DISABLE, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL3, RDPCS_PHY_DP_TX0_REQ, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL3, RDPCS_PHY_DP_TX1_REQ, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL3, RDPCS_PHY_DP_TX2_REQ, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL3, RDPCS_PHY_DP_TX3_REQ, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL3, RDPCS_PHY_DP_TX0_ACK, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL3, RDPCS_PHY_DP_TX1_ACK, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL3, RDPCS_PHY_DP_TX2_ACK, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL3, RDPCS_PHY_DP_TX3_ACK, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL3, RDPCS_PHY_DP_TX0_RESET, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL3, RDPCS_PHY_DP_TX1_RESET, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL3, RDPCS_PHY_DP_TX2_RESET, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL3, RDPCS_PHY_DP_TX3_RESET, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL0, RDPCS_PHY_RESET, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL0, RDPCS_PHY_CR_MUX_SEL, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL0, RDPCS_PHY_REF_RANGE, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL0, RDPCS_SRAM_BYPASS, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL0, RDPCS_SRAM_EXT_LD_DONE, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL0, RDPCS_PHY_HDMIMODE_ENABLE, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL0, RDPCS_SRAM_INIT_DONE, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL2, RDPCS_PHY_DP4_POR, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_PLL_UPDATE_DATA, RDPCS_PLL_UPDATE_DATA, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_INTERRUPT_CONTROL, RDPCS_REG_FIFO_ERROR_MASK, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_INTERRUPT_CONTROL, RDPCS_TX_FIFO_ERROR_MASK, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_INTERRUPT_CONTROL, RDPCS_DPALT_DISABLE_TOGGLE_MASK, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_INTERRUPT_CONTROL, RDPCS_DPALT_4LANE_TOGGLE_MASK, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCS_TX_CR_ADDR, RDPCS_TX_CR_ADDR, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCS_TX_CR_DATA, RDPCS_TX_CR_DATA, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_PHY_FUSE0, RDPCS_PHY_DP_MPLLB_V2I, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_PHY_FUSE0, RDPCS_PHY_DP_TX0_EQ_MAIN, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_PHY_FUSE0, RDPCS_PHY_DP_TX0_EQ_PRE, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_PHY_FUSE0, RDPCS_PHY_DP_TX0_EQ_POST, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_PHY_FUSE0, RDPCS_PHY_DP_MPLLB_FREQ_VCO, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_PHY_FUSE1, RDPCS_PHY_DP_MPLLB_CP_INT, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_PHY_FUSE1, RDPCS_PHY_DP_MPLLB_CP_PROP, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_PHY_FUSE1, RDPCS_PHY_DP_TX1_EQ_MAIN, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_PHY_FUSE1, RDPCS_PHY_DP_TX1_EQ_PRE, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_PHY_FUSE1, RDPCS_PHY_DP_TX1_EQ_POST, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_PHY_FUSE2, RDPCS_PHY_DP_TX2_EQ_MAIN, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_PHY_FUSE2, RDPCS_PHY_DP_TX2_EQ_PRE, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_PHY_FUSE2, RDPCS_PHY_DP_TX2_EQ_POST, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_PHY_FUSE3, RDPCS_PHY_DP_TX3_EQ_MAIN, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_PHY_FUSE3, RDPCS_PHY_DCO_FINETUNE, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_PHY_FUSE3, RDPCS_PHY_DCO_RANGE, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_PHY_FUSE3, RDPCS_PHY_DP_TX3_EQ_PRE, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_PHY_FUSE3, RDPCS_PHY_DP_TX3_EQ_POST, mask_sh),\
+ LE_SF(DPCSTX0_DPCSTX_TX_CLOCK_CNTL, DPCS_SYMCLK_CLOCK_ON, mask_sh),\
+ LE_SF(DPCSTX0_DPCSTX_TX_CLOCK_CNTL, DPCS_SYMCLK_GATE_DIS, mask_sh),\
+ LE_SF(DPCSTX0_DPCSTX_TX_CLOCK_CNTL, DPCS_SYMCLK_EN, mask_sh),\
+ LE_SF(DPCSTX0_DPCSTX_TX_CNTL, DPCS_TX_DATA_SWAP, mask_sh),\
+ LE_SF(DPCSTX0_DPCSTX_TX_CNTL, DPCS_TX_DATA_ORDER_INVERT, mask_sh),\
+ LE_SF(DPCSTX0_DPCSTX_TX_CNTL, DPCS_TX_FIFO_EN, mask_sh),\
+ LE_SF(DPCSTX0_DPCSTX_TX_CNTL, DPCS_TX_FIFO_RD_START_DELAY, mask_sh)
+
+#define DPCS_DCN2_MASK_SH_LIST(mask_sh)\
+ DPCS_MASK_SH_LIST(mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_PHY_RX_LD_VAL, RDPCS_PHY_RX_REF_LD_VAL, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_PHY_RX_LD_VAL, RDPCS_PHY_RX_VCO_LD_VAL, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL6, RDPCS_PHY_DPALT_DISABLE_ACK, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL6, RDPCS_PHY_DP_TX0_PSTATE, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL6, RDPCS_PHY_DP_TX1_PSTATE, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL6, RDPCS_PHY_DP_TX0_MPLL_EN, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL6, RDPCS_PHY_DP_TX1_MPLL_EN, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL6, RDPCS_PHY_DP_REF_CLK_EN, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL5, RDPCS_PHY_DP_TX2_WIDTH, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL5, RDPCS_PHY_DP_TX2_RATE, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL5, RDPCS_PHY_DP_TX3_WIDTH, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL5, RDPCS_PHY_DP_TX3_RATE, mask_sh),\
+ LE_SF(DCIO_SOFT_RESET, UNIPHYA_SOFT_RESET, mask_sh),\
+ LE_SF(DCIO_SOFT_RESET, UNIPHYB_SOFT_RESET, mask_sh),\
+ LE_SF(DCIO_SOFT_RESET, UNIPHYC_SOFT_RESET, mask_sh),\
+ LE_SF(DCIO_SOFT_RESET, UNIPHYD_SOFT_RESET, mask_sh),\
+ LE_SF(DCIO_SOFT_RESET, UNIPHYE_SOFT_RESET, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL6, RDPCS_PHY_DPALT_DP4, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL6, RDPCS_PHY_DPALT_DISABLE, mask_sh)
#define LINK_ENCODER_MASK_SH_LIST_DCN20(mask_sh)\
LINK_ENCODER_MASK_SH_LIST_DCN10(mask_sh),\
@@ -57,12 +193,56 @@
LE_SF(DP_AUX0_AUX_DPHY_RX_CONTROL0, AUX_RX_DETECTION_THRESHOLD, mask_sh), \
LE_SF(DP_AUX0_AUX_DPHY_TX_CONTROL, AUX_TX_PRECHARGE_LEN, mask_sh),\
LE_SF(DP_AUX0_AUX_DPHY_TX_CONTROL, AUX_TX_PRECHARGE_SYMBOLS, mask_sh),\
- LE_SF(DP_AUX0_AUX_DPHY_TX_CONTROL, AUX_MODE_DET_CHECK_DELAY, mask_sh)
+ LE_SF(DP_AUX0_AUX_DPHY_TX_CONTROL, AUX_MODE_DET_CHECK_DELAY, mask_sh),\
+ LE_SF(DP_AUX0_AUX_DPHY_RX_CONTROL1, AUX_RX_PRECHARGE_SKIP, mask_sh),\
+ LE_SF(DP_AUX0_AUX_DPHY_RX_CONTROL1, AUX_RX_TIMEOUT_LEN, mask_sh),\
+ LE_SF(DP_AUX0_AUX_DPHY_RX_CONTROL1, AUX_RX_TIMEOUT_LEN_MUL, mask_sh)
#define UNIPHY_DCN2_REG_LIST(id) \
SRI(CLOCK_ENABLE, SYMCLK, id), \
SRI(CHANNEL_XBAR_CNTL, UNIPHY, id)
+#define DPCS_DCN2_CMN_REG_LIST(id) \
+ SRI(DIG_LANE_ENABLE, DIG, id), \
+ SRI(TMDS_CTL_BITS, DIG, id), \
+ SRI(RDPCSTX_PHY_CNTL3, RDPCSTX, id), \
+ SRI(RDPCSTX_PHY_CNTL4, RDPCSTX, id), \
+ SRI(RDPCSTX_PHY_CNTL5, RDPCSTX, id), \
+ SRI(RDPCSTX_PHY_CNTL6, RDPCSTX, id), \
+ SRI(RDPCSTX_PHY_CNTL7, RDPCSTX, id), \
+ SRI(RDPCSTX_PHY_CNTL8, RDPCSTX, id), \
+ SRI(RDPCSTX_PHY_CNTL9, RDPCSTX, id), \
+ SRI(RDPCSTX_PHY_CNTL10, RDPCSTX, id), \
+ SRI(RDPCSTX_PHY_CNTL11, RDPCSTX, id), \
+ SRI(RDPCSTX_PHY_CNTL12, RDPCSTX, id), \
+ SRI(RDPCSTX_PHY_CNTL13, RDPCSTX, id), \
+ SRI(RDPCSTX_PHY_CNTL14, RDPCSTX, id), \
+ SRI(RDPCSTX_CNTL, RDPCSTX, id), \
+ SRI(RDPCSTX_CLOCK_CNTL, RDPCSTX, id), \
+ SRI(RDPCSTX_INTERRUPT_CONTROL, RDPCSTX, id), \
+ SRI(RDPCSTX_PHY_CNTL0, RDPCSTX, id), \
+ SRI(RDPCSTX_PHY_CNTL2, RDPCSTX, id), \
+ SRI(RDPCSTX_PLL_UPDATE_DATA, RDPCSTX, id), \
+ SRI(RDPCS_TX_CR_ADDR, RDPCSTX, id), \
+ SRI(RDPCS_TX_CR_DATA, RDPCSTX, id), \
+ SRI(RDPCSTX_PHY_FUSE0, RDPCSTX, id), \
+ SRI(RDPCSTX_PHY_FUSE1, RDPCSTX, id), \
+ SRI(RDPCSTX_PHY_FUSE2, RDPCSTX, id), \
+ SRI(RDPCSTX_PHY_FUSE3, RDPCSTX, id), \
+ SRI(DPCSTX_TX_CLOCK_CNTL, DPCSTX, id), \
+ SRI(DPCSTX_TX_CNTL, DPCSTX, id), \
+ SR(RDPCSTX0_RDPCSTX_SCRATCH)
+
+
+#define DPCS_DCN2_REG_LIST(id) \
+ DPCS_DCN2_CMN_REG_LIST(id), \
+ SRI(RDPCSTX_PHY_RX_LD_VAL, RDPCSTX, id),\
+ SRI(RDPCSTX_DMCU_DPALT_DIS_BLOCK_REG, RDPCSTX, id)
+
+#define LE_DCN2_REG_LIST(id) \
+ LE_DCN10_REG_LIST(id), \
+ SR(DCIO_SOFT_RESET)
+
struct mpll_cfg {
uint32_t mpllb_ana_v2i;
uint32_t mpllb_ana_freq_vco;
@@ -91,6 +271,15 @@ struct mpll_cfg {
uint32_t ref_range;
uint32_t ref_clk;
bool hdmimode_enable;
+ bool sup_pre_hp;
+ bool dp_tx0_vergdrv_byp;
+ bool dp_tx1_vergdrv_byp;
+ bool dp_tx2_vergdrv_byp;
+ bool dp_tx3_vergdrv_byp;
+ uint32_t tx_peaking_lvl;
+ uint32_t ctr_reqs_pll;
+
+
};
struct dpcssys_phy_seq_cfg {
@@ -151,15 +340,17 @@ void enc2_fec_set_ready(struct link_encoder *enc, bool ready);
bool enc2_fec_is_active(struct link_encoder *enc);
void enc2_hw_init(struct link_encoder *enc);
-#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT
void link_enc2_read_state(struct link_encoder *enc, struct link_enc_state *s);
-#endif
void dcn20_link_encoder_enable_dp_output(
struct link_encoder *enc,
const struct dc_link_settings *link_settings,
enum clock_source_id clock_source);
+bool dcn20_link_encoder_is_in_alt_mode(struct link_encoder *enc);
+void dcn20_link_encoder_get_max_link_cap(struct link_encoder *enc,
+ struct dc_link_settings *link_settings);
+
void dcn20_link_encoder_construct(
struct dcn20_link_encoder *enc20,
const struct encoder_init_data *init_data,
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_mmhubbub.h b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_mmhubbub.h
index 3fccd5eeecbb..7bcee5894d2e 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_mmhubbub.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_mmhubbub.h
@@ -36,26 +36,6 @@
#define BASE(seg) \
BASE_INNER(seg)
-#define SR(reg_name)\
- .reg_name = BASE(mm ## reg_name ## _BASE_IDX) + \
- mm ## reg_name
-
-#define SRI(reg_name, block, id)\
- .reg_name = BASE(mm ## block ## id ## _ ## reg_name ## _BASE_IDX) + \
- mm ## block ## id ## _ ## reg_name
-
-#define SRI2(reg_name, block, id)\
- .reg_name = BASE(mm ## reg_name ## _BASE_IDX) + \
- mm ## reg_name
-
-#define SRII(reg_name, block, id)\
- .reg_name[id] = BASE(mm ## block ## id ## _ ## reg_name ## _BASE_IDX) + \
- mm ## block ## id ## _ ## reg_name
-
-#define SF(reg_name, field_name, post_fix)\
- .field_name = reg_name ## __ ## field_name ## post_fix
-
-
#define MCIF_WB_COMMON_REG_LIST_DCN2_0(inst) \
SRI(MCIF_WB_BUFMGR_SW_CONTROL, MCIF_WB, inst),\
SRI(MCIF_WB_BUFMGR_CUR_LINE_R, MCIF_WB, inst),\
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_mpc.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_mpc.c
index 240749e4cf83..15734db0cdea 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_mpc.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_mpc.c
@@ -33,6 +33,9 @@
#define REG(reg)\
mpc20->mpc_regs->reg
+#define IND_REG(index) \
+ (index)
+
#define CTX \
mpc20->base.ctx
@@ -64,7 +67,6 @@ void mpc2_update_blending(
REG_SET(MPCC_BOT_GAIN_INSIDE[mpcc_id], 0, MPCC_BOT_GAIN_INSIDE, blnd_cfg->bottom_inside_gain);
REG_SET(MPCC_BOT_GAIN_OUTSIDE[mpcc_id], 0, MPCC_BOT_GAIN_OUTSIDE, blnd_cfg->bottom_outside_gain);
- mpc1_set_bg_color(mpc, &blnd_cfg->black_color, mpcc_id);
mpcc->blnd_cfg = *blnd_cfg;
}
@@ -132,19 +134,33 @@ void mpc2_set_output_csc(
const uint16_t *regval,
enum mpc_output_csc_mode ocsc_mode)
{
+ uint32_t cur_mode;
struct dcn20_mpc *mpc20 = TO_DCN20_MPC(mpc);
struct color_matrices_reg ocsc_regs;
- REG_SET(CSC_MODE[opp_id], 0, MPC_OCSC_MODE, ocsc_mode);
-
- if (ocsc_mode == MPC_OUTPUT_CSC_DISABLE)
+ if (ocsc_mode == MPC_OUTPUT_CSC_DISABLE) {
+ REG_SET(CSC_MODE[opp_id], 0, MPC_OCSC_MODE, ocsc_mode);
return;
+ }
if (regval == NULL) {
BREAK_TO_DEBUGGER();
return;
}
+ /* determine which CSC coefficients (A or B) we are using
+ * currently. select the alternate set to double buffer
+ * the CSC update so CSC is updated on frame boundary
+ */
+ IX_REG_GET(MPC_OCSC_TEST_DEBUG_INDEX, MPC_OCSC_TEST_DEBUG_DATA,
+ MPC_OCSC_TEST_DEBUG_DATA_STATUS_IDX,
+ MPC_OCSC_TEST_DEBUG_DATA_OCSC_MODE, &cur_mode);
+
+ if (cur_mode != MPC_OUTPUT_CSC_COEF_A)
+ ocsc_mode = MPC_OUTPUT_CSC_COEF_A;
+ else
+ ocsc_mode = MPC_OUTPUT_CSC_COEF_B;
+
ocsc_regs.shifts.csc_c11 = mpc20->mpc_shift->MPC_OCSC_C11_A;
ocsc_regs.masks.csc_c11 = mpc20->mpc_mask->MPC_OCSC_C11_A;
ocsc_regs.shifts.csc_c12 = mpc20->mpc_shift->MPC_OCSC_C12_A;
@@ -157,10 +173,13 @@ void mpc2_set_output_csc(
ocsc_regs.csc_c11_c12 = REG(CSC_C11_C12_B[opp_id]);
ocsc_regs.csc_c33_c34 = REG(CSC_C33_C34_B[opp_id]);
}
+
cm_helper_program_color_matrices(
mpc20->base.ctx,
regval,
&ocsc_regs);
+
+ REG_SET(CSC_MODE[opp_id], 0, MPC_OCSC_MODE, ocsc_mode);
}
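
The rework above double-buffers the output CSC: the coefficient bank that is not currently active is programmed first, and MPC_OCSC_MODE is switched only afterwards so the new matrix takes effect on a frame boundary. A standalone sketch of the bank selection, with stub names:

/* Standalone sketch: A/B bank selection for double-buffered CSC updates. */
#include <stdio.h>

enum csc_bank { CSC_BYPASS = 0, CSC_COEF_A = 1, CSC_COEF_B = 2 };

static enum csc_bank pick_inactive_bank(enum csc_bank current_bank)
{
	return (current_bank != CSC_COEF_A) ? CSC_COEF_A : CSC_COEF_B;
}

int main(void)
{
	enum csc_bank cur = CSC_COEF_A;
	enum csc_bank next = pick_inactive_bank(cur);

	/* 1. write the coefficients into the 'next' bank registers
	 * 2. only then program MPC_OCSC_MODE = next, so the swap is atomic
	 */
	printf("program bank %d, then switch mode\n", next);
	return 0;
}
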
void mpc2_set_ocsc_default(
@@ -169,14 +188,16 @@ void mpc2_set_ocsc_default(
enum dc_color_space color_space,
enum mpc_output_csc_mode ocsc_mode)
{
+ uint32_t cur_mode;
struct dcn20_mpc *mpc20 = TO_DCN20_MPC(mpc);
uint32_t arr_size;
struct color_matrices_reg ocsc_regs;
const uint16_t *regval = NULL;
- REG_SET(CSC_MODE[opp_id], 0, MPC_OCSC_MODE, ocsc_mode);
- if (ocsc_mode == MPC_OUTPUT_CSC_DISABLE)
+ if (ocsc_mode == MPC_OUTPUT_CSC_DISABLE) {
+ REG_SET(CSC_MODE[opp_id], 0, MPC_OCSC_MODE, ocsc_mode);
return;
+ }
regval = find_color_matrix(color_space, &arr_size);
@@ -185,6 +206,19 @@ void mpc2_set_ocsc_default(
return;
}
+ /* determine which CSC coefficients (A or B) we are using
+ * currently. select the alternate set to double buffer
+ * the CSC update so CSC is updated on frame boundary
+ */
+ IX_REG_GET(MPC_OCSC_TEST_DEBUG_INDEX, MPC_OCSC_TEST_DEBUG_DATA,
+ MPC_OCSC_TEST_DEBUG_DATA_STATUS_IDX,
+ MPC_OCSC_TEST_DEBUG_DATA_OCSC_MODE, &cur_mode);
+
+ if (cur_mode != MPC_OUTPUT_CSC_COEF_A)
+ ocsc_mode = MPC_OUTPUT_CSC_COEF_A;
+ else
+ ocsc_mode = MPC_OUTPUT_CSC_COEF_B;
+
ocsc_regs.shifts.csc_c11 = mpc20->mpc_shift->MPC_OCSC_C11_A;
ocsc_regs.masks.csc_c11 = mpc20->mpc_mask->MPC_OCSC_C11_A;
ocsc_regs.shifts.csc_c12 = mpc20->mpc_shift->MPC_OCSC_C12_A;
@@ -203,6 +237,8 @@ void mpc2_set_ocsc_default(
mpc20->base.ctx,
regval,
&ocsc_regs);
+
+ REG_SET(CSC_MODE[opp_id], 0, MPC_OCSC_MODE, ocsc_mode);
}
static void mpc2_ogam_get_reg_field(
@@ -233,14 +269,14 @@ static void mpc2_ogam_get_reg_field(
reg->masks.exp_resion_start_segment = mpc20->mpc_mask->MPCC_OGAM_RAMA_EXP_REGION_START_SEGMENT_B;
}
-static void mpc20_power_on_ogam_lut(
+void mpc20_power_on_ogam_lut(
struct mpc *mpc, int mpcc_id,
bool power_on)
{
struct dcn20_mpc *mpc20 = TO_DCN20_MPC(mpc);
REG_SET(MPCC_MEM_PWR_CTRL[mpcc_id], 0,
- MPCC_OGAM_MEM_PWR_FORCE, power_on == true ? 0:1);
+ MPCC_OGAM_MEM_PWR_DIS, power_on == true ? 1:0);
}
@@ -345,6 +381,9 @@ static void mpc20_program_ogam_pwl(
uint32_t i;
struct dcn20_mpc *mpc20 = TO_DCN20_MPC(mpc);
+ PERF_TRACE();
+ REG_SEQ_START();
+
for (i = 0 ; i < num; i++) {
REG_SET(MPCC_OGAM_LUT_DATA[mpcc_id], 0, MPCC_OGAM_LUT_DATA, rgb[i].red_reg);
REG_SET(MPCC_OGAM_LUT_DATA[mpcc_id], 0, MPCC_OGAM_LUT_DATA, rgb[i].green_reg);
@@ -361,13 +400,17 @@ static void mpc20_program_ogam_pwl(
}
-void apply_DEDCN20_305_wa(
- struct mpc *mpc,
- int mpcc_id, enum dc_lut_mode current_mode,
- enum dc_lut_mode next_mode)
+static void apply_DEDCN20_305_wa(struct mpc *mpc, int mpcc_id,
+ enum dc_lut_mode current_mode,
+ enum dc_lut_mode next_mode)
{
struct dcn20_mpc *mpc20 = TO_DCN20_MPC(mpc);
+ if (mpc->ctx->dc->debug.cm_in_bypass) {
+ REG_SET(MPCC_OGAM_MODE[mpcc_id], 0, MPCC_OGAM_MODE, 0);
+ return;
+ }
+
if (mpc->ctx->dc->work_arounds.dedcn20_305_wa == false) {
/*hw fixed in new review*/
return;
@@ -390,10 +433,16 @@ void mpc2_set_output_gamma(
enum dc_lut_mode next_mode;
struct dcn20_mpc *mpc20 = TO_DCN20_MPC(mpc);
+ if (mpc->ctx->dc->debug.cm_in_bypass) {
+ REG_SET(MPCC_OGAM_MODE[mpcc_id], 0, MPCC_OGAM_MODE, 0);
+ return;
+ }
+
if (params == NULL) {
REG_SET(MPCC_OGAM_MODE[mpcc_id], 0, MPCC_OGAM_MODE, 0);
return;
}
+
current_mode = mpc20_get_ogam_current(mpc, mpcc_id);
if (current_mode == LUT_BYPASS || current_mode == LUT_RAM_A)
next_mode = LUT_RAM_B;
@@ -401,7 +450,7 @@ void mpc2_set_output_gamma(
next_mode = LUT_RAM_A;
mpc20_power_on_ogam_lut(mpc, mpcc_id, true);
- mpc20_configure_ogam_lut(mpc, mpcc_id, next_mode == LUT_RAM_A ? true:false);
+ mpc20_configure_ogam_lut(mpc, mpcc_id, next_mode == LUT_RAM_A);
if (next_mode == LUT_RAM_A)
mpc2_program_luta(mpc, mpcc_id, params);
@@ -435,24 +484,28 @@ void mpc2_assert_mpcc_idle_before_connect(struct mpc *mpc, int mpcc_id)
{
struct dcn20_mpc *mpc20 = TO_DCN20_MPC(mpc);
unsigned int top_sel, mpc_busy, mpc_idle, mpc_disabled;
- REG_GET(MPCC_STATUS[mpcc_id], MPCC_DISABLED, &mpc_disabled);
-
- if (mpc_disabled) {
- ASSERT(0);
- return;
- }
REG_GET(MPCC_TOP_SEL[mpcc_id],
MPCC_TOP_SEL, &top_sel);
- if (top_sel == 0xf) {
- REG_GET_2(MPCC_STATUS[mpcc_id],
- MPCC_BUSY, &mpc_busy,
- MPCC_IDLE, &mpc_idle);
+ REG_GET_3(MPCC_STATUS[mpcc_id],
+ MPCC_BUSY, &mpc_busy,
+ MPCC_IDLE, &mpc_idle,
+ MPCC_DISABLED, &mpc_disabled);
- ASSERT(mpc_busy == 0);
- ASSERT(mpc_idle == 1);
+ if (top_sel == 0xf) {
+ ASSERT(!mpc_busy);
+ ASSERT(mpc_idle);
+ ASSERT(mpc_disabled);
+ } else {
+ ASSERT(!mpc_disabled);
+ ASSERT(!mpc_idle);
}
+
+ REG_SEQ_SUBMIT();
+ PERF_TRACE();
+ REG_SEQ_WAIT_DONE();
+ PERF_TRACE();
}
static void mpc2_init_mpcc(struct mpcc *mpcc, int mpcc_inst)
@@ -471,7 +524,7 @@ static void mpc2_init_mpcc(struct mpcc *mpcc, int mpcc_inst)
mpcc->sm_cfg.enable = false;
}
-struct mpcc *mpc2_get_mpcc_for_dpp(struct mpc_tree *tree, int dpp_id)
+static struct mpcc *mpc2_get_mpcc_for_dpp(struct mpc_tree *tree, int dpp_id)
{
struct mpcc *tmp_mpcc = tree->opp_list;
@@ -488,7 +541,9 @@ const struct mpc_funcs dcn20_mpc_funcs = {
.insert_plane = mpc1_insert_plane,
.remove_mpcc = mpc1_remove_mpcc,
.mpc_init = mpc1_mpc_init,
+ .mpc_init_single_inst = mpc1_mpc_init_single_inst,
.update_blending = mpc2_update_blending,
+ .cursor_lock = mpc1_cursor_lock,
.get_mpcc_for_dpp = mpc2_get_mpcc_for_dpp,
.wait_for_idle = mpc2_assert_idle_mpcc,
.assert_mpcc_idle_before_connect = mpc2_assert_mpcc_idle_before_connect,
@@ -498,6 +553,9 @@ const struct mpc_funcs dcn20_mpc_funcs = {
.set_output_csc = mpc2_set_output_csc,
.set_ocsc_default = mpc2_set_ocsc_default,
.set_output_gamma = mpc2_set_output_gamma,
+ .power_on_mpc_mem_pwr = mpc20_power_on_ogam_lut,
+ .get_mpc_out_mux = mpc1_get_mpc_out_mux,
+ .set_bg_color = mpc1_set_bg_color,
};
void dcn20_mpc_construct(struct dcn20_mpc *mpc20,
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_mpc.h b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_mpc.h
index 9750095d2d73..496658f420db 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_mpc.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_mpc.h
@@ -80,6 +80,10 @@
SRII(DENORM_CLAMP_G_Y, MPC_OUT, inst),\
SRII(DENORM_CLAMP_B_CB, MPC_OUT, inst)
+#define MPC_DBG_REG_LIST_DCN2_0() \
+ SR(MPC_OCSC_TEST_DEBUG_DATA),\
+ SR(MPC_OCSC_TEST_DEBUG_INDEX)
+
#define MPC_REG_VARIABLE_LIST_DCN2_0 \
MPC_COMMON_REG_VARIABLE_LIST \
uint32_t MPCC_TOP_GAIN[MAX_MPCC]; \
@@ -118,6 +122,8 @@
uint32_t MPCC_OGAM_LUT_RAM_CONTROL[MAX_MPCC];\
uint32_t MPCC_OGAM_LUT_DATA[MAX_MPCC];\
uint32_t MPCC_OGAM_MODE[MAX_MPCC];\
+ uint32_t MPC_OCSC_TEST_DEBUG_DATA;\
+ uint32_t MPC_OCSC_TEST_DEBUG_INDEX;\
uint32_t CSC_MODE[MAX_OPP]; \
uint32_t CSC_C11_C12_A[MAX_OPP]; \
uint32_t CSC_C33_C34_A[MAX_OPP]; \
@@ -134,6 +140,7 @@
SF(MPCC0_MPCC_TOP_GAIN, MPCC_TOP_GAIN, mask_sh),\
SF(MPCC0_MPCC_BOT_GAIN_INSIDE, MPCC_BOT_GAIN_INSIDE, mask_sh),\
SF(MPCC0_MPCC_BOT_GAIN_OUTSIDE, MPCC_BOT_GAIN_OUTSIDE, mask_sh),\
+ SF(MPC_OCSC_TEST_DEBUG_INDEX, MPC_OCSC_TEST_DEBUG_INDEX, mask_sh),\
SF(MPC_OUT0_CSC_MODE, MPC_OCSC_MODE, mask_sh),\
SF(MPC_OUT0_CSC_C11_C12_A, MPC_OCSC_C11_A, mask_sh),\
SF(MPC_OUT0_CSC_C11_C12_A, MPC_OCSC_C12_A, mask_sh),\
@@ -159,6 +166,7 @@
SF(MPCC_OGAM0_MPCC_OGAM_RAMB_START_CNTL_B, MPCC_OGAM_RAMB_EXP_REGION_START_B, mask_sh),\
SF(MPCC_OGAM0_MPCC_OGAM_RAMB_START_CNTL_B, MPCC_OGAM_RAMB_EXP_REGION_START_SEGMENT_B, mask_sh),\
SF(MPCC0_MPCC_MEM_PWR_CTRL, MPCC_OGAM_MEM_PWR_FORCE, mask_sh),\
+ SF(MPCC0_MPCC_MEM_PWR_CTRL, MPCC_OGAM_MEM_PWR_DIS, mask_sh),\
SF(MPCC_OGAM0_MPCC_OGAM_LUT_INDEX, MPCC_OGAM_LUT_INDEX, mask_sh),\
SF(MPCC_OGAM0_MPCC_OGAM_LUT_RAM_CONTROL, MPCC_OGAM_LUT_WRITE_EN_MASK, mask_sh),\
SF(MPCC_OGAM0_MPCC_OGAM_LUT_RAM_CONTROL, MPCC_OGAM_LUT_RAM_SEL, mask_sh),\
@@ -171,7 +179,22 @@
SF(MPC_OUT0_DENORM_CLAMP_G_Y, MPC_OUT_DENORM_CLAMP_MAX_G_Y, mask_sh),\
SF(MPC_OUT0_DENORM_CLAMP_G_Y, MPC_OUT_DENORM_CLAMP_MIN_G_Y, mask_sh),\
SF(MPC_OUT0_DENORM_CLAMP_B_CB, MPC_OUT_DENORM_CLAMP_MAX_B_CB, mask_sh),\
- SF(MPC_OUT0_DENORM_CLAMP_B_CB, MPC_OUT_DENORM_CLAMP_MIN_B_CB, mask_sh)
+ SF(MPC_OUT0_DENORM_CLAMP_B_CB, MPC_OUT_DENORM_CLAMP_MIN_B_CB, mask_sh),\
+ SF(CUR_VUPDATE_LOCK_SET0, CUR_VUPDATE_LOCK_SET, mask_sh)
+
+/*
+ * DCN2 MPC_OCSC debug status register:
+ *
+ * Status index including current OCSC Mode is 1
+ * OCSC Mode: [1..0]
+ */
+#define MPC_OCSC_TEST_DEBUG_DATA_STATUS_IDX 1
+
+#define MPC_DEBUG_REG_LIST_SH_DCN20 \
+ .MPC_OCSC_TEST_DEBUG_DATA_OCSC_MODE = 0
+
+#define MPC_DEBUG_REG_LIST_MASK_DCN20 \
+ .MPC_OCSC_TEST_DEBUG_DATA_OCSC_MODE = 0x3
#define MPC_REG_FIELD_LIST_DCN2_0(type) \
MPC_REG_FIELD_LIST(type)\
@@ -180,6 +203,8 @@
type MPCC_TOP_GAIN;\
type MPCC_BOT_GAIN_INSIDE;\
type MPCC_BOT_GAIN_OUTSIDE;\
+ type MPC_OCSC_TEST_DEBUG_DATA_OCSC_MODE;\
+ type MPC_OCSC_TEST_DEBUG_INDEX;\
type MPC_OCSC_MODE;\
type MPC_OCSC_C11_A;\
type MPC_OCSC_C12_A;\
@@ -217,7 +242,8 @@
type MPC_OUT_DENORM_CLAMP_MIN_G_Y;\
type MPC_OUT_DENORM_CLAMP_MAX_B_CB;\
type MPC_OUT_DENORM_CLAMP_MIN_B_CB;\
- type MPCC_DISABLED;
+ type MPCC_DISABLED;\
+ type MPCC_OGAM_MEM_PWR_DIS;
struct dcn20_mpc_registers {
MPC_REG_VARIABLE_LIST_DCN2_0
@@ -282,4 +308,5 @@ void mpc2_set_output_gamma(
void mpc2_assert_idle_mpcc(struct mpc *mpc, int id);
void mpc2_assert_mpcc_idle_before_connect(struct mpc *mpc, int mpcc_id);
+void mpc20_power_on_ogam_lut(struct mpc *mpc, int mpcc_id, bool power_on);
#endif
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_opp.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_opp.c
index d9e7c711a71c..0784d0198661 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_opp.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_opp.c
@@ -41,10 +41,12 @@
void opp2_set_disp_pattern_generator(
struct output_pixel_processor *opp,
enum controller_dp_test_pattern test_pattern,
+ enum controller_dp_color_space color_space,
enum dc_color_depth color_depth,
const struct tg_color *solid_color,
int width,
- int height)
+ int height,
+ int offset)
{
struct dcn20_opp *oppn20 = TO_DCN20_OPP(opp);
enum test_pattern_color_format bit_depth;
@@ -91,6 +93,11 @@ void opp2_set_disp_pattern_generator(
DPG_ACTIVE_WIDTH, width,
DPG_ACTIVE_HEIGHT, height);
+ /* set DPG offset */
+ REG_SET_2(DPG_OFFSET_SEGMENT, 0,
+ DPG_X_OFFSET, offset,
+ DPG_SEGMENT_WIDTH, 0);
+
switch (test_pattern) {
case CONTROLLER_DP_TEST_PATTERN_COLORSQUARES:
case CONTROLLER_DP_TEST_PATTERN_COLORSQUARES_CEA:
@@ -100,9 +107,22 @@ void opp2_set_disp_pattern_generator(
TEST_PATTERN_DYN_RANGE_CEA :
TEST_PATTERN_DYN_RANGE_VESA);
+ switch (color_space) {
+ case CONTROLLER_DP_COLOR_SPACE_YCBCR601:
+ mode = TEST_PATTERN_MODE_COLORSQUARES_YCBCR601;
+ break;
+ case CONTROLLER_DP_COLOR_SPACE_YCBCR709:
+ mode = TEST_PATTERN_MODE_COLORSQUARES_YCBCR709;
+ break;
+ case CONTROLLER_DP_COLOR_SPACE_RGB:
+ default:
+ mode = TEST_PATTERN_MODE_COLORSQUARES_RGB;
+ break;
+ }
+
REG_UPDATE_6(DPG_CONTROL,
DPG_EN, 1,
- DPG_MODE, TEST_PATTERN_MODE_COLORSQUARES_RGB,
+ DPG_MODE, mode,
DPG_DYNAMIC_RANGE, dyn_range,
DPG_BIT_DEPTH, bit_depth,
DPG_VRES, 6,
@@ -270,6 +290,17 @@ void opp2_set_disp_pattern_generator(
}
}
+void opp2_program_dpg_dimensions(
+ struct output_pixel_processor *opp,
+ int width, int height)
+{
+ struct dcn20_opp *oppn20 = TO_DCN20_OPP(opp);
+
+ REG_SET_2(DPG_DIMENSIONS, 0,
+ DPG_ACTIVE_WIDTH, width,
+ DPG_ACTIVE_HEIGHT, height);
+}
+
void opp2_dpg_set_blank_color(
struct output_pixel_processor *opp,
const struct tg_color *color)
@@ -330,9 +361,9 @@ static struct opp_funcs dcn20_opp_funcs = {
.opp_program_stereo = opp1_program_stereo,
.opp_pipe_clock_control = opp1_pipe_clock_control,
.opp_set_disp_pattern_generator = opp2_set_disp_pattern_generator,
+ .opp_program_dpg_dimensions = opp2_program_dpg_dimensions,
.dpg_is_blanked = opp2_dpg_is_blanked,
.opp_dpg_set_blank_color = opp2_dpg_set_blank_color,
- .opp_convert_pti = NULL,
.opp_destroy = opp1_destroy,
.opp_program_left_edge_extra_pixel = opp2_program_left_edge_extra_pixel,
};
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_opp.h b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_opp.h
index abd8de9a78f8..3ab221bdd27d 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_opp.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_opp.h
@@ -36,6 +36,7 @@
#define OPP_DPG_REG_LIST(id) \
SRI(DPG_CONTROL, DPG, id), \
SRI(DPG_DIMENSIONS, DPG, id), \
+ SRI(DPG_OFFSET_SEGMENT, DPG, id), \
SRI(DPG_COLOUR_B_CB, DPG, id), \
SRI(DPG_COLOUR_G_Y, DPG, id), \
SRI(DPG_COLOUR_R_CR, DPG, id), \
@@ -53,6 +54,7 @@
uint32_t FMT_422_CONTROL; \
uint32_t DPG_CONTROL; \
uint32_t DPG_DIMENSIONS; \
+ uint32_t DPG_OFFSET_SEGMENT; \
uint32_t DPG_COLOUR_B_CB; \
uint32_t DPG_COLOUR_G_Y; \
uint32_t DPG_COLOUR_R_CR; \
@@ -68,6 +70,8 @@
OPP_SF(DPG0_DPG_CONTROL, DPG_HRES, mask_sh), \
OPP_SF(DPG0_DPG_DIMENSIONS, DPG_ACTIVE_WIDTH, mask_sh), \
OPP_SF(DPG0_DPG_DIMENSIONS, DPG_ACTIVE_HEIGHT, mask_sh), \
+ OPP_SF(DPG0_DPG_OFFSET_SEGMENT, DPG_X_OFFSET, mask_sh), \
+ OPP_SF(DPG0_DPG_OFFSET_SEGMENT, DPG_SEGMENT_WIDTH, mask_sh), \
OPP_SF(DPG0_DPG_COLOUR_R_CR, DPG_COLOUR0_R_CR, mask_sh), \
OPP_SF(DPG0_DPG_COLOUR_R_CR, DPG_COLOUR1_R_CR, mask_sh), \
OPP_SF(DPG0_DPG_COLOUR_B_CB, DPG_COLOUR0_B_CB, mask_sh), \
@@ -97,6 +101,8 @@
type DPG_HRES; \
type DPG_ACTIVE_WIDTH; \
type DPG_ACTIVE_HEIGHT; \
+ type DPG_X_OFFSET; \
+ type DPG_SEGMENT_WIDTH; \
type DPG_COLOUR0_R_CR; \
type DPG_COLOUR1_R_CR; \
type DPG_COLOUR0_B_CB; \
@@ -140,10 +146,16 @@ void dcn20_opp_construct(struct dcn20_opp *oppn20,
void opp2_set_disp_pattern_generator(
struct output_pixel_processor *opp,
enum controller_dp_test_pattern test_pattern,
+ enum controller_dp_color_space color_space,
enum dc_color_depth color_depth,
const struct tg_color *solid_color,
int width,
- int height);
+ int height,
+ int offset);
+
+void opp2_program_dpg_dimensions(
+ struct output_pixel_processor *opp,
+ int width, int height);
bool opp2_dpg_is_blanked(struct output_pixel_processor *opp);
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_optc.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_optc.c
index 26a66ccf6e72..0340fdd3f5fb 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_optc.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_optc.c
@@ -59,27 +59,17 @@ bool optc2_enable_crtc(struct timing_generator *optc)
REG_UPDATE(CONTROL,
VTG0_ENABLE, 1);
+ REG_SEQ_START();
+
/* Enable CRTC */
REG_UPDATE_2(OTG_CONTROL,
OTG_DISABLE_POINT_CNTL, 3,
OTG_MASTER_EN, 1);
- return true;
-}
-
-/**
- * DRR double buffering control to select buffer point
- * for V_TOTAL, H_TOTAL, VTOTAL_MIN, VTOTAL_MAX, VTOTAL_MIN_SEL and VTOTAL_MAX_SEL registers
- * Options: anytime, start of frame, dp start of frame (range timing)
- */
-void optc2_set_timing_db_mode(struct timing_generator *optc, bool enable)
-{
- struct optc *optc1 = DCN10TG_FROM_TG(optc);
-
- uint32_t blank_data_double_buffer_enable = enable ? 1 : 0;
+ REG_SEQ_SUBMIT();
+ REG_SEQ_WAIT_DONE();
- REG_UPDATE(OTG_DOUBLE_BUFFER_CONTROL,
- OTG_RANGE_TIMING_DBUF_UPDATE_MODE, blank_data_double_buffer_enable);
+ return true;
}
/**
@@ -105,46 +95,6 @@ void optc2_set_gsl(struct timing_generator *optc,
}
-/* Use the gsl allow flip as the master update lock */
-void optc2_use_gsl_as_master_update_lock(struct timing_generator *optc,
- const struct gsl_params *params)
-{
- struct optc *optc1 = DCN10TG_FROM_TG(optc);
-
- REG_UPDATE(OTG_GSL_CONTROL,
- OTG_MASTER_UPDATE_LOCK_GSL_EN, params->master_update_lock_gsl_en);
-}
-
-/* You can control the GSL timing by limiting GSL to a window (X,Y) */
-void optc2_set_gsl_window(struct timing_generator *optc,
- const struct gsl_params *params)
-{
- struct optc *optc1 = DCN10TG_FROM_TG(optc);
-
- REG_SET_2(OTG_GSL_WINDOW_X, 0,
- OTG_GSL_WINDOW_START_X, params->gsl_window_start_x,
- OTG_GSL_WINDOW_END_X, params->gsl_window_end_x);
- REG_SET_2(OTG_GSL_WINDOW_Y, 0,
- OTG_GSL_WINDOW_START_Y, params->gsl_window_start_y,
- OTG_GSL_WINDOW_END_Y, params->gsl_window_end_y);
-}
-
-/**
- * Vupdate keepout can be set to a window to block the update lock for that pipe from changing.
- * Start offset begins with vstartup and goes for x number of clocks,
- * end offset starts from end of vupdate to x number of clocks.
- */
-void optc2_set_vupdate_keepout(struct timing_generator *optc,
- const struct vupdate_keepout_params *params)
-{
- struct optc *optc1 = DCN10TG_FROM_TG(optc);
-
- REG_SET_3(OTG_VUPDATE_KEEPOUT, 0,
- MASTER_UPDATE_LOCK_VUPDATE_KEEPOUT_START_OFFSET, params->start_offset,
- MASTER_UPDATE_LOCK_VUPDATE_KEEPOUT_END_OFFSET, params->end_offset,
- OTG_MASTER_UPDATE_LOCK_VUPDATE_KEEPOUT_EN, params->enable);
-}
-
void optc2_set_gsl_source_select(
struct timing_generator *optc,
int group_idx,
@@ -167,19 +117,6 @@ void optc2_set_gsl_source_select(
}
}
-#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT
-/* DSC encoder frame start controls: x = h position, line_num = # of lines from vstartup */
-void optc2_set_dsc_encoder_frame_start(struct timing_generator *optc,
- int x_position,
- int line_num)
-{
- struct optc *optc1 = DCN10TG_FROM_TG(optc);
-
- REG_SET_2(OTG_DSC_START_POSITION, 0,
- OTG_DSC_START_POSITION_X, x_position,
- OTG_DSC_START_POSITION_LINE_NUM, line_num);
-}
-
/* Set DSC-related configuration.
* dsc_mode: 0 disables DSC, other values enable DSC in specified format
* sc_bytes_per_pixel: Bytes per pixel in u3.28 format
@@ -191,15 +128,6 @@ void optc2_set_dsc_config(struct timing_generator *optc,
uint32_t dsc_slice_width)
{
struct optc *optc1 = DCN10TG_FROM_TG(optc);
- uint32_t data_format = 0;
- /* skip if dsc mode is not changed */
- data_format = dm_read_reg(CTX, REG(OPTC_DATA_FORMAT_CONTROL));
-
- data_format = data_format & 0x30; /* bit5:4 */
- data_format = data_format >> 4;
-
- if (data_format == dsc_mode)
- return;
REG_UPDATE(OPTC_DATA_FORMAT_CONTROL,
OPTC_DSC_MODE, dsc_mode);
@@ -210,13 +138,25 @@ void optc2_set_dsc_config(struct timing_generator *optc,
REG_UPDATE(OPTC_WIDTH_CONTROL,
OPTC_DSC_SLICE_WIDTH, dsc_slice_width);
}
-#endif
-/**
- * PTI i think is already done somewhere else for 2ka
- * (opp?, please double check.
- * OPTC side only has 1 register to set for PTI_ENABLE)
+/* Get DSC-related configuration.
+ * dsc_mode: 0 disables DSC, other values enable DSC in specified format
*/
+void optc2_get_dsc_status(struct timing_generator *optc,
+ uint32_t *dsc_mode)
+{
+ struct optc *optc1 = DCN10TG_FROM_TG(optc);
+
+ REG_GET(OPTC_DATA_FORMAT_CONTROL,
+ OPTC_DSC_MODE, dsc_mode);
+}
+
+
+/* TEMP: need to figure out the inheritance model here. */
+bool optc2_is_two_pixels_per_containter(const struct dc_crtc_timing *timing)
+{
+ return optc1_is_two_pixels_per_containter(timing);
+}
void optc2_set_odm_bypass(struct timing_generator *optc,
const struct dc_crtc_timing *dc_crtc_timing)
@@ -224,27 +164,29 @@ void optc2_set_odm_bypass(struct timing_generator *optc,
struct optc *optc1 = DCN10TG_FROM_TG(optc);
uint32_t h_div_2 = 0;
- optc1->comb_opp_id = 0xf;
REG_SET_3(OPTC_DATA_SOURCE_SELECT, 0,
OPTC_NUM_OF_INPUT_SEGMENT, 0,
OPTC_SEG0_SRC_SEL, optc->inst,
OPTC_SEG1_SRC_SEL, 0xf);
REG_WRITE(OTG_H_TIMING_CNTL, 0);
- h_div_2 = optc1_is_two_pixels_per_containter(dc_crtc_timing);
+ h_div_2 = optc2_is_two_pixels_per_containter(dc_crtc_timing);
REG_UPDATE(OTG_H_TIMING_CNTL,
OTG_H_TIMING_DIV_BY2, h_div_2);
REG_SET(OPTC_MEMORY_CONFIG, 0,
OPTC_MEM_SEL, 0);
+ optc1->opp_count = 1;
}
-void optc2_set_odm_combine(struct timing_generator *optc, int combine_opp_id,
- int mpcc_hactive, enum dc_pixel_encoding pixel_encoding)
+void optc2_set_odm_combine(struct timing_generator *optc, int *opp_id, int opp_cnt,
+ struct dc_crtc_timing *timing)
{
struct optc *optc1 = DCN10TG_FROM_TG(optc);
- /* 2 pieces of memory required for up to 5120 displays, 4 for up to 8192 */
- int memory_mask = mpcc_hactive <= 2560 ? 0x3 : 0xf;
- uint32_t data_fmt = 0;
+ int mpcc_hactive = (timing->h_addressable + timing->h_border_left + timing->h_border_right)
+ / opp_cnt;
+ uint32_t memory_mask;
+
+ ASSERT(opp_cnt == 2);
/* TODO: In pseudocode but does not affect maximus, delete comment if we dont need on asic
* REG_SET(OTG_GLOBAL_CONTROL2, 0, GLOBAL_UPDATE_LOCK_EN, 1);
@@ -253,27 +195,28 @@ void optc2_set_odm_combine(struct timing_generator *optc, int combine_opp_id,
* MASTER_UPDATE_LOCK_DB_X, 160,
* MASTER_UPDATE_LOCK_DB_Y, 240);
*/
- if (REG(OPTC_MEMORY_CONFIG))
- REG_SET(OPTC_MEMORY_CONFIG, 0,
- OPTC_MEM_SEL, memory_mask << (optc->inst * 4));
- if (pixel_encoding == PIXEL_ENCODING_YCBCR422)
- data_fmt = 1;
- else if (pixel_encoding == PIXEL_ENCODING_YCBCR420)
- data_fmt = 2;
+	/* 2 pieces of memory are required for widths up to 5120 pixels, 4 for
+	 * widths up to 8192; for ODM combine we can simplify by always using 4.
+	 * To make sure there's no overlap, each OPP instance "reserves" 2
+	 * memories and the per-instance masks are combined here.
+	 */
+ memory_mask = 0x3 << (opp_id[0] * 2) | 0x3 << (opp_id[1] * 2);
- REG_UPDATE(OPTC_DATA_FORMAT_CONTROL, OPTC_DATA_FORMAT, data_fmt);
+ if (REG(OPTC_MEMORY_CONFIG))
+ REG_SET(OPTC_MEMORY_CONFIG, 0,
+ OPTC_MEM_SEL, memory_mask);
REG_SET_3(OPTC_DATA_SOURCE_SELECT, 0,
OPTC_NUM_OF_INPUT_SEGMENT, 1,
- OPTC_SEG0_SRC_SEL, optc->inst,
- OPTC_SEG1_SRC_SEL, combine_opp_id);
+ OPTC_SEG0_SRC_SEL, opp_id[0],
+ OPTC_SEG1_SRC_SEL, opp_id[1]);
REG_UPDATE(OPTC_WIDTH_CONTROL,
OPTC_SEGMENT_WIDTH, mpcc_hactive);
REG_SET(OTG_H_TIMING_CNTL, 0, OTG_H_TIMING_DIV_BY2, 1);
- optc1->comb_opp_id = combine_opp_id;
+ optc1->opp_count = opp_cnt;
}
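
Note: each OPP instance reserves two of the OPTC memory pieces, and OR-ing the per-instance masks guarantees the reservations never overlap. A sketch of the same rule factored into a helper (the helper name is illustrative, and the patch itself only supports opp_cnt == 2):

    /* Sketch only: reservation rule generalized to opp_cnt entries. */
    static uint32_t odm_memory_mask(const int *opp_id, int opp_cnt)
    {
            uint32_t mask = 0;
            int i;

            /* each OPP instance reserves two of the OPTC memories */
            for (i = 0; i < opp_cnt; i++)
                    mask |= 0x3 << (opp_id[i] * 2);

            return mask;    /* e.g. opp_id = {1, 2} -> 0x0c | 0x30 = 0x3c */
    }
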
void optc2_get_optc_source(struct timing_generator *optc,
@@ -293,10 +236,14 @@ void optc2_get_optc_source(struct timing_generator *optc,
*num_of_src_opp = 2;
else
*num_of_src_opp = 1;
+
+ /* Work around VBIOS not updating OPTC_NUM_OF_INPUT_SEGMENT */
+ if (*src_opp_id_1 == 0xf)
+ *num_of_src_opp = 1;
}
-void optc2_set_dwb_source(struct timing_generator *optc,
- uint32_t dwb_pipe_inst)
+static void optc2_set_dwb_source(struct timing_generator *optc,
+ uint32_t dwb_pipe_inst)
{
struct optc *optc1 = DCN10TG_FROM_TG(optc);
@@ -308,94 +255,158 @@ void optc2_set_dwb_source(struct timing_generator *optc,
OPTC_DWB1_SOURCE_SELECT, optc->inst);
}
-void optc2_triplebuffer_lock(struct timing_generator *optc)
+static void optc2_align_vblanks(
+ struct timing_generator *optc_master,
+ struct timing_generator *optc_slave,
+ uint32_t master_pixel_clock_100Hz,
+ uint32_t slave_pixel_clock_100Hz,
+ uint8_t master_clock_divider,
+ uint8_t slave_clock_divider)
{
- struct optc *optc1 = DCN10TG_FROM_TG(optc);
-
+ /* accessing slave OTG registers */
+ struct optc *optc1 = DCN10TG_FROM_TG(optc_slave);
+
+ uint32_t master_v_active = 0;
+ uint32_t master_h_total = 0;
+ uint32_t slave_h_total = 0;
+ uint64_t L, XY;
+ uint32_t X, Y, p = 10000;
+ uint32_t master_update_lock;
+
+ /* disable slave OTG */
+ REG_UPDATE(OTG_CONTROL, OTG_MASTER_EN, 0);
+ /* wait until disabled */
+ REG_WAIT(OTG_CONTROL,
+ OTG_CURRENT_MASTER_EN_STATE,
+ 0, 10, 5000);
+
+ REG_GET(OTG_H_TOTAL, OTG_H_TOTAL, &slave_h_total);
+
+ /* assign slave OTG to be controlled by master update lock */
REG_SET(OTG_GLOBAL_CONTROL0, 0,
- OTG_MASTER_UPDATE_LOCK_SEL, optc->inst);
+ OTG_MASTER_UPDATE_LOCK_SEL, optc_master->inst);
- REG_SET(OTG_VUPDATE_KEEPOUT, 0,
- OTG_MASTER_UPDATE_LOCK_VUPDATE_KEEPOUT_EN, 1);
+ /* accessing master OTG registers */
+ optc1 = DCN10TG_FROM_TG(optc_master);
+ /* saving update lock state, not sure if it's needed */
+ REG_GET(OTG_MASTER_UPDATE_LOCK,
+ OTG_MASTER_UPDATE_LOCK, &master_update_lock);
+ /* unlocking master OTG */
REG_SET(OTG_MASTER_UPDATE_LOCK, 0,
- OTG_MASTER_UPDATE_LOCK, 1);
+ OTG_MASTER_UPDATE_LOCK, 0);
+
+ REG_GET(OTG_V_BLANK_START_END,
+ OTG_V_BLANK_START, &master_v_active);
+ REG_GET(OTG_H_TOTAL, OTG_H_TOTAL, &master_h_total);
+
+ /* calculate when to enable slave OTG */
+ L = (uint64_t)p * slave_h_total * master_pixel_clock_100Hz;
+ L = div_u64(L, master_h_total);
+ L = div_u64(L, slave_pixel_clock_100Hz);
+ XY = div_u64(L, p);
+ Y = master_v_active - XY - 1;
+ X = div_u64(((XY + 1) * p - L) * master_h_total, p * master_clock_divider);
+
+ /*
+ * set master OTG to unlock when V/H
+ * counters reach calculated values
+ */
+ REG_UPDATE(OTG_GLOBAL_CONTROL1,
+ MASTER_UPDATE_LOCK_DB_EN, 1);
+ REG_UPDATE_2(OTG_GLOBAL_CONTROL1,
+ MASTER_UPDATE_LOCK_DB_X,
+ X,
+ MASTER_UPDATE_LOCK_DB_Y,
+ Y);
- if (optc->ctx->dce_environment != DCE_ENV_FPGA_MAXIMUS)
- REG_WAIT(OTG_MASTER_UPDATE_LOCK,
- UPDATE_LOCK_STATUS, 1,
- 1, 10);
-}
+ /* lock master OTG */
+ REG_SET(OTG_MASTER_UPDATE_LOCK, 0,
+ OTG_MASTER_UPDATE_LOCK, 1);
+ REG_WAIT(OTG_MASTER_UPDATE_LOCK,
+ UPDATE_LOCK_STATUS, 1, 1, 10);
-void optc2_triplebuffer_unlock(struct timing_generator *optc)
-{
- struct optc *optc1 = DCN10TG_FROM_TG(optc);
+ /* accessing slave OTG registers */
+ optc1 = DCN10TG_FROM_TG(optc_slave);
- REG_SET(OTG_MASTER_UPDATE_LOCK, 0,
- OTG_MASTER_UPDATE_LOCK, 0);
+ /*
+ * enable slave OTG, the OTG is locked with
+ * master's update lock, so it will not run
+ */
+ REG_UPDATE(OTG_CONTROL,
+ OTG_MASTER_EN, 1);
- REG_SET(OTG_VUPDATE_KEEPOUT, 0,
- OTG_MASTER_UPDATE_LOCK_VUPDATE_KEEPOUT_EN, 0);
+ /* accessing master OTG registers */
+ optc1 = DCN10TG_FROM_TG(optc_master);
-}
+ /*
+ * unlock master OTG. When master H/V counters reach
+ * DB_XY point, slave OTG will start
+ */
+ REG_SET(OTG_MASTER_UPDATE_LOCK, 0,
+ OTG_MASTER_UPDATE_LOCK, 0);
+ /* accessing slave OTG registers */
+ optc1 = DCN10TG_FROM_TG(optc_slave);
-void optc2_setup_global_lock(struct timing_generator *optc)
-{
- struct optc *optc1 = DCN10TG_FROM_TG(optc);
- uint32_t v_blank_start = 0;
- uint32_t h_blank_start = 0, h_total = 0;
+	/* wait for slave OTG to start running */
+ REG_WAIT(OTG_CONTROL,
+ OTG_CURRENT_MASTER_EN_STATE,
+ 1, 10, 5000);
- REG_SET(OTG_GLOBAL_CONTROL1, 0, MASTER_UPDATE_LOCK_DB_EN, 1);
+ /* accessing master OTG registers */
+ optc1 = DCN10TG_FROM_TG(optc_master);
- REG_SET(OTG_GLOBAL_CONTROL2, 0, DIG_UPDATE_LOCATION, 20);
+	/* disable the XY unlock point */
+ REG_UPDATE(OTG_GLOBAL_CONTROL1,
+ MASTER_UPDATE_LOCK_DB_EN, 0);
+ REG_UPDATE_2(OTG_GLOBAL_CONTROL1,
+ MASTER_UPDATE_LOCK_DB_X,
+ 0,
+ MASTER_UPDATE_LOCK_DB_Y,
+ 0);
- REG_GET(OTG_V_BLANK_START_END, OTG_V_BLANK_START, &v_blank_start);
+	/* restore master update lock */
+ REG_SET(OTG_MASTER_UPDATE_LOCK, 0,
+ OTG_MASTER_UPDATE_LOCK, master_update_lock);
- REG_GET(OTG_H_BLANK_START_END, OTG_H_BLANK_START, &h_blank_start);
+ /* accessing slave OTG registers */
+ optc1 = DCN10TG_FROM_TG(optc_slave);
+	/* restore slave to be controlled by its own lock select */
+ REG_SET(OTG_GLOBAL_CONTROL0, 0,
+ OTG_MASTER_UPDATE_LOCK_SEL, optc_slave->inst);
- REG_GET(OTG_H_TOTAL, OTG_H_TOTAL, &h_total);
- REG_UPDATE_2(OTG_GLOBAL_CONTROL1,
- MASTER_UPDATE_LOCK_DB_X,
- h_blank_start - 200 - 1,
- MASTER_UPDATE_LOCK_DB_Y,
- v_blank_start - 1);
}
-void optc2_lock_global(struct timing_generator *optc)
+void optc2_triplebuffer_lock(struct timing_generator *optc)
{
struct optc *optc1 = DCN10TG_FROM_TG(optc);
- REG_UPDATE(OTG_GLOBAL_CONTROL2, GLOBAL_UPDATE_LOCK_EN, 1);
-
REG_SET(OTG_GLOBAL_CONTROL0, 0,
- OTG_MASTER_UPDATE_LOCK_SEL, optc->inst);
+ OTG_MASTER_UPDATE_LOCK_SEL, optc->inst);
+
+ REG_SET(OTG_VUPDATE_KEEPOUT, 0,
+ OTG_MASTER_UPDATE_LOCK_VUPDATE_KEEPOUT_EN, 1);
+
REG_SET(OTG_MASTER_UPDATE_LOCK, 0,
- OTG_MASTER_UPDATE_LOCK, 1);
+ OTG_MASTER_UPDATE_LOCK, 1);
- /* Should be fast, status does not update on maximus */
if (optc->ctx->dce_environment != DCE_ENV_FPGA_MAXIMUS)
REG_WAIT(OTG_MASTER_UPDATE_LOCK,
UPDATE_LOCK_STATUS, 1,
1, 10);
}
-void optc2_lock(struct timing_generator *optc)
+void optc2_triplebuffer_unlock(struct timing_generator *optc)
{
struct optc *optc1 = DCN10TG_FROM_TG(optc);
- REG_UPDATE(OTG_GLOBAL_CONTROL2, GLOBAL_UPDATE_LOCK_EN, 0);
-
- REG_SET(OTG_GLOBAL_CONTROL0, 0,
- OTG_MASTER_UPDATE_LOCK_SEL, optc->inst);
REG_SET(OTG_MASTER_UPDATE_LOCK, 0,
- OTG_MASTER_UPDATE_LOCK, 1);
+ OTG_MASTER_UPDATE_LOCK, 0);
+
+ REG_SET(OTG_VUPDATE_KEEPOUT, 0,
+ OTG_MASTER_UPDATE_LOCK_VUPDATE_KEEPOUT_EN, 0);
- /* Should be fast, status does not update on maximus */
- if (optc->ctx->dce_environment != DCE_ENV_FPGA_MAXIMUS)
- REG_WAIT(OTG_MASTER_UPDATE_LOCK,
- UPDATE_LOCK_STATUS, 1,
- 1, 10);
}
void optc2_lock_doublebuffer_enable(struct timing_generator *optc)
@@ -415,9 +426,14 @@ void optc2_lock_doublebuffer_enable(struct timing_generator *optc)
REG_UPDATE_2(OTG_GLOBAL_CONTROL1,
MASTER_UPDATE_LOCK_DB_X,
- h_blank_start - 200 - 1,
+ (h_blank_start - 200 - 1) / optc1->opp_count,
MASTER_UPDATE_LOCK_DB_Y,
v_blank_start - 1);
+
+ REG_SET_3(OTG_VUPDATE_KEEPOUT, 0,
+ MASTER_UPDATE_LOCK_VUPDATE_KEEPOUT_START_OFFSET, 0,
+ MASTER_UPDATE_LOCK_VUPDATE_KEEPOUT_END_OFFSET, 100,
+ OTG_MASTER_UPDATE_LOCK_VUPDATE_KEEPOUT_EN, 1);
}
void optc2_lock_doublebuffer_disable(struct timing_generator *optc)
@@ -440,14 +456,8 @@ void optc2_setup_manual_trigger(struct timing_generator *optc)
{
struct optc *optc1 = DCN10TG_FROM_TG(optc);
- REG_SET(OTG_MANUAL_FLOW_CONTROL, 0,
- MANUAL_FLOW_CONTROL, 1);
-
- REG_SET(OTG_GLOBAL_CONTROL2, 0,
- MANUAL_FLOW_CONTROL_SEL, optc->inst);
-
REG_SET_8(OTG_TRIGA_CNTL, 0,
- OTG_TRIGA_SOURCE_SELECT, 22,
+ OTG_TRIGA_SOURCE_SELECT, 21,
OTG_TRIGA_SOURCE_PIPE_SELECT, optc->inst,
OTG_TRIGA_RISING_EDGE_DETECT_CNTL, 1,
OTG_TRIGA_FALLING_EDGE_DETECT_CNTL, 0,
@@ -465,6 +475,26 @@ void optc2_program_manual_trigger(struct timing_generator *optc)
OTG_TRIGA_MANUAL_TRIG, 1);
}
+bool optc2_configure_crc(struct timing_generator *optc,
+ const struct crc_params *params)
+{
+ struct optc *optc1 = DCN10TG_FROM_TG(optc);
+
+ REG_SET_2(OTG_CRC_CNTL2, 0,
+ OTG_CRC_DSC_MODE, params->dsc_mode,
+ OTG_CRC_DATA_STREAM_COMBINE_MODE, params->odm_mode);
+
+ return optc1_configure_crc(optc, params);
+}
+
+
+void optc2_get_last_used_drr_vtotal(struct timing_generator *optc, uint32_t *refresh_rate)
+{
+ struct optc *optc1 = DCN10TG_FROM_TG(optc);
+
+ REG_GET(OTG_DRR_CONTROL, OTG_V_TOTAL_LAST_USED_BY_DRR, refresh_rate);
+}
+
static struct timing_generator_funcs dcn20_tg_funcs = {
.validate_timing = optc1_validate_timing,
.program_timing = optc1_program_timing,
@@ -492,14 +522,13 @@ static struct timing_generator_funcs dcn20_tg_funcs = {
.triplebuffer_lock = optc2_triplebuffer_lock,
.triplebuffer_unlock = optc2_triplebuffer_unlock,
.disable_reset_trigger = optc1_disable_reset_trigger,
- .lock = optc2_lock,
+ .lock = optc1_lock,
.unlock = optc1_unlock,
- .lock_global = optc2_lock_global,
- .setup_global_lock = optc2_setup_global_lock,
.lock_doublebuffer_enable = optc2_lock_doublebuffer_enable,
.lock_doublebuffer_disable = optc2_lock_doublebuffer_disable,
.enable_optc_clock = optc1_enable_optc_clock,
.set_drr = optc1_set_drr,
+ .get_last_used_drr_vtotal = optc2_get_last_used_drr_vtotal,
.set_static_screen_control = optc1_set_static_screen_control,
.program_stereo = optc1_program_stereo,
.is_stereo_left_eye = optc1_is_stereo_left_eye,
@@ -510,10 +539,9 @@ static struct timing_generator_funcs dcn20_tg_funcs = {
.clear_optc_underflow = optc1_clear_optc_underflow,
.setup_global_swap_lock = NULL,
.get_crc = optc1_get_crc,
- .configure_crc = optc1_configure_crc,
-#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT
+ .configure_crc = optc2_configure_crc,
.set_dsc_config = optc2_set_dsc_config,
-#endif
+ .get_dsc_status = optc2_get_dsc_status,
.set_dwb_source = optc2_set_dwb_source,
.set_odm_bypass = optc2_set_odm_bypass,
.set_odm_combine = optc2_set_odm_combine,
@@ -522,7 +550,9 @@ static struct timing_generator_funcs dcn20_tg_funcs = {
.set_gsl_source_select = optc2_set_gsl_source_select,
.set_vtg_params = optc1_set_vtg_params,
.program_manual_trigger = optc2_program_manual_trigger,
- .setup_manual_trigger = optc2_setup_manual_trigger
+ .setup_manual_trigger = optc2_setup_manual_trigger,
+ .get_hw_timing = optc1_get_hw_timing,
+ .align_vblanks = optc2_align_vblanks,
};
void dcn20_timing_generator_init(struct optc *optc1)
@@ -535,8 +565,6 @@ void dcn20_timing_generator_init(struct optc *optc1)
optc1->min_h_blank = 32;
optc1->min_v_blank = 3;
optc1->min_v_blank_interlace = 5;
- optc1->min_h_sync_width = 8;
+	optc1->min_h_sync_width = 4; /* HW originally asked for a minimum HSYNC of 8 pixels without a real justification; the Oculus Rift S (hsync width 6) will not light up with 8, so lower the minimum to 4. */
optc1->min_v_sync_width = 1;
- optc1->comb_opp_id = 0xf;
}
-
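
Note: optc2_align_vblanks() disables the slave OTG, then re-enables it under the master's update lock and releases that lock at a computed (DB_X, DB_Y) point, so the slave starts roughly one slave line time before the master reaches V_BLANK_START; the fractional part of that line time becomes a horizontal pixel offset on the master timing. A sketch of the same p = 10000 fixed-point arithmetic (the helper name, stdint types and plain 64-bit division are illustrative, not part of the patch):

    #include <stdint.h>

    /* Sketch only: turn one slave line time into a (DB_X, DB_Y) unlock
     * point expressed in the master OTG's timing.
     */
    static void compute_unlock_point(uint32_t master_h_total,
                                     uint32_t master_v_active,
                                     uint32_t slave_h_total,
                                     uint32_t master_pixel_clock_100hz,
                                     uint32_t slave_pixel_clock_100hz,
                                     uint8_t master_clock_divider,
                                     uint32_t *db_x, uint32_t *db_y)
    {
            const uint64_t p = 10000;       /* sub-line precision scale factor */
            uint64_t l, xy;

            /* one slave line, measured in master lines and scaled by p */
            l = p * slave_h_total * master_pixel_clock_100hz;
            l /= master_h_total;
            l /= slave_pixel_clock_100hz;

            xy = l / p;                     /* whole master lines */
            *db_y = master_v_active - (uint32_t)xy - 1;
            /* fractional remainder of the line, converted to master pixels */
            *db_x = (uint32_t)(((xy + 1) * p - l) * master_h_total /
                               (p * master_clock_divider));
    }
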
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_optc.h b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_optc.h
index ebf07c582da2..f7968b9ca16e 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_optc.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_optc.h
@@ -36,12 +36,14 @@
SRI(OTG_GSL_WINDOW_Y, OTG, inst),\
SRI(OTG_VUPDATE_KEEPOUT, OTG, inst),\
SRI(OTG_DSC_START_POSITION, OTG, inst),\
+ SRI(OTG_CRC_CNTL2, OTG, inst),\
SRI(OPTC_DATA_FORMAT_CONTROL, ODM, inst),\
SRI(OPTC_BYTES_PER_PIXEL, ODM, inst),\
SRI(OPTC_WIDTH_CONTROL, ODM, inst),\
SRI(OPTC_MEMORY_CONFIG, ODM, inst),\
SR(DWB_SOURCE_SELECT),\
- SRI(OTG_MANUAL_FLOW_CONTROL, OTG, inst)
+ SRI(OTG_MANUAL_FLOW_CONTROL, OTG, inst), \
+ SRI(OTG_DRR_CONTROL, OTG, inst)
#define TG_COMMON_MASK_SH_LIST_DCN2_0(mask_sh)\
TG_COMMON_MASK_SH_LIST_DCN(mask_sh),\
@@ -55,13 +57,14 @@
SF(OTG0_OTG_GSL_WINDOW_X, OTG_GSL_WINDOW_END_X, mask_sh), \
SF(OTG0_OTG_GSL_WINDOW_Y, OTG_GSL_WINDOW_START_Y, mask_sh),\
SF(OTG0_OTG_GSL_WINDOW_Y, OTG_GSL_WINDOW_END_Y, mask_sh),\
- SF(OTG0_OTG_VUPDATE_KEEPOUT, OTG_MASTER_UPDATE_LOCK_VUPDATE_KEEPOUT_EN, mask_sh), \
- SF(OTG0_OTG_VUPDATE_KEEPOUT, MASTER_UPDATE_LOCK_VUPDATE_KEEPOUT_START_OFFSET, mask_sh), \
- SF(OTG0_OTG_VUPDATE_KEEPOUT, MASTER_UPDATE_LOCK_VUPDATE_KEEPOUT_END_OFFSET, mask_sh), \
SF(OTG0_OTG_GSL_CONTROL, OTG_GSL_MASTER_MODE, mask_sh), \
SF(OTG0_OTG_GSL_CONTROL, OTG_MASTER_UPDATE_LOCK_GSL_EN, mask_sh), \
SF(OTG0_OTG_DSC_START_POSITION, OTG_DSC_START_POSITION_X, mask_sh), \
SF(OTG0_OTG_DSC_START_POSITION, OTG_DSC_START_POSITION_LINE_NUM, mask_sh),\
+ SF(OTG0_OTG_CRC_CNTL2, OTG_CRC_DSC_MODE, mask_sh),\
+ SF(OTG0_OTG_CRC_CNTL2, OTG_CRC_DATA_STREAM_COMBINE_MODE, mask_sh),\
+ SF(OTG0_OTG_CRC_CNTL2, OTG_CRC_DATA_STREAM_SPLIT_MODE, mask_sh),\
+ SF(OTG0_OTG_CRC_CNTL2, OTG_CRC_DATA_FORMAT, mask_sh),\
SF(ODM0_OPTC_DATA_SOURCE_SELECT, OPTC_SEG0_SRC_SEL, mask_sh),\
SF(ODM0_OPTC_DATA_SOURCE_SELECT, OPTC_SEG1_SRC_SEL, mask_sh),\
SF(ODM0_OPTC_DATA_SOURCE_SELECT, OPTC_NUM_OF_INPUT_SEGMENT, mask_sh),\
@@ -73,10 +76,14 @@
SF(ODM0_OPTC_WIDTH_CONTROL, OPTC_SEGMENT_WIDTH, mask_sh),\
SF(DWB_SOURCE_SELECT, OPTC_DWB0_SOURCE_SELECT, mask_sh),\
SF(DWB_SOURCE_SELECT, OPTC_DWB1_SOURCE_SELECT, mask_sh),\
- SF(OTG0_OTG_MANUAL_FLOW_CONTROL, MANUAL_FLOW_CONTROL, mask_sh)
+ SF(OTG0_OTG_MANUAL_FLOW_CONTROL, MANUAL_FLOW_CONTROL, mask_sh), \
+ SF(OTG0_OTG_DRR_CONTROL, OTG_V_TOTAL_LAST_USED_BY_DRR, mask_sh)
void dcn20_timing_generator_init(struct optc *optc);
+void optc2_get_last_used_drr_vtotal(struct timing_generator *optc,
+ uint32_t *refresh_rate);
+
bool optc2_enable_crtc(struct timing_generator *optc);
void optc2_set_gsl(struct timing_generator *optc,
@@ -86,18 +93,19 @@ void optc2_set_gsl_source_select(struct timing_generator *optc,
int group_idx,
uint32_t gsl_ready_signal);
-#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT
void optc2_set_dsc_config(struct timing_generator *optc,
enum optc_dsc_mode dsc_mode,
uint32_t dsc_bytes_per_pixel,
uint32_t dsc_slice_width);
-#endif
+
+void optc2_get_dsc_status(struct timing_generator *optc,
+ uint32_t *dsc_mode);
void optc2_set_odm_bypass(struct timing_generator *optc,
const struct dc_crtc_timing *dc_crtc_timing);
-void optc2_set_odm_combine(struct timing_generator *optc, int combine_opp_id,
- int mpcc_hactive, enum dc_pixel_encoding pixel_encoding);
+void optc2_set_odm_combine(struct timing_generator *optc, int *opp_id, int opp_cnt,
+ struct dc_crtc_timing *timing);
void optc2_get_optc_source(struct timing_generator *optc,
uint32_t *num_of_src_opp,
@@ -106,11 +114,11 @@ void optc2_get_optc_source(struct timing_generator *optc,
void optc2_triplebuffer_lock(struct timing_generator *optc);
void optc2_triplebuffer_unlock(struct timing_generator *optc);
-void optc2_lock(struct timing_generator *optc);
-void optc2_lock_global(struct timing_generator *optc);
-void optc2_setup_global_lock(struct timing_generator *optc);
void optc2_lock_doublebuffer_disable(struct timing_generator *optc);
void optc2_lock_doublebuffer_enable(struct timing_generator *optc);
+void optc2_setup_manual_trigger(struct timing_generator *optc);
void optc2_program_manual_trigger(struct timing_generator *optc);
-
+bool optc2_is_two_pixels_per_containter(const struct dc_crtc_timing *timing);
+bool optc2_configure_crc(struct timing_generator *optc,
+ const struct crc_params *params);
#endif /* __DC_OPTC_DCN20_H__ */
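
Note: optc2_configure_crc() first programs OTG_CRC_CNTL2 with the DSC and ODM combine modes and then defers to the DCN1.0 path for the window and enable programming. A minimal, hypothetical caller sketch; 'tg' and the mode values are assumptions, and the window/enable fields handled by optc1_configure_crc() are left out:

    /* Sketch only: request CRC capture on a DSC-compressed, ODM-combined stream. */
    struct crc_params params = { 0 };

    params.dsc_mode = 1;    /* CRC taken on the compressed stream */
    params.odm_mode = 1;    /* two OPP segments combined before CRC */

    if (tg->funcs->configure_crc)
            tg->funcs->configure_crc(tg, &params);
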
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c
index d200bc3cec71..7802d603f796 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c
@@ -1,5 +1,6 @@
/*
* Copyright 2016 Advanced Micro Devices, Inc.
+ * Copyright 2019 Raptor Engineering, LLC
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
@@ -28,10 +29,14 @@
#include "dm_services.h"
#include "dc.h"
+#include "dcn20_init.h"
+
#include "resource.h"
#include "include/irq_service_interface.h"
#include "dcn20/dcn20_resource.h"
+#include "dml/dcn20/dcn20_fpu.h"
+
#include "dcn10/dcn10_hubp.h"
#include "dcn10/dcn10_ipp.h"
#include "dcn20_hubbub.h"
@@ -45,9 +50,7 @@
#include "dcn10/dcn10_resource.h"
#include "dcn20_opp.h"
-#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT
#include "dcn20_dsc.h"
-#endif
#include "dcn20_link_encoder.h"
#include "dcn20_stream_encoder.h"
@@ -59,11 +62,15 @@
#include "dml/display_mode_vba.h"
#include "dcn20_dccg.h"
#include "dcn20_vmid.h"
+#include "dc_link_ddc.h"
+#include "dce/dce_panel_cntl.h"
#include "navi10_ip_offset.h"
#include "dcn/dcn_2_0_0_offset.h"
#include "dcn/dcn_2_0_0_sh_mask.h"
+#include "dpcs/dpcs_2_0_0_offset.h"
+#include "dpcs/dpcs_2_0_0_sh_mask.h"
#include "nbio/nbio_2_3_offset.h"
@@ -79,86 +86,12 @@
#include "dce/dce_aux.h"
#include "dce/dce_i2c.h"
#include "vm_helper.h"
+#include "link_enc_cfg.h"
#include "amdgpu_socbb.h"
-#define SOC_BOUNDING_BOX_VALID false
#define DC_LOGGER_INIT(logger)
-struct _vcs_dpi_ip_params_st dcn2_0_ip = {
- .odm_capable = 1,
- .gpuvm_enable = 0,
- .hostvm_enable = 0,
- .gpuvm_max_page_table_levels = 4,
- .hostvm_max_page_table_levels = 4,
- .hostvm_cached_page_table_levels = 0,
- .pte_group_size_bytes = 2048,
-#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT
- .num_dsc = 6,
-#else
- .num_dsc = 0,
-#endif
- .rob_buffer_size_kbytes = 168,
- .det_buffer_size_kbytes = 164,
- .dpte_buffer_size_in_pte_reqs_luma = 84,
- .pde_proc_buffer_size_64k_reqs = 48,
- .dpp_output_buffer_pixels = 2560,
- .opp_output_buffer_lines = 1,
- .pixel_chunk_size_kbytes = 8,
- .pte_chunk_size_kbytes = 2,
- .meta_chunk_size_kbytes = 2,
- .writeback_chunk_size_kbytes = 2,
- .line_buffer_size_bits = 789504,
- .is_line_buffer_bpp_fixed = 0,
- .line_buffer_fixed_bpp = 0,
- .dcc_supported = true,
- .max_line_buffer_lines = 12,
- .writeback_luma_buffer_size_kbytes = 12,
- .writeback_chroma_buffer_size_kbytes = 8,
- .writeback_chroma_line_buffer_width_pixels = 4,
- .writeback_max_hscl_ratio = 1,
- .writeback_max_vscl_ratio = 1,
- .writeback_min_hscl_ratio = 1,
- .writeback_min_vscl_ratio = 1,
- .writeback_max_hscl_taps = 12,
- .writeback_max_vscl_taps = 12,
- .writeback_line_buffer_luma_buffer_size = 0,
- .writeback_line_buffer_chroma_buffer_size = 14643,
- .cursor_buffer_size = 8,
- .cursor_chunk_size = 2,
- .max_num_otg = 6,
- .max_num_dpp = 6,
- .max_num_wb = 1,
- .max_dchub_pscl_bw_pix_per_clk = 4,
- .max_pscl_lb_bw_pix_per_clk = 2,
- .max_lb_vscl_bw_pix_per_clk = 4,
- .max_vscl_hscl_bw_pix_per_clk = 4,
- .max_hscl_ratio = 8,
- .max_vscl_ratio = 8,
- .hscl_mults = 4,
- .vscl_mults = 4,
- .max_hscl_taps = 8,
- .max_vscl_taps = 8,
- .dispclk_ramp_margin_percent = 1,
- .underscan_factor = 1.10,
- .min_vblank_lines = 32, //
- .dppclk_delay_subtotal = 77, //
- .dppclk_delay_scl_lb_only = 16,
- .dppclk_delay_scl = 50,
- .dppclk_delay_cnvc_formatter = 8,
- .dppclk_delay_cnvc_cursor = 6,
- .dispclk_delay_subtotal = 87, //
- .dcfclk_cstate_latency = 10, // SRExitTime
- .max_inter_dcn_tile_repeaters = 8,
-
- .xfc_supported = true,
- .xfc_fill_bw_overhead_percent = 10.0,
- .xfc_fill_constant_bytes = 0,
-};
-
-struct _vcs_dpi_soc_bounding_box_st dcn2_0_soc = { 0 };
-
-
#ifndef mmDP0_DP_DPHY_INTERNAL_CTRL
#define mmDP0_DP_DPHY_INTERNAL_CTRL 0x210f
#define mmDP0_DP_DPHY_INTERNAL_CTRL_BASE_IDX 2
@@ -217,6 +150,10 @@ enum dcn20_clk_src_array_id {
.block ## _ ## reg_name[id] = BASE(mm ## block ## id ## _ ## reg_name ## _BASE_IDX) + \
mm ## block ## id ## _ ## reg_name
+#define VUPDATE_SRII(reg_name, block, id)\
+ .reg_name[id] = BASE(mm ## reg_name ## _ ## block ## id ## _BASE_IDX) + \
+ mm ## reg_name ## _ ## block ## id
+
/* NBIO */
#define NBIO_BASE_INNER(seg) \
NBIO_BASE__INST0_SEG ## seg
@@ -314,7 +251,7 @@ static const struct dce_audio_shift audio_shift = {
DCE120_AUD_COMMON_MASK_SH_LIST(__SHIFT)
};
-static const struct dce_aduio_mask audio_mask = {
+static const struct dce_audio_mask audio_mask = {
DCE120_AUD_COMMON_MASK_SH_LIST(_MASK)
};
@@ -373,6 +310,7 @@ static const struct dcn10_link_enc_hpd_registers link_enc_hpd_regs[] = {
[id] = {\
LE_DCN10_REG_LIST(id), \
UNIPHY_DCN2_REG_LIST(phyid), \
+ DPCS_DCN2_REG_LIST(id), \
SRI(DP_DPHY_INTERNAL_CTRL, DP, id) \
}
@@ -386,11 +324,25 @@ static const struct dcn10_link_enc_registers link_enc_regs[] = {
};
static const struct dcn10_link_enc_shift le_shift = {
- LINK_ENCODER_MASK_SH_LIST_DCN20(__SHIFT)
+ LINK_ENCODER_MASK_SH_LIST_DCN20(__SHIFT),\
+ DPCS_DCN2_MASK_SH_LIST(__SHIFT)
};
static const struct dcn10_link_enc_mask le_mask = {
- LINK_ENCODER_MASK_SH_LIST_DCN20(_MASK)
+ LINK_ENCODER_MASK_SH_LIST_DCN20(_MASK),\
+ DPCS_DCN2_MASK_SH_LIST(_MASK)
+};
+
+static const struct dce_panel_cntl_registers panel_cntl_regs[] = {
+ { DCN_PANEL_CNTL_REG_LIST() }
+};
+
+static const struct dce_panel_cntl_shift panel_cntl_shift = {
+ DCE_PANEL_CNTL_MASK_SH_LIST(__SHIFT)
+};
+
+static const struct dce_panel_cntl_mask panel_cntl_mask = {
+ DCE_PANEL_CNTL_MASK_SH_LIST(_MASK)
};
#define ipp_regs(id)\
@@ -457,6 +409,7 @@ static const struct dce110_aux_registers aux_engine_regs[] = {
#define tf_regs(id)\
[id] = {\
TF_REG_LIST_DCN20(id),\
+ TF_REG_LIST_DCN20_COMMON_APPEND(id),\
}
static const struct dcn2_dpp_registers tf_regs[] = {
@@ -469,11 +422,13 @@ static const struct dcn2_dpp_registers tf_regs[] = {
};
static const struct dcn2_dpp_shift tf_shift = {
- TF_REG_LIST_SH_MASK_DCN20(__SHIFT)
+ TF_REG_LIST_SH_MASK_DCN20(__SHIFT),
+ TF_DEBUG_REG_LIST_SH_DCN20
};
static const struct dcn2_dpp_mask tf_mask = {
- TF_REG_LIST_SH_MASK_DCN20(_MASK)
+ TF_REG_LIST_SH_MASK_DCN20(_MASK),
+ TF_DEBUG_REG_LIST_MASK_DCN20
};
#define dwbc_regs_dcn2(id)\
@@ -523,14 +478,17 @@ static const struct dcn20_mpc_registers mpc_regs = {
MPC_OUT_MUX_REG_LIST_DCN2_0(3),
MPC_OUT_MUX_REG_LIST_DCN2_0(4),
MPC_OUT_MUX_REG_LIST_DCN2_0(5),
+ MPC_DBG_REG_LIST_DCN2_0()
};
static const struct dcn20_mpc_shift mpc_shift = {
- MPC_COMMON_MASK_SH_LIST_DCN2_0(__SHIFT)
+ MPC_COMMON_MASK_SH_LIST_DCN2_0(__SHIFT),
+ MPC_DEBUG_REG_LIST_SH_DCN20
};
static const struct dcn20_mpc_mask mpc_mask = {
- MPC_COMMON_MASK_SH_LIST_DCN2_0(_MASK)
+ MPC_COMMON_MASK_SH_LIST_DCN2_0(_MASK),
+ MPC_DEBUG_REG_LIST_MASK_DCN20
};
#define tg_regs(id)\
@@ -620,7 +578,42 @@ static const struct dcn20_vmid_mask vmid_masks = {
DCN20_VMID_MASK_SH_LIST(_MASK)
};
-#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT
+static const struct dce110_aux_registers_shift aux_shift = {
+ DCN_AUX_MASK_SH_LIST(__SHIFT)
+};
+
+static const struct dce110_aux_registers_mask aux_mask = {
+ DCN_AUX_MASK_SH_LIST(_MASK)
+};
+
+static int map_transmitter_id_to_phy_instance(
+ enum transmitter transmitter)
+{
+ switch (transmitter) {
+ case TRANSMITTER_UNIPHY_A:
+ return 0;
+ break;
+ case TRANSMITTER_UNIPHY_B:
+ return 1;
+ break;
+ case TRANSMITTER_UNIPHY_C:
+ return 2;
+ break;
+ case TRANSMITTER_UNIPHY_D:
+ return 3;
+ break;
+ case TRANSMITTER_UNIPHY_E:
+ return 4;
+ break;
+ case TRANSMITTER_UNIPHY_F:
+ return 5;
+ break;
+ default:
+ ASSERT(0);
+ return 0;
+ }
+}
+
#define dsc_regsDCN20(id)\
[id] = {\
DSC_REG_LIST_DCN20(id)\
@@ -642,7 +635,6 @@ static const struct dcn20_dsc_shift dsc_shift = {
static const struct dcn20_dsc_mask dsc_mask = {
DSC_REG_LIST_SH_MASK_DCN20(_MASK)
};
-#endif
static const struct dccg_registers dccg_regs = {
DCCG_REG_LIST_DCN2()
@@ -666,9 +658,7 @@ static const struct resource_caps res_cap_nv10 = {
.num_dwb = 1,
.num_ddc = 6,
.num_vmid = 16,
-#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT
.num_dsc = 6,
-#endif
};
static const struct dc_plane_cap plane_cap = {
@@ -680,7 +670,8 @@ static const struct dc_plane_cap plane_cap = {
.pixel_format_support = {
.argb8888 = true,
.nv12 = true,
- .fp16 = true
+ .fp16 = true,
+ .p010 = true
},
.max_upscale_factor = {
@@ -693,17 +684,31 @@ static const struct dc_plane_cap plane_cap = {
.argb8888 = 250,
.nv12 = 250,
.fp16 = 1
- }
+ },
+ 16,
+ 16
+};
+static const struct resource_caps res_cap_nv14 = {
+ .num_timing_generator = 5,
+ .num_opp = 5,
+ .num_video_plane = 5,
+ .num_audio = 6,
+ .num_stream_encoder = 5,
+ .num_pll = 5,
+ .num_dwb = 1,
+ .num_ddc = 5,
+ .num_vmid = 16,
+ .num_dsc = 5,
};
static const struct dc_debug_options debug_defaults_drv = {
- .disable_dmcu = true,
+ .disable_dmcu = false,
.force_abm_enable = false,
.timing_trace = false,
.clock_trace = true,
.disable_pplib_clock_request = true,
- .pipe_split_policy = MPC_SPLIT_DYNAMIC,
- .force_single_disp_pipe_split = true,
+ .pipe_split_policy = MPC_SPLIT_AVOID_MULT_DISP,
+ .force_single_disp_pipe_split = false,
.disable_dcc = DCC_ENABLE,
.vsr_support = true,
.performance_trace = false,
@@ -711,12 +716,11 @@ static const struct dc_debug_options debug_defaults_drv = {
.disable_pplib_wm_range = false,
.scl_reset_length10 = true,
.sanity_checks = false,
- .disable_tri_buf = true,
.underflow_assert_delay_us = 0xFFFFFFFF,
};
static const struct dc_debug_options debug_defaults_diags = {
- .disable_dmcu = true,
+ .disable_dmcu = false,
.force_abm_enable = false,
.timing_trace = true,
.clock_trace = true,
@@ -728,6 +732,7 @@ static const struct dc_debug_options debug_defaults_diags = {
.disable_stutter = true,
.scl_reset_length10 = true,
.underflow_assert_delay_us = 0xFFFFFFFF,
+ .enable_tri_buf = true,
};
void dcn20_dpp_destroy(struct dpp **dpp)
@@ -741,7 +746,7 @@ struct dpp *dcn20_dpp_create(
uint32_t inst)
{
struct dcn20_dpp *dpp =
- kzalloc(sizeof(struct dcn20_dpp), GFP_KERNEL);
+ kzalloc(sizeof(struct dcn20_dpp), GFP_ATOMIC);
if (!dpp)
return NULL;
@@ -759,7 +764,7 @@ struct input_pixel_processor *dcn20_ipp_create(
struct dc_context *ctx, uint32_t inst)
{
struct dcn10_ipp *ipp =
- kzalloc(sizeof(struct dcn10_ipp), GFP_KERNEL);
+ kzalloc(sizeof(struct dcn10_ipp), GFP_ATOMIC);
if (!ipp) {
BREAK_TO_DEBUGGER();
@@ -776,7 +781,7 @@ struct output_pixel_processor *dcn20_opp_create(
struct dc_context *ctx, uint32_t inst)
{
struct dcn20_opp *opp =
- kzalloc(sizeof(struct dcn20_opp), GFP_KERNEL);
+ kzalloc(sizeof(struct dcn20_opp), GFP_ATOMIC);
if (!opp) {
BREAK_TO_DEBUGGER();
@@ -793,14 +798,17 @@ struct dce_aux *dcn20_aux_engine_create(
uint32_t inst)
{
struct aux_engine_dce110 *aux_engine =
- kzalloc(sizeof(struct aux_engine_dce110), GFP_KERNEL);
+ kzalloc(sizeof(struct aux_engine_dce110), GFP_ATOMIC);
if (!aux_engine)
return NULL;
dce110_aux_engine_construct(aux_engine, ctx, inst,
SW_AUX_TIMEOUT_PERIOD_MULTIPLIER * AUX_TIMEOUT_PERIOD,
- &aux_engine_regs[inst]);
+ &aux_engine_regs[inst],
+ &aux_mask,
+ &aux_shift,
+ ctx->dc->caps.extended_aux_timeout_support);
return &aux_engine->base;
}
@@ -828,7 +836,7 @@ struct dce_i2c_hw *dcn20_i2c_hw_create(
uint32_t inst)
{
struct dce_i2c_hw *dce_i2c_hw =
- kzalloc(sizeof(struct dce_i2c_hw), GFP_KERNEL);
+ kzalloc(sizeof(struct dce_i2c_hw), GFP_ATOMIC);
if (!dce_i2c_hw)
return NULL;
@@ -841,7 +849,7 @@ struct dce_i2c_hw *dcn20_i2c_hw_create(
struct mpc *dcn20_mpc_create(struct dc_context *ctx)
{
struct dcn20_mpc *mpc20 = kzalloc(sizeof(struct dcn20_mpc),
- GFP_KERNEL);
+ GFP_ATOMIC);
if (!mpc20)
return NULL;
@@ -859,7 +867,7 @@ struct hubbub *dcn20_hubbub_create(struct dc_context *ctx)
{
int i;
struct dcn20_hubbub *hubbub = kzalloc(sizeof(struct dcn20_hubbub),
- GFP_KERNEL);
+ GFP_ATOMIC);
if (!hubbub)
return NULL;
@@ -887,7 +895,7 @@ struct timing_generator *dcn20_timing_generator_create(
uint32_t instance)
{
struct optc *tgn10 =
- kzalloc(sizeof(struct optc), GFP_KERNEL);
+ kzalloc(sizeof(struct optc), GFP_ATOMIC);
if (!tgn10)
return NULL;
@@ -909,6 +917,7 @@ static const struct encoder_feature_support link_enc_feature = {
.max_hdmi_pixel_clock = 600000,
.hdmi_ycbcr420_supported = true,
.dp_ycbcr420_supported = true,
+ .fec_supported = true,
.flags.bits.IS_HBR2_CAPABLE = true,
.flags.bits.IS_HBR3_CAPABLE = true,
.flags.bits.IS_TPS3_CAPABLE = true,
@@ -920,14 +929,18 @@ struct link_encoder *dcn20_link_encoder_create(
{
struct dcn20_link_encoder *enc20 =
kzalloc(sizeof(struct dcn20_link_encoder), GFP_KERNEL);
+ int link_regs_id;
if (!enc20)
return NULL;
+ link_regs_id =
+ map_transmitter_id_to_phy_instance(enc_init_data->transmitter);
+
dcn20_link_encoder_construct(enc20,
enc_init_data,
&link_enc_feature,
- &link_enc_regs[enc_init_data->transmitter],
+ &link_enc_regs[link_regs_id],
&link_enc_aux_regs[enc_init_data->channel - 1],
&link_enc_hpd_regs[enc_init_data->hpd_source],
&le_shift,
@@ -936,7 +949,24 @@ struct link_encoder *dcn20_link_encoder_create(
return &enc20->enc10.base;
}
-struct clock_source *dcn20_clock_source_create(
+static struct panel_cntl *dcn20_panel_cntl_create(const struct panel_cntl_init_data *init_data)
+{
+ struct dce_panel_cntl *panel_cntl =
+ kzalloc(sizeof(struct dce_panel_cntl), GFP_KERNEL);
+
+ if (!panel_cntl)
+ return NULL;
+
+ dce_panel_cntl_construct(panel_cntl,
+ init_data,
+ &panel_cntl_regs[init_data->inst],
+ &panel_cntl_shift,
+ &panel_cntl_mask);
+
+ return &panel_cntl->base;
+}
+
+static struct clock_source *dcn20_clock_source_create(
struct dc_context *ctx,
struct dc_bios *bios,
enum clock_source_id id,
@@ -944,7 +974,7 @@ struct clock_source *dcn20_clock_source_create(
bool dp_clk_src)
{
struct dce110_clk_src *clk_src =
- kzalloc(sizeof(struct dce110_clk_src), GFP_KERNEL);
+ kzalloc(sizeof(struct dce110_clk_src), GFP_ATOMIC);
if (!clk_src)
return NULL;
@@ -955,6 +985,7 @@ struct clock_source *dcn20_clock_source_create(
return &clk_src->base;
}
+ kfree(clk_src);
BREAK_TO_DEBUGGER();
return NULL;
}
@@ -984,6 +1015,11 @@ struct stream_encoder *dcn20_stream_encoder_create(
if (!enc1)
return NULL;
+ if (ASICREV_IS_NAVI14_M(ctx->asic_id.hw_internal_rev)) {
+ if (eng_id >= ENGINE_ID_DIGD)
+ eng_id++;
+ }
+
dcn20_stream_encoder_construct(enc1, ctx, ctx->dc_bios, eng_id,
&stream_enc_regs[eng_id],
&se_shift, &se_mask);
@@ -1031,19 +1067,20 @@ static const struct resource_create_funcs res_create_maximus_funcs = {
.create_hwseq = dcn20_hwseq_create,
};
+static void dcn20_pp_smu_destroy(struct pp_smu_funcs **pp_smu);
+
void dcn20_clock_source_destroy(struct clock_source **clk_src)
{
kfree(TO_DCE110_CLK_SRC(*clk_src));
*clk_src = NULL;
}
-#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT
struct display_stream_compressor *dcn20_dsc_create(
struct dc_context *ctx, uint32_t inst)
{
struct dcn20_dsc *dsc =
- kzalloc(sizeof(struct dcn20_dsc), GFP_KERNEL);
+ kzalloc(sizeof(struct dcn20_dsc), GFP_ATOMIC);
if (!dsc) {
BREAK_TO_DEBUGGER();
@@ -1060,9 +1097,8 @@ void dcn20_dsc_destroy(struct display_stream_compressor **dsc)
*dsc = NULL;
}
-#endif
-static void destruct(struct dcn20_resource_pool *pool)
+static void dcn20_resource_destruct(struct dcn20_resource_pool *pool)
{
unsigned int i;
@@ -1073,12 +1109,10 @@ static void destruct(struct dcn20_resource_pool *pool)
}
}
-#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT
for (i = 0; i < pool->base.res_cap->num_dsc; i++) {
if (pool->base.dscs[i] != NULL)
dcn20_dsc_destroy(&pool->base.dscs[i]);
}
-#endif
if (pool->base.mpc != NULL) {
kfree(TO_DCN20_MPC(pool->base.mpc));
@@ -1171,6 +1205,8 @@ static void destruct(struct dcn20_resource_pool *pool)
if (pool->base.pp_smu != NULL)
dcn20_pp_smu_destroy(&pool->base.pp_smu);
+ if (pool->base.oem_device != NULL)
+ dal_ddc_service_destroy(&pool->base.oem_device);
}
struct hubp *dcn20_hubp_create(
@@ -1178,7 +1214,7 @@ struct hubp *dcn20_hubp_create(
uint32_t inst)
{
struct dcn20_hubp *hubp2 =
- kzalloc(sizeof(struct dcn20_hubp), GFP_KERNEL);
+ kzalloc(sizeof(struct dcn20_hubp), GFP_ATOMIC);
if (!hubp2)
return NULL;
@@ -1197,13 +1233,25 @@ static void get_pixel_clock_parameters(
struct pixel_clk_params *pixel_clk_params)
{
const struct dc_stream_state *stream = pipe_ctx->stream;
- bool odm_combine = dc_res_get_odm_bottom_pipe(pipe_ctx) != NULL;
+ struct pipe_ctx *odm_pipe;
+ int opp_cnt = 1;
+ struct dc_link *link = stream->link;
+ struct link_encoder *link_enc = NULL;
+
+ for (odm_pipe = pipe_ctx->next_odm_pipe; odm_pipe; odm_pipe = odm_pipe->next_odm_pipe)
+ opp_cnt++;
pixel_clk_params->requested_pix_clk_100hz = stream->timing.pix_clk_100hz;
- pixel_clk_params->encoder_object_id = stream->link->link_enc->id;
+
+ link_enc = link_enc_cfg_get_link_enc(link);
+ ASSERT(link_enc);
+
+ if (link_enc)
+ pixel_clk_params->encoder_object_id = link_enc->id;
pixel_clk_params->signal_type = pipe_ctx->stream->signal;
pixel_clk_params->controller_id = pipe_ctx->stream_res.tg->inst + 1;
/* TODO: un-hardcode*/
+ /* TODO - DP2.0 HW: calculate requested_sym_clk for UHBR rates */
pixel_clk_params->requested_sym_clk = LINK_RATE_LOW *
LINK_RATE_REF_FREQ_IN_KHZ;
pixel_clk_params->flags.ENABLE_SS = 0;
@@ -1215,7 +1263,9 @@ static void get_pixel_clock_parameters(
if (stream->timing.pixel_encoding == PIXEL_ENCODING_YCBCR422)
pixel_clk_params->color_depth = COLOR_DEPTH_888;
- if (optc1_is_two_pixels_per_containter(&stream->timing) || odm_combine)
+ if (opp_cnt == 4)
+ pixel_clk_params->requested_pix_clk_100hz /= 4;
+ else if (optc2_is_two_pixels_per_containter(&stream->timing) || opp_cnt == 2)
pixel_clk_params->requested_pix_clk_100hz /= 2;
if (stream->timing.timing_3d_format == TIMING_3D_FORMAT_HW_FRAME_PACKING)
@@ -1254,24 +1304,6 @@ enum dc_status dcn20_build_mapped_resource(const struct dc *dc, struct dc_state
enum dc_status status = DC_OK;
struct pipe_ctx *pipe_ctx = resource_get_head_pipe_for_stream(&context->res_ctx, stream);
- /*TODO Seems unneeded anymore */
- /* if (old_context && resource_is_stream_unchanged(old_context, stream)) {
- if (stream != NULL && old_context->streams[i] != NULL) {
- todo: shouldn't have to copy missing parameter here
- resource_build_bit_depth_reduction_params(stream,
- &stream->bit_depth_params);
- stream->clamping.pixel_encoding =
- stream->timing.pixel_encoding;
-
- resource_build_bit_depth_reduction_params(stream,
- &stream->bit_depth_params);
- build_clamping_params(stream);
-
- continue;
- }
- }
- */
-
if (!pipe_ctx)
return DC_ERROR_UNEXPECTED;
@@ -1281,17 +1313,33 @@ enum dc_status dcn20_build_mapped_resource(const struct dc *dc, struct dc_state
return status;
}
-#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT
-static void acquire_dsc(struct resource_context *res_ctx,
- const struct resource_pool *pool,
- struct display_stream_compressor **dsc)
+void dcn20_acquire_dsc(const struct dc *dc,
+ struct resource_context *res_ctx,
+ struct display_stream_compressor **dsc,
+ int pipe_idx)
{
int i;
+ const struct resource_pool *pool = dc->res_pool;
+ struct display_stream_compressor *dsc_old = dc->current_state->res_ctx.pipe_ctx[pipe_idx].stream_res.dsc;
- ASSERT(*dsc == NULL);
+ ASSERT(*dsc == NULL); /* If this ASSERT fails, dsc was not released properly */
*dsc = NULL;
+ /* Always do 1-to-1 mapping when number of DSCs is same as number of pipes */
+ if (pool->res_cap->num_dsc == pool->res_cap->num_opp) {
+ *dsc = pool->dscs[pipe_idx];
+ res_ctx->is_dsc_acquired[pipe_idx] = true;
+ return;
+ }
+
+ /* Return old DSC to avoid the need for re-programming */
+ if (dsc_old && !res_ctx->is_dsc_acquired[dsc_old->inst]) {
+ *dsc = dsc_old;
+ res_ctx->is_dsc_acquired[dsc_old->inst] = true;
+		return;
+ }
+
/* Find first free DSC */
for (i = 0; i < pool->res_cap->num_dsc; i++)
if (!res_ctx->is_dsc_acquired[i]) {
@@ -1301,7 +1349,7 @@ static void acquire_dsc(struct resource_context *res_ctx,
}
}
-static void release_dsc(struct resource_context *res_ctx,
+void dcn20_release_dsc(struct resource_context *res_ctx,
const struct resource_pool *pool,
struct display_stream_compressor **dsc)
{
@@ -1315,17 +1363,14 @@ static void release_dsc(struct resource_context *res_ctx,
}
}
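
Note: dcn20_acquire_dsc() prefers, in order, the 1:1 pipe-indexed DSC when the pool has as many DSCs as OPPs, then the DSC this pipe already holds in the current state (avoiding a reprogram), then the first free one; dcn20_release_dsc() clears the matching is_dsc_acquired slot. A hedged sketch of the pairing during validation, where 'context' and 'pipe_ctx' are assumed locals:

    /* Sketch only: release any DSC the pipe holds, then re-acquire one if
     * the stream still needs DSC.
     */
    if (pipe_ctx->stream_res.dsc)
            dcn20_release_dsc(&context->res_ctx, dc->res_pool,
                              &pipe_ctx->stream_res.dsc);

    if (pipe_ctx->stream->timing.flags.DSC)
            dcn20_acquire_dsc(dc, &context->res_ctx,
                              &pipe_ctx->stream_res.dsc, pipe_ctx->pipe_idx);
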
-#endif
-#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT
-static enum dc_status add_dsc_to_stream_resource(struct dc *dc,
+enum dc_status dcn20_add_dsc_to_stream_resource(struct dc *dc,
struct dc_state *dc_ctx,
struct dc_stream_state *dc_stream)
{
enum dc_status result = DC_OK;
int i;
- const struct resource_pool *pool = dc->res_pool;
/* Get a DSC if required and available */
for (i = 0; i < dc->res_pool->pipe_count; i++) {
@@ -1334,11 +1379,13 @@ static enum dc_status add_dsc_to_stream_resource(struct dc *dc,
if (pipe_ctx->stream != dc_stream)
continue;
- acquire_dsc(&dc_ctx->res_ctx, pool, &pipe_ctx->stream_res.dsc);
+ if (pipe_ctx->stream_res.dsc)
+ continue;
+
+ dcn20_acquire_dsc(dc, &dc_ctx->res_ctx, &pipe_ctx->stream_res.dsc, i);
/* The number of DSCs can be less than the number of pipes */
if (!pipe_ctx->stream_res.dsc) {
- dm_output_to_console("No DSCs available\n");
result = DC_NO_DSC_RESOURCE;
}
@@ -1359,24 +1406,17 @@ static enum dc_status remove_dsc_from_stream_resource(struct dc *dc,
for (i = 0; i < MAX_PIPES; i++) {
if (new_ctx->res_ctx.pipe_ctx[i].stream == dc_stream && !new_ctx->res_ctx.pipe_ctx[i].top_pipe) {
pipe_ctx = &new_ctx->res_ctx.pipe_ctx[i];
- break;
+
+ if (pipe_ctx->stream_res.dsc)
+ dcn20_release_dsc(&new_ctx->res_ctx, dc->res_pool, &pipe_ctx->stream_res.dsc);
}
}
if (!pipe_ctx)
return DC_ERROR_UNEXPECTED;
-
- if (pipe_ctx->stream_res.dsc) {
- struct pipe_ctx *odm_pipe = dc_res_get_odm_bottom_pipe(pipe_ctx);
-
- release_dsc(&new_ctx->res_ctx, dc->res_pool, &pipe_ctx->stream_res.dsc);
- if (odm_pipe)
- release_dsc(&new_ctx->res_ctx, dc->res_pool, &odm_pipe->stream_res.dsc);
- }
-
- return DC_OK;
+ else
+ return DC_OK;
}
-#endif
enum dc_status dcn20_add_stream_to_ctx(struct dc *dc, struct dc_state *new_ctx, struct dc_stream_state *dc_stream)
@@ -1388,11 +1428,9 @@ enum dc_status dcn20_add_stream_to_ctx(struct dc *dc, struct dc_state *new_ctx,
if (result == DC_OK)
result = resource_map_phy_clock_resources(dc, new_ctx, dc_stream);
-#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT
/* Get a DSC if required and available */
if (result == DC_OK && dc_stream->timing.flags.DSC)
- result = add_dsc_to_stream_resource(dc, new_ctx, dc_stream);
-#endif
+ result = dcn20_add_dsc_to_stream_resource(dc, new_ctx, dc_stream);
if (result == DC_OK)
result = dcn20_build_mapped_resource(dc, new_ctx, dc_stream);
@@ -1405,85 +1443,103 @@ enum dc_status dcn20_remove_stream_from_ctx(struct dc *dc, struct dc_state *new_
{
enum dc_status result = DC_OK;
-#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT
result = remove_dsc_from_stream_resource(dc, new_ctx, dc_stream);
-#endif
return result;
}
-
-static void swizzle_to_dml_params(
- enum swizzle_mode_values swizzle,
- unsigned int *sw_mode)
+bool dcn20_split_stream_for_odm(
+ const struct dc *dc,
+ struct resource_context *res_ctx,
+ struct pipe_ctx *prev_odm_pipe,
+ struct pipe_ctx *next_odm_pipe)
{
- switch (swizzle) {
- case DC_SW_LINEAR:
- *sw_mode = dm_sw_linear;
- break;
- case DC_SW_4KB_S:
- *sw_mode = dm_sw_4kb_s;
- break;
- case DC_SW_4KB_S_X:
- *sw_mode = dm_sw_4kb_s_x;
- break;
- case DC_SW_4KB_D:
- *sw_mode = dm_sw_4kb_d;
- break;
- case DC_SW_4KB_D_X:
- *sw_mode = dm_sw_4kb_d_x;
- break;
- case DC_SW_64KB_S:
- *sw_mode = dm_sw_64kb_s;
- break;
- case DC_SW_64KB_S_X:
- *sw_mode = dm_sw_64kb_s_x;
- break;
- case DC_SW_64KB_S_T:
- *sw_mode = dm_sw_64kb_s_t;
- break;
- case DC_SW_64KB_D:
- *sw_mode = dm_sw_64kb_d;
- break;
- case DC_SW_64KB_D_X:
- *sw_mode = dm_sw_64kb_d_x;
- break;
- case DC_SW_64KB_D_T:
- *sw_mode = dm_sw_64kb_d_t;
- break;
- case DC_SW_64KB_R_X:
- *sw_mode = dm_sw_64kb_r_x;
- break;
- case DC_SW_VAR_S:
- *sw_mode = dm_sw_var_s;
- break;
- case DC_SW_VAR_S_X:
- *sw_mode = dm_sw_var_s_x;
- break;
- case DC_SW_VAR_D:
- *sw_mode = dm_sw_var_d;
- break;
- case DC_SW_VAR_D_X:
- *sw_mode = dm_sw_var_d_x;
- break;
+ int pipe_idx = next_odm_pipe->pipe_idx;
+ const struct resource_pool *pool = dc->res_pool;
- default:
- ASSERT(0); /* Not supported */
- break;
+ *next_odm_pipe = *prev_odm_pipe;
+
+ next_odm_pipe->pipe_idx = pipe_idx;
+ next_odm_pipe->plane_res.mi = pool->mis[next_odm_pipe->pipe_idx];
+ next_odm_pipe->plane_res.hubp = pool->hubps[next_odm_pipe->pipe_idx];
+ next_odm_pipe->plane_res.ipp = pool->ipps[next_odm_pipe->pipe_idx];
+ next_odm_pipe->plane_res.xfm = pool->transforms[next_odm_pipe->pipe_idx];
+ next_odm_pipe->plane_res.dpp = pool->dpps[next_odm_pipe->pipe_idx];
+ next_odm_pipe->plane_res.mpcc_inst = pool->dpps[next_odm_pipe->pipe_idx]->inst;
+ next_odm_pipe->stream_res.dsc = NULL;
+ if (prev_odm_pipe->next_odm_pipe && prev_odm_pipe->next_odm_pipe != next_odm_pipe) {
+ next_odm_pipe->next_odm_pipe = prev_odm_pipe->next_odm_pipe;
+ next_odm_pipe->next_odm_pipe->prev_odm_pipe = next_odm_pipe;
+ }
+ if (prev_odm_pipe->top_pipe && prev_odm_pipe->top_pipe->next_odm_pipe) {
+ prev_odm_pipe->top_pipe->next_odm_pipe->bottom_pipe = next_odm_pipe;
+ next_odm_pipe->top_pipe = prev_odm_pipe->top_pipe->next_odm_pipe;
+ }
+ if (prev_odm_pipe->bottom_pipe && prev_odm_pipe->bottom_pipe->next_odm_pipe) {
+ prev_odm_pipe->bottom_pipe->next_odm_pipe->top_pipe = next_odm_pipe;
+ next_odm_pipe->bottom_pipe = prev_odm_pipe->bottom_pipe->next_odm_pipe;
+ }
+ prev_odm_pipe->next_odm_pipe = next_odm_pipe;
+ next_odm_pipe->prev_odm_pipe = prev_odm_pipe;
+
+ if (prev_odm_pipe->plane_state) {
+ struct scaler_data *sd = &prev_odm_pipe->plane_res.scl_data;
+ int new_width;
+
+ /* HACTIVE halved for odm combine */
+ sd->h_active /= 2;
+ /* Calculate new vp and recout for left pipe */
+ /* Need at least 16 pixels width per side */
+ if (sd->recout.x + 16 >= sd->h_active)
+ return false;
+ new_width = sd->h_active - sd->recout.x;
+ sd->viewport.width -= dc_fixpt_floor(dc_fixpt_mul_int(
+ sd->ratios.horz, sd->recout.width - new_width));
+ sd->viewport_c.width -= dc_fixpt_floor(dc_fixpt_mul_int(
+ sd->ratios.horz_c, sd->recout.width - new_width));
+ sd->recout.width = new_width;
+
+ /* Calculate new vp and recout for right pipe */
+ sd = &next_odm_pipe->plane_res.scl_data;
+ /* HACTIVE halved for odm combine */
+ sd->h_active /= 2;
+ /* Need at least 16 pixels width per side */
+ if (new_width <= 16)
+ return false;
+ new_width = sd->recout.width + sd->recout.x - sd->h_active;
+ sd->viewport.width -= dc_fixpt_floor(dc_fixpt_mul_int(
+ sd->ratios.horz, sd->recout.width - new_width));
+ sd->viewport_c.width -= dc_fixpt_floor(dc_fixpt_mul_int(
+ sd->ratios.horz_c, sd->recout.width - new_width));
+ sd->recout.width = new_width;
+ sd->viewport.x += dc_fixpt_floor(dc_fixpt_mul_int(
+ sd->ratios.horz, sd->h_active - sd->recout.x));
+ sd->viewport_c.x += dc_fixpt_floor(dc_fixpt_mul_int(
+ sd->ratios.horz_c, sd->h_active - sd->recout.x));
+ sd->recout.x = 0;
+ }
+ if (!next_odm_pipe->top_pipe)
+ next_odm_pipe->stream_res.opp = pool->opps[next_odm_pipe->pipe_idx];
+ else
+ next_odm_pipe->stream_res.opp = next_odm_pipe->top_pipe->stream_res.opp;
+ if (next_odm_pipe->stream->timing.flags.DSC == 1 && !next_odm_pipe->top_pipe) {
+ dcn20_acquire_dsc(dc, res_ctx, &next_odm_pipe->stream_res.dsc, next_odm_pipe->pipe_idx);
+ ASSERT(next_odm_pipe->stream_res.dsc);
+ if (next_odm_pipe->stream_res.dsc == NULL)
+ return false;
}
+
+ return true;
}
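
Note: the viewport/recout adjustment above splits one full-width scaler configuration at h_active/2, shrinking each pipe's recout to its own half and shifting the right pipe's viewport by the source pixels consumed on the left. A small numeric walk-through, using a hypothetical 1:1-scaled 3840-wide plane (all values illustrative):

    /* Hypothetical 3840-wide plane, ratios.horz = 1.0, recout.x = 0:
     *   before: h_active = 3840, recout.width = 3840, viewport.width = 3840
     *   left  : h_active = 1920, new_width = 1920 - 0 = 1920
     *           viewport.width -= 1.0 * (3840 - 1920) -> 1920, recout.width = 1920
     *   right : h_active = 1920, new_width = 3840 + 0 - 1920 = 1920
     *           viewport.width -> 1920, viewport.x += 1.0 * (1920 - 0) = 1920
     *           recout.x = 0, recout.width = 1920
     * With real scaling, dc_fixpt_mul_int() applies ratios.horz so the
     * viewport split lands on the matching source pixels.
     */
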
-static bool dcn20_split_stream_for_combine(
+void dcn20_split_stream_for_mpc(
struct resource_context *res_ctx,
const struct resource_pool *pool,
struct pipe_ctx *primary_pipe,
- struct pipe_ctx *secondary_pipe,
- bool is_odm_combine)
+ struct pipe_ctx *secondary_pipe)
{
int pipe_idx = secondary_pipe->pipe_idx;
- struct scaler_data *sd = &primary_pipe->plane_res.scl_data;
struct pipe_ctx *sec_bot_pipe = secondary_pipe->bottom_pipe;
- int new_width;
*secondary_pipe = *primary_pipe;
secondary_pipe->bottom_pipe = sec_bot_pipe;
@@ -1495,9 +1551,7 @@ static bool dcn20_split_stream_for_combine(
secondary_pipe->plane_res.xfm = pool->transforms[secondary_pipe->pipe_idx];
secondary_pipe->plane_res.dpp = pool->dpps[secondary_pipe->pipe_idx];
secondary_pipe->plane_res.mpcc_inst = pool->dpps[secondary_pipe->pipe_idx]->inst;
-#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT
secondary_pipe->stream_res.dsc = NULL;
-#endif
if (primary_pipe->bottom_pipe && primary_pipe->bottom_pipe != secondary_pipe) {
ASSERT(!secondary_pipe->bottom_pipe);
secondary_pipe->bottom_pipe = primary_pipe->bottom_pipe;
@@ -1506,392 +1560,7 @@ static bool dcn20_split_stream_for_combine(
primary_pipe->bottom_pipe = secondary_pipe;
secondary_pipe->top_pipe = primary_pipe;
- if (is_odm_combine) {
- if (primary_pipe->plane_state) {
- /* HACTIVE halved for odm combine */
- sd->h_active /= 2;
- /* Copy scl_data to secondary pipe */
- secondary_pipe->plane_res.scl_data = *sd;
-
- /* Calculate new vp and recout for left pipe */
- /* Need at least 16 pixels width per side */
- if (sd->recout.x + 16 >= sd->h_active)
- return false;
- new_width = sd->h_active - sd->recout.x;
- sd->viewport.width -= dc_fixpt_floor(dc_fixpt_mul_int(
- sd->ratios.horz, sd->recout.width - new_width));
- sd->viewport_c.width -= dc_fixpt_floor(dc_fixpt_mul_int(
- sd->ratios.horz_c, sd->recout.width - new_width));
- sd->recout.width = new_width;
-
- /* Calculate new vp and recout for right pipe */
- sd = &secondary_pipe->plane_res.scl_data;
- new_width = sd->recout.width + sd->recout.x - sd->h_active;
- /* Need at least 16 pixels width per side */
- if (new_width <= 16)
- return false;
- sd->viewport.width -= dc_fixpt_floor(dc_fixpt_mul_int(
- sd->ratios.horz, sd->recout.width - new_width));
- sd->viewport_c.width -= dc_fixpt_floor(dc_fixpt_mul_int(
- sd->ratios.horz_c, sd->recout.width - new_width));
- sd->recout.width = new_width;
- sd->viewport.x += dc_fixpt_floor(dc_fixpt_mul_int(
- sd->ratios.horz, sd->h_active - sd->recout.x));
- sd->viewport_c.x += dc_fixpt_floor(dc_fixpt_mul_int(
- sd->ratios.horz_c, sd->h_active - sd->recout.x));
- sd->recout.x = 0;
- }
- secondary_pipe->stream_res.opp = pool->opps[secondary_pipe->pipe_idx];
-#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT
- if (secondary_pipe->stream->timing.flags.DSC == 1) {
- acquire_dsc(res_ctx, pool, &secondary_pipe->stream_res.dsc);
- ASSERT(secondary_pipe->stream_res.dsc);
- if (secondary_pipe->stream_res.dsc == NULL)
- return false;
- }
-#endif
- } else {
- ASSERT(primary_pipe->plane_state);
- resource_build_scaling_params(primary_pipe);
- resource_build_scaling_params(secondary_pipe);
- }
-
- return true;
-}
-
-void dcn20_populate_dml_writeback_from_context(
- struct dc *dc, struct resource_context *res_ctx, display_e2e_pipe_params_st *pipes)
-{
- int pipe_cnt, i;
-
- for (i = 0, pipe_cnt = 0; i < dc->res_pool->pipe_count; i++) {
- struct dc_writeback_info *wb_info = &res_ctx->pipe_ctx[i].stream->writeback_info[0];
-
- if (!res_ctx->pipe_ctx[i].stream)
- continue;
-
- /* Set writeback information */
- pipes[pipe_cnt].dout.wb_enable = (wb_info->wb_enabled == true) ? 1 : 0;
- pipes[pipe_cnt].dout.num_active_wb++;
- pipes[pipe_cnt].dout.wb.wb_src_height = wb_info->dwb_params.cnv_params.crop_height;
- pipes[pipe_cnt].dout.wb.wb_src_width = wb_info->dwb_params.cnv_params.crop_width;
- pipes[pipe_cnt].dout.wb.wb_dst_width = wb_info->dwb_params.dest_width;
- pipes[pipe_cnt].dout.wb.wb_dst_height = wb_info->dwb_params.dest_height;
- pipes[pipe_cnt].dout.wb.wb_htaps_luma = 1;
- pipes[pipe_cnt].dout.wb.wb_vtaps_luma = 1;
- pipes[pipe_cnt].dout.wb.wb_htaps_chroma = wb_info->dwb_params.scaler_taps.h_taps_c;
- pipes[pipe_cnt].dout.wb.wb_vtaps_chroma = wb_info->dwb_params.scaler_taps.v_taps_c;
- pipes[pipe_cnt].dout.wb.wb_hratio = 1.0;
- pipes[pipe_cnt].dout.wb.wb_vratio = 1.0;
- if (wb_info->dwb_params.out_format == dwb_scaler_mode_yuv420) {
- if (wb_info->dwb_params.output_depth == DWB_OUTPUT_PIXEL_DEPTH_8BPC)
- pipes[pipe_cnt].dout.wb.wb_pixel_format = dm_420_8;
- else
- pipes[pipe_cnt].dout.wb.wb_pixel_format = dm_420_10;
- } else
- pipes[pipe_cnt].dout.wb.wb_pixel_format = dm_444_32;
-
- pipe_cnt++;
- }
-
-}
-
-int dcn20_populate_dml_pipes_from_context(
- struct dc *dc, struct resource_context *res_ctx, display_e2e_pipe_params_st *pipes)
-{
- int pipe_cnt, i;
- bool synchronized_vblank = true;
-
- for (i = 0, pipe_cnt = -1; i < dc->res_pool->pipe_count; i++) {
- if (!res_ctx->pipe_ctx[i].stream)
- continue;
-
- if (pipe_cnt < 0) {
- pipe_cnt = i;
- continue;
- }
- if (!resource_are_streams_timing_synchronizable(
- res_ctx->pipe_ctx[pipe_cnt].stream,
- res_ctx->pipe_ctx[i].stream)) {
- synchronized_vblank = false;
- break;
- }
- }
-
- for (i = 0, pipe_cnt = 0; i < dc->res_pool->pipe_count; i++) {
- struct dc_crtc_timing *timing = &res_ctx->pipe_ctx[i].stream->timing;
- int output_bpc;
-
- if (!res_ctx->pipe_ctx[i].stream)
- continue;
- /* todo:
- pipes[pipe_cnt].pipe.src.dynamic_metadata_enable = 0;
- pipes[pipe_cnt].pipe.src.dcc = 0;
- pipes[pipe_cnt].pipe.src.vm = 0;*/
-
-#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT
- pipes[pipe_cnt].dout.dsc_enable = res_ctx->pipe_ctx[i].stream->timing.flags.DSC;
- /* todo: rotation?*/
- pipes[pipe_cnt].dout.dsc_slices = res_ctx->pipe_ctx[i].stream->timing.dsc_cfg.num_slices_h;
-#endif
- if (res_ctx->pipe_ctx[i].stream->use_dynamic_meta) {
- pipes[pipe_cnt].pipe.src.dynamic_metadata_enable = true;
- /* 1/2 vblank */
- pipes[pipe_cnt].pipe.src.dynamic_metadata_lines_before_active =
- (timing->v_total - timing->v_addressable
- - timing->v_border_top - timing->v_border_bottom) / 2;
- /* 36 bytes dp, 32 hdmi */
- pipes[pipe_cnt].pipe.src.dynamic_metadata_xmit_bytes =
- dc_is_dp_signal(res_ctx->pipe_ctx[i].stream->signal) ? 36 : 32;
- }
- pipes[pipe_cnt].pipe.src.dcc = false;
- pipes[pipe_cnt].pipe.src.dcc_rate = 1;
- pipes[pipe_cnt].pipe.dest.synchronized_vblank_all_planes = synchronized_vblank;
- pipes[pipe_cnt].pipe.dest.hblank_start = timing->h_total - timing->h_front_porch;
- pipes[pipe_cnt].pipe.dest.hblank_end = pipes[pipe_cnt].pipe.dest.hblank_start
- - timing->h_addressable
- - timing->h_border_left
- - timing->h_border_right;
- pipes[pipe_cnt].pipe.dest.vblank_start = timing->v_total - timing->v_front_porch;
- pipes[pipe_cnt].pipe.dest.vblank_end = pipes[pipe_cnt].pipe.dest.vblank_start
- - timing->v_addressable
- - timing->v_border_top
- - timing->v_border_bottom;
- pipes[pipe_cnt].pipe.dest.htotal = timing->h_total;
- pipes[pipe_cnt].pipe.dest.vtotal = timing->v_total;
- pipes[pipe_cnt].pipe.dest.hactive = timing->h_addressable;
- pipes[pipe_cnt].pipe.dest.vactive = timing->v_addressable;
- pipes[pipe_cnt].pipe.dest.interlaced = timing->flags.INTERLACE;
- pipes[pipe_cnt].pipe.dest.pixel_rate_mhz = timing->pix_clk_100hz/10000.0;
- if (timing->timing_3d_format == TIMING_3D_FORMAT_HW_FRAME_PACKING)
- pipes[pipe_cnt].pipe.dest.pixel_rate_mhz *= 2;
- pipes[pipe_cnt].pipe.dest.otg_inst = res_ctx->pipe_ctx[i].stream_res.tg->inst;
- pipes[pipe_cnt].dout.dp_lanes = 4;
- pipes[pipe_cnt].pipe.dest.vtotal_min = res_ctx->pipe_ctx[i].stream->adjust.v_total_min;
- pipes[pipe_cnt].pipe.dest.vtotal_max = res_ctx->pipe_ctx[i].stream->adjust.v_total_max;
-
- switch (res_ctx->pipe_ctx[i].stream->signal) {
- case SIGNAL_TYPE_DISPLAY_PORT_MST:
- case SIGNAL_TYPE_DISPLAY_PORT:
- pipes[pipe_cnt].dout.output_type = dm_dp;
- break;
- case SIGNAL_TYPE_EDP:
- pipes[pipe_cnt].dout.output_type = dm_edp;
- break;
- case SIGNAL_TYPE_HDMI_TYPE_A:
- case SIGNAL_TYPE_DVI_SINGLE_LINK:
- case SIGNAL_TYPE_DVI_DUAL_LINK:
- pipes[pipe_cnt].dout.output_type = dm_hdmi;
- break;
- default:
- /* In case there is no signal, set dp with 4 lanes to allow max config */
- pipes[pipe_cnt].dout.output_type = dm_dp;
- pipes[pipe_cnt].dout.dp_lanes = 4;
- }
-
- switch (res_ctx->pipe_ctx[i].stream->timing.display_color_depth) {
- case COLOR_DEPTH_666:
- output_bpc = 6;
- break;
- case COLOR_DEPTH_888:
- output_bpc = 8;
- break;
- case COLOR_DEPTH_101010:
- output_bpc = 10;
- break;
- case COLOR_DEPTH_121212:
- output_bpc = 12;
- break;
- case COLOR_DEPTH_141414:
- output_bpc = 14;
- break;
- case COLOR_DEPTH_161616:
- output_bpc = 16;
- break;
-#ifdef CONFIG_DRM_AMD_DC_DCN2_0
- case COLOR_DEPTH_999:
- output_bpc = 9;
- break;
- case COLOR_DEPTH_111111:
- output_bpc = 11;
- break;
-#endif
- default:
- output_bpc = 8;
- break;
- }
-
-
- switch (res_ctx->pipe_ctx[i].stream->timing.pixel_encoding) {
- case PIXEL_ENCODING_RGB:
- case PIXEL_ENCODING_YCBCR444:
- pipes[pipe_cnt].dout.output_format = dm_444;
- pipes[pipe_cnt].dout.output_bpp = output_bpc * 3;
- break;
- case PIXEL_ENCODING_YCBCR420:
- pipes[pipe_cnt].dout.output_format = dm_420;
- pipes[pipe_cnt].dout.output_bpp = (output_bpc * 3) / 2;
- break;
- case PIXEL_ENCODING_YCBCR422:
- if (true) /* todo */
- pipes[pipe_cnt].dout.output_format = dm_s422;
- else
- pipes[pipe_cnt].dout.output_format = dm_n422;
- pipes[pipe_cnt].dout.output_bpp = output_bpc * 2;
- break;
- default:
- pipes[pipe_cnt].dout.output_format = dm_444;
- pipes[pipe_cnt].dout.output_bpp = output_bpc * 3;
- }
- pipes[pipe_cnt].pipe.src.hsplit_grp = res_ctx->pipe_ctx[i].pipe_idx;
- if (res_ctx->pipe_ctx[i].top_pipe && res_ctx->pipe_ctx[i].top_pipe->plane_state
- == res_ctx->pipe_ctx[i].plane_state)
- pipes[pipe_cnt].pipe.src.hsplit_grp = res_ctx->pipe_ctx[i].top_pipe->pipe_idx;
-
- /* todo: default max for now, until there is logic reflecting this in dc*/
- pipes[pipe_cnt].dout.output_bpc = 12;
- /*
- * Use max cursor settings for calculations to minimize
- * bw calculations due to cursor on/off
- */
- pipes[pipe_cnt].pipe.src.num_cursors = 2;
- pipes[pipe_cnt].pipe.src.cur0_src_width = 256;
- pipes[pipe_cnt].pipe.src.cur0_bpp = dm_cur_32bit;
- pipes[pipe_cnt].pipe.src.cur1_src_width = 256;
- pipes[pipe_cnt].pipe.src.cur1_bpp = dm_cur_32bit;
-
- if (!res_ctx->pipe_ctx[i].plane_state) {
- pipes[pipe_cnt].pipe.src.source_scan = dm_horz;
- pipes[pipe_cnt].pipe.src.sw_mode = dm_sw_linear;
- pipes[pipe_cnt].pipe.src.macro_tile_size = dm_64k_tile;
- pipes[pipe_cnt].pipe.src.viewport_width = timing->h_addressable;
- if (pipes[pipe_cnt].pipe.src.viewport_width > 1920)
- pipes[pipe_cnt].pipe.src.viewport_width = 1920;
- pipes[pipe_cnt].pipe.src.viewport_height = timing->v_addressable;
- if (pipes[pipe_cnt].pipe.src.viewport_height > 1080)
- pipes[pipe_cnt].pipe.src.viewport_height = 1080;
- pipes[pipe_cnt].pipe.src.data_pitch = ((pipes[pipe_cnt].pipe.src.viewport_width + 63) / 64) * 64; /* linear sw only */
- pipes[pipe_cnt].pipe.src.source_format = dm_444_32;
- pipes[pipe_cnt].pipe.dest.recout_width = pipes[pipe_cnt].pipe.src.viewport_width; /*vp_width/hratio*/
- pipes[pipe_cnt].pipe.dest.recout_height = pipes[pipe_cnt].pipe.src.viewport_height; /*vp_height/vratio*/
- pipes[pipe_cnt].pipe.dest.full_recout_width = pipes[pipe_cnt].pipe.dest.recout_width; /*when is_hsplit != 1*/
- pipes[pipe_cnt].pipe.dest.full_recout_height = pipes[pipe_cnt].pipe.dest.recout_height; /*when is_hsplit != 1*/
- pipes[pipe_cnt].pipe.scale_ratio_depth.lb_depth = dm_lb_16;
- pipes[pipe_cnt].pipe.scale_ratio_depth.hscl_ratio = 1.0;
- pipes[pipe_cnt].pipe.scale_ratio_depth.vscl_ratio = 1.0;
- pipes[pipe_cnt].pipe.scale_ratio_depth.scl_enable = 0; /*Lb only or Full scl*/
- pipes[pipe_cnt].pipe.scale_taps.htaps = 1;
- pipes[pipe_cnt].pipe.scale_taps.vtaps = 1;
- pipes[pipe_cnt].pipe.src.is_hsplit = 0;
- pipes[pipe_cnt].pipe.dest.odm_combine = 0;
- pipes[pipe_cnt].pipe.dest.vtotal_min = timing->v_total;
- pipes[pipe_cnt].pipe.dest.vtotal_max = timing->v_total;
- } else {
- struct dc_plane_state *pln = res_ctx->pipe_ctx[i].plane_state;
- struct scaler_data *scl = &res_ctx->pipe_ctx[i].plane_res.scl_data;
-
- pipes[pipe_cnt].pipe.src.immediate_flip = pln->flip_immediate;
- pipes[pipe_cnt].pipe.src.is_hsplit = (res_ctx->pipe_ctx[i].bottom_pipe
- && res_ctx->pipe_ctx[i].bottom_pipe->plane_state == pln)
- || (res_ctx->pipe_ctx[i].top_pipe
- && res_ctx->pipe_ctx[i].top_pipe->plane_state == pln);
- pipes[pipe_cnt].pipe.dest.odm_combine = (res_ctx->pipe_ctx[i].bottom_pipe
- && res_ctx->pipe_ctx[i].bottom_pipe->plane_state == pln
- && res_ctx->pipe_ctx[i].bottom_pipe->stream_res.opp
- != res_ctx->pipe_ctx[i].stream_res.opp)
- || (res_ctx->pipe_ctx[i].top_pipe
- && res_ctx->pipe_ctx[i].top_pipe->plane_state == pln
- && res_ctx->pipe_ctx[i].top_pipe->stream_res.opp
- != res_ctx->pipe_ctx[i].stream_res.opp);
- pipes[pipe_cnt].pipe.src.source_scan = pln->rotation == ROTATION_ANGLE_90
- || pln->rotation == ROTATION_ANGLE_270 ? dm_vert : dm_horz;
- pipes[pipe_cnt].pipe.src.viewport_y_y = scl->viewport.y;
- pipes[pipe_cnt].pipe.src.viewport_y_c = scl->viewport_c.y;
- pipes[pipe_cnt].pipe.src.viewport_width = scl->viewport.width;
- pipes[pipe_cnt].pipe.src.viewport_width_c = scl->viewport_c.width;
- pipes[pipe_cnt].pipe.src.viewport_height = scl->viewport.height;
- pipes[pipe_cnt].pipe.src.viewport_height_c = scl->viewport_c.height;
- if (pln->format >= SURFACE_PIXEL_FORMAT_VIDEO_BEGIN) {
- pipes[pipe_cnt].pipe.src.data_pitch = pln->plane_size.video.luma_pitch;
- pipes[pipe_cnt].pipe.src.data_pitch_c = pln->plane_size.video.chroma_pitch;
- pipes[pipe_cnt].pipe.src.meta_pitch = pln->dcc.video.meta_pitch_l;
- pipes[pipe_cnt].pipe.src.meta_pitch_c = pln->dcc.video.meta_pitch_c;
- } else {
- pipes[pipe_cnt].pipe.src.data_pitch = pln->plane_size.grph.surface_pitch;
- pipes[pipe_cnt].pipe.src.meta_pitch = pln->dcc.grph.meta_pitch;
- }
- pipes[pipe_cnt].pipe.src.dcc = pln->dcc.enable;
- pipes[pipe_cnt].pipe.dest.recout_width = scl->recout.width;
- pipes[pipe_cnt].pipe.dest.recout_height = scl->recout.height;
- pipes[pipe_cnt].pipe.dest.full_recout_width = scl->recout.width;
- pipes[pipe_cnt].pipe.dest.full_recout_height = scl->recout.height;
- if (res_ctx->pipe_ctx[i].bottom_pipe && res_ctx->pipe_ctx[i].bottom_pipe->plane_state == pln) {
- pipes[pipe_cnt].pipe.dest.full_recout_width +=
- res_ctx->pipe_ctx[i].bottom_pipe->plane_res.scl_data.recout.width;
- pipes[pipe_cnt].pipe.dest.full_recout_height +=
- res_ctx->pipe_ctx[i].bottom_pipe->plane_res.scl_data.recout.height;
- } else if (res_ctx->pipe_ctx[i].top_pipe && res_ctx->pipe_ctx[i].top_pipe->plane_state == pln) {
- pipes[pipe_cnt].pipe.dest.full_recout_width +=
- res_ctx->pipe_ctx[i].top_pipe->plane_res.scl_data.recout.width;
- pipes[pipe_cnt].pipe.dest.full_recout_height +=
- res_ctx->pipe_ctx[i].top_pipe->plane_res.scl_data.recout.height;
- }
-
- pipes[pipe_cnt].pipe.scale_ratio_depth.lb_depth = dm_lb_16;
- pipes[pipe_cnt].pipe.scale_ratio_depth.hscl_ratio = (double) scl->ratios.horz.value / (1ULL<<32);
- pipes[pipe_cnt].pipe.scale_ratio_depth.hscl_ratio_c = (double) scl->ratios.horz_c.value / (1ULL<<32);
- pipes[pipe_cnt].pipe.scale_ratio_depth.vscl_ratio = (double) scl->ratios.vert.value / (1ULL<<32);
- pipes[pipe_cnt].pipe.scale_ratio_depth.vscl_ratio_c = (double) scl->ratios.vert_c.value / (1ULL<<32);
- pipes[pipe_cnt].pipe.scale_ratio_depth.scl_enable =
- scl->ratios.vert.value != dc_fixpt_one.value
- || scl->ratios.horz.value != dc_fixpt_one.value
- || scl->ratios.vert_c.value != dc_fixpt_one.value
- || scl->ratios.horz_c.value != dc_fixpt_one.value /*Lb only or Full scl*/
- || dc->debug.always_scale; /*support always scale*/
- pipes[pipe_cnt].pipe.scale_taps.htaps = scl->taps.h_taps;
- pipes[pipe_cnt].pipe.scale_taps.htaps_c = scl->taps.h_taps_c;
- pipes[pipe_cnt].pipe.scale_taps.vtaps = scl->taps.v_taps;
- pipes[pipe_cnt].pipe.scale_taps.vtaps_c = scl->taps.v_taps_c;
-
- pipes[pipe_cnt].pipe.src.macro_tile_size =
- swizzle_mode_to_macro_tile_size(pln->tiling_info.gfx9.swizzle);
- swizzle_to_dml_params(pln->tiling_info.gfx9.swizzle,
- &pipes[pipe_cnt].pipe.src.sw_mode);
-
- switch (pln->format) {
- case SURFACE_PIXEL_FORMAT_VIDEO_420_YCbCr:
- case SURFACE_PIXEL_FORMAT_VIDEO_420_YCrCb:
- pipes[pipe_cnt].pipe.src.source_format = dm_420_8;
- break;
- case SURFACE_PIXEL_FORMAT_VIDEO_420_10bpc_YCbCr:
- case SURFACE_PIXEL_FORMAT_VIDEO_420_10bpc_YCrCb:
- pipes[pipe_cnt].pipe.src.source_format = dm_420_10;
- break;
- case SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616:
- case SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616F:
- case SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616F:
- pipes[pipe_cnt].pipe.src.source_format = dm_444_64;
- break;
- case SURFACE_PIXEL_FORMAT_GRPH_ARGB1555:
- case SURFACE_PIXEL_FORMAT_GRPH_RGB565:
- pipes[pipe_cnt].pipe.src.source_format = dm_444_16;
- break;
- case SURFACE_PIXEL_FORMAT_GRPH_PALETA_256_COLORS:
- pipes[pipe_cnt].pipe.src.source_format = dm_444_8;
- break;
- default:
- pipes[pipe_cnt].pipe.src.source_format = dm_444_32;
- break;
- }
- }
-
- pipe_cnt++;
- }
-
- /* populate writeback information */
- dc->res_pool->funcs->populate_dml_writeback_from_context(dc, res_ctx, pipes);
-
- return pipe_cnt;
+ ASSERT(primary_pipe->plane_state);
}
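
The pipe-population code removed above maps the stream's output color depth and pixel encoding onto DML's dout.output_bpp (three components for RGB/4:4:4, two for 4:2:2, one and a half for 4:2:0). A minimal standalone sketch of that arithmetic follows; the enum and function names are invented for illustration and are not part of the driver.

/* Illustrative only: mirrors the bpc -> bpp mapping in the removed
 * dcn20_populate_dml_pipes_from_context(); names are invented here. */
enum example_pixel_encoding { EX_RGB_OR_YCBCR444, EX_YCBCR422, EX_YCBCR420 };

static int example_output_bpp(int output_bpc, enum example_pixel_encoding enc)
{
	switch (enc) {
	case EX_YCBCR420:
		return (output_bpc * 3) / 2;	/* chroma subsampled 2x2 */
	case EX_YCBCR422:
		return output_bpc * 2;		/* chroma subsampled 2x1 */
	default:
		return output_bpc * 3;		/* RGB / 4:4:4 */
	}
}
/* e.g. example_output_bpp(10, EX_YCBCR420) == 15 */
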
unsigned int dcn20_calc_max_scaled_time(
@@ -1931,7 +1600,7 @@ void dcn20_set_mcif_arb_params(
{
enum mmhubbub_wbif_mode wbif_mode;
struct mcif_arb_params *wb_arb_params;
- int i, j, k, dwb_pipe;
+ int i, j, dwb_pipe;
/* Writeback MCIF_WB arbitration parameters */
dwb_pipe = 0;
@@ -1955,11 +1624,10 @@ void dcn20_set_mcif_arb_params(
} else
wbif_mode = PACKED_444;
- for (k = 0; k < sizeof(wb_arb_params->cli_watermark)/sizeof(wb_arb_params->cli_watermark[0]); k++) {
- wb_arb_params->cli_watermark[k] = get_wm_writeback_urgent(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
- wb_arb_params->pstate_watermark[k] = get_wm_writeback_dram_clock_change(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
- }
- wb_arb_params->time_per_pixel = 16.0 / context->res_ctx.pipe_ctx[i].stream->phy_pix_clk; /* 4 bit fraction, ms */
+ DC_FP_START();
+ dcn20_fpu_set_wb_arb_params(wb_arb_params, context, pipes, pipe_cnt, i);
+ DC_FP_END();
+
wb_arb_params->slice_lines = 32;
wb_arb_params->arbitration_slice = 2;
wb_arb_params->max_scaled_time = dcn20_calc_max_scaled_time(wb_arb_params->time_per_pixel,
@@ -1976,8 +1644,7 @@ void dcn20_set_mcif_arb_params(
}
}
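
The open-coded writeback arbitration math deleted here now lives in dcn20_fpu_set_wb_arb_params(), bracketed by DC_FP_START()/DC_FP_END() because it uses doubles. Judging from the removed lines, it fills every cli_watermark/pstate_watermark entry from DML's writeback urgent and DRAM-clock-change watermarks (microseconds scaled to nanoseconds) and derives time_per_pixel from the PHY pixel clock; a hedged restatement as a worked example:

/* Restating the deleted math (phy_pix_clk in kHz, per the old
 * "4 bit fraction, ms" comment; the number below is only an example):
 *   cli_watermark[k]    = wm_writeback_urgent_us * 1000          in ns
 *   pstate_watermark[k] = wm_writeback_dramclk_change_us * 1000  in ns
 *   time_per_pixel      = 16.0 / phy_pix_clk                     4-bit fraction, ms
 * e.g. phy_pix_clk = 594000  =>  time_per_pixel ~= 0.0000269
 */
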
-#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT
-static bool dcn20_validate_dsc(struct dc *dc, struct dc_state *new_ctx)
+bool dcn20_validate_dsc(struct dc *dc, struct dc_state *new_ctx)
{
int i;
@@ -1986,53 +1653,146 @@ static bool dcn20_validate_dsc(struct dc *dc, struct dc_state *new_ctx)
struct pipe_ctx *pipe_ctx = &new_ctx->res_ctx.pipe_ctx[i];
struct dc_stream_state *stream = pipe_ctx->stream;
struct dsc_config dsc_cfg;
+ struct pipe_ctx *odm_pipe;
+ int opp_cnt = 1;
+
+ for (odm_pipe = pipe_ctx->next_odm_pipe; odm_pipe; odm_pipe = odm_pipe->next_odm_pipe)
+ opp_cnt++;
/* Only need to validate top pipe */
- if (pipe_ctx->top_pipe || !stream || !stream->timing.flags.DSC)
+ if (pipe_ctx->top_pipe || pipe_ctx->prev_odm_pipe || !stream || !stream->timing.flags.DSC)
continue;
- dsc_cfg.pic_width = stream->timing.h_addressable + stream->timing.h_border_left
- + stream->timing.h_border_right;
+ dsc_cfg.pic_width = (stream->timing.h_addressable + stream->timing.h_border_left
+ + stream->timing.h_border_right) / opp_cnt;
dsc_cfg.pic_height = stream->timing.v_addressable + stream->timing.v_border_top
+ stream->timing.v_border_bottom;
- if (dc_res_get_odm_bottom_pipe(pipe_ctx))
- dsc_cfg.pic_width /= 2;
dsc_cfg.pixel_encoding = stream->timing.pixel_encoding;
dsc_cfg.color_depth = stream->timing.display_color_depth;
+ dsc_cfg.is_odm = pipe_ctx->next_odm_pipe ? true : false;
dsc_cfg.dc_dsc_cfg = stream->timing.dsc_cfg;
+ dsc_cfg.dc_dsc_cfg.num_slices_h /= opp_cnt;
if (!pipe_ctx->stream_res.dsc->funcs->dsc_validate_stream(pipe_ctx->stream_res.dsc, &dsc_cfg))
return false;
}
return true;
}
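
With ODM combine, each DSC instance only compresses the slice of the picture that its OPP drives, which is why the validation above divides both pic_width and num_slices_h by opp_cnt. A worked example, assuming 2-to-1 ODM combine and illustrative numbers:

/* Worked example (assumption: 2-to-1 ODM combine, illustrative numbers):
 *   h_addressable = 7680, num_slices_h = 8, opp_cnt = 2
 *     dsc_cfg.pic_width               = 7680 / 2 = 3840
 *     dsc_cfg.dc_dsc_cfg.num_slices_h = 8 / 2    = 4
 * so each DSC engine is validated against only half the picture,
 * with is_odm set because next_odm_pipe is non-NULL.
 */
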
-#endif
-bool dcn20_validate_bandwidth(struct dc *dc, struct dc_state *context,
- bool fast_validate)
+struct pipe_ctx *dcn20_find_secondary_pipe(struct dc *dc,
+ struct resource_context *res_ctx,
+ const struct resource_pool *pool,
+ const struct pipe_ctx *primary_pipe)
{
- bool out = false;
+ struct pipe_ctx *secondary_pipe = NULL;
- BW_VAL_TRACE_SETUP();
+ if (dc && primary_pipe) {
+ int j;
+ int preferred_pipe_idx = 0;
- int pipe_cnt, i, pipe_idx, vlevel, vlevel_unsplit;
- int pipe_split_from[MAX_PIPES];
- bool odm_capable = context->bw_ctx.dml.ip.odm_capable;
- bool force_split = false;
-#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT
- bool failed_non_odm_dsc = false;
-#endif
- int split_threshold = dc->res_pool->pipe_count / 2;
- bool avoid_split = dc->debug.pipe_split_policy != MPC_SPLIT_DYNAMIC;
- display_e2e_pipe_params_st *pipes = kzalloc(dc->res_pool->pipe_count * sizeof(display_e2e_pipe_params_st), GFP_KERNEL);
- DC_LOGGER_INIT(dc->ctx->logger);
+ /* first check the prev dc state:
+ * if this primary pipe has a bottom pipe in prev. state
+ * and if the bottom pipe is still available (which it should be),
+ * pick that pipe as secondary
+ * Same logic applies for ODM pipes
+ */
+ if (dc->current_state->res_ctx.pipe_ctx[primary_pipe->pipe_idx].next_odm_pipe) {
+ preferred_pipe_idx = dc->current_state->res_ctx.pipe_ctx[primary_pipe->pipe_idx].next_odm_pipe->pipe_idx;
+ if (res_ctx->pipe_ctx[preferred_pipe_idx].stream == NULL) {
+ secondary_pipe = &res_ctx->pipe_ctx[preferred_pipe_idx];
+ secondary_pipe->pipe_idx = preferred_pipe_idx;
+ }
+ }
+ if (secondary_pipe == NULL &&
+ dc->current_state->res_ctx.pipe_ctx[primary_pipe->pipe_idx].bottom_pipe) {
+ preferred_pipe_idx = dc->current_state->res_ctx.pipe_ctx[primary_pipe->pipe_idx].bottom_pipe->pipe_idx;
+ if (res_ctx->pipe_ctx[preferred_pipe_idx].stream == NULL) {
+ secondary_pipe = &res_ctx->pipe_ctx[preferred_pipe_idx];
+ secondary_pipe->pipe_idx = preferred_pipe_idx;
+ }
+ }
- BW_VAL_TRACE_COUNT();
+ /*
+	 * if this primary pipe does not have a bottom pipe in the prev. state,
+	 * start backwards and find a pipe that was not a bottom pipe in the
+	 * prev. dc state. This way we make sure we keep the same assignment as
+	 * the last state and will not have to reprogram every pipe
+ */
+ if (secondary_pipe == NULL) {
+ for (j = dc->res_pool->pipe_count - 1; j >= 0; j--) {
+ if (dc->current_state->res_ctx.pipe_ctx[j].top_pipe == NULL
+ && dc->current_state->res_ctx.pipe_ctx[j].prev_odm_pipe == NULL) {
+ preferred_pipe_idx = j;
+
+ if (res_ctx->pipe_ctx[preferred_pipe_idx].stream == NULL) {
+ secondary_pipe = &res_ctx->pipe_ctx[preferred_pipe_idx];
+ secondary_pipe->pipe_idx = preferred_pipe_idx;
+ break;
+ }
+ }
+ }
+ }
+ /*
+	 * We should never hit this assert unless assignments are shuffled around;
+	 * if this happens we will probably hit a vsync TDR
+ */
+ ASSERT(secondary_pipe);
+ /*
+ * search backwards for the second pipe to keep pipe
+ * assignment more consistent
+ */
+ if (secondary_pipe == NULL) {
+ for (j = dc->res_pool->pipe_count - 1; j >= 0; j--) {
+ preferred_pipe_idx = j;
+
+ if (res_ctx->pipe_ctx[preferred_pipe_idx].stream == NULL) {
+ secondary_pipe = &res_ctx->pipe_ctx[preferred_pipe_idx];
+ secondary_pipe->pipe_idx = preferred_pipe_idx;
+ break;
+ }
+ }
+ }
+ }
- ASSERT(pipes);
- if (!pipes)
- return false;
+ return secondary_pipe;
+}
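
dcn20_find_secondary_pipe() prefers, in order, the pipe that was already this pipe's ODM partner in the current state, then its MPC bottom pipe, then (scanning backwards) a pipe that was free in the last state, and finally any free pipe. A usage sketch modeled on the callers later in this patch, assuming dc, context and pipe are in scope as in dcn20_fast_validate_bw() (simplified, error handling elided):

struct pipe_ctx *sec_pipe;

sec_pipe = dcn20_find_secondary_pipe(dc, &context->res_ctx, dc->res_pool, pipe);
if (sec_pipe) {
	/* wired up as either an ODM or an MPC partner, e.g.:
	 * dcn20_split_stream_for_odm(dc, &context->res_ctx, pipe, sec_pipe);
	 * or
	 * dcn20_split_stream_for_mpc(&context->res_ctx, dc->res_pool, pipe, sec_pipe);
	 */
}
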
+
+void dcn20_merge_pipes_for_validate(
+ struct dc *dc,
+ struct dc_state *context)
+{
+ int i;
+
+ /* merge previously split odm pipes since mode support needs to make the decision */
+ for (i = 0; i < dc->res_pool->pipe_count; i++) {
+ struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
+ struct pipe_ctx *odm_pipe = pipe->next_odm_pipe;
+ if (pipe->prev_odm_pipe)
+ continue;
+
+ pipe->next_odm_pipe = NULL;
+ while (odm_pipe) {
+ struct pipe_ctx *next_odm_pipe = odm_pipe->next_odm_pipe;
+
+ odm_pipe->plane_state = NULL;
+ odm_pipe->stream = NULL;
+ odm_pipe->top_pipe = NULL;
+ odm_pipe->bottom_pipe = NULL;
+ odm_pipe->prev_odm_pipe = NULL;
+ odm_pipe->next_odm_pipe = NULL;
+ if (odm_pipe->stream_res.dsc)
+ dcn20_release_dsc(&context->res_ctx, dc->res_pool, &odm_pipe->stream_res.dsc);
+ /* Clear plane_res and stream_res */
+ memset(&odm_pipe->plane_res, 0, sizeof(odm_pipe->plane_res));
+ memset(&odm_pipe->stream_res, 0, sizeof(odm_pipe->stream_res));
+ odm_pipe = next_odm_pipe;
+ }
+ if (pipe->plane_state)
+ resource_build_scaling_params(pipe);
+ }
+
+ /* merge previously mpc split pipes since mode support needs to make the decision */
for (i = 0; i < dc->res_pool->pipe_count; i++) {
struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
struct pipe_ctx *hsplit_pipe = pipe->bottom_pipe;
@@ -2040,7 +1800,6 @@ bool dcn20_validate_bandwidth(struct dc *dc, struct dc_state *context,
if (!hsplit_pipe || hsplit_pipe->plane_state != pipe->plane_state)
continue;
- /* merge previously split pipe since mode support needs to make the decision */
pipe->bottom_pipe = hsplit_pipe->bottom_pipe;
if (hsplit_pipe->bottom_pipe)
hsplit_pipe->bottom_pipe->top_pipe = pipe;
@@ -2048,129 +1807,259 @@ bool dcn20_validate_bandwidth(struct dc *dc, struct dc_state *context,
hsplit_pipe->stream = NULL;
hsplit_pipe->top_pipe = NULL;
hsplit_pipe->bottom_pipe = NULL;
-#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT
- if (hsplit_pipe->stream_res.dsc && hsplit_pipe->stream_res.dsc != pipe->stream_res.dsc)
- release_dsc(&context->res_ctx, dc->res_pool, &hsplit_pipe->stream_res.dsc);
-#endif
+
/* Clear plane_res and stream_res */
memset(&hsplit_pipe->plane_res, 0, sizeof(hsplit_pipe->plane_res));
memset(&hsplit_pipe->stream_res, 0, sizeof(hsplit_pipe->stream_res));
if (pipe->plane_state)
resource_build_scaling_params(pipe);
}
+}
- if (dc->res_pool->funcs->populate_dml_pipes)
- pipe_cnt = dc->res_pool->funcs->populate_dml_pipes(dc,
- &context->res_ctx, pipes);
- else
- pipe_cnt = dcn20_populate_dml_pipes_from_context(dc,
- &context->res_ctx, pipes);
-
- if (!pipe_cnt) {
- BW_VAL_TRACE_SKIP(pass);
- out = true;
- goto validate_out;
- }
+int dcn20_validate_apply_pipe_split_flags(
+ struct dc *dc,
+ struct dc_state *context,
+ int vlevel,
+ int *split,
+ bool *merge)
+{
+ int i, pipe_idx, vlevel_split;
+ int plane_count = 0;
+ bool force_split = false;
+ bool avoid_split = dc->debug.pipe_split_policy == MPC_SPLIT_AVOID;
+ struct vba_vars_st *v = &context->bw_ctx.dml.vba;
+ int max_mpc_comb = v->maxMpcComb;
+
+ if (context->stream_count > 1) {
+ if (dc->debug.pipe_split_policy == MPC_SPLIT_AVOID_MULT_DISP)
+ avoid_split = true;
+ } else if (dc->debug.force_single_disp_pipe_split)
+ force_split = true;
- context->bw_ctx.dml.ip.odm_capable = 0;
+ for (i = 0; i < dc->res_pool->pipe_count; i++) {
+ struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
- vlevel = dml_get_voltage_level(&context->bw_ctx.dml, pipes, pipe_cnt);
+ /**
+ * Workaround for avoiding pipe-split in cases where we'd split
+ * planes that are too small, resulting in splits that aren't
+ * valid for the scaler.
+ */
+ if (pipe->plane_state &&
+ (pipe->plane_state->dst_rect.width <= 16 ||
+ pipe->plane_state->dst_rect.height <= 16 ||
+ pipe->plane_state->src_rect.width <= 16 ||
+ pipe->plane_state->src_rect.height <= 16))
+ avoid_split = true;
+
+ /* TODO: fix dc bugs and remove this split threshold thing */
+ if (pipe->stream && !pipe->prev_odm_pipe &&
+ (!pipe->top_pipe || pipe->top_pipe->plane_state != pipe->plane_state))
+ ++plane_count;
+ }
+ if (plane_count > dc->res_pool->pipe_count / 2)
+ avoid_split = true;
- context->bw_ctx.dml.ip.odm_capable = odm_capable;
+ /* W/A: Mode timing with borders may not work well with pipe split, avoid for this corner case */
+ for (i = 0; i < dc->res_pool->pipe_count; i++) {
+ struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
+ struct dc_crtc_timing timing;
-#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT
- /* 1 dsc per stream dsc validation */
- if (vlevel <= context->bw_ctx.dml.soc.num_states)
- if (!dcn20_validate_dsc(dc, context)) {
- failed_non_odm_dsc = true;
- vlevel = context->bw_ctx.dml.soc.num_states + 1;
+ if (!pipe->stream)
+ continue;
+ else {
+ timing = pipe->stream->timing;
+ if (timing.h_border_left + timing.h_border_right
+ + timing.v_border_top + timing.v_border_bottom > 0) {
+ avoid_split = true;
+ break;
+ }
}
-#endif
-
- if (vlevel > context->bw_ctx.dml.soc.num_states && odm_capable)
- vlevel = dml_get_voltage_level(&context->bw_ctx.dml, pipes, pipe_cnt);
-
- if (vlevel > context->bw_ctx.dml.soc.num_states)
- goto validate_fail;
+ }
- if ((context->stream_count > split_threshold && dc->current_state->stream_count <= split_threshold)
- || (context->stream_count <= split_threshold && dc->current_state->stream_count > split_threshold))
- context->commit_hints.full_update_needed = true;
+	/* The avoid-split loop looks for the lowest voltage level that leaves as many pipes unsplit as possible */
+ if (avoid_split) {
+ for (i = 0, pipe_idx = 0; i < dc->res_pool->pipe_count; i++) {
+ if (!context->res_ctx.pipe_ctx[i].stream)
+ continue;
- /*initialize pipe_just_split_from to invalid idx*/
- for (i = 0; i < MAX_PIPES; i++)
- pipe_split_from[i] = -1;
+ for (vlevel_split = vlevel; vlevel <= context->bw_ctx.dml.soc.num_states; vlevel++)
+ if (v->NoOfDPP[vlevel][0][pipe_idx] == 1 &&
+ v->ModeSupport[vlevel][0])
+ break;
+			/* No voltage level lets this pipe stay unsplit */
+ if (vlevel > context->bw_ctx.dml.soc.num_states)
+ vlevel = vlevel_split;
+ else
+ max_mpc_comb = 0;
+ pipe_idx++;
+ }
+ v->maxMpcComb = max_mpc_comb;
+ }
- /* Single display only conditionals get set here */
- for (i = 0; i < dc->res_pool->pipe_count; i++) {
+ /* Split loop sets which pipe should be split based on dml outputs and dc flags */
+ for (i = 0, pipe_idx = 0; i < dc->res_pool->pipe_count; i++) {
struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
- bool exit_loop = false;
+ int pipe_plane = v->pipe_plane[pipe_idx];
+ bool split4mpc = context->stream_count == 1 && plane_count == 1
+ && dc->config.enable_4to1MPC && dc->res_pool->pipe_count >= 4;
- if (!pipe->stream || pipe->top_pipe)
+ if (!context->res_ctx.pipe_ctx[i].stream)
continue;
- if (dc->debug.force_single_disp_pipe_split) {
- if (!force_split)
- force_split = true;
- else {
- force_split = false;
- exit_loop = true;
- }
+ if (split4mpc || v->NoOfDPP[vlevel][max_mpc_comb][pipe_plane] == 4)
+ split[i] = 4;
+ else if (force_split || v->NoOfDPP[vlevel][max_mpc_comb][pipe_plane] == 2)
+ split[i] = 2;
+
+ if ((pipe->stream->view_format ==
+ VIEW_3D_FORMAT_SIDE_BY_SIDE ||
+ pipe->stream->view_format ==
+ VIEW_3D_FORMAT_TOP_AND_BOTTOM) &&
+ (pipe->stream->timing.timing_3d_format ==
+ TIMING_3D_FORMAT_TOP_AND_BOTTOM ||
+ pipe->stream->timing.timing_3d_format ==
+ TIMING_3D_FORMAT_SIDE_BY_SIDE))
+ split[i] = 2;
+ if (dc->debug.force_odm_combine & (1 << pipe->stream_res.tg->inst)) {
+ split[i] = 2;
+ v->ODMCombineEnablePerState[vlevel][pipe_plane] = dm_odm_combine_mode_2to1;
}
- if (dc->debug.pipe_split_policy == MPC_SPLIT_AVOID_MULT_DISP) {
- if (avoid_split)
- avoid_split = false;
- else {
- avoid_split = true;
- exit_loop = true;
+ if (dc->debug.force_odm_combine_4to1 & (1 << pipe->stream_res.tg->inst)) {
+ split[i] = 4;
+ v->ODMCombineEnablePerState[vlevel][pipe_plane] = dm_odm_combine_mode_4to1;
+ }
+		/* 420 format workaround */
+ if (pipe->stream->timing.h_addressable > 7680 &&
+ pipe->stream->timing.pixel_encoding == PIXEL_ENCODING_YCBCR420) {
+ split[i] = 4;
+ }
+ v->ODMCombineEnabled[pipe_plane] =
+ v->ODMCombineEnablePerState[vlevel][pipe_plane];
+
+ if (v->ODMCombineEnabled[pipe_plane] == dm_odm_combine_mode_disabled) {
+ if (get_num_mpc_splits(pipe) == 1) {
+				/* Need an MPC split but the pipe is already 2-way split */
+ if (split[i] == 4)
+ split[i] = 2; /* 2 -> 4 MPC */
+ else if (split[i] == 2)
+ split[i] = 0; /* 2 -> 2 MPC */
+ else if (pipe->top_pipe && pipe->top_pipe->plane_state == pipe->plane_state)
+ merge[i] = true; /* 2 -> 1 MPC */
+ } else if (get_num_mpc_splits(pipe) == 3) {
+				/* Need an MPC split but the pipe is already 4-way split */
+ if (split[i] == 2 && ((pipe->top_pipe && !pipe->top_pipe->top_pipe)
+ || !pipe->bottom_pipe)) {
+ merge[i] = true; /* 4 -> 2 MPC */
+ } else if (split[i] == 0 && pipe->top_pipe &&
+ pipe->top_pipe->plane_state == pipe->plane_state)
+ merge[i] = true; /* 4 -> 1 MPC */
+ split[i] = 0;
+ } else if (get_num_odm_splits(pipe)) {
+ /* ODM -> MPC transition */
+ if (pipe->prev_odm_pipe) {
+ split[i] = 0;
+ merge[i] = true;
+ }
+ }
+ } else {
+ if (get_num_odm_splits(pipe) == 1) {
+				/* Need an ODM split but the pipe is already 2-way split */
+ if (split[i] == 4)
+ split[i] = 2; /* 2 -> 4 ODM */
+ else if (split[i] == 2)
+ split[i] = 0; /* 2 -> 2 ODM */
+ else if (pipe->prev_odm_pipe) {
+ ASSERT(0); /* NOT expected yet */
+ merge[i] = true; /* exit ODM */
+ }
+ } else if (get_num_odm_splits(pipe) == 3) {
+				/* Need an ODM split but the pipe is already 4-way split */
+ if (split[i] == 2 && ((pipe->prev_odm_pipe && !pipe->prev_odm_pipe->prev_odm_pipe)
+ || !pipe->next_odm_pipe)) {
+ merge[i] = true; /* 4 -> 2 ODM */
+ } else if (split[i] == 0 && pipe->prev_odm_pipe) {
+ ASSERT(0); /* NOT expected yet */
+ merge[i] = true; /* exit ODM */
+ }
+ split[i] = 0;
+ } else if (get_num_mpc_splits(pipe)) {
+ /* MPC -> ODM transition */
+ ASSERT(0); /* NOT expected yet */
+ if (pipe->top_pipe && pipe->top_pipe->plane_state == pipe->plane_state) {
+ split[i] = 0;
+ merge[i] = true;
+ }
}
}
- if (exit_loop)
- break;
+
+ /* Adjust dppclk when split is forced, do not bother with dispclk */
+ if (split[i] != 0 && v->NoOfDPP[vlevel][max_mpc_comb][pipe_idx] == 1) {
+ DC_FP_START();
+ dcn20_fpu_adjust_dppclk(v, vlevel, max_mpc_comb, pipe_idx, false);
+ DC_FP_END();
+ }
+ pipe_idx++;
}
- if (context->stream_count > split_threshold)
- avoid_split = true;
+ return vlevel;
+}
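
dcn20_validate_apply_pipe_split_flags() returns the (possibly lowered) voltage level and reports its decision per pipe: split[i] is 0, 2 or 4 for no / 2-way / 4-way split, and merge[i] is set when an existing MPC or ODM split has to be undone. A sketch of how a caller consumes these outputs, modeled on dcn20_fast_validate_bw() below (simplified; the surrounding variables are assumed to exist as in that function):

int split[MAX_PIPES] = { 0 };
bool merge[MAX_PIPES] = { false };
int i, vlevel;

vlevel = dml_get_voltage_level(&context->bw_ctx.dml, pipes, pipe_cnt);
vlevel = dcn20_validate_apply_pipe_split_flags(dc, context, vlevel, split, merge);
for (i = 0; i < dc->res_pool->pipe_count; i++) {
	if (split[i] == 4)
		;	/* acquire two more pipes and go 4-way (ODM or MPC) */
	else if (split[i] == 2)
		;	/* acquire one secondary pipe and split 2-way */
	else if (merge[i])
		;	/* collapse a split that is no longer wanted */
}
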
- vlevel_unsplit = vlevel;
- for (i = 0, pipe_idx = 0; i < dc->res_pool->pipe_count; i++) {
- if (!context->res_ctx.pipe_ctx[i].stream)
- continue;
- for (; vlevel_unsplit <= context->bw_ctx.dml.soc.num_states; vlevel_unsplit++)
- if (context->bw_ctx.dml.vba.NoOfDPP[vlevel_unsplit][0][pipe_idx] == 1)
- break;
- pipe_idx++;
+bool dcn20_fast_validate_bw(
+ struct dc *dc,
+ struct dc_state *context,
+ display_e2e_pipe_params_st *pipes,
+ int *pipe_cnt_out,
+ int *pipe_split_from,
+ int *vlevel_out,
+ bool fast_validate)
+{
+ bool out = false;
+ int split[MAX_PIPES] = { 0 };
+ int pipe_cnt, i, pipe_idx, vlevel;
+
+ ASSERT(pipes);
+ if (!pipes)
+ return false;
+
+ dcn20_merge_pipes_for_validate(dc, context);
+
+ DC_FP_START();
+ pipe_cnt = dc->res_pool->funcs->populate_dml_pipes(dc, context, pipes, fast_validate);
+ DC_FP_END();
+
+ *pipe_cnt_out = pipe_cnt;
+
+ if (!pipe_cnt) {
+ out = true;
+ goto validate_out;
}
+ vlevel = dml_get_voltage_level(&context->bw_ctx.dml, pipes, pipe_cnt);
+
+ if (vlevel > context->bw_ctx.dml.soc.num_states)
+ goto validate_fail;
+
+ vlevel = dcn20_validate_apply_pipe_split_flags(dc, context, vlevel, split, NULL);
+
+	/* initialize pipe_split_from to invalid idx */
+ for (i = 0; i < MAX_PIPES; i++)
+ pipe_split_from[i] = -1;
+
for (i = 0, pipe_idx = -1; i < dc->res_pool->pipe_count; i++) {
struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
struct pipe_ctx *hsplit_pipe = pipe->bottom_pipe;
- bool need_split = true;
- bool need_split3d;
if (!pipe->stream || pipe_split_from[i] >= 0)
continue;
pipe_idx++;
- if (dc->debug.force_odm_combine & (1 << pipe->stream_res.tg->inst)) {
- force_split = true;
- context->bw_ctx.dml.vba.ODMCombineEnabled[pipe_idx] = true;
- context->bw_ctx.dml.vba.ODMCombineEnablePerState[vlevel][pipe_idx] = true;
- }
- if (force_split && context->bw_ctx.dml.vba.NoOfDPP[vlevel][context->bw_ctx.dml.vba.maxMpcComb][pipe_idx] == 1)
- context->bw_ctx.dml.vba.RequiredDPPCLK[vlevel][context->bw_ctx.dml.vba.maxMpcComb][pipe_idx] /= 2;
- if (dc->config.forced_clocks == true) {
- context->bw_ctx.dml.vba.RequiredDPPCLK[vlevel][context->bw_ctx.dml.vba.maxMpcComb][pipe_idx] =
- context->bw_ctx.dml.soc.clock_limits[0].dppclk_mhz;
- }
if (!pipe->top_pipe && !pipe->plane_state && context->bw_ctx.dml.vba.ODMCombineEnabled[pipe_idx]) {
- hsplit_pipe = find_idle_secondary_pipe(&context->res_ctx, dc->res_pool, pipe);
+ hsplit_pipe = dcn20_find_secondary_pipe(dc, &context->res_ctx, dc->res_pool, pipe);
ASSERT(hsplit_pipe);
- if (!dcn20_split_stream_for_combine(
- &context->res_ctx, dc->res_pool,
- pipe, hsplit_pipe,
- true))
+ if (!dcn20_split_stream_for_odm(
+ dc, &context->res_ctx,
+ pipe, hsplit_pipe))
goto validate_fail;
pipe_split_from[hsplit_pipe->pipe_idx] = pipe_idx;
dcn20_build_mapped_resource(dc, context, pipe->stream);
@@ -2182,40 +2071,35 @@ bool dcn20_validate_bandwidth(struct dc *dc, struct dc_state *context,
if (pipe->top_pipe && pipe->plane_state == pipe->top_pipe->plane_state)
continue;
- need_split3d = ((pipe->stream->view_format ==
- VIEW_3D_FORMAT_SIDE_BY_SIDE ||
- pipe->stream->view_format ==
- VIEW_3D_FORMAT_TOP_AND_BOTTOM) &&
- (pipe->stream->timing.timing_3d_format ==
- TIMING_3D_FORMAT_TOP_AND_BOTTOM ||
- pipe->stream->timing.timing_3d_format ==
- TIMING_3D_FORMAT_SIDE_BY_SIDE));
-
- if (avoid_split && vlevel_unsplit <= context->bw_ctx.dml.soc.num_states && !force_split && !need_split3d) {
- need_split = false;
- vlevel = vlevel_unsplit;
- context->bw_ctx.dml.vba.maxMpcComb = 0;
- } else
- need_split = context->bw_ctx.dml.vba.NoOfDPP[vlevel][context->bw_ctx.dml.vba.maxMpcComb][pipe_idx] == 2;
-
/* We do not support mpo + odm at the moment */
if (hsplit_pipe && hsplit_pipe->plane_state != pipe->plane_state
&& context->bw_ctx.dml.vba.ODMCombineEnabled[pipe_idx])
goto validate_fail;
- if (need_split3d || need_split || force_split) {
+ if (split[i] == 2) {
if (!hsplit_pipe || hsplit_pipe->plane_state != pipe->plane_state) {
/* pipe not split previously needs split */
- hsplit_pipe = find_idle_secondary_pipe(&context->res_ctx, dc->res_pool, pipe);
- ASSERT(hsplit_pipe || force_split);
- if (!hsplit_pipe)
+ hsplit_pipe = dcn20_find_secondary_pipe(dc, &context->res_ctx, dc->res_pool, pipe);
+ ASSERT(hsplit_pipe);
+ if (!hsplit_pipe) {
+ DC_FP_START();
+ dcn20_fpu_adjust_dppclk(&context->bw_ctx.dml.vba, vlevel, context->bw_ctx.dml.vba.maxMpcComb, pipe_idx, true);
+ DC_FP_END();
continue;
-
- if (!dcn20_split_stream_for_combine(
- &context->res_ctx, dc->res_pool,
- pipe, hsplit_pipe,
- context->bw_ctx.dml.vba.ODMCombineEnabled[pipe_idx]))
- goto validate_fail;
+ }
+ if (context->bw_ctx.dml.vba.ODMCombineEnabled[pipe_idx]) {
+ if (!dcn20_split_stream_for_odm(
+ dc, &context->res_ctx,
+ pipe, hsplit_pipe))
+ goto validate_fail;
+ dcn20_build_mapped_resource(dc, context, pipe->stream);
+ } else {
+ dcn20_split_stream_for_mpc(
+ &context->res_ctx, dc->res_pool,
+ pipe, hsplit_pipe);
+ resource_build_scaling_params(pipe);
+ resource_build_scaling_params(hsplit_pipe);
+ }
pipe_split_from[hsplit_pipe->pipe_idx] = pipe_idx;
}
} else if (hsplit_pipe && hsplit_pipe->plane_state == pipe->plane_state) {
@@ -2223,185 +2107,35 @@ bool dcn20_validate_bandwidth(struct dc *dc, struct dc_state *context,
ASSERT(0);
}
}
-#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT
/* Actual dsc count per stream dsc validation*/
- if (failed_non_odm_dsc && !dcn20_validate_dsc(dc, context)) {
+ if (!dcn20_validate_dsc(dc, context)) {
context->bw_ctx.dml.vba.ValidationStatus[context->bw_ctx.dml.vba.soc.num_states] =
DML_FAIL_DSC_VALIDATION_FAILURE;
goto validate_fail;
}
-#endif
-
- BW_VAL_TRACE_END_VOLTAGE_LEVEL();
-
- if (fast_validate) {
- BW_VAL_TRACE_SKIP(fast);
- out = true;
- goto validate_out;
- }
-
- for (i = 0, pipe_idx = 0, pipe_cnt = 0; i < dc->res_pool->pipe_count; i++) {
- if (!context->res_ctx.pipe_ctx[i].stream)
- continue;
-
- pipes[pipe_cnt].clks_cfg.refclk_mhz = dc->res_pool->ref_clocks.dchub_ref_clock_inKhz / 1000.0;
- pipes[pipe_cnt].clks_cfg.dispclk_mhz = context->bw_ctx.dml.vba.RequiredDISPCLK[vlevel][context->bw_ctx.dml.vba.maxMpcComb];
-
- if (pipe_split_from[i] < 0) {
- pipes[pipe_cnt].clks_cfg.dppclk_mhz =
- context->bw_ctx.dml.vba.RequiredDPPCLK[vlevel][context->bw_ctx.dml.vba.maxMpcComb][pipe_idx];
- if (context->bw_ctx.dml.vba.BlendingAndTiming[pipe_idx] == pipe_idx)
- pipes[pipe_cnt].pipe.dest.odm_combine =
- context->bw_ctx.dml.vba.ODMCombineEnablePerState[vlevel][pipe_idx];
- else
- pipes[pipe_cnt].pipe.dest.odm_combine = 0;
- pipe_idx++;
- } else {
- pipes[pipe_cnt].clks_cfg.dppclk_mhz =
- context->bw_ctx.dml.vba.RequiredDPPCLK[vlevel][context->bw_ctx.dml.vba.maxMpcComb][pipe_split_from[i]];
- if (context->bw_ctx.dml.vba.BlendingAndTiming[pipe_split_from[i]] == pipe_split_from[i])
- pipes[pipe_cnt].pipe.dest.odm_combine =
- context->bw_ctx.dml.vba.ODMCombineEnablePerState[vlevel][pipe_split_from[i]];
- else
- pipes[pipe_cnt].pipe.dest.odm_combine = 0;
- }
- if (dc->config.forced_clocks) {
- pipes[pipe_cnt].clks_cfg.dispclk_mhz = context->bw_ctx.dml.soc.clock_limits[0].dispclk_mhz;
- pipes[pipe_cnt].clks_cfg.dppclk_mhz = context->bw_ctx.dml.soc.clock_limits[0].dppclk_mhz;
- }
- pipe_cnt++;
- }
-
- if (pipe_cnt != pipe_idx) {
- if (dc->res_pool->funcs->populate_dml_pipes)
- pipe_cnt = dc->res_pool->funcs->populate_dml_pipes(dc,
- &context->res_ctx, pipes);
- else
- pipe_cnt = dcn20_populate_dml_pipes_from_context(dc,
- &context->res_ctx, pipes);
- }
-
- pipes[0].clks_cfg.voltage = vlevel;
- pipes[0].clks_cfg.dcfclk_mhz = context->bw_ctx.dml.soc.clock_limits[vlevel].dcfclk_mhz;
- pipes[0].clks_cfg.socclk_mhz = context->bw_ctx.dml.soc.clock_limits[vlevel].socclk_mhz;
-
- /* only pipe 0 is read for voltage and dcf/soc clocks */
- if (vlevel < 1) {
- pipes[0].clks_cfg.voltage = 1;
- pipes[0].clks_cfg.dcfclk_mhz = context->bw_ctx.dml.soc.clock_limits[1].dcfclk_mhz;
- pipes[0].clks_cfg.socclk_mhz = context->bw_ctx.dml.soc.clock_limits[1].socclk_mhz;
- }
- context->bw_ctx.bw.dcn.watermarks.b.urgent_ns = get_wm_urgent(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
- context->bw_ctx.bw.dcn.watermarks.b.cstate_pstate.cstate_enter_plus_exit_ns = get_wm_stutter_enter_exit(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
- context->bw_ctx.bw.dcn.watermarks.b.cstate_pstate.cstate_exit_ns = get_wm_stutter_exit(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
- context->bw_ctx.bw.dcn.watermarks.b.cstate_pstate.pstate_change_ns = get_wm_dram_clock_change(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
- context->bw_ctx.bw.dcn.watermarks.b.pte_meta_urgent_ns = get_wm_memory_trip(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
-
- if (vlevel < 2) {
- pipes[0].clks_cfg.voltage = 2;
- pipes[0].clks_cfg.dcfclk_mhz = context->bw_ctx.dml.soc.clock_limits[2].dcfclk_mhz;
- pipes[0].clks_cfg.socclk_mhz = context->bw_ctx.dml.soc.clock_limits[2].socclk_mhz;
- }
- context->bw_ctx.bw.dcn.watermarks.c.urgent_ns = get_wm_urgent(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
- context->bw_ctx.bw.dcn.watermarks.c.cstate_pstate.cstate_enter_plus_exit_ns = get_wm_stutter_enter_exit(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
- context->bw_ctx.bw.dcn.watermarks.c.cstate_pstate.cstate_exit_ns = get_wm_stutter_exit(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
- context->bw_ctx.bw.dcn.watermarks.c.cstate_pstate.pstate_change_ns = get_wm_dram_clock_change(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
- context->bw_ctx.bw.dcn.watermarks.c.pte_meta_urgent_ns = get_wm_memory_trip(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
-
- if (vlevel < 3) {
- pipes[0].clks_cfg.voltage = 3;
- pipes[0].clks_cfg.dcfclk_mhz = context->bw_ctx.dml.soc.clock_limits[2].dcfclk_mhz;
- pipes[0].clks_cfg.socclk_mhz = context->bw_ctx.dml.soc.clock_limits[2].socclk_mhz;
- }
- context->bw_ctx.bw.dcn.watermarks.d.urgent_ns = get_wm_urgent(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
- context->bw_ctx.bw.dcn.watermarks.d.cstate_pstate.cstate_enter_plus_exit_ns = get_wm_stutter_enter_exit(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
- context->bw_ctx.bw.dcn.watermarks.d.cstate_pstate.cstate_exit_ns = get_wm_stutter_exit(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
- context->bw_ctx.bw.dcn.watermarks.d.cstate_pstate.pstate_change_ns = get_wm_dram_clock_change(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
- context->bw_ctx.bw.dcn.watermarks.d.pte_meta_urgent_ns = get_wm_memory_trip(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
-
- pipes[0].clks_cfg.voltage = vlevel;
- pipes[0].clks_cfg.dcfclk_mhz = context->bw_ctx.dml.soc.clock_limits[vlevel].dcfclk_mhz;
- pipes[0].clks_cfg.socclk_mhz = context->bw_ctx.dml.soc.clock_limits[vlevel].socclk_mhz;
- context->bw_ctx.bw.dcn.watermarks.a.urgent_ns = get_wm_urgent(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
- context->bw_ctx.bw.dcn.watermarks.a.cstate_pstate.cstate_enter_plus_exit_ns = get_wm_stutter_enter_exit(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
- context->bw_ctx.bw.dcn.watermarks.a.cstate_pstate.cstate_exit_ns = get_wm_stutter_exit(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
- context->bw_ctx.bw.dcn.watermarks.a.cstate_pstate.pstate_change_ns = get_wm_dram_clock_change(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
- context->bw_ctx.bw.dcn.watermarks.a.pte_meta_urgent_ns = get_wm_memory_trip(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
- /* Writeback MCIF_WB arbitration parameters */
- dc->res_pool->funcs->set_mcif_arb_params(dc, context, pipes, pipe_cnt);
-
- context->bw_ctx.bw.dcn.clk.dispclk_khz = context->bw_ctx.dml.vba.DISPCLK * 1000;
- context->bw_ctx.bw.dcn.clk.dcfclk_khz = context->bw_ctx.dml.vba.DCFCLK * 1000;
- context->bw_ctx.bw.dcn.clk.socclk_khz = context->bw_ctx.dml.vba.SOCCLK * 1000;
- context->bw_ctx.bw.dcn.clk.dramclk_khz = context->bw_ctx.dml.vba.DRAMSpeed * 1000 / 16;
- context->bw_ctx.bw.dcn.clk.dcfclk_deep_sleep_khz = context->bw_ctx.dml.vba.DCFCLKDeepSleep * 1000;
- context->bw_ctx.bw.dcn.clk.fclk_khz = context->bw_ctx.dml.vba.FabricClock * 1000;
- context->bw_ctx.bw.dcn.clk.p_state_change_support =
- context->bw_ctx.dml.vba.DRAMClockChangeSupport[vlevel][context->bw_ctx.dml.vba.maxMpcComb]
- != dm_dram_clock_change_unsupported;
- context->bw_ctx.bw.dcn.clk.dppclk_khz = 0;
-
- BW_VAL_TRACE_END_WATERMARKS();
-
- for (i = 0, pipe_idx = 0; i < dc->res_pool->pipe_count; i++) {
- if (!context->res_ctx.pipe_ctx[i].stream)
- continue;
- pipes[pipe_idx].pipe.dest.vstartup_start = context->bw_ctx.dml.vba.VStartup[pipe_idx];
- pipes[pipe_idx].pipe.dest.vupdate_offset = context->bw_ctx.dml.vba.VUpdateOffsetPix[pipe_idx];
- pipes[pipe_idx].pipe.dest.vupdate_width = context->bw_ctx.dml.vba.VUpdateWidthPix[pipe_idx];
- pipes[pipe_idx].pipe.dest.vready_offset = context->bw_ctx.dml.vba.VReadyOffsetPix[pipe_idx];
- if (context->bw_ctx.bw.dcn.clk.dppclk_khz < pipes[pipe_idx].clks_cfg.dppclk_mhz * 1000)
- context->bw_ctx.bw.dcn.clk.dppclk_khz = pipes[pipe_idx].clks_cfg.dppclk_mhz * 1000;
- context->res_ctx.pipe_ctx[i].plane_res.bw.dppclk_khz =
- pipes[pipe_idx].clks_cfg.dppclk_mhz * 1000;
-#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT
- context->res_ctx.pipe_ctx[i].stream_res.dscclk_khz =
- context->bw_ctx.dml.vba.DSCCLK_calculated[pipe_idx] * 1000;
-#endif
- context->res_ctx.pipe_ctx[i].pipe_dlg_param = pipes[pipe_idx].pipe.dest;
- pipe_idx++;
- }
-
- for (i = 0, pipe_idx = 0; i < dc->res_pool->pipe_count; i++) {
- bool cstate_en = context->bw_ctx.dml.vba.PrefetchMode[vlevel][context->bw_ctx.dml.vba.maxMpcComb] != 2;
- if (!context->res_ctx.pipe_ctx[i].stream)
- continue;
-
- context->bw_ctx.dml.funcs.rq_dlg_get_dlg_reg(&context->bw_ctx.dml,
- &context->res_ctx.pipe_ctx[i].dlg_regs,
- &context->res_ctx.pipe_ctx[i].ttu_regs,
- pipes,
- pipe_cnt,
- pipe_idx,
- cstate_en,
- context->bw_ctx.bw.dcn.clk.p_state_change_support,
- false, false, false);
-
- context->bw_ctx.dml.funcs.rq_dlg_get_rq_reg(&context->bw_ctx.dml,
- &context->res_ctx.pipe_ctx[i].rq_regs,
- pipes[pipe_idx].pipe);
- pipe_idx++;
- }
+ *vlevel_out = vlevel;
out = true;
goto validate_out;
validate_fail:
- DC_LOG_WARNING("Mode Validation Warning: %s failed validation.\n",
- dml_get_status_message(context->bw_ctx.dml.vba.ValidationStatus[context->bw_ctx.dml.vba.soc.num_states]));
-
- BW_VAL_TRACE_SKIP(fail);
out = false;
validate_out:
- kfree(pipes);
-
- BW_VAL_TRACE_FINISH();
-
return out;
}
+bool dcn20_validate_bandwidth(struct dc *dc, struct dc_state *context,
+ bool fast_validate)
+{
+ bool voltage_supported;
+ DC_FP_START();
+ voltage_supported = dcn20_validate_bandwidth_fp(dc, context, fast_validate);
+ DC_FP_END();
+ return voltage_supported;
+}
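
Throughout this patch the double-precision DML math (watermarks, DPPCLK adjustment, bandwidth validation, bounding-box updates) is pushed into *_fp helpers bracketed by DC_FP_START()/DC_FP_END(), replacing the open-coded kernel_fpu_begin()/kernel_fpu_end() pair that the removed patch_bounding_box() used. The general shape, as a small illustrative sketch with invented helper and caller names:

/* Illustrative only: all floating-point arithmetic stays inside the
 * helper, and the caller brackets the call so the kernel FPU/SIMD
 * state is saved and restored around it.
 */
static unsigned int example_fp_helper(unsigned int pix_clk_khz)
{
	double time_per_pixel_ms = 16.0 / (double)pix_clk_khz;

	return (unsigned int)(time_per_pixel_ms * 1000000.0); /* scaled integer result */
}

unsigned int example_caller(unsigned int pix_clk_khz)
{
	unsigned int t;

	DC_FP_START();	/* takes the place of kernel_fpu_begin() */
	t = example_fp_helper(pix_clk_khz);
	DC_FP_END();	/* takes the place of kernel_fpu_end() */

	return t;
}
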
+
struct pipe_ctx *dcn20_acquire_idle_pipe_for_layer(
struct dc_state *state,
const struct resource_pool *pool,
@@ -2443,7 +2177,7 @@ static void dcn20_destroy_resource_pool(struct resource_pool **pool)
{
struct dcn20_resource_pool *dcn20_pool = TO_DCN20_RES_POOL(*pool);
- destruct(dcn20_pool);
+ dcn20_resource_destruct(dcn20_pool);
kfree(dcn20_pool);
*pool = NULL;
}
@@ -2454,10 +2188,8 @@ static struct dc_cap_funcs cap_funcs = {
};
-enum dc_status dcn20_get_default_swizzle_mode(struct dc_plane_state *plane_state)
+enum dc_status dcn20_patch_unknown_plane_state(struct dc_plane_state *plane_state)
{
- enum dc_status result = DC_OK;
-
enum surface_pixel_format surf_pix_format = plane_state->format;
unsigned int bpp = resource_pixel_format_to_bpp(surf_pix_format);
@@ -2469,19 +2201,22 @@ enum dc_status dcn20_get_default_swizzle_mode(struct dc_plane_state *plane_state
swizzle = DC_SW_64KB_S;
plane_state->tiling_info.gfx9.swizzle = swizzle;
- return result;
+ return DC_OK;
}
-static struct resource_funcs dcn20_res_pool_funcs = {
+static const struct resource_funcs dcn20_res_pool_funcs = {
.destroy = dcn20_destroy_resource_pool,
.link_enc_create = dcn20_link_encoder_create,
+ .panel_cntl_create = dcn20_panel_cntl_create,
.validate_bandwidth = dcn20_validate_bandwidth,
.acquire_idle_pipe_for_layer = dcn20_acquire_idle_pipe_for_layer,
.add_stream_to_ctx = dcn20_add_stream_to_ctx,
+ .add_dsc_to_stream_resource = dcn20_add_dsc_to_stream_resource,
.remove_stream_from_ctx = dcn20_remove_stream_from_ctx,
.populate_dml_writeback_from_context = dcn20_populate_dml_writeback_from_context,
- .get_default_swizzle_mode = dcn20_get_default_swizzle_mode,
+ .patch_unknown_plane_state = dcn20_patch_unknown_plane_state,
.set_mcif_arb_params = dcn20_set_mcif_arb_params,
+ .populate_dml_pipes = dcn20_populate_dml_pipes_from_context,
.find_first_free_match_stream_enc_for_link = dcn10_find_first_free_match_stream_enc_for_link
};
@@ -2490,8 +2225,6 @@ bool dcn20_dwbc_create(struct dc_context *ctx, struct resource_pool *pool)
int i;
uint32_t pipe_count = pool->res_cap->num_dwb;
- ASSERT(pipe_count > 0);
-
for (i = 0; i < pipe_count; i++) {
struct dcn20_dwbc *dwbc20 = kzalloc(sizeof(struct dcn20_dwbc),
GFP_KERNEL);
@@ -2537,9 +2270,9 @@ bool dcn20_mmhubbub_create(struct dc_context *ctx, struct resource_pool *pool)
return true;
}
-struct pp_smu_funcs *dcn20_pp_smu_create(struct dc_context *ctx)
+static struct pp_smu_funcs *dcn20_pp_smu_create(struct dc_context *ctx)
{
- struct pp_smu_funcs *pp_smu = kzalloc(sizeof(*pp_smu), GFP_KERNEL);
+ struct pp_smu_funcs *pp_smu = kzalloc(sizeof(*pp_smu), GFP_ATOMIC);
if (!pp_smu)
return pp_smu;
@@ -2552,7 +2285,7 @@ struct pp_smu_funcs *dcn20_pp_smu_create(struct dc_context *ctx)
return pp_smu;
}
-void dcn20_pp_smu_destroy(struct pp_smu_funcs **pp_smu)
+static void dcn20_pp_smu_destroy(struct pp_smu_funcs **pp_smu)
{
if (pp_smu && *pp_smu) {
kfree(*pp_smu);
@@ -2560,276 +2293,48 @@ void dcn20_pp_smu_destroy(struct pp_smu_funcs **pp_smu)
}
}
-static void cap_soc_clocks(
- struct _vcs_dpi_soc_bounding_box_st *bb,
- struct pp_smu_nv_clock_table max_clocks)
+static struct _vcs_dpi_soc_bounding_box_st *get_asic_rev_soc_bb(
+ uint32_t hw_internal_rev)
{
- int i;
+ if (ASICREV_IS_NAVI14_M(hw_internal_rev))
+ return &dcn2_0_nv14_soc;
- // First pass - cap all clocks higher than the reported max
- for (i = 0; i < bb->num_states; i++) {
- if ((bb->clock_limits[i].dcfclk_mhz > (max_clocks.dcfClockInKhz / 1000))
- && max_clocks.dcfClockInKhz != 0)
- bb->clock_limits[i].dcfclk_mhz = (max_clocks.dcfClockInKhz / 1000);
+ if (ASICREV_IS_NAVI12_P(hw_internal_rev))
+ return &dcn2_0_nv12_soc;
- if ((bb->clock_limits[i].dram_speed_mts > (max_clocks.uClockInKhz / 1000) * 16)
- && max_clocks.uClockInKhz != 0)
- bb->clock_limits[i].dram_speed_mts = (max_clocks.uClockInKhz / 1000) * 16;
-
- // HACK: Force every uclk to max for now to "disable" uclk switching.
- bb->clock_limits[i].dram_speed_mts = (max_clocks.uClockInKhz / 1000) * 16;
-
- if ((bb->clock_limits[i].fabricclk_mhz > (max_clocks.fabricClockInKhz / 1000))
- && max_clocks.fabricClockInKhz != 0)
- bb->clock_limits[i].fabricclk_mhz = (max_clocks.fabricClockInKhz / 1000);
-
- if ((bb->clock_limits[i].dispclk_mhz > (max_clocks.displayClockInKhz / 1000))
- && max_clocks.displayClockInKhz != 0)
- bb->clock_limits[i].dispclk_mhz = (max_clocks.displayClockInKhz / 1000);
-
- if ((bb->clock_limits[i].dppclk_mhz > (max_clocks.dppClockInKhz / 1000))
- && max_clocks.dppClockInKhz != 0)
- bb->clock_limits[i].dppclk_mhz = (max_clocks.dppClockInKhz / 1000);
-
- if ((bb->clock_limits[i].phyclk_mhz > (max_clocks.phyClockInKhz / 1000))
- && max_clocks.phyClockInKhz != 0)
- bb->clock_limits[i].phyclk_mhz = (max_clocks.phyClockInKhz / 1000);
-
- if ((bb->clock_limits[i].socclk_mhz > (max_clocks.socClockInKhz / 1000))
- && max_clocks.socClockInKhz != 0)
- bb->clock_limits[i].socclk_mhz = (max_clocks.socClockInKhz / 1000);
-
- if ((bb->clock_limits[i].dscclk_mhz > (max_clocks.dscClockInKhz / 1000))
- && max_clocks.dscClockInKhz != 0)
- bb->clock_limits[i].dscclk_mhz = (max_clocks.dscClockInKhz / 1000);
- }
-
- // Second pass - remove all duplicate clock states
- for (i = bb->num_states - 1; i > 1; i--) {
- bool duplicate = true;
-
- if (bb->clock_limits[i-1].dcfclk_mhz != bb->clock_limits[i].dcfclk_mhz)
- duplicate = false;
- if (bb->clock_limits[i-1].dispclk_mhz != bb->clock_limits[i].dispclk_mhz)
- duplicate = false;
- if (bb->clock_limits[i-1].dppclk_mhz != bb->clock_limits[i].dppclk_mhz)
- duplicate = false;
- if (bb->clock_limits[i-1].dram_speed_mts != bb->clock_limits[i].dram_speed_mts)
- duplicate = false;
- if (bb->clock_limits[i-1].dscclk_mhz != bb->clock_limits[i].dscclk_mhz)
- duplicate = false;
- if (bb->clock_limits[i-1].fabricclk_mhz != bb->clock_limits[i].fabricclk_mhz)
- duplicate = false;
- if (bb->clock_limits[i-1].phyclk_mhz != bb->clock_limits[i].phyclk_mhz)
- duplicate = false;
- if (bb->clock_limits[i-1].socclk_mhz != bb->clock_limits[i].socclk_mhz)
- duplicate = false;
-
- if (duplicate)
- bb->num_states--;
- }
+ return &dcn2_0_soc;
}
-static void update_bounding_box(struct dc *dc, struct _vcs_dpi_soc_bounding_box_st *bb,
- struct pp_smu_nv_clock_table *max_clocks, unsigned int *uclk_states, unsigned int num_states)
+static struct _vcs_dpi_ip_params_st *get_asic_rev_ip_params(
+ uint32_t hw_internal_rev)
{
- struct _vcs_dpi_voltage_scaling_st calculated_states[MAX_CLOCK_LIMIT_STATES] = {0};
- int i;
- int num_calculated_states = 0;
- int min_dcfclk = 0;
+ /* NV14 */
+ if (ASICREV_IS_NAVI14_M(hw_internal_rev))
+ return &dcn2_0_nv14_ip;
- if (num_states == 0)
- return;
-
- if (dc->bb_overrides.min_dcfclk_mhz > 0)
- min_dcfclk = dc->bb_overrides.min_dcfclk_mhz;
-
- for (i = 0; i < num_states; i++) {
- int min_fclk_required_by_uclk;
- calculated_states[i].state = i;
- calculated_states[i].dram_speed_mts = uclk_states[i] * 16 / 1000;
-
- // FCLK:UCLK ratio is 1.08
- min_fclk_required_by_uclk = mul_u64_u32_shr(BIT_ULL(32) * 1080 / 1000000, uclk_states[i], 32);
-
- calculated_states[i].fabricclk_mhz = (min_fclk_required_by_uclk < min_dcfclk) ?
- min_dcfclk : min_fclk_required_by_uclk;
-
- calculated_states[i].socclk_mhz = (calculated_states[i].fabricclk_mhz > max_clocks->socClockInKhz / 1000) ?
- max_clocks->socClockInKhz / 1000 : calculated_states[i].fabricclk_mhz;
-
- calculated_states[i].dcfclk_mhz = (calculated_states[i].fabricclk_mhz > max_clocks->dcfClockInKhz / 1000) ?
- max_clocks->dcfClockInKhz / 1000 : calculated_states[i].fabricclk_mhz;
-
- calculated_states[i].dispclk_mhz = max_clocks->displayClockInKhz / 1000;
- calculated_states[i].dppclk_mhz = max_clocks->displayClockInKhz / 1000;
- calculated_states[i].dscclk_mhz = max_clocks->displayClockInKhz / (1000 * 3);
-
- calculated_states[i].phyclk_mhz = max_clocks->phyClockInKhz / 1000;
-
- num_calculated_states++;
- }
-
- memcpy(bb->clock_limits, calculated_states, sizeof(bb->clock_limits));
- bb->num_states = num_calculated_states;
-
- // Duplicate the last state, DML always an extra state identical to max state to work
- memcpy(&bb->clock_limits[num_calculated_states], &bb->clock_limits[num_calculated_states - 1], sizeof(struct _vcs_dpi_voltage_scaling_st));
- bb->clock_limits[num_calculated_states].state = bb->num_states;
+ /* NV12 and NV10 */
+ return &dcn2_0_ip;
}
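
The removed update_bounding_box() (its role now filled by dcn20_update_bounding_box() on the FPU side) derives each state's fabric clock from the UCLK using a 1.08 FCLK:UCLK ratio, and it does so in fixed point: 1080/1000000 is encoded as a 32.32 constant and applied with mul_u64_u32_shr(), so that particular line needs no floating point. A worked example of the arithmetic, with an illustrative UCLK value:

/* Worked example of the fixed-point 1.08 ratio (uclk_states[] is in kHz):
 *   constant = BIT_ULL(32) * 1080 / 1000000           ~ 0.00108 in Q32.32
 *   fclk_mhz = mul_u64_u32_shr(constant, uclk_khz, 32)
 *            = (constant * uclk_khz) >> 32
 * For uclk_khz = 875000 (an 875 MHz UCLK):
 *   fclk_mhz ~ 875000 * 0.00108 = 945, i.e. FCLK = 1.08 * UCLK.
 */
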
-static void patch_bounding_box(struct dc *dc, struct _vcs_dpi_soc_bounding_box_st *bb)
+static enum dml_project get_dml_project_version(uint32_t hw_internal_rev)
{
- kernel_fpu_begin();
- if ((int)(bb->sr_exit_time_us * 1000) != dc->bb_overrides.sr_exit_time_ns
- && dc->bb_overrides.sr_exit_time_ns) {
- bb->sr_exit_time_us = dc->bb_overrides.sr_exit_time_ns / 1000.0;
- }
-
- if ((int)(bb->sr_enter_plus_exit_time_us * 1000)
- != dc->bb_overrides.sr_enter_plus_exit_time_ns
- && dc->bb_overrides.sr_enter_plus_exit_time_ns) {
- bb->sr_enter_plus_exit_time_us =
- dc->bb_overrides.sr_enter_plus_exit_time_ns / 1000.0;
- }
-
- if ((int)(bb->urgent_latency_us * 1000) != dc->bb_overrides.urgent_latency_ns
- && dc->bb_overrides.urgent_latency_ns) {
- bb->urgent_latency_us = dc->bb_overrides.urgent_latency_ns / 1000.0;
- }
-
- if ((int)(bb->dram_clock_change_latency_us * 1000)
- != dc->bb_overrides.dram_clock_change_latency_ns
- && dc->bb_overrides.dram_clock_change_latency_ns) {
- bb->dram_clock_change_latency_us =
- dc->bb_overrides.dram_clock_change_latency_ns / 1000.0;
- }
- kernel_fpu_end();
+ return DML_PROJECT_NAVI10v2;
}
-#define fixed16_to_double(x) (((double) x) / ((double) (1 << 16)))
-#define fixed16_to_double_to_cpu(x) fixed16_to_double(le32_to_cpu(x))
-
static bool init_soc_bounding_box(struct dc *dc,
struct dcn20_resource_pool *pool)
{
- const struct gpu_info_soc_bounding_box_v1_0 *bb = dc->soc_bounding_box;
- DC_LOGGER_INIT(dc->ctx->logger);
-
- if (!bb && !SOC_BOUNDING_BOX_VALID) {
- DC_LOG_ERROR("%s: not valid soc bounding box/n", __func__);
- return false;
- }
+ struct _vcs_dpi_soc_bounding_box_st *loaded_bb =
+ get_asic_rev_soc_bb(dc->ctx->asic_id.hw_internal_rev);
+ struct _vcs_dpi_ip_params_st *loaded_ip =
+ get_asic_rev_ip_params(dc->ctx->asic_id.hw_internal_rev);
- if (bb && !SOC_BOUNDING_BOX_VALID) {
- int i;
-
- dcn2_0_soc.sr_exit_time_us =
- fixed16_to_double_to_cpu(bb->sr_exit_time_us);
- dcn2_0_soc.sr_enter_plus_exit_time_us =
- fixed16_to_double_to_cpu(bb->sr_enter_plus_exit_time_us);
- dcn2_0_soc.urgent_latency_us =
- fixed16_to_double_to_cpu(bb->urgent_latency_us);
- dcn2_0_soc.urgent_latency_pixel_data_only_us =
- fixed16_to_double_to_cpu(bb->urgent_latency_pixel_data_only_us);
- dcn2_0_soc.urgent_latency_pixel_mixed_with_vm_data_us =
- fixed16_to_double_to_cpu(bb->urgent_latency_pixel_mixed_with_vm_data_us);
- dcn2_0_soc.urgent_latency_vm_data_only_us =
- fixed16_to_double_to_cpu(bb->urgent_latency_vm_data_only_us);
- dcn2_0_soc.urgent_out_of_order_return_per_channel_pixel_only_bytes =
- le32_to_cpu(bb->urgent_out_of_order_return_per_channel_pixel_only_bytes);
- dcn2_0_soc.urgent_out_of_order_return_per_channel_pixel_and_vm_bytes =
- le32_to_cpu(bb->urgent_out_of_order_return_per_channel_pixel_and_vm_bytes);
- dcn2_0_soc.urgent_out_of_order_return_per_channel_vm_only_bytes =
- le32_to_cpu(bb->urgent_out_of_order_return_per_channel_vm_only_bytes);
- dcn2_0_soc.pct_ideal_dram_sdp_bw_after_urgent_pixel_only =
- fixed16_to_double_to_cpu(bb->pct_ideal_dram_sdp_bw_after_urgent_pixel_only);
- dcn2_0_soc.pct_ideal_dram_sdp_bw_after_urgent_pixel_and_vm =
- fixed16_to_double_to_cpu(bb->pct_ideal_dram_sdp_bw_after_urgent_pixel_and_vm);
- dcn2_0_soc.pct_ideal_dram_sdp_bw_after_urgent_vm_only =
- fixed16_to_double_to_cpu(bb->pct_ideal_dram_sdp_bw_after_urgent_vm_only);
- dcn2_0_soc.max_avg_sdp_bw_use_normal_percent =
- fixed16_to_double_to_cpu(bb->max_avg_sdp_bw_use_normal_percent);
- dcn2_0_soc.max_avg_dram_bw_use_normal_percent =
- fixed16_to_double_to_cpu(bb->max_avg_dram_bw_use_normal_percent);
- dcn2_0_soc.writeback_latency_us =
- fixed16_to_double_to_cpu(bb->writeback_latency_us);
- dcn2_0_soc.ideal_dram_bw_after_urgent_percent =
- fixed16_to_double_to_cpu(bb->ideal_dram_bw_after_urgent_percent);
- dcn2_0_soc.max_request_size_bytes =
- le32_to_cpu(bb->max_request_size_bytes);
- dcn2_0_soc.dram_channel_width_bytes =
- le32_to_cpu(bb->dram_channel_width_bytes);
- dcn2_0_soc.fabric_datapath_to_dcn_data_return_bytes =
- le32_to_cpu(bb->fabric_datapath_to_dcn_data_return_bytes);
- dcn2_0_soc.dcn_downspread_percent =
- fixed16_to_double_to_cpu(bb->dcn_downspread_percent);
- dcn2_0_soc.downspread_percent =
- fixed16_to_double_to_cpu(bb->downspread_percent);
- dcn2_0_soc.dram_page_open_time_ns =
- fixed16_to_double_to_cpu(bb->dram_page_open_time_ns);
- dcn2_0_soc.dram_rw_turnaround_time_ns =
- fixed16_to_double_to_cpu(bb->dram_rw_turnaround_time_ns);
- dcn2_0_soc.dram_return_buffer_per_channel_bytes =
- le32_to_cpu(bb->dram_return_buffer_per_channel_bytes);
- dcn2_0_soc.round_trip_ping_latency_dcfclk_cycles =
- le32_to_cpu(bb->round_trip_ping_latency_dcfclk_cycles);
- dcn2_0_soc.urgent_out_of_order_return_per_channel_bytes =
- le32_to_cpu(bb->urgent_out_of_order_return_per_channel_bytes);
- dcn2_0_soc.channel_interleave_bytes =
- le32_to_cpu(bb->channel_interleave_bytes);
- dcn2_0_soc.num_banks =
- le32_to_cpu(bb->num_banks);
- dcn2_0_soc.num_chans =
- le32_to_cpu(bb->num_chans);
- dcn2_0_soc.vmm_page_size_bytes =
- le32_to_cpu(bb->vmm_page_size_bytes);
- dcn2_0_soc.dram_clock_change_latency_us =
- fixed16_to_double_to_cpu(bb->dram_clock_change_latency_us);
- // HACK!! Lower uclock latency switch time so we don't switch
- dcn2_0_soc.dram_clock_change_latency_us = 10;
- dcn2_0_soc.writeback_dram_clock_change_latency_us =
- fixed16_to_double_to_cpu(bb->writeback_dram_clock_change_latency_us);
- dcn2_0_soc.return_bus_width_bytes =
- le32_to_cpu(bb->return_bus_width_bytes);
- dcn2_0_soc.dispclk_dppclk_vco_speed_mhz =
- le32_to_cpu(bb->dispclk_dppclk_vco_speed_mhz);
- dcn2_0_soc.xfc_bus_transport_time_us =
- le32_to_cpu(bb->xfc_bus_transport_time_us);
- dcn2_0_soc.xfc_xbuf_latency_tolerance_us =
- le32_to_cpu(bb->xfc_xbuf_latency_tolerance_us);
- dcn2_0_soc.use_urgent_burst_bw =
- le32_to_cpu(bb->use_urgent_burst_bw);
- dcn2_0_soc.num_states =
- le32_to_cpu(bb->num_states);
-
- for (i = 0; i < dcn2_0_soc.num_states; i++) {
- dcn2_0_soc.clock_limits[i].state =
- le32_to_cpu(bb->clock_limits[i].state);
- dcn2_0_soc.clock_limits[i].dcfclk_mhz =
- fixed16_to_double_to_cpu(bb->clock_limits[i].dcfclk_mhz);
- dcn2_0_soc.clock_limits[i].fabricclk_mhz =
- fixed16_to_double_to_cpu(bb->clock_limits[i].fabricclk_mhz);
- dcn2_0_soc.clock_limits[i].dispclk_mhz =
- fixed16_to_double_to_cpu(bb->clock_limits[i].dispclk_mhz);
- dcn2_0_soc.clock_limits[i].dppclk_mhz =
- fixed16_to_double_to_cpu(bb->clock_limits[i].dppclk_mhz);
- dcn2_0_soc.clock_limits[i].phyclk_mhz =
- fixed16_to_double_to_cpu(bb->clock_limits[i].phyclk_mhz);
- dcn2_0_soc.clock_limits[i].socclk_mhz =
- fixed16_to_double_to_cpu(bb->clock_limits[i].socclk_mhz);
- dcn2_0_soc.clock_limits[i].dscclk_mhz =
- fixed16_to_double_to_cpu(bb->clock_limits[i].dscclk_mhz);
- dcn2_0_soc.clock_limits[i].dram_speed_mts =
- fixed16_to_double_to_cpu(bb->clock_limits[i].dram_speed_mts);
- }
- }
+ DC_LOGGER_INIT(dc->ctx->logger);
if (pool->base.pp_smu) {
struct pp_smu_nv_clock_table max_clocks = {0};
unsigned int uclk_states[8] = {0};
unsigned int num_states = 0;
- int i;
enum pp_smu_status status;
bool clock_limits_available = false;
bool uclk_states_available = false;
@@ -2851,24 +2356,26 @@ static bool init_soc_bounding_box(struct dc *dc,
clock_limits_available = (status == PP_SMU_RESULT_OK);
}
- // HACK: Use the max uclk_states value for all elements.
- for (i = 0; i < num_states; i++)
- uclk_states[i] = uclk_states[num_states - 1];
-
- if (clock_limits_available && uclk_states_available && num_states)
- update_bounding_box(dc, &dcn2_0_soc, &max_clocks, uclk_states, num_states);
- else if (clock_limits_available)
- cap_soc_clocks(&dcn2_0_soc, max_clocks);
+ if (clock_limits_available && uclk_states_available && num_states) {
+ DC_FP_START();
+ dcn20_update_bounding_box(dc, loaded_bb, &max_clocks, uclk_states, num_states);
+ DC_FP_END();
+ } else if (clock_limits_available) {
+ DC_FP_START();
+ dcn20_cap_soc_clocks(loaded_bb, max_clocks);
+ DC_FP_END();
+ }
}
- dcn2_0_ip.max_num_otg = pool->base.res_cap->num_timing_generator;
- dcn2_0_ip.max_num_dpp = pool->base.pipe_count;
- patch_bounding_box(dc, &dcn2_0_soc);
-
+ loaded_ip->max_num_otg = pool->base.res_cap->num_timing_generator;
+ loaded_ip->max_num_dpp = pool->base.pipe_count;
+ DC_FP_START();
+ dcn20_patch_bounding_box(dc, loaded_bb);
+ DC_FP_END();
return true;
}
-static bool construct(
+static bool dcn20_resource_construct(
uint8_t num_virtual_links,
struct dc *dc,
struct dcn20_resource_pool *pool)
@@ -2876,28 +2383,81 @@ static bool construct(
int i;
struct dc_context *ctx = dc->ctx;
struct irq_service_init_data init_data;
+ struct ddc_service_init_data ddc_init_data = {0};
+ struct _vcs_dpi_soc_bounding_box_st *loaded_bb =
+ get_asic_rev_soc_bb(ctx->asic_id.hw_internal_rev);
+ struct _vcs_dpi_ip_params_st *loaded_ip =
+ get_asic_rev_ip_params(ctx->asic_id.hw_internal_rev);
+ enum dml_project dml_project_version =
+ get_dml_project_version(ctx->asic_id.hw_internal_rev);
ctx->dc_bios->regs = &bios_regs;
-
- pool->base.res_cap = &res_cap_nv10;
pool->base.funcs = &dcn20_res_pool_funcs;
+ if (ASICREV_IS_NAVI14_M(ctx->asic_id.hw_internal_rev)) {
+ pool->base.res_cap = &res_cap_nv14;
+ pool->base.pipe_count = 5;
+ pool->base.mpcc_count = 5;
+ } else {
+ pool->base.res_cap = &res_cap_nv10;
+ pool->base.pipe_count = 6;
+ pool->base.mpcc_count = 6;
+ }
/*************************************************
* Resource + asic cap hardcoding *
*************************************************/
pool->base.underlay_pipe_index = NO_UNDERLAY_PIPE;
- pool->base.pipe_count = 6;
- pool->base.mpcc_count = 6;
dc->caps.max_downscale_ratio = 200;
dc->caps.i2c_speed_in_khz = 100;
+ dc->caps.i2c_speed_in_khz_hdcp = 100; /*1.4 w/a not applied by default*/
dc->caps.max_cursor_size = 256;
+ dc->caps.min_horizontal_blanking_period = 80;
dc->caps.dmdata_alloc_size = 2048;
dc->caps.max_slave_planes = 1;
+ dc->caps.max_slave_yuv_planes = 1;
+ dc->caps.max_slave_rgb_planes = 1;
dc->caps.post_blend_color_processing = true;
dc->caps.force_dp_tps4_for_cp2520 = true;
- dc->caps.hw_3d_lut = true;
+ dc->caps.extended_aux_timeout_support = true;
+
+ /* Color pipeline capabilities */
+ dc->caps.color.dpp.dcn_arch = 1;
+ dc->caps.color.dpp.input_lut_shared = 0;
+ dc->caps.color.dpp.icsc = 1;
+ dc->caps.color.dpp.dgam_ram = 1;
+ dc->caps.color.dpp.dgam_rom_caps.srgb = 1;
+ dc->caps.color.dpp.dgam_rom_caps.bt2020 = 1;
+ dc->caps.color.dpp.dgam_rom_caps.gamma2_2 = 0;
+ dc->caps.color.dpp.dgam_rom_caps.pq = 0;
+ dc->caps.color.dpp.dgam_rom_caps.hlg = 0;
+ dc->caps.color.dpp.post_csc = 0;
+ dc->caps.color.dpp.gamma_corr = 0;
+ dc->caps.color.dpp.dgam_rom_for_yuv = 1;
+
+ dc->caps.color.dpp.hw_3d_lut = 1;
+ dc->caps.color.dpp.ogam_ram = 1;
+ // no OGAM ROM on DCN2, only MPC ROM
+ dc->caps.color.dpp.ogam_rom_caps.srgb = 0;
+ dc->caps.color.dpp.ogam_rom_caps.bt2020 = 0;
+ dc->caps.color.dpp.ogam_rom_caps.gamma2_2 = 0;
+ dc->caps.color.dpp.ogam_rom_caps.pq = 0;
+ dc->caps.color.dpp.ogam_rom_caps.hlg = 0;
+ dc->caps.color.dpp.ocsc = 0;
+
+ dc->caps.color.mpc.gamut_remap = 0;
+ dc->caps.color.mpc.num_3dluts = 0;
+ dc->caps.color.mpc.shared_3d_lut = 0;
+ dc->caps.color.mpc.ogam_ram = 1;
+ dc->caps.color.mpc.ogam_rom_caps.srgb = 0;
+ dc->caps.color.mpc.ogam_rom_caps.bt2020 = 0;
+ dc->caps.color.mpc.ogam_rom_caps.gamma2_2 = 0;
+ dc->caps.color.mpc.ogam_rom_caps.pq = 0;
+ dc->caps.color.mpc.ogam_rom_caps.hlg = 0;
+ dc->caps.color.mpc.ocsc = 1;
+
+ dc->caps.hdmi_frl_pcon_support = true;
if (dc->ctx->dce_environment == DCE_ENV_PRODUCTION_DRV) {
dc->debug = debug_defaults_drv;
@@ -2994,7 +2554,7 @@ static bool construct(
goto create_fail;
}
- dml_init_instance(&dc->dml, &dcn2_0_soc, &dcn2_0_ip, DML_PROJECT_NAVI10);
+ dml_init_instance(&dc->dml, loaded_bb, loaded_ip, dml_project_version);
if (!dc->debug.disable_pplib_wm_range) {
struct pp_smu_wm_range_sets ranges = {0};
@@ -3002,7 +2562,7 @@ static bool construct(
ranges.num_reader_wm_sets = 0;
- if (dcn2_0_soc.num_states == 1) {
+ if (loaded_bb->num_states == 1) {
ranges.reader_wm_sets[0].wm_inst = i;
ranges.reader_wm_sets[0].min_drain_clk_mhz = PP_SMU_WM_SET_RANGE_CLK_UNCONSTRAINED_MIN;
ranges.reader_wm_sets[0].max_drain_clk_mhz = PP_SMU_WM_SET_RANGE_CLK_UNCONSTRAINED_MAX;
@@ -3010,13 +2570,14 @@ static bool construct(
ranges.reader_wm_sets[0].max_fill_clk_mhz = PP_SMU_WM_SET_RANGE_CLK_UNCONSTRAINED_MAX;
ranges.num_reader_wm_sets = 1;
- } else if (dcn2_0_soc.num_states > 1) {
- for (i = 0; i < 4 && i < dcn2_0_soc.num_states; i++) {
+ } else if (loaded_bb->num_states > 1) {
+ for (i = 0; i < 4 && i < loaded_bb->num_states; i++) {
ranges.reader_wm_sets[i].wm_inst = i;
ranges.reader_wm_sets[i].min_drain_clk_mhz = PP_SMU_WM_SET_RANGE_CLK_UNCONSTRAINED_MIN;
ranges.reader_wm_sets[i].max_drain_clk_mhz = PP_SMU_WM_SET_RANGE_CLK_UNCONSTRAINED_MAX;
- ranges.reader_wm_sets[i].min_fill_clk_mhz = (i > 0) ? (dcn2_0_soc.clock_limits[i - 1].dram_speed_mts / 16) + 1 : 0;
- ranges.reader_wm_sets[i].max_fill_clk_mhz = dcn2_0_soc.clock_limits[i].dram_speed_mts / 16;
+ DC_FP_START();
+ dcn20_fpu_set_wm_ranges(i, &ranges, loaded_bb);
+ DC_FP_END();
ranges.num_reader_wm_sets = i + 1;
}
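For reference, the per-watermark-set math moved behind dcn20_fpu_set_wm_ranges so the double-precision work stays inside the DC_FP_START/DC_FP_END window. A minimal sketch of what that helper presumably computes, based on the inline code removed above (the real body lives in dml/dcn20/dcn20_fpu.c and may differ):

void dcn20_fpu_set_wm_ranges(int i,
		struct pp_smu_wm_range_sets *ranges,
		struct _vcs_dpi_soc_bounding_box_st *loaded_bb)
{
	/* Same conversion the removed lines used: DRAM speed in MT/s
	 * divided by 16 gives the fill-clock bound in MHz, with the
	 * lower bound starting one MHz above the previous state's upper bound.
	 */
	ranges->reader_wm_sets[i].min_fill_clk_mhz = (i > 0) ?
		(loaded_bb->clock_limits[i - 1].dram_speed_mts / 16) + 1 : 0;
	ranges->reader_wm_sets[i].max_fill_clk_mhz =
		loaded_bb->clock_limits[i].dram_speed_mts / 16;
}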
@@ -3123,7 +2684,6 @@ static bool construct(
goto create_fail;
}
-#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT
for (i = 0; i < pool->base.res_cap->num_dsc; i++) {
pool->base.dscs[i] = dcn20_dsc_create(ctx, i);
if (pool->base.dscs[i] == NULL) {
@@ -3132,7 +2692,6 @@ static bool construct(
goto create_fail;
}
}
-#endif
if (!dcn20_dwbc_create(ctx, &pool->base)) {
BREAK_TO_DEBUGGER();
@@ -3152,6 +2711,19 @@ static bool construct(
dcn20_hw_sequencer_construct(dc);
+ // If NV12, set the PG function pointer to NULL. It's not that
+ // PG isn't supported for NV12; it's that we don't want to
+ // program the registers, because doing so causes more power
+ // to be consumed. We could have achieved the same effect by
+ // checking the ASIC rev in dcn20_init_hw, but at some point
+ // there was a request not to check ASIC rev in the hw sequencer.
+ if (ASICREV_IS_NAVI12_P(dc->ctx->asic_id.hw_internal_rev)) {
+ dc->hwseq->funcs.enable_power_gating_plane = NULL;
+ dc->debug.disable_dpp_power_gate = true;
+ dc->debug.disable_hubp_power_gate = true;
+ }
+
+
dc->caps.max_planes = pool->base.pipe_count;
for (i = 0; i < dc->caps.max_planes; ++i)
@@ -3159,11 +2731,22 @@ static bool construct(
dc->cap_funcs = cap_funcs;
+ if (dc->ctx->dc_bios->fw_info.oem_i2c_present) {
+ ddc_init_data.ctx = dc->ctx;
+ ddc_init_data.link = NULL;
+ ddc_init_data.id.id = dc->ctx->dc_bios->fw_info.oem_i2c_obj_id;
+ ddc_init_data.id.enum_id = 0;
+ ddc_init_data.id.type = OBJECT_TYPE_GENERIC;
+ pool->base.oem_device = dal_ddc_service_create(&ddc_init_data);
+ } else {
+ pool->base.oem_device = NULL;
+ }
+
return true;
create_fail:
- destruct(pool);
+ dcn20_resource_destruct(pool);
return false;
}
@@ -3173,12 +2756,12 @@ struct resource_pool *dcn20_create_resource_pool(
struct dc *dc)
{
struct dcn20_resource_pool *pool =
- kzalloc(sizeof(struct dcn20_resource_pool), GFP_KERNEL);
+ kzalloc(sizeof(struct dcn20_resource_pool), GFP_ATOMIC);
if (!pool)
return NULL;
- if (construct(init_data->num_virtual_links, dc, pool))
+ if (dcn20_resource_construct(init_data->num_virtual_links, dc, pool))
return &pool->base;
BREAK_TO_DEBUGGER();
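The construct path now resolves its SoC bounding box, IP parameters and DML project version per ASIC revision instead of hardcoding the NV10 tables. A plausible shape for the selection helpers, assuming they only key off the ASICREV macros already used in this file and the dcn2_0_* tables exported from the new header (the actual implementations may cover additional revisions, and the DML project version is selected the same way):

static struct _vcs_dpi_soc_bounding_box_st *get_asic_rev_soc_bb(uint32_t hw_internal_rev)
{
	/* NV14 and NV12 carry their own SoC bounding boxes; everything else
	 * falls back to the NV10 table. */
	if (ASICREV_IS_NAVI14_M(hw_internal_rev))
		return &dcn2_0_nv14_soc;
	if (ASICREV_IS_NAVI12_P(hw_internal_rev))
		return &dcn2_0_nv12_soc;
	return &dcn2_0_soc;
}

static struct _vcs_dpi_ip_params_st *get_asic_rev_ip_params(uint32_t hw_internal_rev)
{
	/* Only NV14 has distinct IP parameters (5 pipes instead of 6). */
	if (ASICREV_IS_NAVI14_M(hw_internal_rev))
		return &dcn2_0_nv14_ip;
	return &dcn2_0_ip;
}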
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.h b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.h
index b5a75289f444..7cbe1e9daa36 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.h
@@ -27,6 +27,7 @@
#define __DC_RESOURCE_DCN20_H__
#include "core_types.h"
+#include "dml/dcn20/dcn20_fpu.h"
#define TO_DCN20_RES_POOL(pool)\
container_of(pool, struct dcn20_resource_pool, base)
@@ -35,6 +36,12 @@ struct dc;
struct resource_pool;
struct _vcs_dpi_display_pipe_params_st;
+extern struct _vcs_dpi_ip_params_st dcn2_0_ip;
+extern struct _vcs_dpi_ip_params_st dcn2_0_nv14_ip;
+extern struct _vcs_dpi_soc_bounding_box_st dcn2_0_soc;
+extern struct _vcs_dpi_soc_bounding_box_st dcn2_0_nv14_soc;
+extern struct _vcs_dpi_soc_bounding_box_st dcn2_0_nv12_soc;
+
struct dcn20_resource_pool {
struct resource_pool base;
};
@@ -49,14 +56,11 @@ unsigned int dcn20_calc_max_scaled_time(
unsigned int time_per_pixel,
enum mmhubbub_wbif_mode mode,
unsigned int urgent_watermark);
-int dcn20_populate_dml_pipes_from_context(
- struct dc *dc, struct resource_context *res_ctx, display_e2e_pipe_params_st *pipes);
+
struct pipe_ctx *dcn20_acquire_idle_pipe_for_layer(
struct dc_state *state,
const struct resource_pool *pool,
struct dc_stream_state *stream);
-void dcn20_populate_dml_writeback_from_context(
- struct dc *dc, struct resource_context *res_ctx, display_e2e_pipe_params_st *pipes);
struct stream_encoder *dcn20_stream_encoder_create(
enum engine_id eng_id,
@@ -78,7 +82,6 @@ struct dpp *dcn20_dpp_create(
struct input_pixel_processor *dcn20_ipp_create(
struct dc_context *ctx, uint32_t inst);
-
struct output_pixel_processor *dcn20_opp_create(
struct dc_context *ctx, uint32_t inst);
@@ -95,9 +98,6 @@ struct display_stream_compressor *dcn20_dsc_create(
struct dc_context *ctx, uint32_t inst);
void dcn20_dsc_destroy(struct display_stream_compressor **dsc);
-struct pp_smu_funcs *dcn20_pp_smu_create(struct dc_context *ctx);
-void dcn20_pp_smu_destroy(struct pp_smu_funcs **pp_smu);
-
struct hubp *dcn20_hubp_create(
struct dc_context *ctx,
uint32_t inst);
@@ -116,18 +116,51 @@ void dcn20_set_mcif_arb_params(
display_e2e_pipe_params_st *pipes,
int pipe_cnt);
bool dcn20_validate_bandwidth(struct dc *dc, struct dc_state *context, bool fast_validate);
+void dcn20_merge_pipes_for_validate(
+ struct dc *dc,
+ struct dc_state *context);
+int dcn20_validate_apply_pipe_split_flags(
+ struct dc *dc,
+ struct dc_state *context,
+ int vlevel,
+ int *split,
+ bool *merge);
+void dcn20_release_dsc(struct resource_context *res_ctx,
+ const struct resource_pool *pool,
+ struct display_stream_compressor **dsc);
+bool dcn20_validate_dsc(struct dc *dc, struct dc_state *new_ctx);
+void dcn20_split_stream_for_mpc(
+ struct resource_context *res_ctx,
+ const struct resource_pool *pool,
+ struct pipe_ctx *primary_pipe,
+ struct pipe_ctx *secondary_pipe);
+bool dcn20_split_stream_for_odm(
+ const struct dc *dc,
+ struct resource_context *res_ctx,
+ struct pipe_ctx *prev_odm_pipe,
+ struct pipe_ctx *next_odm_pipe);
+void dcn20_acquire_dsc(const struct dc *dc,
+ struct resource_context *res_ctx,
+ struct display_stream_compressor **dsc,
+ int pipe_idx);
+struct pipe_ctx *dcn20_find_secondary_pipe(struct dc *dc,
+ struct resource_context *res_ctx,
+ const struct resource_pool *pool,
+ const struct pipe_ctx *primary_pipe);
+bool dcn20_fast_validate_bw(
+ struct dc *dc,
+ struct dc_state *context,
+ display_e2e_pipe_params_st *pipes,
+ int *pipe_cnt_out,
+ int *pipe_split_from,
+ int *vlevel_out,
+ bool fast_validate);
enum dc_status dcn20_build_mapped_resource(const struct dc *dc, struct dc_state *context, struct dc_stream_state *stream);
enum dc_status dcn20_add_stream_to_ctx(struct dc *dc, struct dc_state *new_ctx, struct dc_stream_state *dc_stream);
+enum dc_status dcn20_add_dsc_to_stream_resource(struct dc *dc, struct dc_state *dc_ctx, struct dc_stream_state *dc_stream);
enum dc_status dcn20_remove_stream_from_ctx(struct dc *dc, struct dc_state *new_ctx, struct dc_stream_state *dc_stream);
-enum dc_status dcn20_get_default_swizzle_mode(struct dc_plane_state *plane_state);
-
-void dcn20_patch_bounding_box(
- struct dc *dc,
- struct _vcs_dpi_soc_bounding_box_st *bb);
-void dcn20_cap_soc_clocks(
- struct _vcs_dpi_soc_bounding_box_st *bb,
- struct pp_smu_nv_clock_table max_clocks);
+enum dc_status dcn20_patch_unknown_plane_state(struct dc_plane_state *plane_state);
#endif /* __DC_RESOURCE_DCN20_H__ */
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_stream_encoder.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_stream_encoder.c
index f5bcffc426b8..aab25ca8343a 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_stream_encoder.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_stream_encoder.c
@@ -29,6 +29,8 @@
#include "dcn20_stream_encoder.h"
#include "reg_helper.h"
#include "hw_shared.h"
+#include "inc/link_dpcd.h"
+#include "dpcd_defs.h"
#define DC_LOGGER \
enc1->base.ctx->logger
@@ -152,11 +154,11 @@ static void enc2_stream_encoder_update_hdmi_info_packets(
/* Always add mandatory packets first, followed by optional ones */
enc2_update_hdmi_info_packet(enc1, 0, &info_frame->avi);
- enc2_update_hdmi_info_packet(enc1, 5, &info_frame->hfvsif);
+ enc2_update_hdmi_info_packet(enc1, 1, &info_frame->hfvsif);
enc2_update_hdmi_info_packet(enc1, 2, &info_frame->gamut);
- enc2_update_hdmi_info_packet(enc1, 1, &info_frame->vendor);
- enc2_update_hdmi_info_packet(enc1, 3, &info_frame->spd);
- enc2_update_hdmi_info_packet(enc1, 4, &info_frame->hdrsmd);
+ enc2_update_hdmi_info_packet(enc1, 3, &info_frame->vendor);
+ enc2_update_hdmi_info_packet(enc1, 4, &info_frame->spd);
+ enc2_update_hdmi_info_packet(enc1, 5, &info_frame->hdrsmd);
}
static void enc2_stream_encoder_stop_hdmi_info_packets(
@@ -205,13 +207,12 @@ static void enc2_stream_encoder_stop_hdmi_info_packets(
HDMI_GENERIC7_LINE, 0);
}
-#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT
-
/* Update GSP7 SDP 128 byte long */
-static void enc2_send_gsp7_128_info_packet(
+static void enc2_update_gsp7_128_info_packet(
struct dcn10_stream_encoder *enc1,
- const struct dc_info_packet_128 *info_packet)
+ const struct dc_info_packet_128 *info_packet,
+ bool immediate_update)
{
uint32_t i;
@@ -266,7 +267,9 @@ static void enc2_send_gsp7_128_info_packet(
REG_WRITE(AFMT_GENERIC_7, *content++);
}
- REG_UPDATE(AFMT_VBI_PACKET_CONTROL1, AFMT_GENERIC7_FRAME_UPDATE, 1);
+ REG_UPDATE_2(AFMT_VBI_PACKET_CONTROL1,
+ AFMT_GENERIC7_FRAME_UPDATE, !immediate_update,
+ AFMT_GENERIC7_IMMEDIATE_UPDATE, immediate_update);
}
/* Set DSC-related configuration.
@@ -277,18 +280,9 @@ static void enc2_send_gsp7_128_info_packet(
static void enc2_dp_set_dsc_config(struct stream_encoder *enc,
enum optc_dsc_mode dsc_mode,
uint32_t dsc_bytes_per_pixel,
- uint32_t dsc_slice_width,
- uint8_t *dsc_packed_pps)
+ uint32_t dsc_slice_width)
{
struct dcn10_stream_encoder *enc1 = DCN10STRENC_FROM_STRENC(enc);
- uint32_t dsc_value = 0;
-
- dsc_value = REG_READ(DP_DSC_CNTL);
-
- /* dsc disable skip */
- if ((dsc_value & 0x3) == 0x0)
- return;
-
REG_UPDATE_2(DP_DSC_CNTL,
DP_DSC_MODE, dsc_mode,
@@ -296,8 +290,17 @@ static void enc2_dp_set_dsc_config(struct stream_encoder *enc,
REG_SET(DP_DSC_BYTES_PER_PIXEL, 0,
DP_DSC_BYTES_PER_PIXEL, dsc_bytes_per_pixel);
+}
- if (dsc_mode != OPTC_DSC_DISABLED) {
+
+static void enc2_dp_set_dsc_pps_info_packet(struct stream_encoder *enc,
+ bool enable,
+ uint8_t *dsc_packed_pps,
+ bool immediate_update)
+{
+ struct dcn10_stream_encoder *enc1 = DCN10STRENC_FROM_STRENC(enc);
+
+ if (enable) {
struct dc_info_packet_128 pps_sdp;
ASSERT(dsc_packed_pps);
@@ -309,7 +312,7 @@ static void enc2_dp_set_dsc_config(struct stream_encoder *enc,
pps_sdp.hb2 = 127;
pps_sdp.hb3 = 0;
memcpy(&pps_sdp.sb[0], dsc_packed_pps, sizeof(pps_sdp.sb));
- enc2_send_gsp7_128_info_packet(enc1, &pps_sdp);
+ enc2_update_gsp7_128_info_packet(enc1, &pps_sdp, immediate_update);
/* Enable Generic Stream Packet 7 (GSP) transmission */
//REG_UPDATE(DP_SEC_CNTL,
@@ -340,9 +343,8 @@ static void enc2_dp_set_dsc_config(struct stream_encoder *enc,
REG_UPDATE(DP_SEC_CNTL2, DP_SEC_GSP7_PPS, 0);
}
}
-#endif
-#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT
+
/* This function reads DSC-related register fields to be logged later in dcn10_log_hw_state
* into a dcn_dsc_state struct.
*/
@@ -363,7 +365,6 @@ static void enc2_read_state(struct stream_encoder *enc, struct enc_state *s)
REG_GET(DP_SEC_CNTL, DP_SEC_STREAM_ENABLE, &s->sec_stream_enable);
}
}
-#endif
/* Set Dynamic Metadata-configuration.
* enable_dme: TRUE: enables Dynamic Metadata Engine, FALSE: disables DME
@@ -373,7 +374,7 @@ static void enc2_read_state(struct stream_encoder *enc, struct enc_state *s)
*
* Ensure the OTG master update lock is set when changing DME configuration.
*/
-static void enc2_set_dynamic_metadata(struct stream_encoder *enc,
+void enc2_set_dynamic_metadata(struct stream_encoder *enc,
bool enable_dme,
uint32_t hubp_requestor_id,
enum dynamic_metadata_mode dmdata_mode)
@@ -443,14 +444,13 @@ static bool is_two_pixels_per_containter(const struct dc_crtc_timing *timing)
{
bool two_pix = timing->pixel_encoding == PIXEL_ENCODING_YCBCR420;
-#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT
two_pix = two_pix || (timing->flags.DSC && timing->pixel_encoding == PIXEL_ENCODING_YCBCR422
&& !timing->dsc_cfg.ycbcr422_simple);
-#endif
return two_pix;
}
void enc2_stream_encoder_dp_unblank(
+ struct dc_link *link,
struct stream_encoder *enc,
const struct encoder_unblank_param *param)
{
@@ -463,7 +463,7 @@ void enc2_stream_encoder_dp_unblank(
uint64_t m_vid_l = n_vid;
/* YCbCr 4:2:0 : Computed VID_M will be 2X the input rate */
- if (is_two_pixels_per_containter(&param->timing) || param->odm) {
+ if (is_two_pixels_per_containter(&param->timing) || param->opp_cnt > 1) {
/*this logic should be the same in get_pixel_clock_parameters() */
n_multiply = 1;
}
@@ -495,15 +495,23 @@ void enc2_stream_encoder_dp_unblank(
DP_VID_N_MUL, n_multiply);
}
- /* set DIG_START to 0x1 to reset FIFO */
+ /* make sure stream is disabled before resetting steer fifo */
+ REG_UPDATE(DP_VID_STREAM_CNTL, DP_VID_STREAM_ENABLE, false);
+ REG_WAIT(DP_VID_STREAM_CNTL, DP_VID_STREAM_STATUS, 0, 10, 5000);
+ /* set DIG_START to 0x1 to reset FIFO */
REG_UPDATE(DIG_FE_CNTL, DIG_START, 1);
+ udelay(1);
/* write 0 to take the FIFO out of reset */
REG_UPDATE(DIG_FE_CNTL, DIG_START, 0);
- /* switch DP encoder to CRTC data */
+ /* switch DP encoder to CRTC data, but reset the fifo first. It may happen
+ * that it overflows during mode transition, and sometimes doesn't recover.
+ */
+ REG_UPDATE(DP_STEER_FIFO, DP_STEER_FIFO_RESET, 1);
+ udelay(10);
REG_UPDATE(DP_STEER_FIFO, DP_STEER_FIFO_RESET, 0);
@@ -521,6 +529,8 @@ void enc2_stream_encoder_dp_unblank(
*/
REG_UPDATE(DP_VID_STREAM_CNTL, DP_VID_STREAM_ENABLE, true);
+
+ dp_source_sequence_trace(link, DPCD_SOURCE_SEQ_AFTER_ENABLE_DP_VID_STREAM);
}
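Pieced together across the hunks above, the new unblank tail runs in this order (a condensed sketch that omits the M_VID/N_VID programming earlier in the function):

	/* Quiesce the stream before touching any FIFO. */
	REG_UPDATE(DP_VID_STREAM_CNTL, DP_VID_STREAM_ENABLE, false);
	REG_WAIT(DP_VID_STREAM_CNTL, DP_VID_STREAM_STATUS, 0, 10, 5000);

	/* Pulse DIG_START to reset the front-end FIFO. */
	REG_UPDATE(DIG_FE_CNTL, DIG_START, 1);
	udelay(1);
	REG_UPDATE(DIG_FE_CNTL, DIG_START, 0);

	/* Pulse the steer FIFO reset so a mode-transition overflow can't leave it stuck. */
	REG_UPDATE(DP_STEER_FIFO, DP_STEER_FIFO_RESET, 1);
	udelay(10);
	REG_UPDATE(DP_STEER_FIFO, DP_STEER_FIFO_RESET, 0);

	/* Re-enable the stream and record the source sequence trace point. */
	REG_UPDATE(DP_VID_STREAM_CNTL, DP_VID_STREAM_ENABLE, true);
	dp_source_sequence_trace(link, DPCD_SOURCE_SEQ_AFTER_ENABLE_DP_VID_STREAM);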
static void enc2_dp_set_odm_combine(
@@ -536,16 +546,32 @@ void enc2_stream_encoder_dp_set_stream_attribute(
struct stream_encoder *enc,
struct dc_crtc_timing *crtc_timing,
enum dc_color_space output_color_space,
+ bool use_vsc_sdp_for_colorimetry,
uint32_t enable_sdp_splitting)
{
struct dcn10_stream_encoder *enc1 = DCN10STRENC_FROM_STRENC(enc);
- enc1_stream_encoder_dp_set_stream_attribute(enc, crtc_timing, output_color_space, enable_sdp_splitting);
+ enc1_stream_encoder_dp_set_stream_attribute(enc,
+ crtc_timing,
+ output_color_space,
+ use_vsc_sdp_for_colorimetry,
+ enable_sdp_splitting);
REG_UPDATE(DP_SEC_FRAMING4,
DP_SST_SDP_SPLITTING, enable_sdp_splitting);
}
+uint32_t enc2_get_fifo_cal_average_level(
+ struct stream_encoder *enc)
+{
+ struct dcn10_stream_encoder *enc1 = DCN10STRENC_FROM_STRENC(enc);
+ uint32_t fifo_level;
+
+ REG_GET(DIG_FIFO_STATUS,
+ DIG_FIFO_CAL_AVERAGE_LEVEL, &fifo_level);
+ return fifo_level;
+}
+
static const struct stream_encoder_funcs dcn20_str_enc_funcs = {
.dp_set_odm_combine =
enc2_dp_set_odm_combine,
@@ -555,14 +581,16 @@ static const struct stream_encoder_funcs dcn20_str_enc_funcs = {
enc1_stream_encoder_hdmi_set_stream_attribute,
.dvi_set_stream_attribute =
enc1_stream_encoder_dvi_set_stream_attribute,
- .set_mst_bandwidth =
- enc1_stream_encoder_set_mst_bandwidth,
+ .set_throttled_vcp_size =
+ enc1_stream_encoder_set_throttled_vcp_size,
.update_hdmi_info_packets =
enc2_stream_encoder_update_hdmi_info_packets,
.stop_hdmi_info_packets =
enc2_stream_encoder_stop_hdmi_info_packets,
.update_dp_info_packets =
enc2_stream_encoder_update_dp_info_packets,
+ .send_immediate_sdp_message =
+ enc1_stream_encoder_send_immediate_sdp_message,
.stop_dp_info_packets =
enc1_stream_encoder_stop_dp_info_packets,
.dp_blank =
@@ -580,14 +608,17 @@ static const struct stream_encoder_funcs dcn20_str_enc_funcs = {
.setup_stereo_sync = enc1_setup_stereo_sync,
.set_avmute = enc1_stream_encoder_set_avmute,
.dig_connect_to_otg = enc1_dig_connect_to_otg,
-#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT
- .enc_read_state = enc2_read_state,
-#endif
+ .dig_source_otg = enc1_dig_source_otg,
-#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT
+ .dp_get_pixel_format =
+ enc1_stream_encoder_dp_get_pixel_format,
+
+ .enc_read_state = enc2_read_state,
.dp_set_dsc_config = enc2_dp_set_dsc_config,
-#endif
+ .dp_set_dsc_pps_info_packet = enc2_dp_set_dsc_pps_info_packet,
.set_dynamic_metadata = enc2_set_dynamic_metadata,
+ .hdmi_reset_stream_attribute = enc1_reset_hdmi_stream_attribute,
+ .get_fifo_cal_average_level = enc2_get_fifo_cal_average_level,
};
void dcn20_stream_encoder_construct(
@@ -606,5 +637,6 @@ void dcn20_stream_encoder_construct(
enc1->regs = regs;
enc1->se_shift = se_shift;
enc1->se_mask = se_mask;
+ enc1->base.stream_enc_inst = eng_id - ENGINE_ID_DIGA;
}
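The DSC hook split above means callers now program timing-side DSC and the 128-byte PPS SDP through two separate stream_encoder_funcs entries rather than one combined call. A hypothetical caller sketch (the wrapper name and its parameters are illustrative, not from this patch; the funcs entries and OPTC_DSC_DISABLED are):

static void example_enable_dp_dsc(struct stream_encoder *enc,
		enum optc_dsc_mode dsc_mode,
		uint32_t dsc_bytes_per_pixel,
		uint32_t dsc_slice_width,
		uint8_t *dsc_packed_pps)
{
	/* Step 1: DSC timing configuration only; no PPS handling here anymore. */
	enc->funcs->dp_set_dsc_config(enc, dsc_mode,
			dsc_bytes_per_pixel, dsc_slice_width);

	/* Step 2: send (or stop) the PPS SDP separately; passing false for
	 * immediate_update latches the packet at the next frame boundary. */
	enc->funcs->dp_set_dsc_pps_info_packet(enc,
			dsc_mode != OPTC_DSC_DISABLED, dsc_packed_pps, false);
}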
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_stream_encoder.h b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_stream_encoder.h
index 6d40e8c9b78f..baa1e539f341 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_stream_encoder.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_stream_encoder.h
@@ -83,6 +83,8 @@
SE_SF(DIG0_HDMI_METADATA_PACKET_CONTROL, HDMI_METADATA_PACKET_LINE, mask_sh),\
SE_SF(DIG0_DIG_FE_CNTL, DOLBY_VISION_EN, mask_sh),\
SE_SF(DP0_DP_PIXEL_FORMAT, DP_PIXEL_COMBINE, mask_sh),\
+ SE_SF(DP0_DP_SEC_CNTL1, DP_SEC_GSP5_LINE_REFERENCE, mask_sh),\
+ SE_SF(DP0_DP_SEC_CNTL5, DP_SEC_GSP5_LINE_NUM, mask_sh),\
SE_SF(DP0_DP_SEC_FRAMING4, DP_SST_SDP_SPLITTING, mask_sh)
void dcn20_stream_encoder_construct(
@@ -98,10 +100,20 @@ void enc2_stream_encoder_dp_set_stream_attribute(
struct stream_encoder *enc,
struct dc_crtc_timing *crtc_timing,
enum dc_color_space output_color_space,
+ bool use_vsc_sdp_for_colorimetry,
uint32_t enable_sdp_splitting);
void enc2_stream_encoder_dp_unblank(
+ struct dc_link *link,
struct stream_encoder *enc,
const struct encoder_unblank_param *param);
+void enc2_set_dynamic_metadata(struct stream_encoder *enc,
+ bool enable_dme,
+ uint32_t hubp_requestor_id,
+ enum dynamic_metadata_mode dmdata_mode);
+
+uint32_t enc2_get_fifo_cal_average_level(
+ struct stream_encoder *enc);
+
#endif /* __DC_STREAM_ENCODER_DCN20_H__ */
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_vmid.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_vmid.c
index 27679ef6ebe8..96c263223315 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_vmid.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_vmid.c
@@ -23,6 +23,8 @@
*
*/
+#include <linux/delay.h>
+
#include "dcn20_vmid.h"
#include "reg_helper.h"
@@ -36,6 +38,38 @@
#define FN(reg_name, field_name) \
vmid->shifts->field_name, vmid->masks->field_name
+static void dcn20_wait_for_vmid_ready(struct dcn20_vmid *vmid)
+{
+ /* According to the hardware spec, we need to poll for the lowest
+ * bit of PAGE_TABLE_BASE_ADDR_LO32 to read 1 any time a GPUVM
+ * context is updated. We can't use REG_WAIT here since we
+ * don't have a separate field to wait on.
+ *
+ * TODO: Confirm timeout / poll interval with hardware team
+ */
+
+ int max_times = 10000;
+ int delay_us = 5;
+ int i;
+
+ for (i = 0; i < max_times; ++i) {
+ uint32_t entry_lo32;
+
+ REG_GET(PAGE_TABLE_BASE_ADDR_LO32,
+ VM_CONTEXT0_PAGE_DIRECTORY_ENTRY_LO32,
+ &entry_lo32);
+
+ if (entry_lo32 & 0x1)
+ return;
+
+ udelay(delay_us);
+ }
+
+ /* VM setup timed out */
+ DC_LOG_WARNING("Timeout while waiting for GPUVM context update\n");
+ ASSERT(0);
+}
+
void dcn20_vmid_setup(struct dcn20_vmid *vmid, const struct dcn_vmid_page_table_config *config)
{
REG_SET(PAGE_TABLE_START_ADDR_HI32, 0,
@@ -54,6 +88,9 @@ void dcn20_vmid_setup(struct dcn20_vmid *vmid, const struct dcn_vmid_page_table_
REG_SET(PAGE_TABLE_BASE_ADDR_HI32, 0,
VM_CONTEXT0_PAGE_DIRECTORY_ENTRY_HI32, (config->page_table_base_addr >> 32) & 0xFFFFFFFF);
+ /* Note: per hardware spec PAGE_TABLE_BASE_ADDR_LO32 must be programmed last in sequence */
REG_SET(PAGE_TABLE_BASE_ADDR_LO32, 0,
VM_CONTEXT0_PAGE_DIRECTORY_ENTRY_LO32, config->page_table_base_addr & 0xFFFFFFFF);
+
+ dcn20_wait_for_vmid_ready(vmid);
}
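With the wait added, dcn20_vmid_setup is now synchronous with respect to the GPUVM context update. A hypothetical caller sketch (only page_table_base_addr is taken from this patch; the other config fields are left zeroed and the caller name is illustrative):

static void example_program_vmid(struct dcn20_vmid *vmid, uint64_t pt_base_addr)
{
	struct dcn_vmid_page_table_config config = {0};

	/* The LO32 half of the base address is programmed last by
	 * dcn20_vmid_setup and is what triggers the context update
	 * that the new poll waits on. */
	config.page_table_base_addr = pt_base_addr;

	dcn20_vmid_setup(vmid, &config);
	/* Returns only after bit 0 of PAGE_TABLE_BASE_ADDR_LO32 reads back 1,
	 * or after the 10000 * 5 us timeout above expires with a warning. */
}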
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_vmid.h b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_vmid.h
index 02fafb013fc6..f1ef46e8da5b 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_vmid.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_vmid.h
@@ -34,13 +34,6 @@
#define BASE(seg) \
BASE_INNER(seg)
-#define SRI(reg_name, block, id)\
- .reg_name = BASE(mm ## block ## id ## _ ## reg_name ## _BASE_IDX) + \
- mm ## block ## id ## _ ## reg_name
-
-#define SF(reg_name, field_name, post_fix)\
- .field_name = reg_name ## __ ## field_name ## post_fix
-
#define DCN20_VMID_REG_LIST(id)\
SRI(CNTL, DCN_VM_CONTEXT, id),\
SRI(PAGE_TABLE_BASE_ADDR_HI32, DCN_VM_CONTEXT, id),\