X-Git-Url: https://gerrit.opnfv.org/gerrit/gitweb?a=blobdiff_plain;f=kernel%2Fdrivers%2Fgpu%2Fdrm%2Fi915%2Fintel_psr.c;fp=kernel%2Fdrivers%2Fgpu%2Fdrm%2Fi915%2Fintel_psr.c;h=213581c215b300c6bd3e5cb43cf890d7a917f4bb;hb=e09b41010ba33a20a87472ee821fa407a5b8da36;hp=a8f9348259ae581f977ee1a244f1734947ef0be1;hpb=f93b97fd65072de626c074dbe099a1fff05ce060;p=kvmfornfv.git

diff --git a/kernel/drivers/gpu/drm/i915/intel_psr.c b/kernel/drivers/gpu/drm/i915/intel_psr.c
index a8f934825..213581c21 100644
--- a/kernel/drivers/gpu/drm/i915/intel_psr.c
+++ b/kernel/drivers/gpu/drm/i915/intel_psr.c
@@ -73,14 +73,14 @@ static bool vlv_is_psr_active_on_pipe(struct drm_device *dev, int pipe)
 }
 
 static void intel_psr_write_vsc(struct intel_dp *intel_dp,
-				struct edp_vsc_psr *vsc_psr)
+				const struct edp_vsc_psr *vsc_psr)
 {
 	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
 	struct drm_device *dev = dig_port->base.base.dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct intel_crtc *crtc = to_intel_crtc(dig_port->base.base.crtc);
-	u32 ctl_reg = HSW_TVIDEO_DIP_CTL(crtc->config->cpu_transcoder);
-	u32 data_reg = HSW_TVIDEO_DIP_VSC_DATA(crtc->config->cpu_transcoder);
+	enum transcoder cpu_transcoder = crtc->config->cpu_transcoder;
+	u32 ctl_reg = HSW_TVIDEO_DIP_CTL(cpu_transcoder);
 	uint32_t *data = (uint32_t *) vsc_psr;
 	unsigned int i;
 
@@ -90,12 +90,14 @@ static void intel_psr_write_vsc(struct intel_dp *intel_dp,
 	I915_WRITE(ctl_reg, 0);
 	POSTING_READ(ctl_reg);
 
-	for (i = 0; i < VIDEO_DIP_VSC_DATA_SIZE; i += 4) {
-		if (i < sizeof(struct edp_vsc_psr))
-			I915_WRITE(data_reg + i, *data++);
-		else
-			I915_WRITE(data_reg + i, 0);
+	for (i = 0; i < sizeof(*vsc_psr); i += 4) {
+		I915_WRITE(HSW_TVIDEO_DIP_VSC_DATA(cpu_transcoder,
+						   i >> 2), *data);
+		data++;
 	}
+	for (; i < VIDEO_DIP_VSC_DATA_SIZE; i += 4)
+		I915_WRITE(HSW_TVIDEO_DIP_VSC_DATA(cpu_transcoder,
+						   i >> 2), 0);
 
 	I915_WRITE(ctl_reg, VIDEO_DIP_ENABLE_VSC_HSW);
 	POSTING_READ(ctl_reg);
@@ -117,6 +119,19 @@ static void vlv_psr_setup_vsc(struct intel_dp *intel_dp)
 	I915_WRITE(VLV_VSCSDP(pipe), val);
 }
 
+static void skl_psr_setup_su_vsc(struct intel_dp *intel_dp)
+{
+	struct edp_vsc_psr psr_vsc;
+
+	/* Prepare VSC Header for SU as per EDP 1.4 spec, Table 6.11 */
+	memset(&psr_vsc, 0, sizeof(psr_vsc));
+	psr_vsc.sdp_header.HB0 = 0;
+	psr_vsc.sdp_header.HB1 = 0x7;
+	psr_vsc.sdp_header.HB2 = 0x3;
+	psr_vsc.sdp_header.HB3 = 0xb;
+	intel_psr_write_vsc(intel_dp, &psr_vsc);
+}
+
 static void hsw_psr_setup_vsc(struct intel_dp *intel_dp)
 {
 	struct edp_vsc_psr psr_vsc;
@@ -133,7 +148,7 @@ static void hsw_psr_setup_vsc(struct intel_dp *intel_dp)
 static void vlv_psr_enable_sink(struct intel_dp *intel_dp)
 {
 	drm_dp_dpcd_writeb(&intel_dp->aux, DP_PSR_EN_CFG,
-			   DP_PSR_ENABLE);
+			   DP_PSR_ENABLE | DP_PSR_MAIN_LINK_ACTIVE);
 }
 
 static void hsw_psr_enable_sink(struct intel_dp *intel_dp)
@@ -157,13 +172,14 @@ static void hsw_psr_enable_sink(struct intel_dp *intel_dp)
 
 	aux_clock_divider = intel_dp->get_aux_clock_divider(intel_dp, 0);
 
-	/* Enable PSR in sink */
-	if (dev_priv->psr.link_standby)
-		drm_dp_dpcd_writeb(&intel_dp->aux, DP_PSR_EN_CFG,
-				   DP_PSR_ENABLE | DP_PSR_MAIN_LINK_ACTIVE);
-	else
-		drm_dp_dpcd_writeb(&intel_dp->aux, DP_PSR_EN_CFG,
-				   DP_PSR_ENABLE & ~DP_PSR_MAIN_LINK_ACTIVE);
+	drm_dp_dpcd_writeb(&intel_dp->aux, DP_PSR_EN_CFG,
+			   DP_PSR_ENABLE & ~DP_PSR_MAIN_LINK_ACTIVE);
+
+	/* Enable AUX frame sync at sink */
+	if (dev_priv->psr.aux_frame_sync)
+		drm_dp_dpcd_writeb(&intel_dp->aux,
+				   DP_SINK_DEVICE_AUX_FRAME_SYNC_CONF,
+				   DP_AUX_FRAME_SYNC_ENABLE);
 
 	aux_data_reg = (INTEL_INFO(dev)->gen >= 9) ?
 			DPA_AUX_CH_DATA1 : EDP_PSR_AUX_DATA1(dev);
@@ -183,8 +199,10 @@ static void hsw_psr_enable_sink(struct intel_dp *intel_dp)
 		val |= DP_AUX_CH_CTL_TIME_OUT_1600us;
 		val &= ~DP_AUX_CH_CTL_MESSAGE_SIZE_MASK;
 		val |= (sizeof(aux_msg) << DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT);
-		/* Use hardcoded data values for PSR */
+		/* Use hardcoded data values for PSR, frame sync and GTC */
 		val &= ~DP_AUX_CH_CTL_PSR_DATA_AUX_REG_SKL;
+		val &= ~DP_AUX_CH_CTL_FS_DATA_AUX_REG_SKL;
+		val &= ~DP_AUX_CH_CTL_GTC_DATA_AUX_REG_SKL;
 		I915_WRITE(aux_ctl_reg, val);
 	} else {
 		I915_WRITE(aux_ctl_reg,
@@ -193,6 +211,8 @@ static void hsw_psr_enable_sink(struct intel_dp *intel_dp)
 			   (precharge << DP_AUX_CH_CTL_PRECHARGE_2US_SHIFT) |
 			   (aux_clock_divider << DP_AUX_CH_CTL_BIT_CLOCK_2X_SHIFT));
 	}
+
+	drm_dp_dpcd_writeb(&intel_dp->aux, DP_PSR_EN_CFG, DP_PSR_ENABLE);
 }
 
 static void vlv_psr_enable_source(struct intel_dp *intel_dp)
@@ -232,29 +252,39 @@ static void hsw_psr_enable_source(struct intel_dp *intel_dp)
 	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
 	struct drm_device *dev = dig_port->base.base.dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
+
 	uint32_t max_sleep_time = 0x1f;
 	/* Lately it was identified that depending on panel idle frame count
 	 * calculated at HW can be off by 1. So let's use what came
-	 * from VBT + 1 and at minimum 2 to be on the safe side.
+	 * from VBT + 1.
+	 * There are also other cases where panel demands at least 4
+	 * but VBT is not being set. To cover these 2 cases lets use
+	 * at least 5 when VBT isn't set to be on the safest side.
 	 */
 	uint32_t idle_frames = dev_priv->vbt.psr.idle_frames ?
-			       dev_priv->vbt.psr.idle_frames + 1 : 2;
+			       dev_priv->vbt.psr.idle_frames + 1 : 5;
 	uint32_t val = 0x0;
 	const uint32_t link_entry_time = EDP_PSR_MIN_LINK_ENTRY_TIME_8_LINES;
 
-	if (dev_priv->psr.link_standby) {
-		val |= EDP_PSR_LINK_STANDBY;
+	if (intel_dp->psr_dpcd[1] & DP_PSR_NO_TRAIN_ON_EXIT) {
+		/* It doesn't mean we shouldn't send TPS patters, so let's
+		   send the minimal TP1 possible and skip TP2. */
+		val |= EDP_PSR_TP1_TIME_100us;
 		val |= EDP_PSR_TP2_TP3_TIME_0us;
-		val |= EDP_PSR_TP1_TIME_0us;
 		val |= EDP_PSR_SKIP_AUX_EXIT;
-	} else
-		val |= EDP_PSR_LINK_DISABLE;
+		/* Sink should be able to train with the 5 or 6 idle patterns */
+		idle_frames += 4;
+	}
 
 	I915_WRITE(EDP_PSR_CTL(dev), val |
 		   (IS_BROADWELL(dev) ? 0 : link_entry_time) |
 		   max_sleep_time << EDP_PSR_MAX_SLEEP_TIME_SHIFT |
 		   idle_frames << EDP_PSR_IDLE_FRAME_SHIFT |
 		   EDP_PSR_ENABLE);
+
+	if (dev_priv->psr.psr2_support)
+		I915_WRITE(EDP_PSR2_CTL, EDP_PSR2_ENABLE |
+			   EDP_SU_TRACK_ENABLE | EDP_PSR2_TP2_TIME_100);
 }
 
 static bool intel_psr_match_conditions(struct intel_dp *intel_dp)
@@ -294,6 +324,12 @@ static bool intel_psr_match_conditions(struct intel_dp *intel_dp)
 		return false;
 	}
 
+	if (!IS_VALLEYVIEW(dev) && ((dev_priv->vbt.psr.full_link) ||
+				    (dig_port->port != PORT_A))) {
+		DRM_DEBUG_KMS("PSR condition failed: Link Standby requested/needed but not supported on this platform\n");
+		return false;
+	}
+
 	dev_priv->psr.source_ok = true;
 	return true;
 }
@@ -332,6 +368,7 @@ void intel_psr_enable(struct intel_dp *intel_dp)
 	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
 	struct drm_device *dev = intel_dig_port->base.base.dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct intel_crtc *crtc = to_intel_crtc(intel_dig_port->base.base.crtc);
 
 	if (!HAS_PSR(dev)) {
 		DRM_DEBUG_KMS("PSR not supported on this platform\n");
@@ -352,21 +389,23 @@ void intel_psr_enable(struct intel_dp *intel_dp)
 	if (!intel_psr_match_conditions(intel_dp))
 		goto unlock;
 
-	/* First we check VBT, but we must respect sink and source
-	 * known restrictions */
-	dev_priv->psr.link_standby = dev_priv->vbt.psr.full_link;
-	if ((intel_dp->psr_dpcd[1] & DP_PSR_NO_TRAIN_ON_EXIT) ||
-	    (IS_BROADWELL(dev) && intel_dig_port->port != PORT_A))
-		dev_priv->psr.link_standby = true;
-
 	dev_priv->psr.busy_frontbuffer_bits = 0;
 
 	if (HAS_DDI(dev)) {
 		hsw_psr_setup_vsc(intel_dp);
 
+		if (dev_priv->psr.psr2_support) {
+			/* PSR2 is restricted to work with panel resolutions upto 3200x2000 */
+			if (crtc->config->pipe_src_w > 3200 ||
+			    crtc->config->pipe_src_h > 2000)
+				dev_priv->psr.psr2_support = false;
+			else
+				skl_psr_setup_su_vsc(intel_dp);
+		}
+
 		/* Avoid continuous PSR exit by masking memup and hpd */
 		I915_WRITE(EDP_PSR_DEBUG_CTL(dev), EDP_PSR_DEBUG_MASK_MEMUP |
-			   EDP_PSR_DEBUG_MASK_HPD | EDP_PSR_DEBUG_MASK_LPSP);
+			   EDP_PSR_DEBUG_MASK_HPD);
 
 		/* Enable PSR on the panel */
 		hsw_psr_enable_sink(intel_dp);
@@ -559,6 +598,52 @@ static void intel_psr_exit(struct drm_device *dev)
 	dev_priv->psr.active = false;
 }
 
+/**
+ * intel_psr_single_frame_update - Single Frame Update
+ * @dev: DRM device
+ * @frontbuffer_bits: frontbuffer plane tracking bits
+ *
+ * Some platforms support a single frame update feature that is used to
+ * send and update only one frame on Remote Frame Buffer.
+ * So far it is only implemented for Valleyview and Cherryview because
+ * hardware requires this to be done before a page flip.
+ */
+void intel_psr_single_frame_update(struct drm_device *dev,
+				   unsigned frontbuffer_bits)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_crtc *crtc;
+	enum pipe pipe;
+	u32 val;
+
+	/*
+	 * Single frame update is already supported on BDW+ but it requires
+	 * many W/A and it isn't really needed.
+	 */
+	if (!IS_VALLEYVIEW(dev))
+		return;
+
+	mutex_lock(&dev_priv->psr.lock);
+	if (!dev_priv->psr.enabled) {
+		mutex_unlock(&dev_priv->psr.lock);
+		return;
+	}
+
+	crtc = dp_to_dig_port(dev_priv->psr.enabled)->base.base.crtc;
+	pipe = to_intel_crtc(crtc)->pipe;
+
+	if (frontbuffer_bits & INTEL_FRONTBUFFER_ALL_MASK(pipe)) {
+		val = I915_READ(VLV_PSRCTL(pipe));
+
+		/*
+		 * We need to set this bit before writing registers for a flip.
+		 * This bit will be self-clear when it gets to the PSR active state.
+		 */
+		I915_WRITE(VLV_PSRCTL(pipe), val | VLV_EDP_PSR_SINGLE_FRAME_UPDATE);
+	}
+	mutex_unlock(&dev_priv->psr.lock);
+}
+
 /**
  * intel_psr_invalidate - Invalidade PSR
  * @dev: DRM device
@@ -572,7 +657,7 @@ static void intel_psr_exit(struct drm_device *dev)
  * Dirty frontbuffers relevant to PSR are tracked in busy_frontbuffer_bits."
  */
 void intel_psr_invalidate(struct drm_device *dev,
-		      unsigned frontbuffer_bits)
+			  unsigned frontbuffer_bits)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct drm_crtc *crtc;
 	enum pipe pipe;
@@ -587,11 +672,12 @@ void intel_psr_invalidate(struct drm_device *dev,
 	crtc = dp_to_dig_port(dev_priv->psr.enabled)->base.base.crtc;
 	pipe = to_intel_crtc(crtc)->pipe;
 
-	intel_psr_exit(dev);
-
 	frontbuffer_bits &= INTEL_FRONTBUFFER_ALL_MASK(pipe);
 	dev_priv->psr.busy_frontbuffer_bits |= frontbuffer_bits;
+
+	if (frontbuffer_bits)
+		intel_psr_exit(dev);
+
 	mutex_unlock(&dev_priv->psr.lock);
 }
 
@@ -599,6 +685,7 @@ void intel_psr_invalidate(struct drm_device *dev,
  * intel_psr_flush - Flush PSR
  * @dev: DRM device
  * @frontbuffer_bits: frontbuffer plane tracking bits
+ * @origin: which operation caused the flush
  *
 * Since the hardware frontbuffer tracking has gaps we need to integrate
 * with the software frontbuffer tracking. This function gets called every
@@ -608,11 +695,12 @@ void intel_psr_invalidate(struct drm_device *dev,
 * Dirty frontbuffers relevant to PSR are tracked in busy_frontbuffer_bits.
 */
 void intel_psr_flush(struct drm_device *dev,
-		     unsigned frontbuffer_bits)
+		     unsigned frontbuffer_bits, enum fb_op_origin origin)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct drm_crtc *crtc;
 	enum pipe pipe;
+	int delay_ms = HAS_DDI(dev) ? 100 : 500;
 
 	mutex_lock(&dev_priv->psr.lock);
 	if (!dev_priv->psr.enabled) {
@@ -622,30 +710,33 @@ void intel_psr_flush(struct drm_device *dev,
 
 	crtc = dp_to_dig_port(dev_priv->psr.enabled)->base.base.crtc;
 	pipe = to_intel_crtc(crtc)->pipe;
-	dev_priv->psr.busy_frontbuffer_bits &= ~frontbuffer_bits;
 
-	/*
-	 * On Haswell sprite plane updates don't result in a psr invalidating
-	 * signal in the hardware. Which means we need to manually fake this in
-	 * software for all flushes, not just when we've seen a preceding
-	 * invalidation through frontbuffer rendering.
-	 */
-	if (IS_HASWELL(dev) &&
-	    (frontbuffer_bits & INTEL_FRONTBUFFER_SPRITE(pipe)))
-		intel_psr_exit(dev);
+	frontbuffer_bits &= INTEL_FRONTBUFFER_ALL_MASK(pipe);
+	dev_priv->psr.busy_frontbuffer_bits &= ~frontbuffer_bits;
 
-	/*
-	 * On Valleyview and Cherryview we don't use hardware tracking so
-	 * any plane updates or cursor moves don't result in a PSR
-	 * invalidating. Which means we need to manually fake this in
-	 * software for all flushes, not just when we've seen a preceding
-	 * invalidation through frontbuffer rendering. */
-	if (!HAS_DDI(dev))
-		intel_psr_exit(dev);
+	if (HAS_DDI(dev)) {
+		/*
+		 * By definition every flush should mean invalidate + flush,
+		 * however on core platforms let's minimize the
+		 * disable/re-enable so we can avoid the invalidate when flip
+		 * originated the flush.
+		 */
+		if (frontbuffer_bits && origin != ORIGIN_FLIP)
+			intel_psr_exit(dev);
+	} else {
+		/*
+		 * On Valleyview and Cherryview we don't use hardware tracking
+		 * so any plane updates or cursor moves don't result in a PSR
+		 * invalidating. Which means we need to manually fake this in
+		 * software for all flushes.
+		 */
+		if (frontbuffer_bits)
+			intel_psr_exit(dev);
+	}
 
 	if (!dev_priv->psr.active && !dev_priv->psr.busy_frontbuffer_bits)
 		schedule_delayed_work(&dev_priv->psr.work,
-				      msecs_to_jiffies(100));
+				      msecs_to_jiffies(delay_ms));
 
 	mutex_unlock(&dev_priv->psr.lock);
 }