| author | Thomas Gleixner <tglx@linutronix.de> | 2018-06-22 21:20:35 +0200 | 
|---|---|---|
| committer | Thomas Gleixner <tglx@linutronix.de> | 2018-06-22 21:20:35 +0200 | 
| commit | 7731b8bc94e599c9a79e428f3359ff2c34b7576a (patch) | |
| tree | 879f18ccbe274122f2d4f095b43cbc7f953e0ada /drivers/gpu/drm/i915/intel_runtime_pm.c | |
| parent | 48e315618dc4dc8904182cd221e3d395d5d97005 (diff) | |
| parent | 9ffc59d57228d74809700be6f7ecb1db10292f05 (diff) | |
Merge branch 'linus' into x86/urgent
Required to queue a dependent fix.
Diffstat (limited to 'drivers/gpu/drm/i915/intel_runtime_pm.c')
| mode | path | changed |
|---|---|---|
| -rw-r--r-- | drivers/gpu/drm/i915/intel_runtime_pm.c | 101 |
1 file changed, 79 insertions, 22 deletions
```diff
diff --git a/drivers/gpu/drm/i915/intel_runtime_pm.c b/drivers/gpu/drm/i915/intel_runtime_pm.c
index 66de4b2dc8b7..53a6eaa9671a 100644
--- a/drivers/gpu/drm/i915/intel_runtime_pm.c
+++ b/drivers/gpu/drm/i915/intel_runtime_pm.c
@@ -542,6 +542,29 @@ void gen9_sanitize_dc_state(struct drm_i915_private *dev_priv)
 	dev_priv->csr.dc_state = val;
 }
 
+/**
+ * gen9_set_dc_state - set target display C power state
+ * @dev_priv: i915 device instance
+ * @state: target DC power state
+ * - DC_STATE_DISABLE
+ * - DC_STATE_EN_UPTO_DC5
+ * - DC_STATE_EN_UPTO_DC6
+ * - DC_STATE_EN_DC9
+ *
+ * Signal to DMC firmware/HW the target DC power state passed in @state.
+ * DMC/HW can turn off individual display clocks and power rails when entering
+ * a deeper DC power state (higher in number) and turns these back when exiting
+ * that state to a shallower power state (lower in number). The HW will decide
+ * when to actually enter a given state on an on-demand basis, for instance
+ * depending on the active state of display pipes. The state of display
+ * registers backed by affected power rails are saved/restored as needed.
+ *
+ * Based on the above enabling a deeper DC power state is asynchronous wrt.
+ * enabling it. Disabling a deeper power state is synchronous: for instance
+ * setting %DC_STATE_DISABLE won't complete until all HW resources are turned
+ * back on and register state is restored. This is guaranteed by the MMIO write
+ * to DC_STATE_EN blocking until the state is restored.
+ */
 static void gen9_set_dc_state(struct drm_i915_private *dev_priv, uint32_t state)
 {
 	uint32_t val;
@@ -635,7 +658,7 @@ static void assert_can_enable_dc6(struct drm_i915_private *dev_priv)
 	assert_csr_loaded(dev_priv);
 }
 
-void skl_enable_dc6(struct drm_i915_private *dev_priv)
+static void skl_enable_dc6(struct drm_i915_private *dev_priv)
 {
 	assert_can_enable_dc6(dev_priv);
 
@@ -649,13 +672,6 @@ void skl_enable_dc6(struct drm_i915_private *dev_priv)
 	gen9_set_dc_state(dev_priv, DC_STATE_EN_UPTO_DC6);
 }
 
-void skl_disable_dc6(struct drm_i915_private *dev_priv)
-{
-	DRM_DEBUG_KMS("Disabling DC6\n");
-
-	gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);
-}
-
 static void hsw_power_well_sync_hw(struct drm_i915_private *dev_priv,
 				   struct i915_power_well *power_well)
 {
@@ -2626,32 +2642,69 @@ static void intel_power_domains_sync_hw(struct drm_i915_private *dev_priv)
 	mutex_unlock(&power_domains->lock);
 }
 
-static void gen9_dbuf_enable(struct drm_i915_private *dev_priv)
+static inline
+bool intel_dbuf_slice_set(struct drm_i915_private *dev_priv,
+			  i915_reg_t reg, bool enable)
 {
-	I915_WRITE(DBUF_CTL, I915_READ(DBUF_CTL) | DBUF_POWER_REQUEST);
-	POSTING_READ(DBUF_CTL);
+	u32 val, status;
 
+	val = I915_READ(reg);
+	val = enable ? (val | DBUF_POWER_REQUEST) : (val & ~DBUF_POWER_REQUEST);
+	I915_WRITE(reg, val);
+	POSTING_READ(reg);
 	udelay(10);
 
-	if (!(I915_READ(DBUF_CTL) & DBUF_POWER_STATE))
-		DRM_ERROR("DBuf power enable timeout\n");
+	status = I915_READ(reg) & DBUF_POWER_STATE;
+	if ((enable && !status) || (!enable && status)) {
+		DRM_ERROR("DBus power %s timeout!\n",
+			  enable ? "enable" : "disable");
+		return false;
+	}
+	return true;
+}
+
+static void gen9_dbuf_enable(struct drm_i915_private *dev_priv)
+{
+	intel_dbuf_slice_set(dev_priv, DBUF_CTL, true);
 }
 
 static void gen9_dbuf_disable(struct drm_i915_private *dev_priv)
 {
-	I915_WRITE(DBUF_CTL, I915_READ(DBUF_CTL) & ~DBUF_POWER_REQUEST);
-	POSTING_READ(DBUF_CTL);
+	intel_dbuf_slice_set(dev_priv, DBUF_CTL, false);
+}
 
-	udelay(10);
+static u8 intel_dbuf_max_slices(struct drm_i915_private *dev_priv)
+{
+	if (INTEL_GEN(dev_priv) < 11)
+		return 1;
+	return 2;
+}
 
-	if (I915_READ(DBUF_CTL) & DBUF_POWER_STATE)
-		DRM_ERROR("DBuf power disable timeout!\n");
+void icl_dbuf_slices_update(struct drm_i915_private *dev_priv,
+			    u8 req_slices)
+{
+	u8 hw_enabled_slices = dev_priv->wm.skl_hw.ddb.enabled_slices;
+	u32 val;
+	bool ret;
+
+	if (req_slices > intel_dbuf_max_slices(dev_priv)) {
+		DRM_ERROR("Invalid number of dbuf slices requested\n");
+		return;
+	}
+
+	if (req_slices == hw_enabled_slices || req_slices == 0)
+		return;
+
+	val = I915_READ(DBUF_CTL_S2);
+	if (req_slices > hw_enabled_slices)
+		ret = intel_dbuf_slice_set(dev_priv, DBUF_CTL_S2, true);
+	else
+		ret = intel_dbuf_slice_set(dev_priv, DBUF_CTL_S2, false);
+
+	if (ret)
+		dev_priv->wm.skl_hw.ddb.enabled_slices = req_slices;
 }
 
-/*
- * TODO: we shouldn't always enable DBUF_CTL_S2, we should only enable it when
- * needed and keep it disabled as much as possible.
- */
 static void icl_dbuf_enable(struct drm_i915_private *dev_priv)
 {
 	I915_WRITE(DBUF_CTL_S1, I915_READ(DBUF_CTL_S1) | DBUF_POWER_REQUEST);
@@ -2663,6 +2716,8 @@ static void icl_dbuf_enable(struct drm_i915_private *dev_priv)
 	if (!(I915_READ(DBUF_CTL_S1) & DBUF_POWER_STATE) ||
 	    !(I915_READ(DBUF_CTL_S2) & DBUF_POWER_STATE))
 		DRM_ERROR("DBuf power enable timeout\n");
+	else
+		dev_priv->wm.skl_hw.ddb.enabled_slices = 2;
 }
 
 static void icl_dbuf_disable(struct drm_i915_private *dev_priv)
@@ -2676,6 +2731,8 @@ static void icl_dbuf_disable(struct drm_i915_private *dev_priv)
 	if ((I915_READ(DBUF_CTL_S1) & DBUF_POWER_STATE) ||
 	    (I915_READ(DBUF_CTL_S2) & DBUF_POWER_STATE))
 		DRM_ERROR("DBuf power disable timeout!\n");
+	else
+		dev_priv->wm.skl_hw.ddb.enabled_slices = 0;
 }
 
 static void icl_mbus_init(struct drm_i915_private *dev_priv)
```
