d714a4b5711e4e7fa390ec6b659d2683ef41f585
[kvmfornfv.git] / kernel / drivers / gpu / drm / i915 / intel_dp.c
1 /*
2  * Copyright © 2008 Intel Corporation
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice (including the next
12  * paragraph) shall be included in all copies or substantial portions of the
13  * Software.
14  *
15  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
18  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20  * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21  * IN THE SOFTWARE.
22  *
23  * Authors:
24  *    Keith Packard <keithp@keithp.com>
25  *
26  */
27
28 #include <linux/i2c.h>
29 #include <linux/slab.h>
30 #include <linux/export.h>
31 #include <linux/notifier.h>
32 #include <linux/reboot.h>
33 #include <drm/drmP.h>
34 #include <drm/drm_atomic_helper.h>
35 #include <drm/drm_crtc.h>
36 #include <drm/drm_crtc_helper.h>
37 #include <drm/drm_edid.h>
38 #include "intel_drv.h"
39 #include <drm/i915_drm.h>
40 #include "i915_drv.h"
41
/* How long to wait before giving up on a DP link status check.
 * NOTE(review): units depend on the call site (not visible here) — confirm. */
#define DP_LINK_CHECK_TIMEOUT	(10 * 1000)

/* Maps a DPCD link bandwidth code to the DPLL dividers that produce it. */
struct dp_link_dpll {
	int link_bw;		/* DP_LINK_BW_* code */
	struct dpll dpll;	/* divider settings for that link rate */
};
48
/* gen4 DPLL dividers for the two standard DP link rates */
static const struct dp_link_dpll gen4_dpll[] = {
	{ DP_LINK_BW_1_62,
		{ .p1 = 2, .p2 = 10, .n = 2, .m1 = 23, .m2 = 8 } },
	{ DP_LINK_BW_2_7,
		{ .p1 = 1, .p2 = 10, .n = 1, .m1 = 14, .m2 = 2 } }
};

/* PCH-split platforms use different divider values for the same rates */
static const struct dp_link_dpll pch_dpll[] = {
	{ DP_LINK_BW_1_62,
		{ .p1 = 2, .p2 = 10, .n = 1, .m1 = 12, .m2 = 9 } },
	{ DP_LINK_BW_2_7,
		{ .p1 = 1, .p2 = 10, .n = 2, .m1 = 14, .m2 = 8 } }
};

/* Valleyview DPLL dividers */
static const struct dp_link_dpll vlv_dpll[] = {
	{ DP_LINK_BW_1_62,
		{ .p1 = 3, .p2 = 2, .n = 5, .m1 = 3, .m2 = 81 } },
	{ DP_LINK_BW_2_7,
		{ .p1 = 2, .p2 = 2, .n = 1, .m1 = 2, .m2 = 27 } }
};
69
/*
 * CHV supports eDP 1.4 that have  more link rates.
 * Below only provides the fixed rate but exclude variable rate.
 */
static const struct dp_link_dpll chv_dpll[] = {
	/*
	 * CHV requires to program fractional division for m2.
	 * m2 is stored in fixed point format using formula below
	 * (m2_int << 22) | m2_fraction
	 */
	{ DP_LINK_BW_1_62,	/* m2_int = 32, m2_fraction = 1677722 */
		{ .p1 = 4, .p2 = 2, .n = 1, .m1 = 2, .m2 = 0x819999a } },
	{ DP_LINK_BW_2_7,	/* m2_int = 27, m2_fraction = 0 */
		{ .p1 = 4, .p2 = 1, .n = 1, .m1 = 2, .m2 = 0x6c00000 } },
	{ DP_LINK_BW_5_4,	/* m2_int = 27, m2_fraction = 0 */
		{ .p1 = 2, .p2 = 1, .n = 1, .m1 = 2, .m2 = 0x6c00000 } }
};

/*
 * Link rate tables.  Rates are in units of 10 kHz (270000 == 2.7 GHz);
 * see the "bizarre units" comment above intel_dp_link_required() below.
 */
/* Skylake supports following rates */
static const int gen9_rates[] = { 162000, 216000, 270000,
				  324000, 432000, 540000 };
static const int chv_rates[] = { 162000, 202500, 210000, 216000,
				 243000, 270000, 324000, 405000,
				 420000, 432000, 540000 };
static const int default_rates[] = { 162000, 270000, 540000 };
94
95 /**
96  * is_edp - is the given port attached to an eDP panel (either CPU or PCH)
97  * @intel_dp: DP struct
98  *
99  * If a CPU or PCH DP output is attached to an eDP panel, this function
100  * will return true, and false otherwise.
101  */
102 static bool is_edp(struct intel_dp *intel_dp)
103 {
104         struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
105
106         return intel_dig_port->base.type == INTEL_OUTPUT_EDP;
107 }
108
109 static struct drm_device *intel_dp_to_dev(struct intel_dp *intel_dp)
110 {
111         struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
112
113         return intel_dig_port->base.base.dev;
114 }
115
/* Return the intel_dp hanging off the encoder attached to @connector. */
static struct intel_dp *intel_attached_dp(struct drm_connector *connector)
{
	return enc_to_intel_dp(&intel_attached_encoder(connector)->base);
}
120
121 static void intel_dp_link_down(struct intel_dp *intel_dp);
122 static bool edp_panel_vdd_on(struct intel_dp *intel_dp);
123 static void edp_panel_vdd_off(struct intel_dp *intel_dp, bool sync);
124 static void vlv_init_panel_power_sequencer(struct intel_dp *intel_dp);
125 static void vlv_steal_power_sequencer(struct drm_device *dev,
126                                       enum pipe pipe);
127
128 static int
129 intel_dp_max_link_bw(struct intel_dp  *intel_dp)
130 {
131         int max_link_bw = intel_dp->dpcd[DP_MAX_LINK_RATE];
132
133         switch (max_link_bw) {
134         case DP_LINK_BW_1_62:
135         case DP_LINK_BW_2_7:
136         case DP_LINK_BW_5_4:
137                 break;
138         default:
139                 WARN(1, "invalid max DP link bw val %x, using 1.62Gbps\n",
140                      max_link_bw);
141                 max_link_bw = DP_LINK_BW_1_62;
142                 break;
143         }
144         return max_link_bw;
145 }
146
/*
 * Number of lanes to drive: the minimum of what the source port
 * supports and what the sink advertises in its DPCD.
 */
static u8 intel_dp_max_lane_count(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = intel_dig_port->base.base.dev;
	u8 source_max, sink_max;

	source_max = 4;
	/* on DDI platforms port A only has 4 lanes if the BIOS set DDI_A_4_LANES */
	if (HAS_DDI(dev) && intel_dig_port->port == PORT_A &&
	    (intel_dig_port->saved_port_bits & DDI_A_4_LANES) == 0)
		source_max = 2;

	sink_max = drm_dp_max_lane_count(intel_dp->dpcd);

	return min(source_max, sink_max);
}
162
163 /*
164  * The units on the numbers in the next two are... bizarre.  Examples will
165  * make it clearer; this one parallels an example in the eDP spec.
166  *
167  * intel_dp_max_data_rate for one lane of 2.7GHz evaluates as:
168  *
169  *     270000 * 1 * 8 / 10 == 216000
170  *
171  * The actual data capacity of that configuration is 2.16Gbit/s, so the
172  * units are decakilobits.  ->clock in a drm_display_mode is in kilohertz -
173  * or equivalently, kilopixels per second - so for 1680x1050R it'd be
174  * 119000.  At 18bpp that's 2142000 kilobits per second.
175  *
176  * Thus the strange-looking division by 10 in intel_dp_link_required, to
177  * get the result in decakilobits instead of kilobits.
178  */
179
/*
 * Bandwidth needed by a mode, in decakilobits per second (see the unit
 * discussion in the comment block above).  Rounds up — hence the "+ 9"
 * before the division by 10.
 */
static int
intel_dp_link_required(int pixel_clock, int bpp)
{
	int kilobits = pixel_clock * bpp;

	return (kilobits + 9) / 10;
}
185
/*
 * Usable data rate of a link, in decakilobits per second: the raw
 * symbol rate times lane count, scaled by 8/10 for the 8b/10b channel
 * coding overhead.
 */
static int
intel_dp_max_data_rate(int max_link_clock, int max_lanes)
{
	int raw = max_link_clock * max_lanes;

	return raw * 8 / 10;
}
191
192 static enum drm_mode_status
193 intel_dp_mode_valid(struct drm_connector *connector,
194                     struct drm_display_mode *mode)
195 {
196         struct intel_dp *intel_dp = intel_attached_dp(connector);
197         struct intel_connector *intel_connector = to_intel_connector(connector);
198         struct drm_display_mode *fixed_mode = intel_connector->panel.fixed_mode;
199         int target_clock = mode->clock;
200         int max_rate, mode_rate, max_lanes, max_link_clock;
201
202         if (is_edp(intel_dp) && fixed_mode) {
203                 if (mode->hdisplay > fixed_mode->hdisplay)
204                         return MODE_PANEL;
205
206                 if (mode->vdisplay > fixed_mode->vdisplay)
207                         return MODE_PANEL;
208
209                 target_clock = fixed_mode->clock;
210         }
211
212         max_link_clock = intel_dp_max_link_rate(intel_dp);
213         max_lanes = intel_dp_max_lane_count(intel_dp);
214
215         max_rate = intel_dp_max_data_rate(max_link_clock, max_lanes);
216         mode_rate = intel_dp_link_required(target_clock, 18);
217
218         if (mode_rate > max_rate)
219                 return MODE_CLOCK_HIGH;
220
221         if (mode->clock < 10000)
222                 return MODE_CLOCK_LOW;
223
224         if (mode->flags & DRM_MODE_FLAG_DBLCLK)
225                 return MODE_H_ILLEGAL;
226
227         return MODE_OK;
228 }
229
/*
 * Pack up to four bytes of an AUX message into a 32-bit register
 * value, big-endian: src[0] lands in the most significant byte.
 * Anything beyond four bytes is ignored.
 */
uint32_t intel_dp_pack_aux(const uint8_t *src, int src_bytes)
{
	uint32_t v = 0;
	int shift = 24;
	int i;

	if (src_bytes > 4)
		src_bytes = 4;

	for (i = 0; i < src_bytes; i++, shift -= 8)
		v |= (uint32_t)src[i] << shift;

	return v;
}
241
/*
 * Inverse of intel_dp_pack_aux(): spread a 32-bit register value into
 * up to four bytes, most significant byte first.
 */
static void intel_dp_unpack_aux(uint32_t src, uint8_t *dst, int dst_bytes)
{
	int i;

	if (dst_bytes > 4)
		dst_bytes = 4;

	for (i = 0; i < dst_bytes; i++)
		dst[i] = (uint8_t)(src >> (24 - i * 8));
}
250
/* hrawclock is 1/4 the FSB frequency */
/*
 * Return hrawclk in MHz, derived from the FSB strap in CLKCFG.
 * Unknown straps fall back to 133 MHz.
 */
static int
intel_hrawclk(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t clkcfg;

	/* There is no CLKCFG reg in Valleyview. VLV hrawclk is 200 MHz */
	if (IS_VALLEYVIEW(dev))
		return 200;

	clkcfg = I915_READ(CLKCFG);
	switch (clkcfg & CLKCFG_FSB_MASK) {
	case CLKCFG_FSB_400:
		return 100;
	case CLKCFG_FSB_533:
		return 133;
	case CLKCFG_FSB_667:
		return 166;
	case CLKCFG_FSB_800:
		return 200;
	case CLKCFG_FSB_1067:
		return 266;
	case CLKCFG_FSB_1333:
		return 333;
	/* these two are just a guess; one of them might be right */
	case CLKCFG_FSB_1600:
	case CLKCFG_FSB_1600_ALT:
		return 400;
	default:
		return 133;
	}
}
284
285 static void
286 intel_dp_init_panel_power_sequencer(struct drm_device *dev,
287                                     struct intel_dp *intel_dp);
288 static void
289 intel_dp_init_panel_power_sequencer_registers(struct drm_device *dev,
290                                               struct intel_dp *intel_dp);
291
/*
 * Take pps_mutex along with the power domain reference for this port.
 * The reference must be acquired *before* the mutex: power domain
 * get/put cannot be done while holding pps_mutex (see the ordering
 * comment in vlv_power_sequencer_reset()).  Pair with pps_unlock().
 */
static void pps_lock(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct intel_encoder *encoder = &intel_dig_port->base;
	struct drm_device *dev = encoder->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum intel_display_power_domain power_domain;

	/*
	 * See vlv_power_sequencer_reset() why we need
	 * a power domain reference here.
	 */
	power_domain = intel_display_port_power_domain(encoder);
	intel_display_power_get(dev_priv, power_domain);

	mutex_lock(&dev_priv->pps_mutex);
}
309
/*
 * Drop pps_mutex and then release the power domain reference taken by
 * pps_lock().  The put deliberately happens after the mutex is
 * released (same ordering constraint as in pps_lock()).
 */
static void pps_unlock(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct intel_encoder *encoder = &intel_dig_port->base;
	struct drm_device *dev = encoder->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum intel_display_power_domain power_domain;

	mutex_unlock(&dev_priv->pps_mutex);

	power_domain = intel_display_port_power_domain(encoder);
	intel_display_power_put(dev_priv, power_domain);
}
323
/*
 * Make the power sequencer of intel_dp->pps_pipe lock onto this port
 * by briefly enabling and disabling the DP port with a minimal 1-lane
 * configuration.  Without this "kick" even the VDD force bit has no
 * effect.  The port must currently be disabled; the pipe's DPLL is
 * temporarily forced on if it isn't running.
 */
static void
vlv_power_sequencer_kick(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = intel_dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum pipe pipe = intel_dp->pps_pipe;
	bool pll_enabled;
	uint32_t DP;

	if (WARN(I915_READ(intel_dp->output_reg) & DP_PORT_EN,
		 "skipping pipe %c power seqeuncer kick due to port %c being active\n",
		 pipe_name(pipe), port_name(intel_dig_port->port)))
		return;

	DRM_DEBUG_KMS("kicking pipe %c power sequencer for port %c\n",
		      pipe_name(pipe), port_name(intel_dig_port->port));

	/* Preserve the BIOS-computed detected bit. This is
	 * supposed to be read-only.
	 */
	DP = I915_READ(intel_dp->output_reg) & DP_DETECTED;
	DP |= DP_VOLTAGE_0_4 | DP_PRE_EMPHASIS_0;
	DP |= DP_PORT_WIDTH(1);
	DP |= DP_LINK_TRAIN_PAT_1;

	if (IS_CHERRYVIEW(dev))
		DP |= DP_PIPE_SELECT_CHV(pipe);
	else if (pipe == PIPE_B)
		DP |= DP_PIPEB_SELECT;

	pll_enabled = I915_READ(DPLL(pipe)) & DPLL_VCO_ENABLE;

	/*
	 * The DPLL for the pipe must be enabled for this to work.
	 * So temporarily enable it if it's not already enabled.
	 */
	if (!pll_enabled)
		vlv_force_pll_on(dev, pipe, IS_CHERRYVIEW(dev) ?
				 &chv_dpll[0].dpll : &vlv_dpll[0].dpll);

	/*
	 * Similar magic as in intel_dp_enable_port().
	 * We _must_ do this port enable + disable trick
	 * to make this power sequencer lock onto the port.
	 * Otherwise even VDD force bit won't work.
	 */
	I915_WRITE(intel_dp->output_reg, DP);
	POSTING_READ(intel_dp->output_reg);

	I915_WRITE(intel_dp->output_reg, DP | DP_PORT_EN);
	POSTING_READ(intel_dp->output_reg);

	I915_WRITE(intel_dp->output_reg, DP & ~DP_PORT_EN);
	POSTING_READ(intel_dp->output_reg);

	if (!pll_enabled)
		vlv_force_pll_off(dev, pipe);
}
383
/*
 * Return the pipe whose power sequencer drives this eDP port,
 * assigning (stealing if necessary) and kicking a free one if none is
 * assigned yet.  Must be called with pps_mutex held; eDP only.
 */
static enum pipe
vlv_power_sequencer_pipe(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = intel_dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_encoder *encoder;
	unsigned int pipes = (1 << PIPE_A) | (1 << PIPE_B);
	enum pipe pipe;

	lockdep_assert_held(&dev_priv->pps_mutex);

	/* We should never land here with regular DP ports */
	WARN_ON(!is_edp(intel_dp));

	if (intel_dp->pps_pipe != INVALID_PIPE)
		return intel_dp->pps_pipe;

	/*
	 * We don't have power sequencer currently.
	 * Pick one that's not used by other ports.
	 */
	list_for_each_entry(encoder, &dev->mode_config.encoder_list,
			    base.head) {
		struct intel_dp *tmp;

		if (encoder->type != INTEL_OUTPUT_EDP)
			continue;

		tmp = enc_to_intel_dp(&encoder->base);

		if (tmp->pps_pipe != INVALID_PIPE)
			pipes &= ~(1 << tmp->pps_pipe);
	}

	/*
	 * Didn't find one. This should not happen since there
	 * are two power sequencers and up to two eDP ports.
	 */
	if (WARN_ON(pipes == 0))
		pipe = PIPE_A;
	else
		pipe = ffs(pipes) - 1;

	vlv_steal_power_sequencer(dev, pipe);
	intel_dp->pps_pipe = pipe;

	DRM_DEBUG_KMS("picked pipe %c power sequencer for port %c\n",
		      pipe_name(intel_dp->pps_pipe),
		      port_name(intel_dig_port->port));

	/* init power sequencer on this pipe and port */
	intel_dp_init_panel_power_sequencer(dev, intel_dp);
	intel_dp_init_panel_power_sequencer_registers(dev, intel_dp);

	/*
	 * Even vdd force doesn't work until we've made
	 * the power sequencer lock in on the port.
	 */
	vlv_power_sequencer_kick(intel_dp);

	return intel_dp->pps_pipe;
}
447
/* Predicate used by vlv_initial_pps_pipe() to filter candidate pipes. */
typedef bool (*vlv_pipe_check)(struct drm_i915_private *dev_priv,
			       enum pipe pipe);

/* vlv_pipe_check: this pipe's power sequencer has panel power on */
static bool vlv_pipe_has_pp_on(struct drm_i915_private *dev_priv,
			       enum pipe pipe)
{
	return I915_READ(VLV_PIPE_PP_STATUS(pipe)) & PP_ON;
}
456
/* vlv_pipe_check: this pipe's power sequencer has VDD forced on */
static bool vlv_pipe_has_vdd_on(struct drm_i915_private *dev_priv,
				enum pipe pipe)
{
	return I915_READ(VLV_PIPE_PP_CONTROL(pipe)) & EDP_FORCE_VDD;
}
462
/* vlv_pipe_check: accepts any pipe (last-resort fallback) */
static bool vlv_pipe_any(struct drm_i915_private *dev_priv,
			 enum pipe pipe)
{
	return true;
}
468
/*
 * Find a pipe (A or B) whose power sequencer is already programmed to
 * drive @port and which satisfies @pipe_check.  Returns INVALID_PIPE
 * if no such pipe exists.
 */
static enum pipe
vlv_initial_pps_pipe(struct drm_i915_private *dev_priv,
		     enum port port,
		     vlv_pipe_check pipe_check)
{
	enum pipe pipe;

	for (pipe = PIPE_A; pipe <= PIPE_B; pipe++) {
		u32 port_sel = I915_READ(VLV_PIPE_PP_ON_DELAYS(pipe)) &
			PANEL_PORT_SELECT_MASK;

		if (port_sel != PANEL_PORT_SELECT_VLV(port))
			continue;

		if (!pipe_check(dev_priv, pipe))
			continue;

		return pipe;
	}

	return INVALID_PIPE;
}
491
/*
 * At init time, adopt whichever power sequencer is already associated
 * with this port, preferring one with panel power on, then one with
 * VDD on, then any port match.  If none matches, pps_pipe stays
 * INVALID_PIPE and vlv_power_sequencer_pipe() will pick one on demand.
 * Must be called with pps_mutex held.
 */
static void
vlv_initial_power_sequencer_setup(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = intel_dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum port port = intel_dig_port->port;

	lockdep_assert_held(&dev_priv->pps_mutex);

	/* try to find a pipe with this port selected */
	/* first pick one where the panel is on */
	intel_dp->pps_pipe = vlv_initial_pps_pipe(dev_priv, port,
						  vlv_pipe_has_pp_on);
	/* didn't find one? pick one where vdd is on */
	if (intel_dp->pps_pipe == INVALID_PIPE)
		intel_dp->pps_pipe = vlv_initial_pps_pipe(dev_priv, port,
							  vlv_pipe_has_vdd_on);
	/* didn't find one? pick one with just the correct port */
	if (intel_dp->pps_pipe == INVALID_PIPE)
		intel_dp->pps_pipe = vlv_initial_pps_pipe(dev_priv, port,
							  vlv_pipe_any);

	/* didn't find one? just let vlv_power_sequencer_pipe() pick one when needed */
	if (intel_dp->pps_pipe == INVALID_PIPE) {
		DRM_DEBUG_KMS("no initial power sequencer for port %c\n",
			      port_name(port));
		return;
	}

	DRM_DEBUG_KMS("initial power sequencer for port %c: pipe %c\n",
		      port_name(port), pipe_name(intel_dp->pps_pipe));

	intel_dp_init_panel_power_sequencer(dev, intel_dp);
	intel_dp_init_panel_power_sequencer_registers(dev, intel_dp);
}
528
/*
 * Forget every eDP encoder's power sequencer assignment so that one is
 * re-picked (and reprogrammed) on next use.  VLV only.
 */
void vlv_power_sequencer_reset(struct drm_i915_private *dev_priv)
{
	struct drm_device *dev = dev_priv->dev;
	struct intel_encoder *encoder;

	if (WARN_ON(!IS_VALLEYVIEW(dev)))
		return;

	/*
	 * We can't grab pps_mutex here due to deadlock with power_domain
	 * mutex when power_domain functions are called while holding pps_mutex.
	 * That also means that in order to use pps_pipe the code needs to
	 * hold both a power domain reference and pps_mutex, and the power domain
	 * reference get/put must be done while _not_ holding pps_mutex.
	 * pps_{lock,unlock}() do these steps in the correct order, so one
	 * should use them always.
	 */

	list_for_each_entry(encoder, &dev->mode_config.encoder_list, base.head) {
		struct intel_dp *intel_dp;

		if (encoder->type != INTEL_OUTPUT_EDP)
			continue;

		intel_dp = enc_to_intel_dp(&encoder->base);
		intel_dp->pps_pipe = INVALID_PIPE;
	}
}
557
558 static u32 _pp_ctrl_reg(struct intel_dp *intel_dp)
559 {
560         struct drm_device *dev = intel_dp_to_dev(intel_dp);
561
562         if (HAS_PCH_SPLIT(dev))
563                 return PCH_PP_CONTROL;
564         else
565                 return VLV_PIPE_PP_CONTROL(vlv_power_sequencer_pipe(intel_dp));
566 }
567
568 static u32 _pp_stat_reg(struct intel_dp *intel_dp)
569 {
570         struct drm_device *dev = intel_dp_to_dev(intel_dp);
571
572         if (HAS_PCH_SPLIT(dev))
573                 return PCH_PP_STATUS;
574         else
575                 return VLV_PIPE_PP_STATUS(vlv_power_sequencer_pipe(intel_dp));
576 }
577
/*
 * Reboot notifier handler: shut panel power off on restart so that the
 * T12 power-cycle timing is guaranteed across the reboot.  This is
 * only applicable when panel PM state is not otherwise tracked.
 * NOTE(review): only VLV is handled here — presumably other platforms
 * honour T12 elsewhere; confirm.
 */
static int edp_notify_handler(struct notifier_block *this, unsigned long code,
			      void *unused)
{
	struct intel_dp *intel_dp = container_of(this, typeof(* intel_dp),
						 edp_notifier);
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 pp_div;
	u32 pp_ctrl_reg, pp_div_reg;

	/* only act on restart of an eDP port */
	if (!is_edp(intel_dp) || code != SYS_RESTART)
		return 0;

	pps_lock(intel_dp);

	if (IS_VALLEYVIEW(dev)) {
		enum pipe pipe = vlv_power_sequencer_pipe(intel_dp);

		pp_ctrl_reg = VLV_PIPE_PP_CONTROL(pipe);
		pp_div_reg  = VLV_PIPE_PP_DIVISOR(pipe);
		pp_div = I915_READ(pp_div_reg);
		pp_div &= PP_REFERENCE_DIVIDER_MASK;

		/* 0x1F write to PP_DIV_REG sets max cycle delay */
		I915_WRITE(pp_div_reg, pp_div | 0x1F);
		I915_WRITE(pp_ctrl_reg, PANEL_UNLOCK_REGS | PANEL_POWER_OFF);
		msleep(intel_dp->panel_power_cycle_delay);
	}

	pps_unlock(intel_dp);

	return 0;
}
613
/*
 * Is panel power currently on (PP_ON in the PPS status register)?
 * Caller must hold pps_mutex.  On VLV a port with no power sequencer
 * assigned is treated as having no panel power.
 */
static bool edp_have_panel_power(struct intel_dp *intel_dp)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	struct drm_i915_private *dev_priv = dev->dev_private;

	lockdep_assert_held(&dev_priv->pps_mutex);

	if (IS_VALLEYVIEW(dev) &&
	    intel_dp->pps_pipe == INVALID_PIPE)
		return false;

	return (I915_READ(_pp_stat_reg(intel_dp)) & PP_ON) != 0;
}
627
/*
 * Is VDD currently forced on (EDP_FORCE_VDD in the PPS control
 * register)?  Caller must hold pps_mutex.  On VLV a port with no
 * power sequencer assigned is treated as having VDD off.
 */
static bool edp_have_panel_vdd(struct intel_dp *intel_dp)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	struct drm_i915_private *dev_priv = dev->dev_private;

	lockdep_assert_held(&dev_priv->pps_mutex);

	if (IS_VALLEYVIEW(dev) &&
	    intel_dp->pps_pipe == INVALID_PIPE)
		return false;

	return I915_READ(_pp_ctrl_reg(intel_dp)) & EDP_FORCE_VDD;
}
641
/*
 * AUX sanity check: on eDP, warn if neither panel power nor VDD is
 * enabled, since AUX communication with a powered-down panel cannot
 * work.  No-op for non-eDP ports.
 */
static void
intel_dp_check_edp(struct intel_dp *intel_dp)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (!is_edp(intel_dp))
		return;

	if (!edp_have_panel_power(intel_dp) && !edp_have_panel_vdd(intel_dp)) {
		WARN(1, "eDP powered off while attempting aux channel communication.\n");
		DRM_DEBUG_KMS("Status 0x%08x Control 0x%08x\n",
			      I915_READ(_pp_stat_reg(intel_dp)),
			      I915_READ(_pp_ctrl_reg(intel_dp)));
	}
}
658
/*
 * Wait for the current AUX transfer to complete, i.e. for SEND_BUSY in
 * the AUX_CH_CTL register to clear.  Uses the AUX-done interrupt when
 * available, otherwise polls atomically for up to 10ms.  Returns the
 * last AUX_CH_CTL value read (busy may still be set on timeout).
 */
static uint32_t
intel_dp_aux_wait_done(struct intel_dp *intel_dp, bool has_aux_irq)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = intel_dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t ch_ctl = intel_dp->aux_ch_ctl_reg;
	uint32_t status;
	bool done;

/* note: C both tests the busy bit and refreshes 'status' as a side effect */
#define C (((status = I915_READ_NOTRACE(ch_ctl)) & DP_AUX_CH_CTL_SEND_BUSY) == 0)
	if (has_aux_irq)
		done = wait_event_timeout(dev_priv->gmbus_wait_queue, C,
					  msecs_to_jiffies_timeout(10));
	else
		done = wait_for_atomic(C, 10) == 0;
	if (!done)
		DRM_ERROR("dp aux hw did not signal timeout (has irq: %i)!\n",
			  has_aux_irq);
#undef C

	return status;
}
682
/* AUX clock divider for pre-ILK hardware; only index 0 is valid. */
static uint32_t i9xx_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = intel_dig_port->base.base.dev;

	/*
	 * The clock divider is based off the hrawclk, and would like to run at
	 * 2MHz.  So, take the hrawclk value and divide by 2 and use that
	 */
	return index ? 0 : intel_hrawclk(dev) / 2;
}
694
/*
 * AUX clock divider for ILK+: port A (eDP) runs off a fixed input
 * clock (400MHz on SNB/IVB, 450MHz otherwise), the other ports off the
 * PCH rawclk.  Only index 0 is valid; further indices return 0 to
 * terminate the caller's retry loop.
 */
static uint32_t ilk_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = intel_dig_port->base.base.dev;

	if (index)
		return 0;

	if (intel_dig_port->port == PORT_A) {
		if (IS_GEN6(dev) || IS_GEN7(dev))
			return 200; /* SNB & IVB eDP input clock at 400Mhz */
		else
			return 225; /* eDP input clock at 450Mhz */
	} else {
		return DIV_ROUND_UP(intel_pch_rawclk(dev), 2);
	}
}
712
/*
 * AUX clock divider for HSW/BDW: port A (eDP) derives the AUX clock
 * from cdclk, other ports from the PCH rawclk.  Non-ULT HSW (LPT-H
 * PCH) needs two fixed divider values as a workaround.  Returns 0 when
 * @index exhausts the valid dividers for the port.
 */
static uint32_t hsw_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = intel_dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (intel_dig_port->port == PORT_A) {
		if (index)
			return 0;
		return DIV_ROUND_CLOSEST(intel_ddi_get_cdclk_freq(dev_priv), 2000);
	} else if (dev_priv->pch_id == INTEL_PCH_LPT_DEVICE_ID_TYPE) {
		/* Workaround for non-ULT HSW */
		switch (index) {
		case 0: return 63;
		case 1: return 72;
		default: return 0;
		}
	} else  {
		return index ? 0 : DIV_ROUND_UP(intel_pch_rawclk(dev), 2);
	}
}
734
/*
 * VLV AUX clock divider is a fixed 100; only index 0 is valid, further
 * indices return 0 to terminate the caller's retry loop.
 */
static uint32_t vlv_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
{
	if (index)
		return 0;

	return 100;
}
739
/*
 * SKL doesn't need us to program the AUX clock divider (Hardware will
 * derive the clock from CDCLK automatically). We still implement the
 * get_aux_clock_divider vfunc to plug-in into the existing code:
 * report a dummy divider of 1 for index 0, and 0 afterwards.
 */
static uint32_t skl_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
{
	if (index)
		return 0;

	return 1;
}
749
/*
 * Compose the AUX_CH_CTL value that starts an AUX transfer of
 * @send_bytes bytes on pre-SKL hardware: busy/done/error bits, timeout
 * (600us for BDW eDP AUX A, 400us otherwise), message size, precharge
 * count (3 on gen6, 5 otherwise) and the 2x bit clock divider.
 */
static uint32_t i9xx_get_aux_send_ctl(struct intel_dp *intel_dp,
				      bool has_aux_irq,
				      int send_bytes,
				      uint32_t aux_clock_divider)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = intel_dig_port->base.base.dev;
	uint32_t precharge, timeout;

	if (IS_GEN6(dev))
		precharge = 3;
	else
		precharge = 5;

	if (IS_BROADWELL(dev) && intel_dp->aux_ch_ctl_reg == DPA_AUX_CH_CTL)
		timeout = DP_AUX_CH_CTL_TIME_OUT_600us;
	else
		timeout = DP_AUX_CH_CTL_TIME_OUT_400us;

	return DP_AUX_CH_CTL_SEND_BUSY |
	       DP_AUX_CH_CTL_DONE |
	       (has_aux_irq ? DP_AUX_CH_CTL_INTERRUPT : 0) |
	       DP_AUX_CH_CTL_TIME_OUT_ERROR |
	       timeout |
	       DP_AUX_CH_CTL_RECEIVE_ERROR |
	       (send_bytes << DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT) |
	       (precharge << DP_AUX_CH_CTL_PRECHARGE_2US_SHIFT) |
	       (aux_clock_divider << DP_AUX_CH_CTL_BIT_CLOCK_2X_SHIFT);
}
779
/*
 * SKL variant of the AUX_CH_CTL value: no precharge or clock divider
 * fields (the divider argument is unused), a fixed 1600us timeout and
 * a 32-pulse SYNC.
 */
static uint32_t skl_get_aux_send_ctl(struct intel_dp *intel_dp,
				      bool has_aux_irq,
				      int send_bytes,
				      uint32_t unused)
{
	return DP_AUX_CH_CTL_SEND_BUSY |
	       DP_AUX_CH_CTL_DONE |
	       (has_aux_irq ? DP_AUX_CH_CTL_INTERRUPT : 0) |
	       DP_AUX_CH_CTL_TIME_OUT_ERROR |
	       DP_AUX_CH_CTL_TIME_OUT_1600us |
	       DP_AUX_CH_CTL_RECEIVE_ERROR |
	       (send_bytes << DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT) |
	       DP_AUX_CH_CTL_SYNC_PULSE_SKL(32);
}
794
/*
 * Perform one raw AUX channel transaction: clock out @send_bytes bytes
 * from @send and read back up to @recv_size bytes into @recv.
 *
 * Returns the number of bytes received on success, or a negative errno:
 * -EBUSY (channel never went idle / never completed), -E2BIG (payload
 * exceeds the 5 data registers), -EIO (receive error), -ETIMEDOUT
 * (sink did not reply, typically because nothing is connected).
 *
 * Runs with the PPS mutex held and panel VDD forced on for the duration
 * of the transfer.
 */
static int
intel_dp_aux_ch(struct intel_dp *intel_dp,
		const uint8_t *send, int send_bytes,
		uint8_t *recv, int recv_size)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = intel_dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t ch_ctl = intel_dp->aux_ch_ctl_reg;
	uint32_t ch_data = ch_ctl + 4; /* data registers follow the control reg */
	uint32_t aux_clock_divider;
	int i, ret, recv_bytes;
	uint32_t status;
	int try, clock = 0;
	bool has_aux_irq = HAS_AUX_IRQ(dev);
	bool vdd;

	pps_lock(intel_dp);

	/*
	 * We will be called with VDD already enabled for dpcd/edid/oui reads.
	 * In such cases we want to leave VDD enabled and it's up to upper layers
	 * to turn it off. But for eg. i2c-dev access we need to turn it on/off
	 * ourselves.
	 */
	vdd = edp_panel_vdd_on(intel_dp);

	/* dp aux is extremely sensitive to irq latency, hence request the
	 * lowest possible wakeup latency and so prevent the cpu from going into
	 * deep sleep states.
	 */
	pm_qos_update_request(&dev_priv->pm_qos, 0);

	intel_dp_check_edp(intel_dp);

	intel_aux_display_runtime_get(dev_priv);

	/* Try to wait for any previous AUX channel activity */
	for (try = 0; try < 3; try++) {
		status = I915_READ_NOTRACE(ch_ctl);
		if ((status & DP_AUX_CH_CTL_SEND_BUSY) == 0)
			break;
		msleep(1);
	}

	/* try == 3 means the busy bit never cleared: give up. */
	if (try == 3) {
		WARN(1, "dp_aux_ch not started status 0x%08x\n",
		     I915_READ(ch_ctl));
		ret = -EBUSY;
		goto out;
	}

	/* Only 5 data registers! */
	if (WARN_ON(send_bytes > 20 || recv_size > 20)) {
		ret = -E2BIG;
		goto out;
	}

	/*
	 * Outer loop: retry the whole transaction with each AUX clock
	 * divider the platform hook offers (the hook returns 0 when the
	 * dividers are exhausted).
	 */
	while ((aux_clock_divider = intel_dp->get_aux_clock_divider(intel_dp, clock++))) {
		u32 send_ctl = intel_dp->get_aux_send_ctl(intel_dp,
							  has_aux_irq,
							  send_bytes,
							  aux_clock_divider);

		/* Must try at least 3 times according to DP spec */
		for (try = 0; try < 5; try++) {
			/* Load the send data into the aux channel data registers */
			for (i = 0; i < send_bytes; i += 4)
				I915_WRITE(ch_data + i,
					   intel_dp_pack_aux(send + i,
							     send_bytes - i));

			/* Send the command and wait for it to complete */
			I915_WRITE(ch_ctl, send_ctl);

			status = intel_dp_aux_wait_done(intel_dp, has_aux_irq);

			/* Clear done status and any errors */
			I915_WRITE(ch_ctl,
				   status |
				   DP_AUX_CH_CTL_DONE |
				   DP_AUX_CH_CTL_TIME_OUT_ERROR |
				   DP_AUX_CH_CTL_RECEIVE_ERROR);

			/* Timeout / receive error: retry this divider. */
			if (status & (DP_AUX_CH_CTL_TIME_OUT_ERROR |
				      DP_AUX_CH_CTL_RECEIVE_ERROR))
				continue;
			if (status & DP_AUX_CH_CTL_DONE)
				goto done;
		}
	}

	if ((status & DP_AUX_CH_CTL_DONE) == 0) {
		DRM_ERROR("dp_aux_ch not done status 0x%08x\n", status);
		ret = -EBUSY;
		goto out;
	}

done:
	/* Check for timeout or receive error.
	 * Timeouts occur when the sink is not connected
	 */
	if (status & DP_AUX_CH_CTL_RECEIVE_ERROR) {
		DRM_ERROR("dp_aux_ch receive error status 0x%08x\n", status);
		ret = -EIO;
		goto out;
	}

	/* Timeouts occur when the device isn't connected, so they're
	 * "normal" -- don't fill the kernel log with these */
	if (status & DP_AUX_CH_CTL_TIME_OUT_ERROR) {
		DRM_DEBUG_KMS("dp_aux_ch timeout status 0x%08x\n", status);
		ret = -ETIMEDOUT;
		goto out;
	}

	/* Unload any bytes sent back from the other side */
	recv_bytes = ((status & DP_AUX_CH_CTL_MESSAGE_SIZE_MASK) >>
		      DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT);
	if (recv_bytes > recv_size)
		recv_bytes = recv_size;

	for (i = 0; i < recv_bytes; i += 4)
		intel_dp_unpack_aux(I915_READ(ch_data + i),
				    recv + i, recv_bytes - i);

	ret = recv_bytes;
out:
	/* Undo everything acquired above, in reverse order. */
	pm_qos_update_request(&dev_priv->pm_qos, PM_QOS_DEFAULT_VALUE);
	intel_aux_display_runtime_put(dev_priv);

	if (vdd)
		edp_panel_vdd_off(intel_dp, false);

	pps_unlock(intel_dp);

	return ret;
}
933
934 #define BARE_ADDRESS_SIZE       3
935 #define HEADER_SIZE             (BARE_ADDRESS_SIZE + 1)
936 static ssize_t
937 intel_dp_aux_transfer(struct drm_dp_aux *aux, struct drm_dp_aux_msg *msg)
938 {
939         struct intel_dp *intel_dp = container_of(aux, struct intel_dp, aux);
940         uint8_t txbuf[20], rxbuf[20];
941         size_t txsize, rxsize;
942         int ret;
943
944         txbuf[0] = (msg->request << 4) |
945                 ((msg->address >> 16) & 0xf);
946         txbuf[1] = (msg->address >> 8) & 0xff;
947         txbuf[2] = msg->address & 0xff;
948         txbuf[3] = msg->size - 1;
949
950         switch (msg->request & ~DP_AUX_I2C_MOT) {
951         case DP_AUX_NATIVE_WRITE:
952         case DP_AUX_I2C_WRITE:
953                 txsize = msg->size ? HEADER_SIZE + msg->size : BARE_ADDRESS_SIZE;
954                 rxsize = 2; /* 0 or 1 data bytes */
955
956                 if (WARN_ON(txsize > 20))
957                         return -E2BIG;
958
959                 memcpy(txbuf + HEADER_SIZE, msg->buffer, msg->size);
960
961                 ret = intel_dp_aux_ch(intel_dp, txbuf, txsize, rxbuf, rxsize);
962                 if (ret > 0) {
963                         msg->reply = rxbuf[0] >> 4;
964
965                         if (ret > 1) {
966                                 /* Number of bytes written in a short write. */
967                                 ret = clamp_t(int, rxbuf[1], 0, msg->size);
968                         } else {
969                                 /* Return payload size. */
970                                 ret = msg->size;
971                         }
972                 }
973                 break;
974
975         case DP_AUX_NATIVE_READ:
976         case DP_AUX_I2C_READ:
977                 txsize = msg->size ? HEADER_SIZE : BARE_ADDRESS_SIZE;
978                 rxsize = msg->size + 1;
979
980                 if (WARN_ON(rxsize > 20))
981                         return -E2BIG;
982
983                 ret = intel_dp_aux_ch(intel_dp, txbuf, txsize, rxbuf, rxsize);
984                 if (ret > 0) {
985                         msg->reply = rxbuf[0] >> 4;
986                         /*
987                          * Assume happy day, and copy the data. The caller is
988                          * expected to check msg->reply before touching it.
989                          *
990                          * Return payload size.
991                          */
992                         ret--;
993                         memcpy(msg->buffer, rxbuf + 1, ret);
994                 }
995                 break;
996
997         default:
998                 ret = -EINVAL;
999                 break;
1000         }
1001
1002         return ret;
1003 }
1004
1005 static void
1006 intel_dp_aux_init(struct intel_dp *intel_dp, struct intel_connector *connector)
1007 {
1008         struct drm_device *dev = intel_dp_to_dev(intel_dp);
1009         struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
1010         enum port port = intel_dig_port->port;
1011         const char *name = NULL;
1012         int ret;
1013
1014         switch (port) {
1015         case PORT_A:
1016                 intel_dp->aux_ch_ctl_reg = DPA_AUX_CH_CTL;
1017                 name = "DPDDC-A";
1018                 break;
1019         case PORT_B:
1020                 intel_dp->aux_ch_ctl_reg = PCH_DPB_AUX_CH_CTL;
1021                 name = "DPDDC-B";
1022                 break;
1023         case PORT_C:
1024                 intel_dp->aux_ch_ctl_reg = PCH_DPC_AUX_CH_CTL;
1025                 name = "DPDDC-C";
1026                 break;
1027         case PORT_D:
1028                 intel_dp->aux_ch_ctl_reg = PCH_DPD_AUX_CH_CTL;
1029                 name = "DPDDC-D";
1030                 break;
1031         default:
1032                 BUG();
1033         }
1034
1035         /*
1036          * The AUX_CTL register is usually DP_CTL + 0x10.
1037          *
1038          * On Haswell and Broadwell though:
1039          *   - Both port A DDI_BUF_CTL and DDI_AUX_CTL are on the CPU
1040          *   - Port B/C/D AUX channels are on the PCH, DDI_BUF_CTL on the CPU
1041          *
1042          * Skylake moves AUX_CTL back next to DDI_BUF_CTL, on the CPU.
1043          */
1044         if (!IS_HASWELL(dev) && !IS_BROADWELL(dev))
1045                 intel_dp->aux_ch_ctl_reg = intel_dp->output_reg + 0x10;
1046
1047         intel_dp->aux.name = name;
1048         intel_dp->aux.dev = dev->dev;
1049         intel_dp->aux.transfer = intel_dp_aux_transfer;
1050
1051         DRM_DEBUG_KMS("registering %s bus for %s\n", name,
1052                       connector->base.kdev->kobj.name);
1053
1054         ret = drm_dp_aux_register(&intel_dp->aux);
1055         if (ret < 0) {
1056                 DRM_ERROR("drm_dp_aux_register() for %s failed (%d)\n",
1057                           name, ret);
1058                 return;
1059         }
1060
1061         ret = sysfs_create_link(&connector->base.kdev->kobj,
1062                                 &intel_dp->aux.ddc.dev.kobj,
1063                                 intel_dp->aux.ddc.dev.kobj.name);
1064         if (ret < 0) {
1065                 DRM_ERROR("sysfs_create_link() for %s failed (%d)\n", name, ret);
1066                 drm_dp_aux_unregister(&intel_dp->aux);
1067         }
1068 }
1069
1070 static void
1071 intel_dp_connector_unregister(struct intel_connector *intel_connector)
1072 {
1073         struct intel_dp *intel_dp = intel_attached_dp(&intel_connector->base);
1074
1075         if (!intel_connector->mst_port)
1076                 sysfs_remove_link(&intel_connector->base.kdev->kobj,
1077                                   intel_dp->aux.ddc.dev.kobj.name);
1078         intel_connector_unregister(intel_connector);
1079 }
1080
1081 static void
1082 skl_edp_set_pll_config(struct intel_crtc_state *pipe_config, int link_clock)
1083 {
1084         u32 ctrl1;
1085
1086         pipe_config->ddi_pll_sel = SKL_DPLL0;
1087         pipe_config->dpll_hw_state.cfgcr1 = 0;
1088         pipe_config->dpll_hw_state.cfgcr2 = 0;
1089
1090         ctrl1 = DPLL_CTRL1_OVERRIDE(SKL_DPLL0);
1091         switch (link_clock / 2) {
1092         case 81000:
1093                 ctrl1 |= DPLL_CRTL1_LINK_RATE(DPLL_CRTL1_LINK_RATE_810,
1094                                               SKL_DPLL0);
1095                 break;
1096         case 135000:
1097                 ctrl1 |= DPLL_CRTL1_LINK_RATE(DPLL_CRTL1_LINK_RATE_1350,
1098                                               SKL_DPLL0);
1099                 break;
1100         case 270000:
1101                 ctrl1 |= DPLL_CRTL1_LINK_RATE(DPLL_CRTL1_LINK_RATE_2700,
1102                                               SKL_DPLL0);
1103                 break;
1104         case 162000:
1105                 ctrl1 |= DPLL_CRTL1_LINK_RATE(DPLL_CRTL1_LINK_RATE_1620,
1106                                               SKL_DPLL0);
1107                 break;
1108         /* TBD: For DP link rates 2.16 GHz and 4.32 GHz, VCO is 8640 which
1109         results in CDCLK change. Need to handle the change of CDCLK by
1110         disabling pipes and re-enabling them */
1111         case 108000:
1112                 ctrl1 |= DPLL_CRTL1_LINK_RATE(DPLL_CRTL1_LINK_RATE_1080,
1113                                               SKL_DPLL0);
1114                 break;
1115         case 216000:
1116                 ctrl1 |= DPLL_CRTL1_LINK_RATE(DPLL_CRTL1_LINK_RATE_2160,
1117                                               SKL_DPLL0);
1118                 break;
1119
1120         }
1121         pipe_config->dpll_hw_state.ctrl1 = ctrl1;
1122 }
1123
1124 static void
1125 hsw_dp_set_ddi_pll_sel(struct intel_crtc_state *pipe_config, int link_bw)
1126 {
1127         switch (link_bw) {
1128         case DP_LINK_BW_1_62:
1129                 pipe_config->ddi_pll_sel = PORT_CLK_SEL_LCPLL_810;
1130                 break;
1131         case DP_LINK_BW_2_7:
1132                 pipe_config->ddi_pll_sel = PORT_CLK_SEL_LCPLL_1350;
1133                 break;
1134         case DP_LINK_BW_5_4:
1135                 pipe_config->ddi_pll_sel = PORT_CLK_SEL_LCPLL_2700;
1136                 break;
1137         }
1138 }
1139
1140 static int
1141 intel_dp_sink_rates(struct intel_dp *intel_dp, const int **sink_rates)
1142 {
1143         if (intel_dp->num_sink_rates) {
1144                 *sink_rates = intel_dp->sink_rates;
1145                 return intel_dp->num_sink_rates;
1146         }
1147
1148         *sink_rates = default_rates;
1149
1150         return (intel_dp_max_link_bw(intel_dp) >> 3) + 1;
1151 }
1152
1153 static int
1154 intel_dp_source_rates(struct drm_device *dev, const int **source_rates)
1155 {
1156         if (INTEL_INFO(dev)->gen >= 9) {
1157                 *source_rates = gen9_rates;
1158                 return ARRAY_SIZE(gen9_rates);
1159         } else if (IS_CHERRYVIEW(dev)) {
1160                 *source_rates = chv_rates;
1161                 return ARRAY_SIZE(chv_rates);
1162         }
1163
1164         *source_rates = default_rates;
1165
1166         if (IS_SKYLAKE(dev) && INTEL_REVID(dev) <= SKL_REVID_B0)
1167                 /* WaDisableHBR2:skl */
1168                 return (DP_LINK_BW_2_7 >> 3) + 1;
1169         else if (INTEL_INFO(dev)->gen >= 8 ||
1170             (IS_HASWELL(dev) && !IS_HSW_ULX(dev)))
1171                 return (DP_LINK_BW_5_4 >> 3) + 1;
1172         else
1173                 return (DP_LINK_BW_2_7 >> 3) + 1;
1174 }
1175
1176 static void
1177 intel_dp_set_clock(struct intel_encoder *encoder,
1178                    struct intel_crtc_state *pipe_config, int link_bw)
1179 {
1180         struct drm_device *dev = encoder->base.dev;
1181         const struct dp_link_dpll *divisor = NULL;
1182         int i, count = 0;
1183
1184         if (IS_G4X(dev)) {
1185                 divisor = gen4_dpll;
1186                 count = ARRAY_SIZE(gen4_dpll);
1187         } else if (HAS_PCH_SPLIT(dev)) {
1188                 divisor = pch_dpll;
1189                 count = ARRAY_SIZE(pch_dpll);
1190         } else if (IS_CHERRYVIEW(dev)) {
1191                 divisor = chv_dpll;
1192                 count = ARRAY_SIZE(chv_dpll);
1193         } else if (IS_VALLEYVIEW(dev)) {
1194                 divisor = vlv_dpll;
1195                 count = ARRAY_SIZE(vlv_dpll);
1196         }
1197
1198         if (divisor && count) {
1199                 for (i = 0; i < count; i++) {
1200                         if (link_bw == divisor[i].link_bw) {
1201                                 pipe_config->dpll = divisor[i].dpll;
1202                                 pipe_config->clock_set = true;
1203                                 break;
1204                         }
1205                 }
1206         }
1207 }
1208
1209 static int intersect_rates(const int *source_rates, int source_len,
1210                            const int *sink_rates, int sink_len,
1211                            int *common_rates)
1212 {
1213         int i = 0, j = 0, k = 0;
1214
1215         while (i < source_len && j < sink_len) {
1216                 if (source_rates[i] == sink_rates[j]) {
1217                         if (WARN_ON(k >= DP_MAX_SUPPORTED_RATES))
1218                                 return k;
1219                         common_rates[k] = source_rates[i];
1220                         ++k;
1221                         ++i;
1222                         ++j;
1223                 } else if (source_rates[i] < sink_rates[j]) {
1224                         ++i;
1225                 } else {
1226                         ++j;
1227                 }
1228         }
1229         return k;
1230 }
1231
/*
 * Fill @common_rates with the link rates supported by both the source
 * platform and the attached sink; returns the number of entries.
 */
static int intel_dp_common_rates(struct intel_dp *intel_dp,
				 int *common_rates)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	const int *src_table, *sink_table;
	int src_count, sink_count;

	src_count = intel_dp_source_rates(dev, &src_table);
	sink_count = intel_dp_sink_rates(intel_dp, &sink_table);

	return intersect_rates(src_table, src_count,
			       sink_table, sink_count,
			       common_rates);
}
1246
/*
 * Format the first @nelem entries of @array into @str as a
 * comma-separated list (each entry followed by a comma), silently
 * stopping once the buffer is full.
 */
static void snprintf_int_array(char *str, size_t len,
			       const int *array, int nelem)
{
	int i;

	str[0] = '\0';

	for (i = 0; i < nelem; i++) {
		int r = snprintf(str, len, "%d,", array[i]);

		/*
		 * Stop on output error (r < 0) or truncation (r >= len).
		 * The explicit cast avoids the implicit signed/unsigned
		 * conversion the old `r >= len` comparison relied on,
		 * which only handled a negative snprintf return by
		 * accident.
		 */
		if (r < 0 || (size_t)r >= len)
			return;
		str += r;
		len -= r;
	}
}
1262
1263 static void intel_dp_print_rates(struct intel_dp *intel_dp)
1264 {
1265         struct drm_device *dev = intel_dp_to_dev(intel_dp);
1266         const int *source_rates, *sink_rates;
1267         int source_len, sink_len, common_len;
1268         int common_rates[DP_MAX_SUPPORTED_RATES];
1269         char str[128]; /* FIXME: too big for stack? */
1270
1271         if ((drm_debug & DRM_UT_KMS) == 0)
1272                 return;
1273
1274         source_len = intel_dp_source_rates(dev, &source_rates);
1275         snprintf_int_array(str, sizeof(str), source_rates, source_len);
1276         DRM_DEBUG_KMS("source rates: %s\n", str);
1277
1278         sink_len = intel_dp_sink_rates(intel_dp, &sink_rates);
1279         snprintf_int_array(str, sizeof(str), sink_rates, sink_len);
1280         DRM_DEBUG_KMS("sink rates: %s\n", str);
1281
1282         common_len = intel_dp_common_rates(intel_dp, common_rates);
1283         snprintf_int_array(str, sizeof(str), common_rates, common_len);
1284         DRM_DEBUG_KMS("common rates: %s\n", str);
1285 }
1286
1287 static int rate_to_index(int find, const int *rates)
1288 {
1289         int i = 0;
1290
1291         for (i = 0; i < DP_MAX_SUPPORTED_RATES; ++i)
1292                 if (find == rates[i])
1293                         break;
1294
1295         return i;
1296 }
1297
1298 int
1299 intel_dp_max_link_rate(struct intel_dp *intel_dp)
1300 {
1301         int rates[DP_MAX_SUPPORTED_RATES] = {};
1302         int len;
1303
1304         len = intel_dp_common_rates(intel_dp, rates);
1305         if (WARN_ON(len <= 0))
1306                 return 162000;
1307
1308         return rates[rate_to_index(0, rates) - 1];
1309 }
1310
1311 int intel_dp_rate_select(struct intel_dp *intel_dp, int rate)
1312 {
1313         return rate_to_index(rate, intel_dp->sink_rates);
1314 }
1315
/*
 * Compute the DP link configuration for the requested mode: walks pipe
 * bpp downward (to 18) and, for each bpp, every common link rate and
 * power-of-two lane count until the mode's bandwidth requirement fits.
 * On success the chosen lane count / link bw / rate select are stored
 * in @intel_dp, the bpp / port clock / m_n values in @pipe_config, and
 * true is returned; returns false when no combination has enough
 * bandwidth (or the mode is double-clocked).
 */
bool
intel_dp_compute_config(struct intel_encoder *encoder,
			struct intel_crtc_state *pipe_config)
{
	struct drm_device *dev = encoder->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode;
	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
	enum port port = dp_to_dig_port(intel_dp)->port;
	struct intel_crtc *intel_crtc = to_intel_crtc(pipe_config->base.crtc);
	struct intel_connector *intel_connector = intel_dp->attached_connector;
	int lane_count, clock;
	int min_lane_count = 1;
	int max_lane_count = intel_dp_max_lane_count(intel_dp);
	/* Conveniently, the link BW constants become indices with a shift...*/
	int min_clock = 0;
	int max_clock;
	int bpp, mode_rate;
	int link_avail, link_clock;
	int common_rates[DP_MAX_SUPPORTED_RATES] = {};
	int common_len;

	common_len = intel_dp_common_rates(intel_dp, common_rates);

	/* No common link rates between source and sink */
	WARN_ON(common_len <= 0);

	max_clock = common_len - 1;

	if (HAS_PCH_SPLIT(dev) && !HAS_DDI(dev) && port != PORT_A)
		pipe_config->has_pch_encoder = true;

	pipe_config->has_dp_encoder = true;
	pipe_config->has_drrs = false;
	pipe_config->has_audio = intel_dp->has_audio && port != PORT_A;

	/* eDP panels get the fixed panel mode and panel fitting applied. */
	if (is_edp(intel_dp) && intel_connector->panel.fixed_mode) {
		intel_fixed_panel_mode(intel_connector->panel.fixed_mode,
				       adjusted_mode);
		if (!HAS_PCH_SPLIT(dev))
			intel_gmch_panel_fitting(intel_crtc, pipe_config,
						 intel_connector->panel.fitting_mode);
		else
			intel_pch_panel_fitting(intel_crtc, pipe_config,
						intel_connector->panel.fitting_mode);
	}

	/* Double-clocked modes are not supported on DP. */
	if (adjusted_mode->flags & DRM_MODE_FLAG_DBLCLK)
		return false;

	DRM_DEBUG_KMS("DP link computation with max lane count %i "
		      "max bw %d pixel clock %iKHz\n",
		      max_lane_count, common_rates[max_clock],
		      adjusted_mode->crtc_clock);

	/* Walk through all bpp values. Luckily they're all nicely spaced with 2
	 * bpc in between. */
	bpp = pipe_config->pipe_bpp;
	if (is_edp(intel_dp)) {
		if (dev_priv->vbt.edp_bpp && dev_priv->vbt.edp_bpp < bpp) {
			DRM_DEBUG_KMS("clamping bpp for eDP panel to BIOS-provided %i\n",
				      dev_priv->vbt.edp_bpp);
			bpp = dev_priv->vbt.edp_bpp;
		}

		/*
		 * Use the maximum clock and number of lanes the eDP panel
		 * advertizes being capable of. The panels are generally
		 * designed to support only a single clock and lane
		 * configuration, and typically these values correspond to the
		 * native resolution of the panel.
		 */
		min_lane_count = max_lane_count;
		min_clock = max_clock;
	}

	/* Search: bpp descends, link rate and lane count ascend. */
	for (; bpp >= 6*3; bpp -= 2*3) {
		mode_rate = intel_dp_link_required(adjusted_mode->crtc_clock,
						   bpp);

		for (clock = min_clock; clock <= max_clock; clock++) {
			for (lane_count = min_lane_count;
				lane_count <= max_lane_count;
				lane_count <<= 1) {

				link_clock = common_rates[clock];
				link_avail = intel_dp_max_data_rate(link_clock,
								    lane_count);

				if (mode_rate <= link_avail) {
					goto found;
				}
			}
		}
	}

	return false;

found:
	if (intel_dp->color_range_auto) {
		/*
		 * See:
		 * CEA-861-E - 5.1 Default Encoding Parameters
		 * VESA DisplayPort Ver.1.2a - 5.1.1.1 Video Colorimetry
		 */
		if (bpp != 18 && drm_match_cea_mode(adjusted_mode) > 1)
			intel_dp->color_range = DP_COLOR_RANGE_16_235;
		else
			intel_dp->color_range = 0;
	}

	if (intel_dp->color_range)
		pipe_config->limited_color_range = true;

	intel_dp->lane_count = lane_count;

	/*
	 * Sinks with a DPCD rate table are programmed by table index
	 * (rate_select); older sinks use the classic link-bw code.
	 */
	if (intel_dp->num_sink_rates) {
		intel_dp->link_bw = 0;
		intel_dp->rate_select =
			intel_dp_rate_select(intel_dp, common_rates[clock]);
	} else {
		intel_dp->link_bw =
			drm_dp_link_rate_to_bw_code(common_rates[clock]);
		intel_dp->rate_select = 0;
	}

	pipe_config->pipe_bpp = bpp;
	pipe_config->port_clock = common_rates[clock];

	DRM_DEBUG_KMS("DP link bw %02x lane count %d clock %d bpp %d\n",
		      intel_dp->link_bw, intel_dp->lane_count,
		      pipe_config->port_clock, bpp);
	DRM_DEBUG_KMS("DP link bw required %i available %i\n",
		      mode_rate, link_avail);

	intel_link_compute_m_n(bpp, lane_count,
			       adjusted_mode->crtc_clock,
			       pipe_config->port_clock,
			       &pipe_config->dp_m_n);

	/* Second set of m_n values for the DRRS downclocked mode. */
	if (intel_connector->panel.downclock_mode != NULL &&
		dev_priv->drrs.type == SEAMLESS_DRRS_SUPPORT) {
			pipe_config->has_drrs = true;
			intel_link_compute_m_n(bpp, lane_count,
				intel_connector->panel.downclock_mode->clock,
				pipe_config->port_clock,
				&pipe_config->dp_m2_n2);
	}

	if (IS_SKYLAKE(dev) && is_edp(intel_dp))
		skl_edp_set_pll_config(pipe_config, common_rates[clock]);
	else if (IS_HASWELL(dev) || IS_BROADWELL(dev))
		hsw_dp_set_ddi_pll_sel(pipe_config, intel_dp->link_bw);
	else
		intel_dp_set_clock(encoder, pipe_config, intel_dp->link_bw);

	return true;
}
1474
1475 static void ironlake_set_pll_cpu_edp(struct intel_dp *intel_dp)
1476 {
1477         struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
1478         struct intel_crtc *crtc = to_intel_crtc(dig_port->base.base.crtc);
1479         struct drm_device *dev = crtc->base.dev;
1480         struct drm_i915_private *dev_priv = dev->dev_private;
1481         u32 dpa_ctl;
1482
1483         DRM_DEBUG_KMS("eDP PLL enable for clock %d\n",
1484                       crtc->config->port_clock);
1485         dpa_ctl = I915_READ(DP_A);
1486         dpa_ctl &= ~DP_PLL_FREQ_MASK;
1487
1488         if (crtc->config->port_clock == 162000) {
1489                 /* For a long time we've carried around a ILK-DevA w/a for the
1490                  * 160MHz clock. If we're really unlucky, it's still required.
1491                  */
1492                 DRM_DEBUG_KMS("160MHz cpu eDP clock, might need ilk devA w/a\n");
1493                 dpa_ctl |= DP_PLL_FREQ_160MHZ;
1494                 intel_dp->DP |= DP_PLL_FREQ_160MHZ;
1495         } else {
1496                 dpa_ctl |= DP_PLL_FREQ_270MHZ;
1497                 intel_dp->DP |= DP_PLL_FREQ_270MHZ;
1498         }
1499
1500         I915_WRITE(DP_A, dpa_ctl);
1501
1502         POSTING_READ(DP_A);
1503         udelay(500);
1504 }
1505
/*
 * Build the value for the DP port register in intel_dp->DP for the
 * upcoming enable; no register write happens here beyond reading back
 * the BIOS-set DP_DETECTED bit.
 */
static void intel_dp_prepare(struct intel_encoder *encoder)
{
	struct drm_device *dev = encoder->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
	enum port port = dp_to_dig_port(intel_dp)->port;
	struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
	struct drm_display_mode *adjusted_mode = &crtc->config->base.adjusted_mode;

	/*
	 * There are four kinds of DP registers:
	 *
	 *	IBX PCH
	 *	SNB CPU
	 *	IVB CPU
	 *	CPT PCH
	 *
	 * IBX PCH and CPU are the same for almost everything,
	 * except that the CPU DP PLL is configured in this
	 * register
	 *
	 * CPT PCH is quite different, having many bits moved
	 * to the TRANS_DP_CTL register instead. That
	 * configuration happens (oddly) in ironlake_pch_enable
	 */

	/* Preserve the BIOS-computed detected bit. This is
	 * supposed to be read-only.
	 */
	intel_dp->DP = I915_READ(intel_dp->output_reg) & DP_DETECTED;

	/* Handle DP bits in common between all three register formats */
	intel_dp->DP |= DP_VOLTAGE_0_4 | DP_PRE_EMPHASIS_0;
	intel_dp->DP |= DP_PORT_WIDTH(intel_dp->lane_count);

	if (crtc->config->has_audio)
		intel_dp->DP |= DP_AUDIO_OUTPUT_ENABLE;

	/* Split out the IBX/CPU vs CPT settings */

	if (port == PORT_A && IS_GEN7(dev) && !IS_VALLEYVIEW(dev)) {
		/* IVB CPU port A: CPT-style link-train bits, pipe in bit 29 */
		if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
			intel_dp->DP |= DP_SYNC_HS_HIGH;
		if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
			intel_dp->DP |= DP_SYNC_VS_HIGH;
		intel_dp->DP |= DP_LINK_TRAIN_OFF_CPT;

		if (drm_dp_enhanced_frame_cap(intel_dp->dpcd))
			intel_dp->DP |= DP_ENHANCED_FRAMING;

		intel_dp->DP |= crtc->pipe << 29;
	} else if (!HAS_PCH_CPT(dev) || port == PORT_A) {
		/* IBX/CPU format: sync polarity and pipe select live here */
		if (!HAS_PCH_SPLIT(dev) && !IS_VALLEYVIEW(dev))
			intel_dp->DP |= intel_dp->color_range;

		if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
			intel_dp->DP |= DP_SYNC_HS_HIGH;
		if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
			intel_dp->DP |= DP_SYNC_VS_HIGH;
		intel_dp->DP |= DP_LINK_TRAIN_OFF;

		if (drm_dp_enhanced_frame_cap(intel_dp->dpcd))
			intel_dp->DP |= DP_ENHANCED_FRAMING;

		/* CHV has a 2-bit pipe-select field; others a single bit. */
		if (!IS_CHERRYVIEW(dev)) {
			if (crtc->pipe == 1)
				intel_dp->DP |= DP_PIPEB_SELECT;
		} else {
			intel_dp->DP |= DP_PIPE_SELECT_CHV(crtc->pipe);
		}
	} else {
		/* CPT PCH: most settings live in TRANS_DP_CTL instead */
		intel_dp->DP |= DP_LINK_TRAIN_OFF_CPT;
	}
}
1580
1581 #define IDLE_ON_MASK            (PP_ON | PP_SEQUENCE_MASK | 0                     | PP_SEQUENCE_STATE_MASK)
1582 #define IDLE_ON_VALUE           (PP_ON | PP_SEQUENCE_NONE | 0                     | PP_SEQUENCE_STATE_ON_IDLE)
1583
1584 #define IDLE_OFF_MASK           (PP_ON | PP_SEQUENCE_MASK | 0                     | 0)
1585 #define IDLE_OFF_VALUE          (0     | PP_SEQUENCE_NONE | 0                     | 0)
1586
1587 #define IDLE_CYCLE_MASK         (PP_ON | PP_SEQUENCE_MASK | PP_CYCLE_DELAY_ACTIVE | PP_SEQUENCE_STATE_MASK)
1588 #define IDLE_CYCLE_VALUE        (0     | PP_SEQUENCE_NONE | 0                     | PP_SEQUENCE_STATE_OFF_IDLE)
1589
/*
 * Poll the panel power sequencer status register until the bits
 * selected by @mask read back as @value.
 *
 * Logs an error (but takes no other action) if the state is not
 * reached within 5 seconds, polling every 10 ms.  Caller must hold
 * pps_mutex.
 */
static void wait_panel_status(struct intel_dp *intel_dp,
                                       u32 mask,
                                       u32 value)
{
        struct drm_device *dev = intel_dp_to_dev(intel_dp);
        struct drm_i915_private *dev_priv = dev->dev_private;
        u32 pp_stat_reg, pp_ctrl_reg;

        lockdep_assert_held(&dev_priv->pps_mutex);

        pp_stat_reg = _pp_stat_reg(intel_dp);
        pp_ctrl_reg = _pp_ctrl_reg(intel_dp);

        DRM_DEBUG_KMS("mask %08x value %08x status %08x control %08x\n",
                        mask, value,
                        I915_READ(pp_stat_reg),
                        I915_READ(pp_ctrl_reg));

        /* 5000 ms total timeout, re-reading the status every 10 ms. */
        if (_wait_for((I915_READ(pp_stat_reg) & mask) == value, 5000, 10)) {
                DRM_ERROR("Panel status timeout: status %08x control %08x\n",
                                I915_READ(pp_stat_reg),
                                I915_READ(pp_ctrl_reg));
        }

        DRM_DEBUG_KMS("Wait complete\n");
}
1616
/* Block until the power sequencer reports the panel fully powered on. */
static void wait_panel_on(struct intel_dp *intel_dp)
{
        DRM_DEBUG_KMS("Wait for panel power on\n");
        wait_panel_status(intel_dp, IDLE_ON_MASK, IDLE_ON_VALUE);
}
1622
/* Block until the power sequencer reports the panel fully powered off. */
static void wait_panel_off(struct intel_dp *intel_dp)
{
        DRM_DEBUG_KMS("Wait for panel power off time\n");
        wait_panel_status(intel_dp, IDLE_OFF_MASK, IDLE_OFF_VALUE);
}
1628
/*
 * Honour the panel power-cycle delay: first wait out the software
 * timestamp-based delay since the last power-down, then wait for the
 * power sequencer itself to reach its off/idle state.
 */
static void wait_panel_power_cycle(struct intel_dp *intel_dp)
{
        DRM_DEBUG_KMS("Wait for panel power cycle\n");

        /* When we disable the VDD override bit last we have to do the manual
         * wait. */
        wait_remaining_ms_from_jiffies(intel_dp->last_power_cycle,
                                       intel_dp->panel_power_cycle_delay);

        wait_panel_status(intel_dp, IDLE_CYCLE_MASK, IDLE_CYCLE_VALUE);
}
1640
/*
 * Wait out the panel-power-on -> backlight-on delay, measured from the
 * timestamp recorded when the panel was last powered on.
 */
static void wait_backlight_on(struct intel_dp *intel_dp)
{
        wait_remaining_ms_from_jiffies(intel_dp->last_power_on,
                                       intel_dp->backlight_on_delay);
}
1646
/*
 * Wait out the backlight-off delay, measured from the timestamp
 * recorded when the backlight was last disabled.
 */
static void edp_wait_backlight_off(struct intel_dp *intel_dp)
{
        wait_remaining_ms_from_jiffies(intel_dp->last_backlight_off,
                                       intel_dp->backlight_off_delay);
}
1652
1653 /* Read the current pp_control value, unlocking the register if it
1654  * is locked
1655  */
1656
1657 static  u32 ironlake_get_pp_control(struct intel_dp *intel_dp)
1658 {
1659         struct drm_device *dev = intel_dp_to_dev(intel_dp);
1660         struct drm_i915_private *dev_priv = dev->dev_private;
1661         u32 control;
1662
1663         lockdep_assert_held(&dev_priv->pps_mutex);
1664
1665         control = I915_READ(_pp_ctrl_reg(intel_dp));
1666         control &= ~PANEL_UNLOCK_MASK;
1667         control |= PANEL_UNLOCK_REGS;
1668         return control;
1669 }
1670
/*
 * Force the panel VDD rail on so the AUX channel / panel registers can
 * be accessed before full panel power-up.
 *
 * Must be paired with edp_panel_vdd_off().
 * Must hold pps_mutex around the whole on/off sequence.
 * Can be nested with intel_edp_panel_vdd_{on,off}() calls.
 *
 * Returns true iff this call is the one that turned VDD on (i.e. the
 * caller now owes a matching vdd off); false if VDD was already
 * wanted/on, or if this is not an eDP port.
 */
static bool edp_panel_vdd_on(struct intel_dp *intel_dp)
{
        struct drm_device *dev = intel_dp_to_dev(intel_dp);
        struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
        struct intel_encoder *intel_encoder = &intel_dig_port->base;
        struct drm_i915_private *dev_priv = dev->dev_private;
        enum intel_display_power_domain power_domain;
        u32 pp;
        u32 pp_stat_reg, pp_ctrl_reg;
        bool need_to_disable = !intel_dp->want_panel_vdd;

        lockdep_assert_held(&dev_priv->pps_mutex);

        if (!is_edp(intel_dp))
                return false;

        /* A pending deferred vdd-off is now stale: we want VDD on. */
        cancel_delayed_work(&intel_dp->panel_vdd_work);
        intel_dp->want_panel_vdd = true;

        /* Hardware already has VDD forced on: nothing more to program. */
        if (edp_have_panel_vdd(intel_dp))
                return need_to_disable;

        /* Hold the port power domain for as long as VDD stays forced;
         * released again in edp_panel_vdd_off_sync(). */
        power_domain = intel_display_port_power_domain(intel_encoder);
        intel_display_power_get(dev_priv, power_domain);

        DRM_DEBUG_KMS("Turning eDP port %c VDD on\n",
                      port_name(intel_dig_port->port));

        if (!edp_have_panel_power(intel_dp))
                wait_panel_power_cycle(intel_dp);

        pp = ironlake_get_pp_control(intel_dp);
        pp |= EDP_FORCE_VDD;

        pp_stat_reg = _pp_stat_reg(intel_dp);
        pp_ctrl_reg = _pp_ctrl_reg(intel_dp);

        I915_WRITE(pp_ctrl_reg, pp);
        POSTING_READ(pp_ctrl_reg);
        DRM_DEBUG_KMS("PP_STATUS: 0x%08x PP_CONTROL: 0x%08x\n",
                        I915_READ(pp_stat_reg), I915_READ(pp_ctrl_reg));
        /*
         * If the panel wasn't on, delay before accessing aux channel
         */
        if (!edp_have_panel_power(intel_dp)) {
                DRM_DEBUG_KMS("eDP port %c panel power wasn't enabled\n",
                              port_name(intel_dig_port->port));
                msleep(intel_dp->panel_power_up_delay);
        }

        return need_to_disable;
}
1728
/*
 * Public wrapper: grab pps_mutex, force VDD on, and warn if VDD had
 * already been requested (this entry point must not be nested).
 *
 * Must be paired with intel_edp_panel_vdd_off() or
 * intel_edp_panel_off().
 * Nested calls to these functions are not allowed since
 * we drop the lock. Caller must use some higher level
 * locking to prevent nested calls from other threads.
 */
void intel_edp_panel_vdd_on(struct intel_dp *intel_dp)
{
        bool vdd;

        if (!is_edp(intel_dp))
                return;

        pps_lock(intel_dp);
        vdd = edp_panel_vdd_on(intel_dp);
        pps_unlock(intel_dp);

        /* edp_panel_vdd_on() returned false: someone else already held
         * a VDD reference, which this non-nestable API forbids. */
        I915_STATE_WARN(!vdd, "eDP port %c VDD already requested on\n",
             port_name(dp_to_dig_port(intel_dp)->port));
}
1750
/*
 * Immediately drop the forced-VDD override: clear EDP_FORCE_VDD and
 * release the power domain reference taken in edp_panel_vdd_on().
 * Only legal once nobody wants VDD anymore (want_panel_vdd cleared).
 * Caller must hold pps_mutex.
 */
static void edp_panel_vdd_off_sync(struct intel_dp *intel_dp)
{
        struct drm_device *dev = intel_dp_to_dev(intel_dp);
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct intel_digital_port *intel_dig_port =
                dp_to_dig_port(intel_dp);
        struct intel_encoder *intel_encoder = &intel_dig_port->base;
        enum intel_display_power_domain power_domain;
        u32 pp;
        u32 pp_stat_reg, pp_ctrl_reg;

        lockdep_assert_held(&dev_priv->pps_mutex);

        WARN_ON(intel_dp->want_panel_vdd);

        /* Nothing to do if the override is already off. */
        if (!edp_have_panel_vdd(intel_dp))
                return;

        DRM_DEBUG_KMS("Turning eDP port %c VDD off\n",
                      port_name(intel_dig_port->port));

        pp = ironlake_get_pp_control(intel_dp);
        pp &= ~EDP_FORCE_VDD;

        pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
        pp_stat_reg = _pp_stat_reg(intel_dp);

        I915_WRITE(pp_ctrl_reg, pp);
        POSTING_READ(pp_ctrl_reg);

        /* Make sure sequencer is idle before allowing subsequent activity */
        DRM_DEBUG_KMS("PP_STATUS: 0x%08x PP_CONTROL: 0x%08x\n",
        I915_READ(pp_stat_reg), I915_READ(pp_ctrl_reg));

        /* Panel power was already off, so dropping VDD starts the
         * power-cycle delay window now. */
        if ((pp & POWER_TARGET_ON) == 0)
                intel_dp->last_power_cycle = jiffies;

        power_domain = intel_display_port_power_domain(intel_encoder);
        intel_display_power_put(dev_priv, power_domain);
}
1791
1792 static void edp_panel_vdd_work(struct work_struct *__work)
1793 {
1794         struct intel_dp *intel_dp = container_of(to_delayed_work(__work),
1795                                                  struct intel_dp, panel_vdd_work);
1796
1797         pps_lock(intel_dp);
1798         if (!intel_dp->want_panel_vdd)
1799                 edp_panel_vdd_off_sync(intel_dp);
1800         pps_unlock(intel_dp);
1801 }
1802
1803 static void edp_panel_vdd_schedule_off(struct intel_dp *intel_dp)
1804 {
1805         unsigned long delay;
1806
1807         /*
1808          * Queue the timer to fire a long time from now (relative to the power
1809          * down delay) to keep the panel power up across a sequence of
1810          * operations.
1811          */
1812         delay = msecs_to_jiffies(intel_dp->panel_power_cycle_delay * 5);
1813         schedule_delayed_work(&intel_dp->panel_vdd_work, delay);
1814 }
1815
/*
 * Release one VDD request: either drop the override immediately
 * (@sync) or defer it via the delayed work so back-to-back panel
 * accesses don't bounce VDD.
 *
 * Must be paired with edp_panel_vdd_on().
 * Must hold pps_mutex around the whole on/off sequence.
 * Can be nested with intel_edp_panel_vdd_{on,off}() calls.
 */
static void edp_panel_vdd_off(struct intel_dp *intel_dp, bool sync)
{
        struct drm_i915_private *dev_priv =
                intel_dp_to_dev(intel_dp)->dev_private;

        lockdep_assert_held(&dev_priv->pps_mutex);

        if (!is_edp(intel_dp))
                return;

        /* Catch unbalanced off-without-on calls. */
        I915_STATE_WARN(!intel_dp->want_panel_vdd, "eDP port %c VDD not forced on",
             port_name(dp_to_dig_port(intel_dp)->port));

        intel_dp->want_panel_vdd = false;

        if (sync)
                edp_panel_vdd_off_sync(intel_dp);
        else
                edp_panel_vdd_schedule_off(intel_dp);
}
1841
/*
 * Turn the eDP panel power on via the power sequencer and wait for it
 * to come up.  No-op (with a WARN) if power is already on.
 * Caller must hold pps_mutex.
 */
static void edp_panel_on(struct intel_dp *intel_dp)
{
        struct drm_device *dev = intel_dp_to_dev(intel_dp);
        struct drm_i915_private *dev_priv = dev->dev_private;
        u32 pp;
        u32 pp_ctrl_reg;

        lockdep_assert_held(&dev_priv->pps_mutex);

        if (!is_edp(intel_dp))
                return;

        DRM_DEBUG_KMS("Turn eDP port %c panel power on\n",
                      port_name(dp_to_dig_port(intel_dp)->port));

        if (WARN(edp_have_panel_power(intel_dp),
                 "eDP port %c panel power already on\n",
                 port_name(dp_to_dig_port(intel_dp)->port)))
                return;

        /* Respect the mandatory off-time before powering back up. */
        wait_panel_power_cycle(intel_dp);

        pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
        pp = ironlake_get_pp_control(intel_dp);
        if (IS_GEN5(dev)) {
                /* ILK workaround: disable reset around power sequence */
                pp &= ~PANEL_POWER_RESET;
                I915_WRITE(pp_ctrl_reg, pp);
                POSTING_READ(pp_ctrl_reg);
        }

        pp |= POWER_TARGET_ON;
        if (!IS_GEN5(dev))
                pp |= PANEL_POWER_RESET;

        I915_WRITE(pp_ctrl_reg, pp);
        POSTING_READ(pp_ctrl_reg);

        wait_panel_on(intel_dp);
        /* Timestamp used by wait_backlight_on(). */
        intel_dp->last_power_on = jiffies;

        if (IS_GEN5(dev)) {
                pp |= PANEL_POWER_RESET; /* restore panel reset bit */
                I915_WRITE(pp_ctrl_reg, pp);
                POSTING_READ(pp_ctrl_reg);
        }
}
1889
/* Public wrapper: power the eDP panel on under the PPS mutex. */
void intel_edp_panel_on(struct intel_dp *intel_dp)
{
        if (is_edp(intel_dp)) {
                pps_lock(intel_dp);
                edp_panel_on(intel_dp);
                pps_unlock(intel_dp);
        }
}
1899
1900
/*
 * Turn the eDP panel power off.  Requires a VDD reference on entry
 * (WARNs otherwise); both panel power and the VDD override are cleared
 * in one write, and the power-domain reference taken when VDD was
 * enabled is dropped at the end.  Caller must hold pps_mutex.
 */
static void edp_panel_off(struct intel_dp *intel_dp)
{
        struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
        struct intel_encoder *intel_encoder = &intel_dig_port->base;
        struct drm_device *dev = intel_dp_to_dev(intel_dp);
        struct drm_i915_private *dev_priv = dev->dev_private;
        enum intel_display_power_domain power_domain;
        u32 pp;
        u32 pp_ctrl_reg;

        lockdep_assert_held(&dev_priv->pps_mutex);

        if (!is_edp(intel_dp))
                return;

        DRM_DEBUG_KMS("Turn eDP port %c panel power off\n",
                      port_name(dp_to_dig_port(intel_dp)->port));

        WARN(!intel_dp->want_panel_vdd, "Need eDP port %c VDD to turn off panel\n",
             port_name(dp_to_dig_port(intel_dp)->port));

        pp = ironlake_get_pp_control(intel_dp);
        /* We need to switch off panel power _and_ force vdd, for otherwise some
         * panels get very unhappy and cease to work. */
        pp &= ~(POWER_TARGET_ON | PANEL_POWER_RESET | EDP_FORCE_VDD |
                EDP_BLC_ENABLE);

        pp_ctrl_reg = _pp_ctrl_reg(intel_dp);

        intel_dp->want_panel_vdd = false;

        I915_WRITE(pp_ctrl_reg, pp);
        POSTING_READ(pp_ctrl_reg);

        /* Start of the power-cycle delay window. */
        intel_dp->last_power_cycle = jiffies;
        wait_panel_off(intel_dp);

        /* We got a reference when we enabled the VDD. */
        power_domain = intel_display_port_power_domain(intel_encoder);
        intel_display_power_put(dev_priv, power_domain);
}
1942
/* Public wrapper: power the eDP panel off under the PPS mutex. */
void intel_edp_panel_off(struct intel_dp *intel_dp)
{
        if (is_edp(intel_dp)) {
                pps_lock(intel_dp);
                edp_panel_off(intel_dp);
                pps_unlock(intel_dp);
        }
}
1952
/* Enable backlight in the panel power control. */
static void _intel_edp_backlight_on(struct intel_dp *intel_dp)
{
        struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
        struct drm_device *dev = intel_dig_port->base.base.dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        u32 pp;
        u32 pp_ctrl_reg;

        /*
         * If we enable the backlight right away following a panel power
         * on, we may see slight flicker as the panel syncs with the eDP
         * link.  So delay a bit to make sure the image is solid before
         * allowing it to appear.
         */
        wait_backlight_on(intel_dp);

        pps_lock(intel_dp);

        /* Set only the backlight-enable bit; everything else preserved. */
        pp = ironlake_get_pp_control(intel_dp);
        pp |= EDP_BLC_ENABLE;

        pp_ctrl_reg = _pp_ctrl_reg(intel_dp);

        I915_WRITE(pp_ctrl_reg, pp);
        POSTING_READ(pp_ctrl_reg);

        pps_unlock(intel_dp);
}
1982
1983 /* Enable backlight PWM and backlight PP control. */
1984 void intel_edp_backlight_on(struct intel_dp *intel_dp)
1985 {
1986         if (!is_edp(intel_dp))
1987                 return;
1988
1989         DRM_DEBUG_KMS("\n");
1990
1991         intel_panel_enable_backlight(intel_dp->attached_connector);
1992         _intel_edp_backlight_on(intel_dp);
1993 }
1994
/* Disable backlight in the panel power control. */
static void _intel_edp_backlight_off(struct intel_dp *intel_dp)
{
        struct drm_device *dev = intel_dp_to_dev(intel_dp);
        struct drm_i915_private *dev_priv = dev->dev_private;
        u32 pp;
        u32 pp_ctrl_reg;

        if (!is_edp(intel_dp))
                return;

        pps_lock(intel_dp);

        /* Clear only the backlight-enable bit; everything else preserved. */
        pp = ironlake_get_pp_control(intel_dp);
        pp &= ~EDP_BLC_ENABLE;

        pp_ctrl_reg = _pp_ctrl_reg(intel_dp);

        I915_WRITE(pp_ctrl_reg, pp);
        POSTING_READ(pp_ctrl_reg);

        pps_unlock(intel_dp);

        /* Record the timestamp, then honour the backlight-off delay. */
        intel_dp->last_backlight_off = jiffies;
        edp_wait_backlight_off(intel_dp);
}
2021
2022 /* Disable backlight PP control and backlight PWM. */
2023 void intel_edp_backlight_off(struct intel_dp *intel_dp)
2024 {
2025         if (!is_edp(intel_dp))
2026                 return;
2027
2028         DRM_DEBUG_KMS("\n");
2029
2030         _intel_edp_backlight_off(intel_dp);
2031         intel_panel_disable_backlight(intel_dp->attached_connector);
2032 }
2033
2034 /*
2035  * Hook for controlling the panel power control backlight through the bl_power
2036  * sysfs attribute. Take care to handle multiple calls.
2037  */
2038 static void intel_edp_backlight_power(struct intel_connector *connector,
2039                                       bool enable)
2040 {
2041         struct intel_dp *intel_dp = intel_attached_dp(&connector->base);
2042         bool is_enabled;
2043
2044         pps_lock(intel_dp);
2045         is_enabled = ironlake_get_pp_control(intel_dp) & EDP_BLC_ENABLE;
2046         pps_unlock(intel_dp);
2047
2048         if (is_enabled == enable)
2049                 return;
2050
2051         DRM_DEBUG_KMS("panel power control backlight %s\n",
2052                       enable ? "enable" : "disable");
2053
2054         if (enable)
2055                 _intel_edp_backlight_on(intel_dp);
2056         else
2057                 _intel_edp_backlight_off(intel_dp);
2058 }
2059
/*
 * Enable the eDP PLL via the DP_A register.  The pipe must be disabled
 * and the PLL/port must be off on entry (both are asserted).
 */
static void ironlake_edp_pll_on(struct intel_dp *intel_dp)
{
        struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
        struct drm_crtc *crtc = intel_dig_port->base.base.crtc;
        struct drm_device *dev = crtc->dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        u32 dpa_ctl;

        assert_pipe_disabled(dev_priv,
                             to_intel_crtc(crtc)->pipe);

        DRM_DEBUG_KMS("\n");
        dpa_ctl = I915_READ(DP_A);
        WARN(dpa_ctl & DP_PLL_ENABLE, "dp pll on, should be off\n");
        WARN(dpa_ctl & DP_PORT_EN, "dp port still on, should be off\n");

        /* We don't adjust intel_dp->DP while tearing down the link, to
         * facilitate link retraining (e.g. after hotplug). Hence clear all
         * enable bits here to ensure that we don't enable too much. */
        intel_dp->DP &= ~(DP_PORT_EN | DP_AUDIO_OUTPUT_ENABLE);
        intel_dp->DP |= DP_PLL_ENABLE;
        I915_WRITE(DP_A, intel_dp->DP);
        POSTING_READ(DP_A);
        /* Let the PLL settle before anyone relies on it. */
        udelay(200);
}
2085
/*
 * Disable the eDP PLL via the DP_A register.  The pipe must be
 * disabled and the port off (asserted); the PLL must currently be on.
 */
static void ironlake_edp_pll_off(struct intel_dp *intel_dp)
{
        struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
        struct drm_crtc *crtc = intel_dig_port->base.base.crtc;
        struct drm_device *dev = crtc->dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        u32 dpa_ctl;

        assert_pipe_disabled(dev_priv,
                             to_intel_crtc(crtc)->pipe);

        dpa_ctl = I915_READ(DP_A);
        WARN((dpa_ctl & DP_PLL_ENABLE) == 0,
             "dp pll off, should be on\n");
        WARN(dpa_ctl & DP_PORT_EN, "dp port still on, should be off\n");

        /* We can't rely on the value tracked for the DP register in
         * intel_dp->DP because link_down must not change that (otherwise link
         * re-training will fail. */
        dpa_ctl &= ~DP_PLL_ENABLE;
        I915_WRITE(DP_A, dpa_ctl);
        POSTING_READ(DP_A);
        /* Give the hardware time to shut the PLL down. */
        udelay(200);
}
2110
/* If the sink supports it, try to set the power state appropriately */
void intel_dp_sink_dpms(struct intel_dp *intel_dp, int mode)
{
        int ret, i;

        /* Should have a valid DPCD by this point */
        if (intel_dp->dpcd[DP_DPCD_REV] < 0x11)
                return;

        if (mode != DRM_MODE_DPMS_ON) {
                ret = drm_dp_dpcd_writeb(&intel_dp->aux, DP_SET_POWER,
                                         DP_SET_POWER_D3);
        } else {
                /*
                 * When turning on, retry the write a few times (with
                 * 1 ms sleeps in between) to give the sink time to
                 * wake up.
                 */
                for (i = 0; i < 3; i++) {
                        ret = drm_dp_dpcd_writeb(&intel_dp->aux, DP_SET_POWER,
                                                 DP_SET_POWER_D0);
                        if (ret == 1)
                                break;
                        msleep(1);
                }
        }

        /* ret == 1 means exactly one byte was written successfully. */
        if (ret != 1)
                DRM_DEBUG_KMS("failed to %s sink power state\n",
                              mode == DRM_MODE_DPMS_ON ? "enable" : "disable");
}
2141
/*
 * Read back whether this DP port is enabled and, if so, which pipe
 * drives it (stored in *pipe).  Returns false when the port's power
 * domain is off or the port-enable bit is clear.  The pipe decode
 * differs per platform: gen7 port A and CHV have dedicated fields,
 * pre-CPT uses the PIPEB select bit, and CPT encodes the mapping in
 * the transcoder's TRANS_DP_CTL port-select field.
 */
static bool intel_dp_get_hw_state(struct intel_encoder *encoder,
                                  enum pipe *pipe)
{
        struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
        enum port port = dp_to_dig_port(intel_dp)->port;
        struct drm_device *dev = encoder->base.dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        enum intel_display_power_domain power_domain;
        u32 tmp;

        power_domain = intel_display_port_power_domain(encoder);
        if (!intel_display_power_is_enabled(dev_priv, power_domain))
                return false;

        tmp = I915_READ(intel_dp->output_reg);

        if (!(tmp & DP_PORT_EN))
                return false;

        if (port == PORT_A && IS_GEN7(dev) && !IS_VALLEYVIEW(dev)) {
                *pipe = PORT_TO_PIPE_CPT(tmp);
        } else if (IS_CHERRYVIEW(dev)) {
                *pipe = DP_PORT_TO_PIPE_CHV(tmp);
        } else if (!HAS_PCH_CPT(dev) || port == PORT_A) {
                *pipe = PORT_TO_PIPE(tmp);
        } else {
                /* CPT: scan each transcoder for the one selecting us. */
                u32 trans_sel;
                u32 trans_dp;
                int i;

                switch (intel_dp->output_reg) {
                case PCH_DP_B:
                        trans_sel = TRANS_DP_PORT_SEL_B;
                        break;
                case PCH_DP_C:
                        trans_sel = TRANS_DP_PORT_SEL_C;
                        break;
                case PCH_DP_D:
                        trans_sel = TRANS_DP_PORT_SEL_D;
                        break;
                default:
                        /* Unknown register: port is on, pipe unknown. */
                        return true;
                }

                for_each_pipe(dev_priv, i) {
                        trans_dp = I915_READ(TRANS_DP_CTL(i));
                        if ((trans_dp & TRANS_DP_PORT_SEL_MASK) == trans_sel) {
                                *pipe = i;
                                return true;
                        }
                }

                DRM_DEBUG_KMS("No pipe for dp port 0x%x found\n",
                              intel_dp->output_reg);
        }

        return true;
}
2200
/*
 * Reconstruct the crtc state (sync flags, audio, color range, link
 * M/N, port clock, dotclock) from the DP port registers, for hardware
 * state readout/cross-checking.
 */
static void intel_dp_get_config(struct intel_encoder *encoder,
                                struct intel_crtc_state *pipe_config)
{
        struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
        u32 tmp, flags = 0;
        struct drm_device *dev = encoder->base.dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        enum port port = dp_to_dig_port(intel_dp)->port;
        struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
        int dotclock;

        tmp = I915_READ(intel_dp->output_reg);

        pipe_config->has_audio = tmp & DP_AUDIO_OUTPUT_ENABLE && port != PORT_A;

        /* Sync polarity lives in the port register, except on CPT
         * (non-port-A) where it is in the transcoder's TRANS_DP_CTL. */
        if ((port == PORT_A) || !HAS_PCH_CPT(dev)) {
                if (tmp & DP_SYNC_HS_HIGH)
                        flags |= DRM_MODE_FLAG_PHSYNC;
                else
                        flags |= DRM_MODE_FLAG_NHSYNC;

                if (tmp & DP_SYNC_VS_HIGH)
                        flags |= DRM_MODE_FLAG_PVSYNC;
                else
                        flags |= DRM_MODE_FLAG_NVSYNC;
        } else {
                tmp = I915_READ(TRANS_DP_CTL(crtc->pipe));
                if (tmp & TRANS_DP_HSYNC_ACTIVE_HIGH)
                        flags |= DRM_MODE_FLAG_PHSYNC;
                else
                        flags |= DRM_MODE_FLAG_NHSYNC;

                if (tmp & TRANS_DP_VSYNC_ACTIVE_HIGH)
                        flags |= DRM_MODE_FLAG_PVSYNC;
                else
                        flags |= DRM_MODE_FLAG_NVSYNC;
        }

        pipe_config->base.adjusted_mode.flags |= flags;

        if (!HAS_PCH_SPLIT(dev) && !IS_VALLEYVIEW(dev) &&
            tmp & DP_COLOR_RANGE_16_235)
                pipe_config->limited_color_range = true;

        pipe_config->has_dp_encoder = true;

        intel_dp_get_m_n(crtc, pipe_config);

        /* Port A reports its link rate in the DP_A PLL frequency field. */
        if (port == PORT_A) {
                if ((I915_READ(DP_A) & DP_PLL_FREQ_MASK) == DP_PLL_FREQ_160MHZ)
                        pipe_config->port_clock = 162000;
                else
                        pipe_config->port_clock = 270000;
        }

        dotclock = intel_dotclock_calculate(pipe_config->port_clock,
                                            &pipe_config->dp_m_n);

        if (HAS_PCH_SPLIT(dev_priv->dev) && port != PORT_A)
                ironlake_check_encoder_dotclock(pipe_config, dotclock);

        pipe_config->base.adjusted_mode.crtc_clock = dotclock;

        if (is_edp(intel_dp) && dev_priv->vbt.edp_bpp &&
            pipe_config->pipe_bpp > dev_priv->vbt.edp_bpp) {
                /*
                 * This is a big fat ugly hack.
                 *
                 * Some machines in UEFI boot mode provide us a VBT that has 18
                 * bpp and 1.62 GHz link bandwidth for eDP, which for reasons
                 * unknown we fail to light up. Yet the same BIOS boots up with
                 * 24 bpp and 2.7 GHz link. Use the same bpp as the BIOS uses as
                 * max, not what it tells us to use.
                 *
                 * Note: This will still be broken if the eDP panel is not lit
                 * up by the BIOS, and thus we can't get the mode at module
                 * load.
                 */
                DRM_DEBUG_KMS("pipe has %d bpp for eDP panel, overriding BIOS-provided max %d bpp\n",
                              pipe_config->pipe_bpp, dev_priv->vbt.edp_bpp);
                dev_priv->vbt.edp_bpp = pipe_config->pipe_bpp;
        }
}
2284
/*
 * Encoder disable hook: tear down audio/PSR, then power the panel
 * down in the required order (VDD on -> backlight off -> sink to D3
 * -> panel off), and on pre-gen5 also take the link down before the
 * pipe is disabled.
 */
static void intel_disable_dp(struct intel_encoder *encoder)
{
        struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
        struct drm_device *dev = encoder->base.dev;
        struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);

        if (crtc->config->has_audio)
                intel_audio_codec_disable(encoder);

        if (HAS_PSR(dev) && !HAS_DDI(dev))
                intel_psr_disable(intel_dp);

        /* Make sure the panel is off before trying to change the mode. But also
         * ensure that we have vdd while we switch off the panel. */
        intel_edp_panel_vdd_on(intel_dp);
        intel_edp_backlight_off(intel_dp);
        intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_OFF);
        intel_edp_panel_off(intel_dp);

        /* disable the port before the pipe on g4x */
        if (INTEL_INFO(dev)->gen < 5)
                intel_dp_link_down(intel_dp);
}
2308
2309 static void ilk_post_disable_dp(struct intel_encoder *encoder)
2310 {
2311         struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2312         enum port port = dp_to_dig_port(intel_dp)->port;
2313
2314         intel_dp_link_down(intel_dp);
2315         if (port == PORT_A)
2316                 ironlake_edp_pll_off(intel_dp);
2317 }
2318
/* VLV post-disable: nothing beyond taking the link down. */
static void vlv_post_disable_dp(struct intel_encoder *encoder)
{
        struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);

        intel_dp_link_down(intel_dp);
}
2325
/*
 * CHV post-disable: after taking the link down, propagate a soft
 * reset into the PHY data lanes through the DPIO PCS registers for
 * both lane pairs of this channel.
 */
static void chv_post_disable_dp(struct intel_encoder *encoder)
{
        struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
        struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
        struct drm_device *dev = encoder->base.dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct intel_crtc *intel_crtc =
                to_intel_crtc(encoder->base.crtc);
        enum dpio_channel ch = vlv_dport_to_channel(dport);
        enum pipe pipe = intel_crtc->pipe;
        u32 val;

        intel_dp_link_down(intel_dp);

        mutex_lock(&dev_priv->dpio_lock);

        /* Propagate soft reset to data lane reset */
        val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW1(ch));
        val |= CHV_PCS_REQ_SOFTRESET_EN;
        vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW1(ch), val);

        val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW1(ch));
        val |= CHV_PCS_REQ_SOFTRESET_EN;
        vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW1(ch), val);

        /* Assert the lane resets on both lane pairs. */
        val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW0(ch));
        val &= ~(DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET);
        vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW0(ch), val);

        val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW0(ch));
        val &= ~(DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET);
        vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW0(ch), val);

        mutex_unlock(&dev_priv->dpio_lock);
}
2361
/*
 * Encode the requested DP training pattern (and scrambling disable)
 * for this platform.  Three register layouts exist:
 *  - DDI (HSW+): programmed directly into DP_TP_CTL here;
 *  - CPT PCH ports (and gen7 port A): *_CPT fields updated in *DP;
 *  - everything else (gmch / VLV / CHV): legacy fields updated in *DP.
 * For the non-DDI cases only *DP is modified; the caller is
 * responsible for writing it to the port register.
 */
static void
_intel_dp_set_link_train(struct intel_dp *intel_dp,
                         uint32_t *DP,
                         uint8_t dp_train_pat)
{
        struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
        struct drm_device *dev = intel_dig_port->base.base.dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        enum port port = intel_dig_port->port;

        if (HAS_DDI(dev)) {
                uint32_t temp = I915_READ(DP_TP_CTL(port));

                if (dp_train_pat & DP_LINK_SCRAMBLING_DISABLE)
                        temp |= DP_TP_CTL_SCRAMBLE_DISABLE;
                else
                        temp &= ~DP_TP_CTL_SCRAMBLE_DISABLE;

                temp &= ~DP_TP_CTL_LINK_TRAIN_MASK;
                switch (dp_train_pat & DP_TRAINING_PATTERN_MASK) {
                case DP_TRAINING_PATTERN_DISABLE:
                        temp |= DP_TP_CTL_LINK_TRAIN_NORMAL;

                        break;
                case DP_TRAINING_PATTERN_1:
                        temp |= DP_TP_CTL_LINK_TRAIN_PAT1;
                        break;
                case DP_TRAINING_PATTERN_2:
                        temp |= DP_TP_CTL_LINK_TRAIN_PAT2;
                        break;
                case DP_TRAINING_PATTERN_3:
                        temp |= DP_TP_CTL_LINK_TRAIN_PAT3;
                        break;
                }
                I915_WRITE(DP_TP_CTL(port), temp);

        } else if (HAS_PCH_CPT(dev) && (IS_GEN7(dev) || port != PORT_A)) {
                *DP &= ~DP_LINK_TRAIN_MASK_CPT;

                switch (dp_train_pat & DP_TRAINING_PATTERN_MASK) {
                case DP_TRAINING_PATTERN_DISABLE:
                        *DP |= DP_LINK_TRAIN_OFF_CPT;
                        break;
                case DP_TRAINING_PATTERN_1:
                        *DP |= DP_LINK_TRAIN_PAT_1_CPT;
                        break;
                case DP_TRAINING_PATTERN_2:
                        *DP |= DP_LINK_TRAIN_PAT_2_CPT;
                        break;
                case DP_TRAINING_PATTERN_3:
                        /* No pattern 3 on CPT: fall back to pattern 2. */
                        DRM_ERROR("DP training pattern 3 not supported\n");
                        *DP |= DP_LINK_TRAIN_PAT_2_CPT;
                        break;
                }

        } else {
                if (IS_CHERRYVIEW(dev))
                        *DP &= ~DP_LINK_TRAIN_MASK_CHV;
                else
                        *DP &= ~DP_LINK_TRAIN_MASK;

                switch (dp_train_pat & DP_TRAINING_PATTERN_MASK) {
                case DP_TRAINING_PATTERN_DISABLE:
                        *DP |= DP_LINK_TRAIN_OFF;
                        break;
                case DP_TRAINING_PATTERN_1:
                        *DP |= DP_LINK_TRAIN_PAT_1;
                        break;
                case DP_TRAINING_PATTERN_2:
                        *DP |= DP_LINK_TRAIN_PAT_2;
                        break;
                case DP_TRAINING_PATTERN_3:
                        if (IS_CHERRYVIEW(dev)) {
                                *DP |= DP_LINK_TRAIN_PAT_3_CHV;
                        } else {
                                /* Only CHV has pattern 3 here; fall back. */
                                DRM_ERROR("DP training pattern 3 not supported\n");
                                *DP |= DP_LINK_TRAIN_PAT_2;
                        }
                        break;
                }
        }
}
2444
/*
 * Program the DP port register and then enable the port.
 *
 * The register is deliberately written twice: first fully configured but
 * with DP_PORT_EN still clear, then again with DP_PORT_EN set. See the
 * comment below for why this two-step dance is required on VLV/CHV.
 */
static void intel_dp_enable_port(struct intel_dp *intel_dp)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	struct drm_i915_private *dev_priv = dev->dev_private;

	/* enable with pattern 1 (as per spec) */
	_intel_dp_set_link_train(intel_dp, &intel_dp->DP,
				 DP_TRAINING_PATTERN_1);

	/* First write: full configuration, port still disabled. */
	I915_WRITE(intel_dp->output_reg, intel_dp->DP);
	POSTING_READ(intel_dp->output_reg);

	/*
	 * Magic for VLV/CHV. We _must_ first set up the register
	 * without actually enabling the port, and then do another
	 * write to enable the port. Otherwise link training will
	 * fail when the power sequencer is freshly used for this port.
	 */
	intel_dp->DP |= DP_PORT_EN;

	/* Second write: same configuration plus the enable bit. */
	I915_WRITE(intel_dp->output_reg, intel_dp->DP);
	POSTING_READ(intel_dp->output_reg);
}
2468
/*
 * Common DP enable path: bring up the port, run the eDP panel power
 * sequence, wake the sink, train the link, and enable audio if the
 * pipe config requests it.
 */
static void intel_enable_dp(struct intel_encoder *encoder)
{
	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
	struct drm_device *dev = encoder->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
	uint32_t dp_reg = I915_READ(intel_dp->output_reg);

	/* The port must still be disabled when we get here. */
	if (WARN_ON(dp_reg & DP_PORT_EN))
		return;

	pps_lock(intel_dp);

	/* VLV power sequencers are per-pipe; bind one to this port first. */
	if (IS_VALLEYVIEW(dev))
		vlv_init_panel_power_sequencer(intel_dp);

	intel_dp_enable_port(intel_dp);

	/* Panel power-on needs VDD; release our VDD reference once it's up. */
	edp_panel_vdd_on(intel_dp);
	edp_panel_on(intel_dp);
	edp_panel_vdd_off(intel_dp, true);

	pps_unlock(intel_dp);

	if (IS_VALLEYVIEW(dev))
		vlv_wait_port_ready(dev_priv, dp_to_dig_port(intel_dp));

	/* Wake the sink, then run the full link training sequence. */
	intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_ON);
	intel_dp_start_link_train(intel_dp);
	intel_dp_complete_link_train(intel_dp);
	intel_dp_stop_link_train(intel_dp);

	if (crtc->config->has_audio) {
		DRM_DEBUG_DRIVER("Enabling DP audio on pipe %c\n",
				 pipe_name(crtc->pipe));
		intel_audio_codec_enable(encoder);
	}
}
2507
2508 static void g4x_enable_dp(struct intel_encoder *encoder)
2509 {
2510         struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2511
2512         intel_enable_dp(encoder);
2513         intel_edp_backlight_on(intel_dp);
2514 }
2515
2516 static void vlv_enable_dp(struct intel_encoder *encoder)
2517 {
2518         struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2519
2520         intel_edp_backlight_on(intel_dp);
2521         intel_psr_enable(intel_dp);
2522 }
2523
2524 static void g4x_pre_enable_dp(struct intel_encoder *encoder)
2525 {
2526         struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2527         struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
2528
2529         intel_dp_prepare(encoder);
2530
2531         /* Only ilk+ has port A */
2532         if (dport->port == PORT_A) {
2533                 ironlake_set_pll_cpu_edp(intel_dp);
2534                 ironlake_edp_pll_on(intel_dp);
2535         }
2536 }
2537
/*
 * Logically disconnect this eDP port from its current power sequencer:
 * sync off any pending VDD, clear the port select in the sequencer's
 * registers, and mark pps_pipe invalid.
 */
static void vlv_detach_power_sequencer(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_i915_private *dev_priv = intel_dig_port->base.base.dev->dev_private;
	enum pipe pipe = intel_dp->pps_pipe;
	int pp_on_reg = VLV_PIPE_PP_ON_DELAYS(pipe);

	/* Make sure VDD is fully off before we let go of the sequencer. */
	edp_panel_vdd_off_sync(intel_dp);

	/*
	 * VLV seems to get confused when multiple power sequencers
	 * have the same port selected (even if only one has power/vdd
	 * enabled). The failure manifests as vlv_wait_port_ready() failing.
	 * CHV on the other hand doesn't seem to mind having the same port
	 * selected in multiple power sequencers, but let's clear the
	 * port select always when logically disconnecting a power sequencer
	 * from a port.
	 */
	DRM_DEBUG_KMS("detaching pipe %c power sequencer from port %c\n",
		      pipe_name(pipe), port_name(intel_dig_port->port));
	I915_WRITE(pp_on_reg, 0);
	POSTING_READ(pp_on_reg);

	intel_dp->pps_pipe = INVALID_PIPE;
}
2563
/*
 * Detach the given pipe's power sequencer from whatever eDP port is
 * currently using it, so the caller can take it over. Caller must hold
 * pps_mutex.
 */
static void vlv_steal_power_sequencer(struct drm_device *dev,
				      enum pipe pipe)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_encoder *encoder;

	lockdep_assert_held(&dev_priv->pps_mutex);

	/* Only pipes A and B have eDP power sequencers. */
	if (WARN_ON(pipe != PIPE_A && pipe != PIPE_B))
		return;

	list_for_each_entry(encoder, &dev->mode_config.encoder_list,
			    base.head) {
		struct intel_dp *intel_dp;
		enum port port;

		if (encoder->type != INTEL_OUTPUT_EDP)
			continue;

		intel_dp = enc_to_intel_dp(&encoder->base);
		port = dp_to_dig_port(intel_dp)->port;

		if (intel_dp->pps_pipe != pipe)
			continue;

		DRM_DEBUG_KMS("stealing pipe %c power sequencer from port %c\n",
			      pipe_name(pipe), port_name(port));

		/* Stealing from a port that is actively using it is a bug. */
		WARN(encoder->connectors_active,
		     "stealing pipe %c power sequencer from active eDP port %c\n",
		     pipe_name(pipe), port_name(port));

		/* make sure vdd is off before we steal it */
		vlv_detach_power_sequencer(intel_dp);
	}
}
2600
/*
 * Bind the power sequencer of this eDP port's current pipe to the port:
 * detach any sequencer the port used before, steal the target pipe's
 * sequencer from any other port, then (re)initialize it. Caller must
 * hold pps_mutex. No-op for non-eDP ports or when already bound.
 */
static void vlv_init_panel_power_sequencer(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct intel_encoder *encoder = &intel_dig_port->base;
	struct drm_device *dev = encoder->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);

	lockdep_assert_held(&dev_priv->pps_mutex);

	if (!is_edp(intel_dp))
		return;

	/* Already using the right sequencer; nothing to do. */
	if (intel_dp->pps_pipe == crtc->pipe)
		return;

	/*
	 * If another power sequencer was being used on this
	 * port previously make sure to turn off vdd there while
	 * we still have control of it.
	 */
	if (intel_dp->pps_pipe != INVALID_PIPE)
		vlv_detach_power_sequencer(intel_dp);

	/*
	 * We may be stealing the power
	 * sequencer from another port.
	 */
	vlv_steal_power_sequencer(dev, crtc->pipe);

	/* now it's all ours */
	intel_dp->pps_pipe = crtc->pipe;

	DRM_DEBUG_KMS("initializing pipe %c power sequencer for port %c\n",
		      pipe_name(intel_dp->pps_pipe), port_name(intel_dig_port->port));

	/* init power sequencer on this pipe and port */
	intel_dp_init_panel_power_sequencer(dev, intel_dp);
	intel_dp_init_panel_power_sequencer_registers(dev, intel_dp);
}
2641
/*
 * VLV pre-enable hook: program the DPIO PHY (PCS) registers for this
 * lane channel, then run the common DP enable sequence.
 */
static void vlv_pre_enable_dp(struct intel_encoder *encoder)
{
	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
	struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
	struct drm_device *dev = encoder->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc);
	enum dpio_channel port = vlv_dport_to_channel(dport);
	int pipe = intel_crtc->pipe;
	u32 val;

	mutex_lock(&dev_priv->dpio_lock);

	/*
	 * NOTE(review): the value read from VLV_PCS01_DW8 is immediately
	 * overwritten with 0 on the next line. Presumably the read is kept
	 * only for a PHY side effect, or is simply dead — confirm against
	 * the PHY docs before removing it.
	 */
	val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW8(port));
	val = 0;
	if (pipe)
		val |= (1<<21);
	else
		val &= ~(1<<21);
	val |= 0x001000c4;
	vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW8(port), val);
	vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW14(port), 0x00760018);
	vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW23(port), 0x00400888);

	mutex_unlock(&dev_priv->dpio_lock);

	intel_enable_dp(encoder);
}
2670
/*
 * VLV pre-PLL-enable hook: program the port register and reset the PHY
 * TX lanes to their default state before the PLL comes up.
 */
static void vlv_dp_pre_pll_enable(struct intel_encoder *encoder)
{
	struct intel_digital_port *dport = enc_to_dig_port(&encoder->base);
	struct drm_device *dev = encoder->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc =
		to_intel_crtc(encoder->base.crtc);
	enum dpio_channel port = vlv_dport_to_channel(dport);
	int pipe = intel_crtc->pipe;

	intel_dp_prepare(encoder);

	/* Program Tx lane resets to default */
	mutex_lock(&dev_priv->dpio_lock);
	vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW0(port),
			 DPIO_PCS_TX_LANE2_RESET |
			 DPIO_PCS_TX_LANE1_RESET);
	vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW1(port),
			 DPIO_PCS_CLK_CRI_RXEB_EIOS_EN |
			 DPIO_PCS_CLK_CRI_RXDIGFILTSG_EN |
			 (1<<DPIO_PCS_CLK_DATAWIDTH_SHIFT) |
				 DPIO_PCS_CLK_SOFT_RESET);

	/* Fix up inter-pair skew failure */
	vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW12(port), 0x00750f00);
	vlv_dpio_write(dev_priv, pipe, VLV_TX_DW11(port), 0x00001500);
	vlv_dpio_write(dev_priv, pipe, VLV_TX_DW14(port), 0x40400000);
	mutex_unlock(&dev_priv->dpio_lock);
}
2700
/*
 * CHV pre-enable hook: take the PHY data lanes out of reset for both
 * PCS groups, program per-lane TX settings, then run the common DP
 * enable sequence. The write ordering below follows the required
 * deassert sequence, so do not reorder.
 */
static void chv_pre_enable_dp(struct intel_encoder *encoder)
{
	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
	struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
	struct drm_device *dev = encoder->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc =
		to_intel_crtc(encoder->base.crtc);
	enum dpio_channel ch = vlv_dport_to_channel(dport);
	int pipe = intel_crtc->pipe;
	int data, i;
	u32 val;

	mutex_lock(&dev_priv->dpio_lock);

	/* allow hardware to manage TX FIFO reset source */
	val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW11(ch));
	val &= ~DPIO_LANEDESKEW_STRAP_OVRD;
	vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW11(ch), val);

	val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW11(ch));
	val &= ~DPIO_LANEDESKEW_STRAP_OVRD;
	vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW11(ch), val);

	/* Deassert soft data lane reset*/
	val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW1(ch));
	val |= CHV_PCS_REQ_SOFTRESET_EN;
	vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW1(ch), val);

	val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW1(ch));
	val |= CHV_PCS_REQ_SOFTRESET_EN;
	vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW1(ch), val);

	val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW0(ch));
	val |= (DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET);
	vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW0(ch), val);

	val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW0(ch));
	val |= (DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET);
	vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW0(ch), val);

	/* Program Tx lane latency optimal setting*/
	for (i = 0; i < 4; i++) {
		/* Set the upar bit */
		data = (i == 1) ? 0x0 : 0x1;
		vlv_dpio_write(dev_priv, pipe, CHV_TX_DW14(ch, i),
				data << DPIO_UPAR_SHIFT);
	}

	/* Data lane stagger programming */
	/* FIXME: Fix up value only after power analysis */

	mutex_unlock(&dev_priv->dpio_lock);

	intel_enable_dp(encoder);
}
2757
/*
 * CHV pre-PLL-enable hook: program the port register, then set up the
 * PHY clock distribution (left/right buffers) and clock channel usage
 * for this port before the PLL is enabled.
 */
static void chv_dp_pre_pll_enable(struct intel_encoder *encoder)
{
	struct intel_digital_port *dport = enc_to_dig_port(&encoder->base);
	struct drm_device *dev = encoder->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc =
		to_intel_crtc(encoder->base.crtc);
	enum dpio_channel ch = vlv_dport_to_channel(dport);
	enum pipe pipe = intel_crtc->pipe;
	u32 val;

	intel_dp_prepare(encoder);

	mutex_lock(&dev_priv->dpio_lock);

	/* program left/right clock distribution */
	if (pipe != PIPE_B) {
		val = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW5_CH0);
		val &= ~(CHV_BUFLEFTENA1_MASK | CHV_BUFRIGHTENA1_MASK);
		if (ch == DPIO_CH0)
			val |= CHV_BUFLEFTENA1_FORCE;
		if (ch == DPIO_CH1)
			val |= CHV_BUFRIGHTENA1_FORCE;
		vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW5_CH0, val);
	} else {
		val = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW1_CH1);
		val &= ~(CHV_BUFLEFTENA2_MASK | CHV_BUFRIGHTENA2_MASK);
		if (ch == DPIO_CH0)
			val |= CHV_BUFLEFTENA2_FORCE;
		if (ch == DPIO_CH1)
			val |= CHV_BUFRIGHTENA2_FORCE;
		vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW1_CH1, val);
	}

	/* program clock channel usage */
	val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW8(ch));
	val |= CHV_PCS_USEDCLKCHANNEL_OVRRIDE;
	if (pipe != PIPE_B)
		val &= ~CHV_PCS_USEDCLKCHANNEL;
	else
		val |= CHV_PCS_USEDCLKCHANNEL;
	vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW8(ch), val);

	val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW8(ch));
	val |= CHV_PCS_USEDCLKCHANNEL_OVRRIDE;
	if (pipe != PIPE_B)
		val &= ~CHV_PCS_USEDCLKCHANNEL;
	else
		val |= CHV_PCS_USEDCLKCHANNEL;
	vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW8(ch), val);

	/*
	 * This is a bit weird since generally CL
	 * matches the pipe, but here we need to
	 * pick the CL based on the port.
	 */
	val = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW19(ch));
	if (pipe != PIPE_B)
		val &= ~CHV_CMN_USEDCLKCHANNEL;
	else
		val |= CHV_CMN_USEDCLKCHANNEL;
	vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW19(ch), val);

	mutex_unlock(&dev_priv->dpio_lock);
}
2823
2824 /*
2825  * Native read with retry for link status and receiver capability reads for
2826  * cases where the sink may still be asleep.
2827  *
2828  * Sinks are *supposed* to come up within 1ms from an off state, but we're also
2829  * supposed to retry 3 times per the spec.
2830  */
2831 static ssize_t
2832 intel_dp_dpcd_read_wake(struct drm_dp_aux *aux, unsigned int offset,
2833                         void *buffer, size_t size)
2834 {
2835         ssize_t ret;
2836         int i;
2837
2838         /*
2839          * Sometime we just get the same incorrect byte repeated
2840          * over the entire buffer. Doing just one throw away read
2841          * initially seems to "solve" it.
2842          */
2843         drm_dp_dpcd_read(aux, DP_DPCD_REV, buffer, 1);
2844
2845         for (i = 0; i < 3; i++) {
2846                 ret = drm_dp_dpcd_read(aux, offset, buffer, size);
2847                 if (ret == size)
2848                         return ret;
2849                 msleep(1);
2850         }
2851
2852         return ret;
2853 }
2854
2855 /*
2856  * Fetch AUX CH registers 0x202 - 0x207 which contain
2857  * link status information
2858  */
2859 static bool
2860 intel_dp_get_link_status(struct intel_dp *intel_dp, uint8_t link_status[DP_LINK_STATUS_SIZE])
2861 {
2862         return intel_dp_dpcd_read_wake(&intel_dp->aux,
2863                                        DP_LANE0_1_STATUS,
2864                                        link_status,
2865                                        DP_LINK_STATUS_SIZE) == DP_LINK_STATUS_SIZE;
2866 }
2867
2868 /* These are source-specific values. */
2869 static uint8_t
2870 intel_dp_voltage_max(struct intel_dp *intel_dp)
2871 {
2872         struct drm_device *dev = intel_dp_to_dev(intel_dp);
2873         struct drm_i915_private *dev_priv = dev->dev_private;
2874         enum port port = dp_to_dig_port(intel_dp)->port;
2875
2876         if (INTEL_INFO(dev)->gen >= 9) {
2877                 if (dev_priv->vbt.edp_low_vswing && port == PORT_A)
2878                         return DP_TRAIN_VOLTAGE_SWING_LEVEL_3;
2879                 return DP_TRAIN_VOLTAGE_SWING_LEVEL_2;
2880         } else if (IS_VALLEYVIEW(dev))
2881                 return DP_TRAIN_VOLTAGE_SWING_LEVEL_3;
2882         else if (IS_GEN7(dev) && port == PORT_A)
2883                 return DP_TRAIN_VOLTAGE_SWING_LEVEL_2;
2884         else if (HAS_PCH_CPT(dev) && port != PORT_A)
2885                 return DP_TRAIN_VOLTAGE_SWING_LEVEL_3;
2886         else
2887                 return DP_TRAIN_VOLTAGE_SWING_LEVEL_2;
2888 }
2889
/*
 * Maximum pre-emphasis level the source supports for a given voltage
 * swing, selected per platform generation and port. Higher swing
 * levels generally permit less pre-emphasis.
 */
static uint8_t
intel_dp_pre_emphasis_max(struct intel_dp *intel_dp, uint8_t voltage_swing)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	enum port port = dp_to_dig_port(intel_dp)->port;

	if (INTEL_INFO(dev)->gen >= 9) {
		switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
			return DP_TRAIN_PRE_EMPH_LEVEL_3;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
			return DP_TRAIN_PRE_EMPH_LEVEL_2;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
			return DP_TRAIN_PRE_EMPH_LEVEL_1;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
			return DP_TRAIN_PRE_EMPH_LEVEL_0;
		default:
			return DP_TRAIN_PRE_EMPH_LEVEL_0;
		}
	} else if (IS_HASWELL(dev) || IS_BROADWELL(dev)) {
		switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
			return DP_TRAIN_PRE_EMPH_LEVEL_3;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
			return DP_TRAIN_PRE_EMPH_LEVEL_2;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
			return DP_TRAIN_PRE_EMPH_LEVEL_1;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
		default:
			return DP_TRAIN_PRE_EMPH_LEVEL_0;
		}
	} else if (IS_VALLEYVIEW(dev)) {
		switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
			return DP_TRAIN_PRE_EMPH_LEVEL_3;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
			return DP_TRAIN_PRE_EMPH_LEVEL_2;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
			return DP_TRAIN_PRE_EMPH_LEVEL_1;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
		default:
			return DP_TRAIN_PRE_EMPH_LEVEL_0;
		}
	} else if (IS_GEN7(dev) && port == PORT_A) {
		/* gen7 CPU eDP (port A) tops out one level lower. */
		switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
			return DP_TRAIN_PRE_EMPH_LEVEL_2;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
			return DP_TRAIN_PRE_EMPH_LEVEL_1;
		default:
			return DP_TRAIN_PRE_EMPH_LEVEL_0;
		}
	} else {
		switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
			return DP_TRAIN_PRE_EMPH_LEVEL_2;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
			return DP_TRAIN_PRE_EMPH_LEVEL_2;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
			return DP_TRAIN_PRE_EMPH_LEVEL_1;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
		default:
			return DP_TRAIN_PRE_EMPH_LEVEL_0;
		}
	}
}
2957
/*
 * Translate the requested voltage swing / pre-emphasis combination in
 * train_set[0] into VLV PHY (DPIO) register values and program them.
 * Unsupported combinations return 0 early without touching the PHY;
 * on success the registers are written and 0 is returned as well.
 */
static uint32_t intel_vlv_signal_levels(struct intel_dp *intel_dp)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
	struct intel_crtc *intel_crtc =
		to_intel_crtc(dport->base.base.crtc);
	unsigned long demph_reg_value, preemph_reg_value,
		uniqtranscale_reg_value;
	uint8_t train_set = intel_dp->train_set[0];
	enum dpio_channel port = vlv_dport_to_channel(dport);
	int pipe = intel_crtc->pipe;

	/* Look up the magic PHY values for this swing/pre-emph pair. */
	switch (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) {
	case DP_TRAIN_PRE_EMPH_LEVEL_0:
		preemph_reg_value = 0x0004000;
		switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
			demph_reg_value = 0x2B405555;
			uniqtranscale_reg_value = 0x552AB83A;
			break;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
			demph_reg_value = 0x2B404040;
			uniqtranscale_reg_value = 0x5548B83A;
			break;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
			demph_reg_value = 0x2B245555;
			uniqtranscale_reg_value = 0x5560B83A;
			break;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
			demph_reg_value = 0x2B405555;
			uniqtranscale_reg_value = 0x5598DA3A;
			break;
		default:
			return 0;
		}
		break;
	case DP_TRAIN_PRE_EMPH_LEVEL_1:
		preemph_reg_value = 0x0002000;
		switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
			demph_reg_value = 0x2B404040;
			uniqtranscale_reg_value = 0x5552B83A;
			break;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
			demph_reg_value = 0x2B404848;
			uniqtranscale_reg_value = 0x5580B83A;
			break;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
			demph_reg_value = 0x2B404040;
			uniqtranscale_reg_value = 0x55ADDA3A;
			break;
		default:
			return 0;
		}
		break;
	case DP_TRAIN_PRE_EMPH_LEVEL_2:
		preemph_reg_value = 0x0000000;
		switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
			demph_reg_value = 0x2B305555;
			uniqtranscale_reg_value = 0x5570B83A;
			break;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
			demph_reg_value = 0x2B2B4040;
			uniqtranscale_reg_value = 0x55ADDA3A;
			break;
		default:
			return 0;
		}
		break;
	case DP_TRAIN_PRE_EMPH_LEVEL_3:
		preemph_reg_value = 0x0006000;
		switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
			demph_reg_value = 0x1B405555;
			uniqtranscale_reg_value = 0x55ADDA3A;
			break;
		default:
			return 0;
		}
		break;
	default:
		return 0;
	}

	/* Program the PHY: disable TX, write settings, re-enable TX. */
	mutex_lock(&dev_priv->dpio_lock);
	vlv_dpio_write(dev_priv, pipe, VLV_TX_DW5(port), 0x00000000);
	vlv_dpio_write(dev_priv, pipe, VLV_TX_DW4(port), demph_reg_value);
	vlv_dpio_write(dev_priv, pipe, VLV_TX_DW2(port),
			 uniqtranscale_reg_value);
	vlv_dpio_write(dev_priv, pipe, VLV_TX_DW3(port), 0x0C782040);
	vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW11(port), 0x00030000);
	vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW9(port), preemph_reg_value);
	vlv_dpio_write(dev_priv, pipe, VLV_TX_DW5(port), 0x80000000);
	mutex_unlock(&dev_priv->dpio_lock);

	return 0;
}
3057
3058 static uint32_t intel_chv_signal_levels(struct intel_dp *intel_dp)
3059 {
3060         struct drm_device *dev = intel_dp_to_dev(intel_dp);
3061         struct drm_i915_private *dev_priv = dev->dev_private;
3062         struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
3063         struct intel_crtc *intel_crtc = to_intel_crtc(dport->base.base.crtc);
3064         u32 deemph_reg_value, margin_reg_value, val;
3065         uint8_t train_set = intel_dp->train_set[0];
3066         enum dpio_channel ch = vlv_dport_to_channel(dport);
3067         enum pipe pipe = intel_crtc->pipe;
3068         int i;
3069
3070         switch (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) {
3071         case DP_TRAIN_PRE_EMPH_LEVEL_0:
3072                 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
3073                 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3074                         deemph_reg_value = 128;
3075                         margin_reg_value = 52;
3076                         break;
3077                 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
3078                         deemph_reg_value = 128;
3079                         margin_reg_value = 77;
3080                         break;
3081                 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
3082                         deemph_reg_value = 128;
3083                         margin_reg_value = 102;
3084                         break;
3085                 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
3086                         deemph_reg_value = 128;
3087                         margin_reg_value = 154;
3088                         /* FIXME extra to set for 1200 */
3089                         break;
3090                 default:
3091                         return 0;
3092                 }
3093                 break;
3094         case DP_TRAIN_PRE_EMPH_LEVEL_1:
3095                 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
3096                 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3097                         deemph_reg_value = 85;
3098                         margin_reg_value = 78;
3099                         break;
3100                 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
3101                         deemph_reg_value = 85;
3102                         margin_reg_value = 116;
3103                         break;
3104                 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
3105                         deemph_reg_value = 85;
3106                         margin_reg_value = 154;
3107                         break;
3108                 default:
3109                         return 0;
3110                 }
3111                 break;
3112         case DP_TRAIN_PRE_EMPH_LEVEL_2:
3113                 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
3114                 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3115                         deemph_reg_value = 64;
3116                         margin_reg_value = 104;
3117                         break;
3118                 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
3119                         deemph_reg_value = 64;
3120                         margin_reg_value = 154;
3121                         break;
3122                 default:
3123                         return 0;
3124                 }
3125                 break;
3126         case DP_TRAIN_PRE_EMPH_LEVEL_3:
3127                 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
3128                 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3129                         deemph_reg_value = 43;
3130                         margin_reg_value = 154;
3131                         break;
3132                 default:
3133                         return 0;
3134                 }
3135                 break;
3136         default:
3137                 return 0;
3138         }
3139
3140         mutex_lock(&dev_priv->dpio_lock);
3141
3142         /* Clear calc init */
3143         val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW10(ch));
3144         val &= ~(DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3);
3145         val &= ~(DPIO_PCS_TX1DEEMP_MASK | DPIO_PCS_TX2DEEMP_MASK);
3146         val |= DPIO_PCS_TX1DEEMP_9P5 | DPIO_PCS_TX2DEEMP_9P5;
3147         vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW10(ch), val);
3148
3149         val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW10(ch));
3150         val &= ~(DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3);
3151         val &= ~(DPIO_PCS_TX1DEEMP_MASK | DPIO_PCS_TX2DEEMP_MASK);
3152         val |= DPIO_PCS_TX1DEEMP_9P5 | DPIO_PCS_TX2DEEMP_9P5;
3153         vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW10(ch), val);
3154
3155         val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW9(ch));
3156         val &= ~(DPIO_PCS_TX1MARGIN_MASK | DPIO_PCS_TX2MARGIN_MASK);
3157         val |= DPIO_PCS_TX1MARGIN_000 | DPIO_PCS_TX2MARGIN_000;
3158         vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW9(ch), val);
3159
3160         val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW9(ch));
3161         val &= ~(DPIO_PCS_TX1MARGIN_MASK | DPIO_PCS_TX2MARGIN_MASK);
3162         val |= DPIO_PCS_TX1MARGIN_000 | DPIO_PCS_TX2MARGIN_000;
3163         vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW9(ch), val);
3164
3165         /* Program swing deemph */
3166         for (i = 0; i < 4; i++) {
3167                 val = vlv_dpio_read(dev_priv, pipe, CHV_TX_DW4(ch, i));
3168                 val &= ~DPIO_SWING_DEEMPH9P5_MASK;
3169                 val |= deemph_reg_value << DPIO_SWING_DEEMPH9P5_SHIFT;
3170                 vlv_dpio_write(dev_priv, pipe, CHV_TX_DW4(ch, i), val);
3171         }
3172
3173         /* Program swing margin */
3174         for (i = 0; i < 4; i++) {
3175                 val = vlv_dpio_read(dev_priv, pipe, CHV_TX_DW2(ch, i));
3176                 val &= ~DPIO_SWING_MARGIN000_MASK;
3177                 val |= margin_reg_value << DPIO_SWING_MARGIN000_SHIFT;
3178                 vlv_dpio_write(dev_priv, pipe, CHV_TX_DW2(ch, i), val);
3179         }
3180
3181         /* Disable unique transition scale */
3182         for (i = 0; i < 4; i++) {
3183                 val = vlv_dpio_read(dev_priv, pipe, CHV_TX_DW3(ch, i));
3184                 val &= ~DPIO_TX_UNIQ_TRANS_SCALE_EN;
3185                 vlv_dpio_write(dev_priv, pipe, CHV_TX_DW3(ch, i), val);
3186         }
3187
3188         if (((train_set & DP_TRAIN_PRE_EMPHASIS_MASK)
3189                         == DP_TRAIN_PRE_EMPH_LEVEL_0) &&
3190                 ((train_set & DP_TRAIN_VOLTAGE_SWING_MASK)
3191                         == DP_TRAIN_VOLTAGE_SWING_LEVEL_3)) {
3192
3193                 /*
3194                  * The document said it needs to set bit 27 for ch0 and bit 26
3195                  * for ch1. Might be a typo in the doc.
3196                  * For now, for this unique transition scale selection, set bit
3197                  * 27 for ch0 and ch1.
3198                  */
3199                 for (i = 0; i < 4; i++) {
3200                         val = vlv_dpio_read(dev_priv, pipe, CHV_TX_DW3(ch, i));
3201                         val |= DPIO_TX_UNIQ_TRANS_SCALE_EN;
3202                         vlv_dpio_write(dev_priv, pipe, CHV_TX_DW3(ch, i), val);
3203                 }
3204
3205                 for (i = 0; i < 4; i++) {
3206                         val = vlv_dpio_read(dev_priv, pipe, CHV_TX_DW2(ch, i));
3207                         val &= ~(0xff << DPIO_UNIQ_TRANS_SCALE_SHIFT);
3208                         val |= (0x9a << DPIO_UNIQ_TRANS_SCALE_SHIFT);
3209                         vlv_dpio_write(dev_priv, pipe, CHV_TX_DW2(ch, i), val);
3210                 }
3211         }
3212
3213         /* Start swing calculation */
3214         val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW10(ch));
3215         val |= DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3;
3216         vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW10(ch), val);
3217
3218         val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW10(ch));
3219         val |= DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3;
3220         vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW10(ch), val);
3221
3222         /* LRC Bypass */
3223         val = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW30);
3224         val |= DPIO_LRC_BYPASS;
3225         vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW30, val);
3226
3227         mutex_unlock(&dev_priv->dpio_lock);
3228
3229         return 0;
3230 }
3231
3232 static void
3233 intel_get_adjust_train(struct intel_dp *intel_dp,
3234                        const uint8_t link_status[DP_LINK_STATUS_SIZE])
3235 {
3236         uint8_t v = 0;
3237         uint8_t p = 0;
3238         int lane;
3239         uint8_t voltage_max;
3240         uint8_t preemph_max;
3241
3242         for (lane = 0; lane < intel_dp->lane_count; lane++) {
3243                 uint8_t this_v = drm_dp_get_adjust_request_voltage(link_status, lane);
3244                 uint8_t this_p = drm_dp_get_adjust_request_pre_emphasis(link_status, lane);
3245
3246                 if (this_v > v)
3247                         v = this_v;
3248                 if (this_p > p)
3249                         p = this_p;
3250         }
3251
3252         voltage_max = intel_dp_voltage_max(intel_dp);
3253         if (v >= voltage_max)
3254                 v = voltage_max | DP_TRAIN_MAX_SWING_REACHED;
3255
3256         preemph_max = intel_dp_pre_emphasis_max(intel_dp, v);
3257         if (p >= preemph_max)
3258                 p = preemph_max | DP_TRAIN_MAX_PRE_EMPHASIS_REACHED;
3259
3260         for (lane = 0; lane < 4; lane++)
3261                 intel_dp->train_set[lane] = v | p;
3262 }
3263
3264 static uint32_t
3265 intel_gen4_signal_levels(uint8_t train_set)
3266 {
3267         uint32_t        signal_levels = 0;
3268
3269         switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
3270         case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3271         default:
3272                 signal_levels |= DP_VOLTAGE_0_4;
3273                 break;
3274         case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
3275                 signal_levels |= DP_VOLTAGE_0_6;
3276                 break;
3277         case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
3278                 signal_levels |= DP_VOLTAGE_0_8;
3279                 break;
3280         case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
3281                 signal_levels |= DP_VOLTAGE_1_2;
3282                 break;
3283         }
3284         switch (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) {
3285         case DP_TRAIN_PRE_EMPH_LEVEL_0:
3286         default:
3287                 signal_levels |= DP_PRE_EMPHASIS_0;
3288                 break;
3289         case DP_TRAIN_PRE_EMPH_LEVEL_1:
3290                 signal_levels |= DP_PRE_EMPHASIS_3_5;
3291                 break;
3292         case DP_TRAIN_PRE_EMPH_LEVEL_2:
3293                 signal_levels |= DP_PRE_EMPHASIS_6;
3294                 break;
3295         case DP_TRAIN_PRE_EMPH_LEVEL_3:
3296                 signal_levels |= DP_PRE_EMPHASIS_9_5;
3297                 break;
3298         }
3299         return signal_levels;
3300 }
3301
3302 /* Gen6's DP voltage swing and pre-emphasis control */
static uint32_t
intel_gen6_edp_signal_levels(uint8_t train_set)
{
        int signal_levels = train_set & (DP_TRAIN_VOLTAGE_SWING_MASK |
                                         DP_TRAIN_PRE_EMPHASIS_MASK);
        /*
         * Several swing/pre-emphasis requests share one hardware encoding
         * on SNB; unsupported combinations fall back to the lowest
         * (400-600mV / 0dB) setting.
         */
        switch (signal_levels) {
        case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_0:
        case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_0:
                return EDP_LINK_TRAIN_400_600MV_0DB_SNB_B;
        case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_1:
                return EDP_LINK_TRAIN_400MV_3_5DB_SNB_B;
        case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_2:
        case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_2:
                return EDP_LINK_TRAIN_400_600MV_6DB_SNB_B;
        case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_1:
        case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_1:
                return EDP_LINK_TRAIN_600_800MV_3_5DB_SNB_B;
        case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_0:
        case DP_TRAIN_VOLTAGE_SWING_LEVEL_3 | DP_TRAIN_PRE_EMPH_LEVEL_0:
                return EDP_LINK_TRAIN_800_1200MV_0DB_SNB_B;
        default:
                DRM_DEBUG_KMS("Unsupported voltage swing/pre-emphasis level:"
                              "0x%x\n", signal_levels);
                return EDP_LINK_TRAIN_400_600MV_0DB_SNB_B;
        }
}
3329
3330 /* Gen7's DP voltage swing and pre-emphasis control */
3331 static uint32_t
3332 intel_gen7_edp_signal_levels(uint8_t train_set)
3333 {
3334         int signal_levels = train_set & (DP_TRAIN_VOLTAGE_SWING_MASK |
3335                                          DP_TRAIN_PRE_EMPHASIS_MASK);
3336         switch (signal_levels) {
3337         case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3338                 return EDP_LINK_TRAIN_400MV_0DB_IVB;
3339         case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_1:
3340                 return EDP_LINK_TRAIN_400MV_3_5DB_IVB;
3341         case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_2:
3342                 return EDP_LINK_TRAIN_400MV_6DB_IVB;
3343
3344         case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3345                 return EDP_LINK_TRAIN_600MV_0DB_IVB;
3346         case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_1:
3347                 return EDP_LINK_TRAIN_600MV_3_5DB_IVB;
3348
3349         case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3350                 return EDP_LINK_TRAIN_800MV_0DB_IVB;
3351         case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_1:
3352                 return EDP_LINK_TRAIN_800MV_3_5DB_IVB;
3353
3354         default:
3355                 DRM_DEBUG_KMS("Unsupported voltage swing/pre-emphasis level:"
3356                               "0x%x\n", signal_levels);
3357                 return EDP_LINK_TRAIN_500MV_0DB_IVB;
3358         }
3359 }
3360
3361 /* Gen7.5's (HSW) DP voltage swing and pre-emphasis control */
static uint32_t
intel_hsw_signal_levels(uint8_t train_set)
{
        int signal_levels = train_set & (DP_TRAIN_VOLTAGE_SWING_MASK |
                                         DP_TRAIN_PRE_EMPHASIS_MASK);
        /*
         * Each supported swing/pre-emphasis pair selects one entry of
         * the DDI buffer translation table (DDI_BUF_TRANS_SELECT);
         * unsupported pairs fall back to entry 0.
         */
        switch (signal_levels) {
        case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_0:
                return DDI_BUF_TRANS_SELECT(0);
        case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_1:
                return DDI_BUF_TRANS_SELECT(1);
        case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_2:
                return DDI_BUF_TRANS_SELECT(2);
        case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_3:
                return DDI_BUF_TRANS_SELECT(3);

        case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_0:
                return DDI_BUF_TRANS_SELECT(4);
        case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_1:
                return DDI_BUF_TRANS_SELECT(5);
        case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_2:
                return DDI_BUF_TRANS_SELECT(6);

        case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_0:
                return DDI_BUF_TRANS_SELECT(7);
        case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_1:
                return DDI_BUF_TRANS_SELECT(8);

        case DP_TRAIN_VOLTAGE_SWING_LEVEL_3 | DP_TRAIN_PRE_EMPH_LEVEL_0:
                return DDI_BUF_TRANS_SELECT(9);
        default:
                DRM_DEBUG_KMS("Unsupported voltage swing/pre-emphasis level:"
                              "0x%x\n", signal_levels);
                return DDI_BUF_TRANS_SELECT(0);
        }
}
3397
3398 /* Properly updates "DP" with the correct signal levels. */
static void
intel_dp_set_signal_levels(struct intel_dp *intel_dp, uint32_t *DP)
{
        struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
        enum port port = intel_dig_port->port;
        struct drm_device *dev = intel_dig_port->base.base.dev;
        uint32_t signal_levels, mask;
        /* All lanes carry identical settings; lane 0 is representative. */
        uint8_t train_set = intel_dp->train_set[0];

        if (IS_HASWELL(dev) || IS_BROADWELL(dev) || INTEL_INFO(dev)->gen >= 9) {
                signal_levels = intel_hsw_signal_levels(train_set);
                mask = DDI_BUF_EMP_MASK;
        } else if (IS_CHERRYVIEW(dev)) {
                /*
                 * mask == 0: CHV/VLV program the levels through sideband
                 * (DPIO) writes rather than bits in the DP port register.
                 */
                signal_levels = intel_chv_signal_levels(intel_dp);
                mask = 0;
        } else if (IS_VALLEYVIEW(dev)) {
                signal_levels = intel_vlv_signal_levels(intel_dp);
                mask = 0;
        } else if (IS_GEN7(dev) && port == PORT_A) {
                signal_levels = intel_gen7_edp_signal_levels(train_set);
                mask = EDP_LINK_TRAIN_VOL_EMP_MASK_IVB;
        } else if (IS_GEN6(dev) && port == PORT_A) {
                signal_levels = intel_gen6_edp_signal_levels(train_set);
                mask = EDP_LINK_TRAIN_VOL_EMP_MASK_SNB;
        } else {
                signal_levels = intel_gen4_signal_levels(train_set);
                mask = DP_VOLTAGE_MASK | DP_PRE_EMPHASIS_MASK;
        }

        DRM_DEBUG_KMS("Using signal levels %08x\n", signal_levels);

        /* Merge the new levels into the caller's DP register value. */
        *DP = (*DP & ~mask) | signal_levels;
}
3432
static bool
intel_dp_set_link_train(struct intel_dp *intel_dp,
                        uint32_t *DP,
                        uint8_t dp_train_pat)
{
        struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
        struct drm_device *dev = intel_dig_port->base.base.dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        /* DP_TRAINING_PATTERN_SET byte followed by per-lane settings */
        uint8_t buf[sizeof(intel_dp->train_set) + 1];
        int ret, len;

        /* Program the training pattern on the source side first. */
        _intel_dp_set_link_train(intel_dp, DP, dp_train_pat);

        I915_WRITE(intel_dp->output_reg, *DP);
        POSTING_READ(intel_dp->output_reg);

        /* Then mirror the pattern to the sink via a single DPCD write. */
        buf[0] = dp_train_pat;
        if ((dp_train_pat & DP_TRAINING_PATTERN_MASK) ==
            DP_TRAINING_PATTERN_DISABLE) {
                /* don't write DP_TRAINING_LANEx_SET on disable */
                len = 1;
        } else {
                /* DP_TRAINING_LANEx_SET follow DP_TRAINING_PATTERN_SET */
                memcpy(buf + 1, intel_dp->train_set, intel_dp->lane_count);
                len = intel_dp->lane_count + 1;
        }

        ret = drm_dp_dpcd_write(&intel_dp->aux, DP_TRAINING_PATTERN_SET,
                                buf, len);

        /* Success only if the full DPCD transfer went through. */
        return ret == len;
}
3465
3466 static bool
3467 intel_dp_reset_link_train(struct intel_dp *intel_dp, uint32_t *DP,
3468                         uint8_t dp_train_pat)
3469 {
3470         memset(intel_dp->train_set, 0, sizeof(intel_dp->train_set));
3471         intel_dp_set_signal_levels(intel_dp, DP);
3472         return intel_dp_set_link_train(intel_dp, DP, dp_train_pat);
3473 }
3474
3475 static bool
3476 intel_dp_update_link_train(struct intel_dp *intel_dp, uint32_t *DP,
3477                            const uint8_t link_status[DP_LINK_STATUS_SIZE])
3478 {
3479         struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
3480         struct drm_device *dev = intel_dig_port->base.base.dev;
3481         struct drm_i915_private *dev_priv = dev->dev_private;
3482         int ret;
3483
3484         intel_get_adjust_train(intel_dp, link_status);
3485         intel_dp_set_signal_levels(intel_dp, DP);
3486
3487         I915_WRITE(intel_dp->output_reg, *DP);
3488         POSTING_READ(intel_dp->output_reg);
3489
3490         ret = drm_dp_dpcd_write(&intel_dp->aux, DP_TRAINING_LANE0_SET,
3491                                 intel_dp->train_set, intel_dp->lane_count);
3492
3493         return ret == intel_dp->lane_count;
3494 }
3495
/* Switch a DDI port's link-training state to sending idle patterns. */
static void intel_dp_set_idle_link_train(struct intel_dp *intel_dp)
{
        struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
        struct drm_device *dev = intel_dig_port->base.base.dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        enum port port = intel_dig_port->port;
        uint32_t val;

        /* The DP_TP_CTL idle mode only exists on DDI hardware. */
        if (!HAS_DDI(dev))
                return;

        val = I915_READ(DP_TP_CTL(port));
        val &= ~DP_TP_CTL_LINK_TRAIN_MASK;
        val |= DP_TP_CTL_LINK_TRAIN_IDLE;
        I915_WRITE(DP_TP_CTL(port), val);

        /*
         * On PORT_A we can have only eDP in SST mode. There the only reason
         * we need to set idle transmission mode is to work around a HW issue
         * where we enable the pipe while not in idle link-training mode.
         * In this case there is requirement to wait for a minimum number of
         * idle patterns to be sent.
         */
        if (port == PORT_A)
                return;

        /* Briefly poll for the hardware to report idle-pattern completion. */
        if (wait_for((I915_READ(DP_TP_STATUS(port)) & DP_TP_STATUS_IDLE_DONE),
                     1))
                DRM_ERROR("Timed out waiting for DP idle patterns\n");
}
3526
3527 /* Enable corresponding port and start training pattern 1 */
void
intel_dp_start_link_train(struct intel_dp *intel_dp)
{
        struct drm_encoder *encoder = &dp_to_dig_port(intel_dp)->base.base;
        struct drm_device *dev = encoder->dev;
        int i;
        uint8_t voltage;
        int voltage_tries, loop_tries;
        uint32_t DP = intel_dp->DP;
        uint8_t link_config[2];

        if (HAS_DDI(dev))
                intel_ddi_prepare_link_retrain(encoder);

        /* Write the link configuration data */
        link_config[0] = intel_dp->link_bw;
        link_config[1] = intel_dp->lane_count;
        if (drm_dp_enhanced_frame_cap(intel_dp->dpcd))
                link_config[1] |= DP_LANE_COUNT_ENHANCED_FRAME_EN;
        drm_dp_dpcd_write(&intel_dp->aux, DP_LINK_BW_SET, link_config, 2);
        /* Sinks advertising intermediate rates select via DP_LINK_RATE_SET */
        if (intel_dp->num_sink_rates)
                drm_dp_dpcd_write(&intel_dp->aux, DP_LINK_RATE_SET,
                                &intel_dp->rate_select, 1);

        /* No downspread, standard ANSI 8b/10b channel coding */
        link_config[0] = 0;
        link_config[1] = DP_SET_ANSI_8B10B;
        drm_dp_dpcd_write(&intel_dp->aux, DP_DOWNSPREAD_CTRL, link_config, 2);

        DP |= DP_PORT_EN;

        /* clock recovery: start training pattern 1 with zeroed drive levels */
        if (!intel_dp_reset_link_train(intel_dp, &DP,
                                       DP_TRAINING_PATTERN_1 |
                                       DP_LINK_SCRAMBLING_DISABLE)) {
                DRM_ERROR("failed to enable link training\n");
                return;
        }

        /* 0xff can never match a real swing level, so the first pass resets */
        voltage = 0xff;
        voltage_tries = 0;
        loop_tries = 0;
        for (;;) {
                uint8_t link_status[DP_LINK_STATUS_SIZE];

                drm_dp_link_train_clock_recovery_delay(intel_dp->dpcd);
                if (!intel_dp_get_link_status(intel_dp, link_status)) {
                        DRM_ERROR("failed to get link status\n");
                        break;
                }

                if (drm_dp_clock_recovery_ok(link_status, intel_dp->lane_count)) {
                        DRM_DEBUG_KMS("clock recovery OK\n");
                        break;
                }

                /* Check to see if we've tried the max voltage */
                for (i = 0; i < intel_dp->lane_count; i++)
                        if ((intel_dp->train_set[i] & DP_TRAIN_MAX_SWING_REACHED) == 0)
                                break;
                if (i == intel_dp->lane_count) {
                        /* All lanes maxed out: restart from zero, at most 5 times */
                        ++loop_tries;
                        if (loop_tries == 5) {
                                DRM_ERROR("too many full retries, give up\n");
                                break;
                        }
                        intel_dp_reset_link_train(intel_dp, &DP,
                                                  DP_TRAINING_PATTERN_1 |
                                                  DP_LINK_SCRAMBLING_DISABLE);
                        voltage_tries = 0;
                        continue;
                }

                /* Check to see if we've tried the same voltage 5 times */
                if ((intel_dp->train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK) == voltage) {
                        ++voltage_tries;
                        if (voltage_tries == 5) {
                                DRM_ERROR("too many voltage retries, give up\n");
                                break;
                        }
                } else
                        voltage_tries = 0;
                voltage = intel_dp->train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK;

                /* Update training set as requested by target */
                if (!intel_dp_update_link_train(intel_dp, &DP, link_status)) {
                        DRM_ERROR("failed to update link training\n");
                        break;
                }
        }

        /* Remember the final port register value for later stages. */
        intel_dp->DP = DP;
}
3620
/* Channel-equalization phase of DP link training (pattern 2 or 3). */
void
intel_dp_complete_link_train(struct intel_dp *intel_dp)
{
        bool channel_eq = false;
        int tries, cr_tries;
        uint32_t DP = intel_dp->DP;
        uint32_t training_pattern = DP_TRAINING_PATTERN_2;

        /* Training Pattern 3 for HBR2 or 1.2 devices that support it */
        if (intel_dp->link_bw == DP_LINK_BW_5_4 || intel_dp->use_tps3)
                training_pattern = DP_TRAINING_PATTERN_3;

        /* channel equalization */
        if (!intel_dp_set_link_train(intel_dp, &DP,
                                     training_pattern |
                                     DP_LINK_SCRAMBLING_DISABLE)) {
                DRM_ERROR("failed to start channel equalization\n");
                return;
        }

        tries = 0;
        cr_tries = 0;
        channel_eq = false;
        for (;;) {
                uint8_t link_status[DP_LINK_STATUS_SIZE];

                /* Give up after too many full clock-recovery restarts. */
                if (cr_tries > 5) {
                        DRM_ERROR("failed to train DP, aborting\n");
                        break;
                }

                drm_dp_link_train_channel_eq_delay(intel_dp->dpcd);
                if (!intel_dp_get_link_status(intel_dp, link_status)) {
                        DRM_ERROR("failed to get link status\n");
                        break;
                }

                /* Make sure clock is still ok */
                if (!drm_dp_clock_recovery_ok(link_status, intel_dp->lane_count)) {
                        /* Clock recovery lost: redo pattern 1, then restart EQ. */
                        intel_dp_start_link_train(intel_dp);
                        intel_dp_set_link_train(intel_dp, &DP,
                                                training_pattern |
                                                DP_LINK_SCRAMBLING_DISABLE);
                        cr_tries++;
                        continue;
                }

                if (drm_dp_channel_eq_ok(link_status, intel_dp->lane_count)) {
                        channel_eq = true;
                        break;
                }

                /* Try 5 times, then try clock recovery if that fails */
                if (tries > 5) {
                        intel_dp_start_link_train(intel_dp);
                        intel_dp_set_link_train(intel_dp, &DP,
                                                training_pattern |
                                                DP_LINK_SCRAMBLING_DISABLE);
                        tries = 0;
                        cr_tries++;
                        continue;
                }

                /* Update training set as requested by target */
                if (!intel_dp_update_link_train(intel_dp, &DP, link_status)) {
                        DRM_ERROR("failed to update link training\n");
                        break;
                }
                ++tries;
        }

        intel_dp_set_idle_link_train(intel_dp);

        intel_dp->DP = DP;

        if (channel_eq)
                DRM_DEBUG_KMS("Channel EQ done. DP Training successful\n");

}
3700
/* End link training: disable the pattern on both source and sink. */
void intel_dp_stop_link_train(struct intel_dp *intel_dp)
{
        intel_dp_set_link_train(intel_dp, &intel_dp->DP,
                                DP_TRAINING_PATTERN_DISABLE);
}
3706
/*
 * Tear the DP link down: drop the port into the idle training pattern,
 * apply the IBX transcoder-select workaround, then disable the port.
 */
static void
intel_dp_link_down(struct intel_dp *intel_dp)
{
        struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
        enum port port = intel_dig_port->port;
        struct drm_device *dev = intel_dig_port->base.base.dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        uint32_t DP = intel_dp->DP;

        /* This path must never run on DDI platforms. */
        if (WARN_ON(HAS_DDI(dev)))
                return;

        /* Nothing to do if the port is already disabled. */
        if (WARN_ON((I915_READ(intel_dp->output_reg) & DP_PORT_EN) == 0))
                return;

        DRM_DEBUG_KMS("\n");

        /* First switch the link to the idle training pattern. */
        if (HAS_PCH_CPT(dev) && (IS_GEN7(dev) || port != PORT_A)) {
                DP &= ~DP_LINK_TRAIN_MASK_CPT;
                I915_WRITE(intel_dp->output_reg, DP | DP_LINK_TRAIN_PAT_IDLE_CPT);
        } else {
                if (IS_CHERRYVIEW(dev))
                        DP &= ~DP_LINK_TRAIN_MASK_CHV;
                else
                        DP &= ~DP_LINK_TRAIN_MASK;
                I915_WRITE(intel_dp->output_reg, DP | DP_LINK_TRAIN_PAT_IDLE);
        }
        POSTING_READ(intel_dp->output_reg);

        if (HAS_PCH_IBX(dev) &&
            I915_READ(intel_dp->output_reg) & DP_PIPEB_SELECT) {
                /* Hardware workaround: leaving our transcoder select
                 * set to transcoder B while it's off will prevent the
                 * corresponding HDMI output on transcoder A.
                 *
                 * Combine this with another hardware workaround:
                 * transcoder select bit can only be cleared while the
                 * port is enabled.
                 */
                DP &= ~DP_PIPEB_SELECT;
                I915_WRITE(intel_dp->output_reg, DP);
                POSTING_READ(intel_dp->output_reg);
        }

        /* Finally kill audio and the port enable bit itself. */
        DP &= ~DP_AUDIO_OUTPUT_ENABLE;
        I915_WRITE(intel_dp->output_reg, DP & ~DP_PORT_EN);
        POSTING_READ(intel_dp->output_reg);
        /* Give the panel time to power down before any re-enable. */
        msleep(intel_dp->panel_power_down_delay);
}
3756
/*
 * Read and cache the sink's DPCD receiver capabilities.  Returns false
 * when the sink is absent or unresponsive.  Also probes PSR support,
 * TPS3 support, eDP 1.4 intermediate link rates and, for branch devices,
 * the per-port downstream info.
 */
static bool
intel_dp_get_dpcd(struct intel_dp *intel_dp)
{
        struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
        struct drm_device *dev = dig_port->base.base.dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        uint8_t rev;

        if (intel_dp_dpcd_read_wake(&intel_dp->aux, 0x000, intel_dp->dpcd,
                                    sizeof(intel_dp->dpcd)) < 0)
                return false; /* aux transfer failed */

        DRM_DEBUG_KMS("DPCD: %*ph\n", (int) sizeof(intel_dp->dpcd), intel_dp->dpcd);

        if (intel_dp->dpcd[DP_DPCD_REV] == 0)
                return false; /* DPCD not present */

        /* Check if the panel supports PSR */
        memset(intel_dp->psr_dpcd, 0, sizeof(intel_dp->psr_dpcd));
        if (is_edp(intel_dp)) {
                intel_dp_dpcd_read_wake(&intel_dp->aux, DP_PSR_SUPPORT,
                                        intel_dp->psr_dpcd,
                                        sizeof(intel_dp->psr_dpcd));
                if (intel_dp->psr_dpcd[0] & DP_PSR_IS_SUPPORTED) {
                        dev_priv->psr.sink_support = true;
                        DRM_DEBUG_KMS("Detected EDP PSR Panel.\n");
                }
        }

        /* Training Pattern 3 support, both source and sink */
        if (intel_dp->dpcd[DP_DPCD_REV] >= 0x12 &&
            intel_dp->dpcd[DP_MAX_LANE_COUNT] & DP_TPS3_SUPPORTED &&
            (IS_HASWELL(dev_priv) || INTEL_INFO(dev_priv)->gen >= 8)) {
                intel_dp->use_tps3 = true;
                DRM_DEBUG_KMS("Displayport TPS3 supported\n");
        } else
                intel_dp->use_tps3 = false;

        /* Intermediate frequency support */
        if (is_edp(intel_dp) &&
            (intel_dp->dpcd[DP_EDP_CONFIGURATION_CAP] & DP_DPCD_DISPLAY_CONTROL_CAPABLE) &&
            (intel_dp_dpcd_read_wake(&intel_dp->aux, DP_EDP_DPCD_REV, &rev, 1) == 1) &&
            (rev >= 0x03)) { /* eDp v1.4 or higher */
                __le16 sink_rates[DP_MAX_SUPPORTED_RATES];
                int i;

                intel_dp_dpcd_read_wake(&intel_dp->aux,
                                DP_SUPPORTED_LINK_RATES,
                                sink_rates,
                                sizeof(sink_rates));

                /* The rate table is zero-terminated. */
                for (i = 0; i < ARRAY_SIZE(sink_rates); i++) {
                        int val = le16_to_cpu(sink_rates[i]);

                        if (val == 0)
                                break;

                        /*
                         * DPCD rates are in 200kHz units while the driver
                         * keeps link rates in deca-kHz, hence * 200 / 10.
                         */
                        intel_dp->sink_rates[i] = (val * 200) / 10;
                }
                intel_dp->num_sink_rates = i;
        }

        intel_dp_print_rates(intel_dp);

        if (!(intel_dp->dpcd[DP_DOWNSTREAMPORT_PRESENT] &
              DP_DWN_STRM_PORT_PRESENT))
                return true; /* native DP sink */

        if (intel_dp->dpcd[DP_DPCD_REV] == 0x10)
                return true; /* no per-port downstream info */

        if (intel_dp_dpcd_read_wake(&intel_dp->aux, DP_DOWNSTREAM_PORT_0,
                                    intel_dp->downstream_ports,
                                    DP_MAX_DOWNSTREAM_PORTS) < 0)
                return false; /* downstream port status fetch failed */

        return true;
}
3836
3837 static void
3838 intel_dp_probe_oui(struct intel_dp *intel_dp)
3839 {
3840         u8 buf[3];
3841
3842         if (!(intel_dp->dpcd[DP_DOWN_STREAM_PORT_COUNT] & DP_OUI_SUPPORT))
3843                 return;
3844
3845         if (intel_dp_dpcd_read_wake(&intel_dp->aux, DP_SINK_OUI, buf, 3) == 3)
3846                 DRM_DEBUG_KMS("Sink OUI: %02hx%02hx%02hx\n",
3847                               buf[0], buf[1], buf[2]);
3848
3849         if (intel_dp_dpcd_read_wake(&intel_dp->aux, DP_BRANCH_OUI, buf, 3) == 3)
3850                 DRM_DEBUG_KMS("Branch OUI: %02hx%02hx%02hx\n",
3851                               buf[0], buf[1], buf[2]);
3852 }
3853
3854 static bool
3855 intel_dp_probe_mst(struct intel_dp *intel_dp)
3856 {
3857         u8 buf[1];
3858
3859         if (!intel_dp->can_mst)
3860                 return false;
3861
3862         if (intel_dp->dpcd[DP_DPCD_REV] < 0x12)
3863                 return false;
3864
3865         if (intel_dp_dpcd_read_wake(&intel_dp->aux, DP_MSTM_CAP, buf, 1)) {
3866                 if (buf[0] & DP_MST_CAP) {
3867                         DRM_DEBUG_KMS("Sink is MST capable\n");
3868                         intel_dp->is_mst = true;
3869                 } else {
3870                         DRM_DEBUG_KMS("Sink is not MST capable\n");
3871                         intel_dp->is_mst = false;
3872                 }
3873         }
3874
3875         drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr, intel_dp->is_mst);
3876         return intel_dp->is_mst;
3877 }
3878
3879 int intel_dp_sink_crc(struct intel_dp *intel_dp, u8 *crc)
3880 {
3881         struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
3882         struct drm_device *dev = intel_dig_port->base.base.dev;
3883         struct intel_crtc *intel_crtc =
3884                 to_intel_crtc(intel_dig_port->base.base.crtc);
3885         u8 buf;
3886         int test_crc_count;
3887         int attempts = 6;
3888
3889         if (drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_SINK_MISC, &buf) < 0)
3890                 return -EIO;
3891
3892         if (!(buf & DP_TEST_CRC_SUPPORTED))
3893                 return -ENOTTY;
3894
3895         if (drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_SINK, &buf) < 0)
3896                 return -EIO;
3897
3898         if (drm_dp_dpcd_writeb(&intel_dp->aux, DP_TEST_SINK,
3899                                 buf | DP_TEST_SINK_START) < 0)
3900                 return -EIO;
3901
3902         if (drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_SINK_MISC, &buf) < 0)
3903                 return -EIO;
3904         test_crc_count = buf & DP_TEST_COUNT_MASK;
3905
3906         do {
3907                 if (drm_dp_dpcd_readb(&intel_dp->aux,
3908                                       DP_TEST_SINK_MISC, &buf) < 0)
3909                         return -EIO;
3910                 intel_wait_for_vblank(dev, intel_crtc->pipe);
3911         } while (--attempts && (buf & DP_TEST_COUNT_MASK) == test_crc_count);
3912
3913         if (attempts == 0) {
3914                 DRM_DEBUG_KMS("Panel is unable to calculate CRC after 6 vblanks\n");
3915                 return -ETIMEDOUT;
3916         }
3917
3918         if (drm_dp_dpcd_read(&intel_dp->aux, DP_TEST_CRC_R_CR, crc, 6) < 0)
3919                 return -EIO;
3920
3921         if (drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_SINK, &buf) < 0)
3922                 return -EIO;
3923         if (drm_dp_dpcd_writeb(&intel_dp->aux, DP_TEST_SINK,
3924                                buf & ~DP_TEST_SINK_START) < 0)
3925                 return -EIO;
3926
3927         return 0;
3928 }
3929
3930 static bool
3931 intel_dp_get_sink_irq(struct intel_dp *intel_dp, u8 *sink_irq_vector)
3932 {
3933         return intel_dp_dpcd_read_wake(&intel_dp->aux,
3934                                        DP_DEVICE_SERVICE_IRQ_VECTOR,
3935                                        sink_irq_vector, 1) == 1;
3936 }
3937
3938 static bool
3939 intel_dp_get_sink_irq_esi(struct intel_dp *intel_dp, u8 *sink_irq_vector)
3940 {
3941         int ret;
3942
3943         ret = intel_dp_dpcd_read_wake(&intel_dp->aux,
3944                                              DP_SINK_COUNT_ESI,
3945                                              sink_irq_vector, 14);
3946         if (ret != 14)
3947                 return false;
3948
3949         return true;
3950 }
3951
/*
 * Respond to a sink DP_AUTOMATED_TEST_REQUEST interrupt. Automated
 * compliance testing is not implemented, so every request is refused.
 */
static void
intel_dp_handle_test_request(struct intel_dp *intel_dp)
{
	/* NAK by default */
	drm_dp_dpcd_writeb(&intel_dp->aux, DP_TEST_RESPONSE, DP_TEST_NAK);
}
3958
/*
 * Service MST sink events: read the ESI block, retrain the link if
 * channel EQ was lost, hand the event to the MST topology manager, and
 * ack it back to the sink. Loops while new events keep arriving.
 *
 * Returns the topology manager's result, or -EINVAL when not in MST
 * mode or when the ESI read fails (in which case MST mode is torn down
 * and a hotplug event is sent so userspace re-probes).
 */
static int
intel_dp_check_mst_status(struct intel_dp *intel_dp)
{
	bool bret;

	if (intel_dp->is_mst) {
		u8 esi[16] = { 0 };
		int ret = 0;
		int retry;
		bool handled;
		bret = intel_dp_get_sink_irq_esi(intel_dp, esi);
go_again:
		if (bret == true) {

			/* check link status - esi[10] = 0x200c */
			if (intel_dp->active_mst_links && !drm_dp_channel_eq_ok(&esi[10], intel_dp->lane_count)) {
				DRM_DEBUG_KMS("channel EQ not ok, retraining\n");
				intel_dp_start_link_train(intel_dp);
				intel_dp_complete_link_train(intel_dp);
				intel_dp_stop_link_train(intel_dp);
			}

			DRM_DEBUG_KMS("got esi %3ph\n", esi);
			ret = drm_dp_mst_hpd_irq(&intel_dp->mst_mgr, esi, &handled);

			if (handled) {
				/* Ack the handled event bytes back to the sink;
				 * retry the AUX write a few times if it's short. */
				for (retry = 0; retry < 3; retry++) {
					int wret;
					wret = drm_dp_dpcd_write(&intel_dp->aux,
								 DP_SINK_COUNT_ESI+1,
								 &esi[1], 3);
					if (wret == 3) {
						break;
					}
				}

				/* More events may have arrived meanwhile;
				 * keep servicing until the sink goes quiet. */
				bret = intel_dp_get_sink_irq_esi(intel_dp, esi);
				if (bret == true) {
					DRM_DEBUG_KMS("got esi2 %3ph\n", esi);
					goto go_again;
				}
			} else
				ret = 0;

			return ret;
		} else {
			struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
			DRM_DEBUG_KMS("failed to get ESI - device may have failed\n");
			intel_dp->is_mst = false;
			drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr, intel_dp->is_mst);
			/* send a hotplug event */
			drm_kms_helper_hotplug_event(intel_dig_port->base.base.dev);
		}
	}
	return -EINVAL;
}
4015
4016 /*
4017  * According to DP spec
4018  * 5.1.2:
4019  *  1. Read DPCD
4020  *  2. Configure link according to Receiver Capabilities
4021  *  3. Use Link Training from 2.5.3.3 and 3.5.1.3
4022  *  4. Check link status on receipt of hot-plug interrupt
4023  */
4024 static void
4025 intel_dp_check_link_status(struct intel_dp *intel_dp)
4026 {
4027         struct drm_device *dev = intel_dp_to_dev(intel_dp);
4028         struct intel_encoder *intel_encoder = &dp_to_dig_port(intel_dp)->base;
4029         u8 sink_irq_vector;
4030         u8 link_status[DP_LINK_STATUS_SIZE];
4031
4032         WARN_ON(!drm_modeset_is_locked(&dev->mode_config.connection_mutex));
4033
4034         if (!intel_encoder->connectors_active)
4035                 return;
4036
4037         if (WARN_ON(!intel_encoder->base.crtc))
4038                 return;
4039
4040         if (!to_intel_crtc(intel_encoder->base.crtc)->active)
4041                 return;
4042
4043         /* Try to read receiver status if the link appears to be up */
4044         if (!intel_dp_get_link_status(intel_dp, link_status)) {
4045                 return;
4046         }
4047
4048         /* Now read the DPCD to see if it's actually running */
4049         if (!intel_dp_get_dpcd(intel_dp)) {
4050                 return;
4051         }
4052
4053         /* Try to read the source of the interrupt */
4054         if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11 &&
4055             intel_dp_get_sink_irq(intel_dp, &sink_irq_vector)) {
4056                 /* Clear interrupt source */
4057                 drm_dp_dpcd_writeb(&intel_dp->aux,
4058                                    DP_DEVICE_SERVICE_IRQ_VECTOR,
4059                                    sink_irq_vector);
4060
4061                 if (sink_irq_vector & DP_AUTOMATED_TEST_REQUEST)
4062                         intel_dp_handle_test_request(intel_dp);
4063                 if (sink_irq_vector & (DP_CP_IRQ | DP_SINK_SPECIFIC_IRQ))
4064                         DRM_DEBUG_DRIVER("CP or sink specific irq unhandled\n");
4065         }
4066
4067         if (!drm_dp_channel_eq_ok(link_status, intel_dp->lane_count)) {
4068                 DRM_DEBUG_KMS("%s: channel EQ not ok, retraining\n",
4069                               intel_encoder->base.name);
4070                 intel_dp_start_link_train(intel_dp);
4071                 intel_dp_complete_link_train(intel_dp);
4072                 intel_dp_stop_link_train(intel_dp);
4073         }
4074 }
4075
4076 /* XXX this is probably wrong for multiple downstream ports */
4077 static enum drm_connector_status
4078 intel_dp_detect_dpcd(struct intel_dp *intel_dp)
4079 {
4080         uint8_t *dpcd = intel_dp->dpcd;
4081         uint8_t type;
4082
4083         if (!intel_dp_get_dpcd(intel_dp))
4084                 return connector_status_disconnected;
4085
4086         /* if there's no downstream port, we're done */
4087         if (!(dpcd[DP_DOWNSTREAMPORT_PRESENT] & DP_DWN_STRM_PORT_PRESENT))
4088                 return connector_status_connected;
4089
4090         /* If we're HPD-aware, SINK_COUNT changes dynamically */
4091         if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11 &&
4092             intel_dp->downstream_ports[0] & DP_DS_PORT_HPD) {
4093                 uint8_t reg;
4094
4095                 if (intel_dp_dpcd_read_wake(&intel_dp->aux, DP_SINK_COUNT,
4096                                             &reg, 1) < 0)
4097                         return connector_status_unknown;
4098
4099                 return DP_GET_SINK_COUNT(reg) ? connector_status_connected
4100                                               : connector_status_disconnected;
4101         }
4102
4103         /* If no HPD, poke DDC gently */
4104         if (drm_probe_ddc(&intel_dp->aux.ddc))
4105                 return connector_status_connected;
4106
4107         /* Well we tried, say unknown for unreliable port types */
4108         if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11) {
4109                 type = intel_dp->downstream_ports[0] & DP_DS_PORT_TYPE_MASK;
4110                 if (type == DP_DS_PORT_TYPE_VGA ||
4111                     type == DP_DS_PORT_TYPE_NON_EDID)
4112                         return connector_status_unknown;
4113         } else {
4114                 type = intel_dp->dpcd[DP_DOWNSTREAMPORT_PRESENT] &
4115                         DP_DWN_STRM_PORT_TYPE_MASK;
4116                 if (type == DP_DWN_STRM_PORT_TYPE_ANALOG ||
4117                     type == DP_DWN_STRM_PORT_TYPE_OTHER)
4118                         return connector_status_unknown;
4119         }
4120
4121         /* Anything else is out of spec, warn and ignore */
4122         DRM_DEBUG_KMS("Broken DP branch device, ignoring\n");
4123         return connector_status_disconnected;
4124 }
4125
4126 static enum drm_connector_status
4127 edp_detect(struct intel_dp *intel_dp)
4128 {
4129         struct drm_device *dev = intel_dp_to_dev(intel_dp);
4130         enum drm_connector_status status;
4131
4132         status = intel_panel_detect(dev);
4133         if (status == connector_status_unknown)
4134                 status = connector_status_connected;
4135
4136         return status;
4137 }
4138
4139 static enum drm_connector_status
4140 ironlake_dp_detect(struct intel_dp *intel_dp)
4141 {
4142         struct drm_device *dev = intel_dp_to_dev(intel_dp);
4143         struct drm_i915_private *dev_priv = dev->dev_private;
4144         struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
4145
4146         if (!ibx_digital_port_connected(dev_priv, intel_dig_port))
4147                 return connector_status_disconnected;
4148
4149         return intel_dp_detect_dpcd(intel_dp);
4150 }
4151
4152 static int g4x_digital_port_connected(struct drm_device *dev,
4153                                        struct intel_digital_port *intel_dig_port)
4154 {
4155         struct drm_i915_private *dev_priv = dev->dev_private;
4156         uint32_t bit;
4157
4158         if (IS_VALLEYVIEW(dev)) {
4159                 switch (intel_dig_port->port) {
4160                 case PORT_B:
4161                         bit = PORTB_HOTPLUG_LIVE_STATUS_VLV;
4162                         break;
4163                 case PORT_C:
4164                         bit = PORTC_HOTPLUG_LIVE_STATUS_VLV;
4165                         break;
4166                 case PORT_D:
4167                         bit = PORTD_HOTPLUG_LIVE_STATUS_VLV;
4168                         break;
4169                 default:
4170                         return -EINVAL;
4171                 }
4172         } else {
4173                 switch (intel_dig_port->port) {
4174                 case PORT_B:
4175                         bit = PORTB_HOTPLUG_LIVE_STATUS_G4X;
4176                         break;
4177                 case PORT_C:
4178                         bit = PORTC_HOTPLUG_LIVE_STATUS_G4X;
4179                         break;
4180                 case PORT_D:
4181                         bit = PORTD_HOTPLUG_LIVE_STATUS_G4X;
4182                         break;
4183                 default:
4184                         return -EINVAL;
4185                 }
4186         }
4187
4188         if ((I915_READ(PORT_HOTPLUG_STAT) & bit) == 0)
4189                 return 0;
4190         return 1;
4191 }
4192
4193 static enum drm_connector_status
4194 g4x_dp_detect(struct intel_dp *intel_dp)
4195 {
4196         struct drm_device *dev = intel_dp_to_dev(intel_dp);
4197         struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
4198         int ret;
4199
4200         /* Can't disconnect eDP, but you can close the lid... */
4201         if (is_edp(intel_dp)) {
4202                 enum drm_connector_status status;
4203
4204                 status = intel_panel_detect(dev);
4205                 if (status == connector_status_unknown)
4206                         status = connector_status_connected;
4207                 return status;
4208         }
4209
4210         ret = g4x_digital_port_connected(dev, intel_dig_port);
4211         if (ret == -EINVAL)
4212                 return connector_status_unknown;
4213         else if (ret == 0)
4214                 return connector_status_disconnected;
4215
4216         return intel_dp_detect_dpcd(intel_dp);
4217 }
4218
4219 static struct edid *
4220 intel_dp_get_edid(struct intel_dp *intel_dp)
4221 {
4222         struct intel_connector *intel_connector = intel_dp->attached_connector;
4223
4224         /* use cached edid if we have one */
4225         if (intel_connector->edid) {
4226                 /* invalid edid */
4227                 if (IS_ERR(intel_connector->edid))
4228                         return NULL;
4229
4230                 return drm_edid_duplicate(intel_connector->edid);
4231         } else
4232                 return drm_get_edid(&intel_connector->base,
4233                                     &intel_dp->aux.ddc);
4234 }
4235
4236 static void
4237 intel_dp_set_edid(struct intel_dp *intel_dp)
4238 {
4239         struct intel_connector *intel_connector = intel_dp->attached_connector;
4240         struct edid *edid;
4241
4242         edid = intel_dp_get_edid(intel_dp);
4243         intel_connector->detect_edid = edid;
4244
4245         if (intel_dp->force_audio != HDMI_AUDIO_AUTO)
4246                 intel_dp->has_audio = intel_dp->force_audio == HDMI_AUDIO_ON;
4247         else
4248                 intel_dp->has_audio = drm_detect_monitor_audio(edid);
4249 }
4250
4251 static void
4252 intel_dp_unset_edid(struct intel_dp *intel_dp)
4253 {
4254         struct intel_connector *intel_connector = intel_dp->attached_connector;
4255
4256         kfree(intel_connector->detect_edid);
4257         intel_connector->detect_edid = NULL;
4258
4259         intel_dp->has_audio = false;
4260 }
4261
4262 static enum intel_display_power_domain
4263 intel_dp_power_get(struct intel_dp *dp)
4264 {
4265         struct intel_encoder *encoder = &dp_to_dig_port(dp)->base;
4266         enum intel_display_power_domain power_domain;
4267
4268         power_domain = intel_display_port_power_domain(encoder);
4269         intel_display_power_get(to_i915(encoder->base.dev), power_domain);
4270
4271         return power_domain;
4272 }
4273
4274 static void
4275 intel_dp_power_put(struct intel_dp *dp,
4276                    enum intel_display_power_domain power_domain)
4277 {
4278         struct intel_encoder *encoder = &dp_to_dig_port(dp)->base;
4279         intel_display_power_put(to_i915(encoder->base.dev), power_domain);
4280 }
4281
/*
 * Connector .detect callback: determine whether a sink is present and,
 * if so, probe its OUI, MST capability and EDID. All sink access is
 * bracketed by a port power domain reference.
 */
static enum drm_connector_status
intel_dp_detect(struct drm_connector *connector, bool force)
{
	struct intel_dp *intel_dp = intel_attached_dp(connector);
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct intel_encoder *intel_encoder = &intel_dig_port->base;
	struct drm_device *dev = connector->dev;
	enum drm_connector_status status;
	enum intel_display_power_domain power_domain;
	bool ret;

	DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
		      connector->base.id, connector->name);
	/* Invalidate any EDID cached by a previous detect */
	intel_dp_unset_edid(intel_dp);

	if (intel_dp->is_mst) {
		/* MST devices are disconnected from a monitor POV */
		if (intel_encoder->type != INTEL_OUTPUT_EDP)
			intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT;
		return connector_status_disconnected;
	}

	power_domain = intel_dp_power_get(intel_dp);

	/* Can't disconnect eDP, but you can close the lid... */
	if (is_edp(intel_dp))
		status = edp_detect(intel_dp);
	else if (HAS_PCH_SPLIT(dev))
		status = ironlake_dp_detect(intel_dp);
	else
		status = g4x_dp_detect(intel_dp);
	if (status != connector_status_connected)
		goto out;

	intel_dp_probe_oui(intel_dp);

	ret = intel_dp_probe_mst(intel_dp);
	if (ret) {
		/* if we are in MST mode then this connector
		   won't appear connected or have anything with EDID on it */
		if (intel_encoder->type != INTEL_OUTPUT_EDP)
			intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT;
		status = connector_status_disconnected;
		goto out;
	}

	intel_dp_set_edid(intel_dp);

	if (intel_encoder->type != INTEL_OUTPUT_EDP)
		intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT;
	status = connector_status_connected;

out:
	intel_dp_power_put(intel_dp, power_domain);
	return status;
}
4338
4339 static void
4340 intel_dp_force(struct drm_connector *connector)
4341 {
4342         struct intel_dp *intel_dp = intel_attached_dp(connector);
4343         struct intel_encoder *intel_encoder = &dp_to_dig_port(intel_dp)->base;
4344         enum intel_display_power_domain power_domain;
4345
4346         DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
4347                       connector->base.id, connector->name);
4348         intel_dp_unset_edid(intel_dp);
4349
4350         if (connector->status != connector_status_connected)
4351                 return;
4352
4353         power_domain = intel_dp_power_get(intel_dp);
4354
4355         intel_dp_set_edid(intel_dp);
4356
4357         intel_dp_power_put(intel_dp, power_domain);
4358
4359         if (intel_encoder->type != INTEL_OUTPUT_EDP)
4360                 intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT;
4361 }
4362
4363 static int intel_dp_get_modes(struct drm_connector *connector)
4364 {
4365         struct intel_connector *intel_connector = to_intel_connector(connector);
4366         struct edid *edid;
4367
4368         edid = intel_connector->detect_edid;
4369         if (edid) {
4370                 int ret = intel_connector_update_modes(connector, edid);
4371                 if (ret)
4372                         return ret;
4373         }
4374
4375         /* if eDP has no EDID, fall back to fixed mode */
4376         if (is_edp(intel_attached_dp(connector)) &&
4377             intel_connector->panel.fixed_mode) {
4378                 struct drm_display_mode *mode;
4379
4380                 mode = drm_mode_duplicate(connector->dev,
4381                                           intel_connector->panel.fixed_mode);
4382                 if (mode) {
4383                         drm_mode_probed_add(connector, mode);
4384                         return 1;
4385                 }
4386         }
4387
4388         return 0;
4389 }
4390
4391 static bool
4392 intel_dp_detect_audio(struct drm_connector *connector)
4393 {
4394         bool has_audio = false;
4395         struct edid *edid;
4396
4397         edid = to_intel_connector(connector)->detect_edid;
4398         if (edid)
4399                 has_audio = drm_detect_monitor_audio(edid);
4400
4401         return has_audio;
4402 }
4403
/*
 * Connector .set_property callback: handles force-audio, broadcast-RGB
 * and (eDP only) panel scaling-mode properties. Any actual change falls
 * through to "done", which restores the mode on the active CRTC so the
 * new setting takes effect. Returns 0 on success or a negative errno.
 */
static int
intel_dp_set_property(struct drm_connector *connector,
		      struct drm_property *property,
		      uint64_t val)
{
	struct drm_i915_private *dev_priv = connector->dev->dev_private;
	struct intel_connector *intel_connector = to_intel_connector(connector);
	struct intel_encoder *intel_encoder = intel_attached_encoder(connector);
	struct intel_dp *intel_dp = enc_to_intel_dp(&intel_encoder->base);
	int ret;

	/* Record the new value on the DRM object first */
	ret = drm_object_property_set_value(&connector->base, property, val);
	if (ret)
		return ret;

	if (property == dev_priv->force_audio_property) {
		int i = val;
		bool has_audio;

		if (i == intel_dp->force_audio)
			return 0;

		intel_dp->force_audio = i;

		if (i == HDMI_AUDIO_AUTO)
			has_audio = intel_dp_detect_audio(connector);
		else
			has_audio = (i == HDMI_AUDIO_ON);

		/* No modeset needed if the effective audio state is unchanged */
		if (has_audio == intel_dp->has_audio)
			return 0;

		intel_dp->has_audio = has_audio;
		goto done;
	}

	if (property == dev_priv->broadcast_rgb_property) {
		bool old_auto = intel_dp->color_range_auto;
		uint32_t old_range = intel_dp->color_range;

		switch (val) {
		case INTEL_BROADCAST_RGB_AUTO:
			intel_dp->color_range_auto = true;
			break;
		case INTEL_BROADCAST_RGB_FULL:
			intel_dp->color_range_auto = false;
			intel_dp->color_range = 0;
			break;
		case INTEL_BROADCAST_RGB_LIMITED:
			intel_dp->color_range_auto = false;
			intel_dp->color_range = DP_COLOR_RANGE_16_235;
			break;
		default:
			return -EINVAL;
		}

		/* No modeset needed if the effective range is unchanged */
		if (old_auto == intel_dp->color_range_auto &&
		    old_range == intel_dp->color_range)
			return 0;

		goto done;
	}

	if (is_edp(intel_dp) &&
	    property == connector->dev->mode_config.scaling_mode_property) {
		if (val == DRM_MODE_SCALE_NONE) {
			DRM_DEBUG_KMS("no scaling not supported\n");
			return -EINVAL;
		}

		if (intel_connector->panel.fitting_mode == val) {
			/* the eDP scaling property is not changed */
			return 0;
		}
		intel_connector->panel.fitting_mode = val;

		goto done;
	}

	return -EINVAL;

done:
	/* Apply the change by restoring the current mode on the CRTC */
	if (intel_encoder->base.crtc)
		intel_crtc_restore_mode(intel_encoder->base.crtc);

	return 0;
}
4491
/*
 * Connector .destroy callback: free cached EDIDs, tear down eDP panel
 * state, and release the connector itself.
 */
static void
intel_dp_connector_destroy(struct drm_connector *connector)
{
	struct intel_connector *intel_connector = to_intel_connector(connector);

	kfree(intel_connector->detect_edid);

	/* edid may hold an ERR_PTR marker, which must not be kfree'd */
	if (!IS_ERR_OR_NULL(intel_connector->edid))
		kfree(intel_connector->edid);

	/* Can't call is_edp() since the encoder may have been destroyed
	 * already. */
	if (connector->connector_type == DRM_MODE_CONNECTOR_eDP)
		intel_panel_fini(&intel_connector->panel);

	drm_connector_cleanup(connector);
	kfree(connector);
}
4510
/*
 * Encoder .destroy callback: unregister AUX, clean up MST state, force
 * eDP panel VDD off (cancelling the delayed-off work first), drop the
 * reboot notifier, then free the digital port.
 */
void intel_dp_encoder_destroy(struct drm_encoder *encoder)
{
	struct intel_digital_port *intel_dig_port = enc_to_dig_port(encoder);
	struct intel_dp *intel_dp = &intel_dig_port->dp;

	drm_dp_aux_unregister(&intel_dp->aux);
	intel_dp_mst_encoder_cleanup(intel_dig_port);
	if (is_edp(intel_dp)) {
		cancel_delayed_work_sync(&intel_dp->panel_vdd_work);
		/*
		 * vdd might still be enabled due to the delayed vdd off.
		 * Make sure vdd is actually turned off here.
		 */
		pps_lock(intel_dp);
		edp_panel_vdd_off_sync(intel_dp);
		pps_unlock(intel_dp);

		if (intel_dp->edp_notifier.notifier_call) {
			unregister_reboot_notifier(&intel_dp->edp_notifier);
			intel_dp->edp_notifier.notifier_call = NULL;
		}
	}
	drm_encoder_cleanup(encoder);
	kfree(intel_dig_port);
}
4536
/*
 * Encoder suspend hook: for eDP, make sure panel VDD is really off
 * before the system suspends (the delayed-off work may still be queued).
 */
static void intel_dp_encoder_suspend(struct intel_encoder *intel_encoder)
{
	struct intel_dp *intel_dp = enc_to_intel_dp(&intel_encoder->base);

	if (!is_edp(intel_dp))
		return;

	/*
	 * vdd might still be enabled due to the delayed vdd off.
	 * Make sure vdd is actually turned off here.
	 */
	cancel_delayed_work_sync(&intel_dp->panel_vdd_work);
	pps_lock(intel_dp);
	edp_panel_vdd_off_sync(intel_dp);
	pps_unlock(intel_dp);
}
4553
/*
 * Reconcile driver state with panel VDD left enabled by the BIOS: take
 * the power domain reference our tracking expects and schedule the
 * deferred VDD off. Caller must hold pps_mutex.
 */
static void intel_edp_panel_vdd_sanitize(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = intel_dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum intel_display_power_domain power_domain;

	lockdep_assert_held(&dev_priv->pps_mutex);

	/* Nothing to sanitize if VDD is already off */
	if (!edp_have_panel_vdd(intel_dp))
		return;

	/*
	 * The VDD bit needs a power domain reference, so if the bit is
	 * already enabled when we boot or resume, grab this reference and
	 * schedule a vdd off, so we don't hold on to the reference
	 * indefinitely.
	 */
	DRM_DEBUG_KMS("VDD left on by BIOS, adjusting state tracking\n");
	power_domain = intel_display_port_power_domain(&intel_dig_port->base);
	intel_display_power_get(dev_priv, power_domain);

	edp_panel_vdd_schedule_off(intel_dp);
}
4578
/*
 * Encoder .reset callback (boot/resume): for eDP, re-read the power
 * sequencer assignment the BIOS may have changed and sanitize any
 * VDD state it left behind. All under the pps lock.
 */
static void intel_dp_encoder_reset(struct drm_encoder *encoder)
{
	struct intel_dp *intel_dp;

	if (to_intel_encoder(encoder)->type != INTEL_OUTPUT_EDP)
		return;

	intel_dp = enc_to_intel_dp(encoder);

	pps_lock(intel_dp);

	/*
	 * Read out the current power sequencer assignment,
	 * in case the BIOS did something with it.
	 */
	if (IS_VALLEYVIEW(encoder->dev))
		vlv_initial_power_sequencer_setup(intel_dp);

	intel_edp_panel_vdd_sanitize(intel_dp);

	pps_unlock(intel_dp);
}
4601
/* Userspace-facing connector operations for DP/eDP connectors. */
static const struct drm_connector_funcs intel_dp_connector_funcs = {
	.dpms = intel_connector_dpms,
	.detect = intel_dp_detect,
	.force = intel_dp_force,
	.fill_modes = drm_helper_probe_single_connector_modes,
	.set_property = intel_dp_set_property,
	.atomic_get_property = intel_connector_atomic_get_property,
	.destroy = intel_dp_connector_destroy,
	.atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
	.atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
};
4613
/* Probe helpers used by the DRM core for mode enumeration/validation. */
static const struct drm_connector_helper_funcs intel_dp_connector_helper_funcs = {
	.get_modes = intel_dp_get_modes,
	.mode_valid = intel_dp_mode_valid,
	.best_encoder = intel_best_encoder,
};
4619
/* Encoder lifetime callbacks (reset on boot/resume, destroy on teardown). */
static const struct drm_encoder_funcs intel_dp_enc_funcs = {
	.reset = intel_dp_encoder_reset,
	.destroy = intel_dp_encoder_destroy,
};
4624
void
intel_dp_hot_plug(struct intel_encoder *intel_encoder)
{
	/*
	 * Intentionally empty: DP hotplug handling happens in
	 * intel_dp_hpd_pulse() instead of this legacy hook.
	 */
}
4630
/*
 * Handle a long or short HPD pulse on a DP digital port.
 *
 * Grabs the port's display power domain for the duration, probes the sink
 * (DPCD, OUI, MST capability) on long pulses, and services MST IRQs or
 * checks link status on short pulses.  Returns IRQ_HANDLED when the pulse
 * was consumed, IRQ_NONE otherwise.
 */
enum irqreturn
intel_dp_hpd_pulse(struct intel_digital_port *intel_dig_port, bool long_hpd)
{
        struct intel_dp *intel_dp = &intel_dig_port->dp;
        struct intel_encoder *intel_encoder = &intel_dig_port->base;
        struct drm_device *dev = intel_dig_port->base.base.dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        enum intel_display_power_domain power_domain;
        enum irqreturn ret = IRQ_NONE;

        /* Reset a possibly stale MST-induced type back to plain DP;
         * eDP ports keep their type. */
        if (intel_dig_port->base.type != INTEL_OUTPUT_EDP)
                intel_dig_port->base.type = INTEL_OUTPUT_DISPLAYPORT;

        if (long_hpd && intel_dig_port->base.type == INTEL_OUTPUT_EDP) {
                /*
                 * vdd off can generate a long pulse on eDP which
                 * would require vdd on to handle it, and thus we
                 * would end up in an endless cycle of
                 * "vdd off -> long hpd -> vdd on -> detect -> vdd off -> ..."
                 */
                DRM_DEBUG_KMS("ignoring long hpd on eDP port %c\n",
                              port_name(intel_dig_port->port));
                return IRQ_HANDLED;
        }

        DRM_DEBUG_KMS("got hpd irq on port %c - %s\n",
                      port_name(intel_dig_port->port),
                      long_hpd ? "long" : "short");

        /* AUX transfers below need the port powered. */
        power_domain = intel_display_port_power_domain(intel_encoder);
        intel_display_power_get(dev_priv, power_domain);

        if (long_hpd) {

                /* Verify something is actually connected before probing. */
                if (HAS_PCH_SPLIT(dev)) {
                        if (!ibx_digital_port_connected(dev_priv, intel_dig_port))
                                goto mst_fail;
                } else {
                        if (g4x_digital_port_connected(dev, intel_dig_port) != 1)
                                goto mst_fail;
                }

                if (!intel_dp_get_dpcd(intel_dp)) {
                        goto mst_fail;
                }

                intel_dp_probe_oui(intel_dp);

                /* Returns false for non-MST sinks; fall through to tear
                 * down any stale MST state in that case. */
                if (!intel_dp_probe_mst(intel_dp))
                        goto mst_fail;

        } else {
                if (intel_dp->is_mst) {
                        if (intel_dp_check_mst_status(intel_dp) == -EINVAL)
                                goto mst_fail;
                }

                if (!intel_dp->is_mst) {
                        /*
                         * we'll check the link status via the normal hot plug path later -
                         * but for short hpds we should check it now
                         */
                        drm_modeset_lock(&dev->mode_config.connection_mutex, NULL);
                        intel_dp_check_link_status(intel_dp);
                        drm_modeset_unlock(&dev->mode_config.connection_mutex);
                }
        }

        ret = IRQ_HANDLED;

        goto put_power;
mst_fail:
        /* if we were in MST mode, and device is not there get out of MST mode */
        if (intel_dp->is_mst) {
                DRM_DEBUG_KMS("MST device may have disappeared %d vs %d\n", intel_dp->is_mst, intel_dp->mst_mgr.mst_state);
                intel_dp->is_mst = false;
                drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr, intel_dp->is_mst);
        }
put_power:
        intel_display_power_put(dev_priv, power_domain);

        return ret;
}
4714
4715 /* Return which DP Port should be selected for Transcoder DP control */
4716 int
4717 intel_trans_dp_port_sel(struct drm_crtc *crtc)
4718 {
4719         struct drm_device *dev = crtc->dev;
4720         struct intel_encoder *intel_encoder;
4721         struct intel_dp *intel_dp;
4722
4723         for_each_encoder_on_crtc(dev, crtc, intel_encoder) {
4724                 intel_dp = enc_to_intel_dp(&intel_encoder->base);
4725
4726                 if (intel_encoder->type == INTEL_OUTPUT_DISPLAYPORT ||
4727                     intel_encoder->type == INTEL_OUTPUT_EDP)
4728                         return intel_dp->output_reg;
4729         }
4730
4731         return -1;
4732 }
4733
4734 /* check the VBT to see whether the eDP is on DP-D port */
4735 bool intel_dp_is_edp(struct drm_device *dev, enum port port)
4736 {
4737         struct drm_i915_private *dev_priv = dev->dev_private;
4738         union child_device_config *p_child;
4739         int i;
4740         static const short port_mapping[] = {
4741                 [PORT_B] = PORT_IDPB,
4742                 [PORT_C] = PORT_IDPC,
4743                 [PORT_D] = PORT_IDPD,
4744         };
4745
4746         if (port == PORT_A)
4747                 return true;
4748
4749         if (!dev_priv->vbt.child_dev_num)
4750                 return false;
4751
4752         for (i = 0; i < dev_priv->vbt.child_dev_num; i++) {
4753                 p_child = dev_priv->vbt.child_dev + i;
4754
4755                 if (p_child->common.dvo_port == port_mapping[port] &&
4756                     (p_child->common.device_type & DEVICE_TYPE_eDP_BITS) ==
4757                     (DEVICE_TYPE_eDP & DEVICE_TYPE_eDP_BITS))
4758                         return true;
4759         }
4760         return false;
4761 }
4762
4763 void
4764 intel_dp_add_properties(struct intel_dp *intel_dp, struct drm_connector *connector)
4765 {
4766         struct intel_connector *intel_connector = to_intel_connector(connector);
4767
4768         intel_attach_force_audio_property(connector);
4769         intel_attach_broadcast_rgb_property(connector);
4770         intel_dp->color_range_auto = true;
4771
4772         if (is_edp(intel_dp)) {
4773                 drm_mode_create_scaling_mode_property(connector->dev);
4774                 drm_object_attach_property(
4775                         &connector->base,
4776                         connector->dev->mode_config.scaling_mode_property,
4777                         DRM_MODE_SCALE_ASPECT);
4778                 intel_connector->panel.fitting_mode = DRM_MODE_SCALE_ASPECT;
4779         }
4780 }
4781
4782 static void intel_dp_init_panel_power_timestamps(struct intel_dp *intel_dp)
4783 {
4784         intel_dp->last_power_cycle = jiffies;
4785         intel_dp->last_power_on = jiffies;
4786         intel_dp->last_backlight_off = jiffies;
4787 }
4788
/*
 * Compute the eDP panel power sequencing delays (intel_dp->pps_delays) by
 * taking the max of the current hardware register values and the VBT, and
 * falling back to the eDP spec limits when both are zero.  Also derives the
 * millisecond delay values used by the software waits.  No-op if the delays
 * were already initialized.  Caller must hold dev_priv->pps_mutex.
 */
static void
intel_dp_init_panel_power_sequencer(struct drm_device *dev,
                                    struct intel_dp *intel_dp)
{
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct edp_power_seq cur, vbt, spec,
                *final = &intel_dp->pps_delays;
        u32 pp_on, pp_off, pp_div, pp;
        int pp_ctrl_reg, pp_on_reg, pp_off_reg, pp_div_reg;

        lockdep_assert_held(&dev_priv->pps_mutex);

        /* already initialized? */
        if (final->t11_t12 != 0)
                return;

        /* Pick the PPS register block: PCH platforms have a single set,
         * VLV/CHV have one per pipe. */
        if (HAS_PCH_SPLIT(dev)) {
                pp_ctrl_reg = PCH_PP_CONTROL;
                pp_on_reg = PCH_PP_ON_DELAYS;
                pp_off_reg = PCH_PP_OFF_DELAYS;
                pp_div_reg = PCH_PP_DIVISOR;
        } else {
                enum pipe pipe = vlv_power_sequencer_pipe(intel_dp);

                pp_ctrl_reg = VLV_PIPE_PP_CONTROL(pipe);
                pp_on_reg = VLV_PIPE_PP_ON_DELAYS(pipe);
                pp_off_reg = VLV_PIPE_PP_OFF_DELAYS(pipe);
                pp_div_reg = VLV_PIPE_PP_DIVISOR(pipe);
        }

        /* Workaround: Need to write PP_CONTROL with the unlock key as
         * the very first thing. */
        pp = ironlake_get_pp_control(intel_dp);
        I915_WRITE(pp_ctrl_reg, pp);

        pp_on = I915_READ(pp_on_reg);
        pp_off = I915_READ(pp_off_reg);
        pp_div = I915_READ(pp_div_reg);

        /* Pull timing values out of registers */
        cur.t1_t3 = (pp_on & PANEL_POWER_UP_DELAY_MASK) >>
                PANEL_POWER_UP_DELAY_SHIFT;

        cur.t8 = (pp_on & PANEL_LIGHT_ON_DELAY_MASK) >>
                PANEL_LIGHT_ON_DELAY_SHIFT;

        cur.t9 = (pp_off & PANEL_LIGHT_OFF_DELAY_MASK) >>
                PANEL_LIGHT_OFF_DELAY_SHIFT;

        cur.t10 = (pp_off & PANEL_POWER_DOWN_DELAY_MASK) >>
                PANEL_POWER_DOWN_DELAY_SHIFT;

        /* t11_t12 is stored in units of 100ms in the divisor register;
         * scale to the common 100us units used below. */
        cur.t11_t12 = ((pp_div & PANEL_POWER_CYCLE_DELAY_MASK) >>
                       PANEL_POWER_CYCLE_DELAY_SHIFT) * 1000;

        DRM_DEBUG_KMS("cur t1_t3 %d t8 %d t9 %d t10 %d t11_t12 %d\n",
                      cur.t1_t3, cur.t8, cur.t9, cur.t10, cur.t11_t12);

        vbt = dev_priv->vbt.edp_pps;

        /* Upper limits from eDP 1.3 spec. Note that we use the clunky units of
         * our hw here, which are all in 100usec. */
        spec.t1_t3 = 210 * 10;
        spec.t8 = 50 * 10; /* no limit for t8, use t7 instead */
        spec.t9 = 50 * 10; /* no limit for t9, make it symmetric with t8 */
        spec.t10 = 500 * 10;
        /* This one is special and actually in units of 100ms, but zero
         * based in the hw (so we need to add 100 ms). But the sw vbt
         * table multiplies it with 1000 to make it in units of 100usec,
         * too. */
        spec.t11_t12 = (510 + 100) * 10;

        DRM_DEBUG_KMS("vbt t1_t3 %d t8 %d t9 %d t10 %d t11_t12 %d\n",
                      vbt.t1_t3, vbt.t8, vbt.t9, vbt.t10, vbt.t11_t12);

        /* Use the max of the register settings and vbt. If both are
         * unset, fall back to the spec limits. */
#define assign_final(field)     final->field = (max(cur.field, vbt.field) == 0 ? \
                                       spec.field : \
                                       max(cur.field, vbt.field))
        assign_final(t1_t3);
        assign_final(t8);
        assign_final(t9);
        assign_final(t10);
        assign_final(t11_t12);
#undef assign_final

        /* Convert from 100us register units to ms, rounding up. */
#define get_delay(field)        (DIV_ROUND_UP(final->field, 10))
        intel_dp->panel_power_up_delay = get_delay(t1_t3);
        intel_dp->backlight_on_delay = get_delay(t8);
        intel_dp->backlight_off_delay = get_delay(t9);
        intel_dp->panel_power_down_delay = get_delay(t10);
        intel_dp->panel_power_cycle_delay = get_delay(t11_t12);
#undef get_delay

        DRM_DEBUG_KMS("panel power up delay %d, power down delay %d, power cycle delay %d\n",
                      intel_dp->panel_power_up_delay, intel_dp->panel_power_down_delay,
                      intel_dp->panel_power_cycle_delay);

        DRM_DEBUG_KMS("backlight on delay %d, off delay %d\n",
                      intel_dp->backlight_on_delay, intel_dp->backlight_off_delay);
}
4891
/*
 * Program the panel power sequencer registers from the previously computed
 * intel_dp->pps_delays, including the reference-clock divisor and (where the
 * hardware has one) the port select field.  Caller must hold
 * dev_priv->pps_mutex.
 */
static void
intel_dp_init_panel_power_sequencer_registers(struct drm_device *dev,
                                              struct intel_dp *intel_dp)
{
        struct drm_i915_private *dev_priv = dev->dev_private;
        u32 pp_on, pp_off, pp_div, port_sel = 0;
        /* Raw clock in kHz feeding the PPS reference divider. */
        int div = HAS_PCH_SPLIT(dev) ? intel_pch_rawclk(dev) : intel_hrawclk(dev);
        int pp_on_reg, pp_off_reg, pp_div_reg;
        enum port port = dp_to_dig_port(intel_dp)->port;
        const struct edp_power_seq *seq = &intel_dp->pps_delays;

        lockdep_assert_held(&dev_priv->pps_mutex);

        if (HAS_PCH_SPLIT(dev)) {
                pp_on_reg = PCH_PP_ON_DELAYS;
                pp_off_reg = PCH_PP_OFF_DELAYS;
                pp_div_reg = PCH_PP_DIVISOR;
        } else {
                enum pipe pipe = vlv_power_sequencer_pipe(intel_dp);

                pp_on_reg = VLV_PIPE_PP_ON_DELAYS(pipe);
                pp_off_reg = VLV_PIPE_PP_OFF_DELAYS(pipe);
                pp_div_reg = VLV_PIPE_PP_DIVISOR(pipe);
        }

        /*
         * And finally store the new values in the power sequencer. The
         * backlight delays are set to 1 because we do manual waits on them. For
         * T8, even BSpec recommends doing it. For T9, if we don't do this,
         * we'll end up waiting for the backlight off delay twice: once when we
         * do the manual sleep, and once when we disable the panel and wait for
         * the PP_STATUS bit to become zero.
         */
        pp_on = (seq->t1_t3 << PANEL_POWER_UP_DELAY_SHIFT) |
                (1 << PANEL_LIGHT_ON_DELAY_SHIFT);
        pp_off = (1 << PANEL_LIGHT_OFF_DELAY_SHIFT) |
                 (seq->t10 << PANEL_POWER_DOWN_DELAY_SHIFT);
        /* Compute the divisor for the pp clock, simply match the Bspec
         * formula. */
        pp_div = ((100 * div)/2 - 1) << PP_REFERENCE_DIVIDER_SHIFT;
        pp_div |= (DIV_ROUND_UP(seq->t11_t12, 1000)
                        << PANEL_POWER_CYCLE_DELAY_SHIFT);

        /* Haswell doesn't have any port selection bits for the panel
         * power sequencer any more. */
        if (IS_VALLEYVIEW(dev)) {
                port_sel = PANEL_PORT_SELECT_VLV(port);
        } else if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev)) {
                if (port == PORT_A)
                        port_sel = PANEL_PORT_SELECT_DPA;
                else
                        port_sel = PANEL_PORT_SELECT_DPD;
        }

        pp_on |= port_sel;

        I915_WRITE(pp_on_reg, pp_on);
        I915_WRITE(pp_off_reg, pp_off);
        I915_WRITE(pp_div_reg, pp_div);

        /* Read back for the debug print so we log what the hw latched. */
        DRM_DEBUG_KMS("panel power sequencer register settings: PP_ON %#x, PP_OFF %#x, PP_DIV %#x\n",
                      I915_READ(pp_on_reg),
                      I915_READ(pp_off_reg),
                      I915_READ(pp_div_reg));
}
4957
/**
 * intel_dp_set_drrs_state - program registers for RR switch to take effect
 * @dev: DRM device
 * @refresh_rate: RR to be programmed
 *
 * This function gets called when refresh rate (RR) has to be changed from
 * one frequency to another. Switches can be between high and low RR
 * supported by the panel or to any other RR based on media playback (in
 * this case, RR value needs to be passed from user space).
 *
 * The caller of this function needs to take a lock on dev_priv->drrs.
 */
static void intel_dp_set_drrs_state(struct drm_device *dev, int refresh_rate)
{
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct intel_encoder *encoder;
        struct intel_digital_port *dig_port = NULL;
        struct intel_dp *intel_dp = dev_priv->drrs.dp;
        struct intel_crtc_state *config = NULL;
        struct intel_crtc *intel_crtc = NULL;
        u32 reg, val;
        enum drrs_refresh_rate_type index = DRRS_HIGH_RR;

        if (refresh_rate <= 0) {
                DRM_DEBUG_KMS("Refresh rate should be positive non-zero.\n");
                return;
        }

        if (intel_dp == NULL) {
                DRM_DEBUG_KMS("DRRS not supported.\n");
                return;
        }

        /*
         * FIXME: This needs proper synchronization with psr state for some
         * platforms that cannot have PSR and DRRS enabled at the same time.
         */

        dig_port = dp_to_dig_port(intel_dp);
        encoder = &dig_port->base;
        intel_crtc = to_intel_crtc(encoder->base.crtc);

        if (!intel_crtc) {
                DRM_DEBUG_KMS("DRRS: intel_crtc not initialized\n");
                return;
        }

        config = intel_crtc->config;

        if (dev_priv->drrs.type < SEAMLESS_DRRS_SUPPORT) {
                DRM_DEBUG_KMS("Only Seamless DRRS supported.\n");
                return;
        }

        /* A request matching the downclock mode's vrefresh selects low RR;
         * anything else selects high RR. */
        if (intel_dp->attached_connector->panel.downclock_mode->vrefresh ==
                        refresh_rate)
                index = DRRS_LOW_RR;

        if (index == dev_priv->drrs.refresh_rate_type) {
                DRM_DEBUG_KMS(
                        "DRRS requested for previously set RR...ignoring\n");
                return;
        }

        if (!intel_crtc->active) {
                DRM_DEBUG_KMS("eDP encoder disabled. CRTC not Active\n");
                return;
        }

        /* Gen8+ (except CHV) switch RR by selecting the M/N divider set;
         * Gen7 toggles the RR-mode-switch bit in PIPECONF instead. */
        if (INTEL_INFO(dev)->gen >= 8 && !IS_CHERRYVIEW(dev)) {
                switch (index) {
                case DRRS_HIGH_RR:
                        intel_dp_set_m_n(intel_crtc, M1_N1);
                        break;
                case DRRS_LOW_RR:
                        intel_dp_set_m_n(intel_crtc, M2_N2);
                        break;
                case DRRS_MAX_RR:
                default:
                        DRM_ERROR("Unsupported refreshrate type\n");
                }
        } else if (INTEL_INFO(dev)->gen > 6) {
                reg = PIPECONF(intel_crtc->config->cpu_transcoder);
                val = I915_READ(reg);

                if (index > DRRS_HIGH_RR) {
                        if (IS_VALLEYVIEW(dev))
                                val |= PIPECONF_EDP_RR_MODE_SWITCH_VLV;
                        else
                                val |= PIPECONF_EDP_RR_MODE_SWITCH;
                } else {
                        if (IS_VALLEYVIEW(dev))
                                val &= ~PIPECONF_EDP_RR_MODE_SWITCH_VLV;
                        else
                                val &= ~PIPECONF_EDP_RR_MODE_SWITCH;
                }
                I915_WRITE(reg, val);
        }

        dev_priv->drrs.refresh_rate_type = index;

        DRM_DEBUG_KMS("eDP Refresh Rate set to : %dHz\n", refresh_rate);
}
5061
5062 /**
5063  * intel_edp_drrs_enable - init drrs struct if supported
5064  * @intel_dp: DP struct
5065  *
5066  * Initializes frontbuffer_bits and drrs.dp
5067  */
5068 void intel_edp_drrs_enable(struct intel_dp *intel_dp)
5069 {
5070         struct drm_device *dev = intel_dp_to_dev(intel_dp);
5071         struct drm_i915_private *dev_priv = dev->dev_private;
5072         struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
5073         struct drm_crtc *crtc = dig_port->base.base.crtc;
5074         struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
5075
5076         if (!intel_crtc->config->has_drrs) {
5077                 DRM_DEBUG_KMS("Panel doesn't support DRRS\n");
5078                 return;
5079         }
5080
5081         mutex_lock(&dev_priv->drrs.mutex);
5082         if (WARN_ON(dev_priv->drrs.dp)) {
5083                 DRM_ERROR("DRRS already enabled\n");
5084                 goto unlock;
5085         }
5086
5087         dev_priv->drrs.busy_frontbuffer_bits = 0;
5088
5089         dev_priv->drrs.dp = intel_dp;
5090
5091 unlock:
5092         mutex_unlock(&dev_priv->drrs.mutex);
5093 }
5094
/**
 * intel_edp_drrs_disable - Disable DRRS
 * @intel_dp: DP struct
 *
 * Restores the fixed (high) refresh rate if we are in low RR, detaches
 * @intel_dp from the DRRS state and cancels the downclock work.
 */
void intel_edp_drrs_disable(struct intel_dp *intel_dp)
{
        struct drm_device *dev = intel_dp_to_dev(intel_dp);
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
        struct drm_crtc *crtc = dig_port->base.base.crtc;
        struct intel_crtc *intel_crtc = to_intel_crtc(crtc);

        if (!intel_crtc->config->has_drrs)
                return;

        mutex_lock(&dev_priv->drrs.mutex);
        if (!dev_priv->drrs.dp) {
                mutex_unlock(&dev_priv->drrs.mutex);
                return;
        }

        /* Back to the fixed mode's refresh rate before tearing down. */
        if (dev_priv->drrs.refresh_rate_type == DRRS_LOW_RR)
                intel_dp_set_drrs_state(dev_priv->dev,
                        intel_dp->attached_connector->panel.
                        fixed_mode->vrefresh);

        dev_priv->drrs.dp = NULL;
        mutex_unlock(&dev_priv->drrs.mutex);

        /* Cancel outside the mutex: the work item itself takes
         * drrs.mutex, so a sync cancel under the lock would deadlock. */
        cancel_delayed_work_sync(&dev_priv->drrs.work);
}
5127
/*
 * Delayed work that drops to the panel's downclocked refresh rate after the
 * screen has been idle (scheduled from intel_edp_drrs_flush()).  Re-checks
 * under drrs.mutex that DRRS is still enabled and no frontbuffer is busy.
 */
static void intel_edp_drrs_downclock_work(struct work_struct *work)
{
        struct drm_i915_private *dev_priv =
                container_of(work, typeof(*dev_priv), drrs.work.work);
        struct intel_dp *intel_dp;

        mutex_lock(&dev_priv->drrs.mutex);

        intel_dp = dev_priv->drrs.dp;

        if (!intel_dp)
                goto unlock;

        /*
         * The delayed work can race with an invalidate hence we need to
         * recheck.
         */

        if (dev_priv->drrs.busy_frontbuffer_bits)
                goto unlock;

        if (dev_priv->drrs.refresh_rate_type != DRRS_LOW_RR)
                intel_dp_set_drrs_state(dev_priv->dev,
                        intel_dp->attached_connector->panel.
                        downclock_mode->vrefresh);

unlock:
        mutex_unlock(&dev_priv->drrs.mutex);
}
5157
/**
 * intel_edp_drrs_invalidate - Invalidate DRRS
 * @dev: DRM device
 * @frontbuffer_bits: frontbuffer plane tracking bits
 *
 * When there is a disturbance on screen (due to cursor movement/time
 * update etc), DRRS needs to be invalidated, i.e. need to switch to
 * high RR.
 *
 * Dirty frontbuffers relevant to DRRS are tracked in busy_frontbuffer_bits.
 */
void intel_edp_drrs_invalidate(struct drm_device *dev,
                unsigned frontbuffer_bits)
{
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct drm_crtc *crtc;
        enum pipe pipe;

        if (dev_priv->drrs.type == DRRS_NOT_SUPPORTED)
                return;

        /* Stop a pending downclock; non-sync cancel is fine since the work
         * re-checks busy_frontbuffer_bits under the mutex anyway. */
        cancel_delayed_work(&dev_priv->drrs.work);

        mutex_lock(&dev_priv->drrs.mutex);
        if (!dev_priv->drrs.dp) {
                mutex_unlock(&dev_priv->drrs.mutex);
                return;
        }

        crtc = dp_to_dig_port(dev_priv->drrs.dp)->base.base.crtc;
        pipe = to_intel_crtc(crtc)->pipe;

        /* Activity on screen: make sure we are at the high (fixed) RR. */
        if (dev_priv->drrs.refresh_rate_type == DRRS_LOW_RR) {
                intel_dp_set_drrs_state(dev_priv->dev,
                                dev_priv->drrs.dp->attached_connector->panel.
                                fixed_mode->vrefresh);
        }

        /* Only track dirty bits belonging to the DRRS pipe. */
        frontbuffer_bits &= INTEL_FRONTBUFFER_ALL_MASK(pipe);

        dev_priv->drrs.busy_frontbuffer_bits |= frontbuffer_bits;
        mutex_unlock(&dev_priv->drrs.mutex);
}
5201
/**
 * intel_edp_drrs_flush - Flush DRRS
 * @dev: DRM device
 * @frontbuffer_bits: frontbuffer plane tracking bits
 *
 * When there is no movement on screen, DRRS work can be scheduled.
 * This DRRS work is responsible for setting relevant registers after a
 * timeout of 1 second.
 *
 * Dirty frontbuffers relevant to DRRS are tracked in busy_frontbuffer_bits.
 */
void intel_edp_drrs_flush(struct drm_device *dev,
                unsigned frontbuffer_bits)
{
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct drm_crtc *crtc;
        enum pipe pipe;

        if (dev_priv->drrs.type == DRRS_NOT_SUPPORTED)
                return;

        /* Restart the idle timer from scratch. */
        cancel_delayed_work(&dev_priv->drrs.work);

        mutex_lock(&dev_priv->drrs.mutex);
        if (!dev_priv->drrs.dp) {
                mutex_unlock(&dev_priv->drrs.mutex);
                return;
        }

        crtc = dp_to_dig_port(dev_priv->drrs.dp)->base.base.crtc;
        pipe = to_intel_crtc(crtc)->pipe;
        dev_priv->drrs.busy_frontbuffer_bits &= ~frontbuffer_bits;

        /* Once everything is flushed and we're at high RR, arm the
         * downclock work to kick in after 1s of idleness. */
        if (dev_priv->drrs.refresh_rate_type != DRRS_LOW_RR &&
                        !dev_priv->drrs.busy_frontbuffer_bits)
                schedule_delayed_work(&dev_priv->drrs.work,
                                msecs_to_jiffies(1000));
        mutex_unlock(&dev_priv->drrs.mutex);
}
5241
5242 /**
5243  * DOC: Display Refresh Rate Switching (DRRS)
5244  *
5245  * Display Refresh Rate Switching (DRRS) is a power conservation feature
 * which enables switching between low and high refresh rates,
5247  * dynamically, based on the usage scenario. This feature is applicable
5248  * for internal panels.
5249  *
5250  * Indication that the panel supports DRRS is given by the panel EDID, which
5251  * would list multiple refresh rates for one resolution.
5252  *
5253  * DRRS is of 2 types - static and seamless.
5254  * Static DRRS involves changing refresh rate (RR) by doing a full modeset
5255  * (may appear as a blink on screen) and is used in dock-undock scenario.
5256  * Seamless DRRS involves changing RR without any visual effect to the user
5257  * and can be used during normal system usage. This is done by programming
5258  * certain registers.
5259  *
5260  * Support for static/seamless DRRS may be indicated in the VBT based on
5261  * inputs from the panel spec.
5262  *
5263  * DRRS saves power by switching to low RR based on usage scenarios.
5264  *
5265  * eDP DRRS:-
5266  *        The implementation is based on frontbuffer tracking implementation.
5267  * When there is a disturbance on the screen triggered by user activity or a
5268  * periodic system activity, DRRS is disabled (RR is changed to high RR).
5269  * When there is no movement on screen, after a timeout of 1 second, a switch
5270  * to low RR is made.
5271  *        For integration with frontbuffer tracking code,
5272  * intel_edp_drrs_invalidate() and intel_edp_drrs_flush() are called.
5273  *
5274  * DRRS can be further extended to support other internal panels and also
5275  * the scenario of video playback wherein RR is set based on the rate
5276  * requested by userspace.
5277  */
5278
/**
 * intel_dp_drrs_init - Init basic DRRS work and mutex.
 * @intel_connector: eDP connector
 * @fixed_mode: preferred mode of panel
 *
 * This function is called only once at driver load to initialize basic
 * DRRS stuff.
 *
 * Returns:
 * Downclock mode if panel supports it, else return NULL.
 * DRRS support is determined by the presence of downclock mode (apart
 * from VBT setting).
 */
static struct drm_display_mode *
intel_dp_drrs_init(struct intel_connector *intel_connector,
                struct drm_display_mode *fixed_mode)
{
        struct drm_connector *connector = &intel_connector->base;
        struct drm_device *dev = connector->dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct drm_display_mode *downclock_mode = NULL;

        /* Work/mutex are set up unconditionally so the other DRRS entry
         * points are safe to call even when support checks below fail. */
        INIT_DELAYED_WORK(&dev_priv->drrs.work, intel_edp_drrs_downclock_work);
        mutex_init(&dev_priv->drrs.mutex);

        if (INTEL_INFO(dev)->gen <= 6) {
                DRM_DEBUG_KMS("DRRS supported for Gen7 and above\n");
                return NULL;
        }

        if (dev_priv->vbt.drrs_type != SEAMLESS_DRRS_SUPPORT) {
                DRM_DEBUG_KMS("VBT doesn't support DRRS\n");
                return NULL;
        }

        /* A second, lower-vrefresh mode in the EDID signals DRRS support. */
        downclock_mode = intel_find_panel_downclock
                                        (dev, fixed_mode, connector);

        if (!downclock_mode) {
                DRM_DEBUG_KMS("Downclock mode is not found. DRRS not supported\n");
                return NULL;
        }

        dev_priv->drrs.type = dev_priv->vbt.drrs_type;

        dev_priv->drrs.refresh_rate_type = DRRS_HIGH_RR;
        DRM_DEBUG_KMS("seamless DRRS supported for eDP panel.\n");
        return downclock_mode;
}
5328
/*
 * Finish connector setup for eDP: sanitize VDD state, cache DPCD and EDID,
 * determine the panel's fixed (and optional DRRS downclock) mode, and set up
 * backlight control.  Returns true on success or for non-eDP connectors;
 * false when the panel looks like a ghost (DPCD read failure).
 */
static bool intel_edp_init_connector(struct intel_dp *intel_dp,
                                     struct intel_connector *intel_connector)
{
        struct drm_connector *connector = &intel_connector->base;
        struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
        struct intel_encoder *intel_encoder = &intel_dig_port->base;
        struct drm_device *dev = intel_encoder->base.dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct drm_display_mode *fixed_mode = NULL;
        struct drm_display_mode *downclock_mode = NULL;
        bool has_dpcd;
        struct drm_display_mode *scan;
        struct edid *edid;
        enum pipe pipe = INVALID_PIPE;

        if (!is_edp(intel_dp))
                return true;

        pps_lock(intel_dp);
        intel_edp_panel_vdd_sanitize(intel_dp);
        pps_unlock(intel_dp);

        /* Cache DPCD and EDID for edp. */
        has_dpcd = intel_dp_get_dpcd(intel_dp);

        if (has_dpcd) {
                if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11)
                        dev_priv->no_aux_handshake =
                                intel_dp->dpcd[DP_MAX_DOWNSPREAD] &
                                DP_NO_AUX_HANDSHAKE_LINK_TRAINING;
        } else {
                /* if this fails, presume the device is a ghost */
                DRM_INFO("failed to retrieve link info, disabling eDP\n");
                return false;
        }

        /* We now know it's not a ghost, init power sequence regs. */
        pps_lock(intel_dp);
        intel_dp_init_panel_power_sequencer_registers(dev, intel_dp);
        pps_unlock(intel_dp);

        mutex_lock(&dev->mode_config.mutex);
        edid = drm_get_edid(connector, &intel_dp->aux.ddc);
        if (edid) {
                if (drm_add_edid_modes(connector, edid)) {
                        drm_mode_connector_update_edid_property(connector,
                                                                edid);
                        drm_edid_to_eld(connector, edid);
                } else {
                        /* EDID read but unusable: keep an error sentinel. */
                        kfree(edid);
                        edid = ERR_PTR(-EINVAL);
                }
        } else {
                edid = ERR_PTR(-ENOENT);
        }
        intel_connector->edid = edid;

        /* prefer fixed mode from EDID if available */
        list_for_each_entry(scan, &connector->probed_modes, head) {
                if ((scan->type & DRM_MODE_TYPE_PREFERRED)) {
                        fixed_mode = drm_mode_duplicate(dev, scan);
                        downclock_mode = intel_dp_drrs_init(
                                                intel_connector, fixed_mode);
                        break;
                }
        }

        /* fallback to VBT if available for eDP */
        if (!fixed_mode && dev_priv->vbt.lfp_lvds_vbt_mode) {
                fixed_mode = drm_mode_duplicate(dev,
                                        dev_priv->vbt.lfp_lvds_vbt_mode);
                if (fixed_mode)
                        fixed_mode->type |= DRM_MODE_TYPE_PREFERRED;
        }
        mutex_unlock(&dev->mode_config.mutex);

        if (IS_VALLEYVIEW(dev)) {
                intel_dp->edp_notifier.notifier_call = edp_notify_handler;
                register_reboot_notifier(&intel_dp->edp_notifier);

                /*
                 * Figure out the current pipe for the initial backlight setup.
                 * If the current pipe isn't valid, try the PPS pipe, and if that
                 * fails just assume pipe A.
                 */
                if (IS_CHERRYVIEW(dev))
                        pipe = DP_PORT_TO_PIPE_CHV(intel_dp->DP);
                else
                        pipe = PORT_TO_PIPE(intel_dp->DP);

                if (pipe != PIPE_A && pipe != PIPE_B)
                        pipe = intel_dp->pps_pipe;

                if (pipe != PIPE_A && pipe != PIPE_B)
                        pipe = PIPE_A;

                DRM_DEBUG_KMS("using pipe %c for initial backlight setup\n",
                              pipe_name(pipe));
        }

        intel_panel_init(&intel_connector->panel, fixed_mode, downclock_mode);
        intel_connector->panel.backlight_power = intel_edp_backlight_power;
        intel_panel_setup_backlight(connector, pipe);

        return true;
}
5435
/*
 * Second-stage init for a DP/eDP connector: installs the per-generation
 * AUX vfuncs, registers the DRM connector, picks the hotplug pin,
 * brings up the panel power sequencer for eDP and, on HSW/BDW/gen9+,
 * initializes MST support on ports B-D.
 *
 * Returns true on success.  On failure (eDP panel probe failed, or eDP
 * requested on an invalid vlv/chv port) everything registered here is
 * torn down again and false is returned; the caller still owns and
 * frees @intel_dig_port / @intel_connector.
 */
bool
intel_dp_init_connector(struct intel_digital_port *intel_dig_port,
			struct intel_connector *intel_connector)
{
	struct drm_connector *connector = &intel_connector->base;
	struct intel_dp *intel_dp = &intel_dig_port->dp;
	struct intel_encoder *intel_encoder = &intel_dig_port->base;
	struct drm_device *dev = intel_encoder->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum port port = intel_dig_port->port;
	int type;

	/* No power sequencer assigned yet; one is picked later for eDP. */
	intel_dp->pps_pipe = INVALID_PIPE;

	/* intel_dp vfuncs: AUX clock divider selection is per-generation. */
	if (INTEL_INFO(dev)->gen >= 9)
		intel_dp->get_aux_clock_divider = skl_get_aux_clock_divider;
	else if (IS_VALLEYVIEW(dev))
		intel_dp->get_aux_clock_divider = vlv_get_aux_clock_divider;
	else if (IS_HASWELL(dev) || IS_BROADWELL(dev))
		intel_dp->get_aux_clock_divider = hsw_get_aux_clock_divider;
	else if (HAS_PCH_SPLIT(dev))
		intel_dp->get_aux_clock_divider = ilk_get_aux_clock_divider;
	else
		intel_dp->get_aux_clock_divider = i9xx_get_aux_clock_divider;

	/* AUX send-control register layout changed on gen9 (SKL). */
	if (INTEL_INFO(dev)->gen >= 9)
		intel_dp->get_aux_send_ctl = skl_get_aux_send_ctl;
	else
		intel_dp->get_aux_send_ctl = i9xx_get_aux_send_ctl;

	/* Preserve the current hw state. */
	intel_dp->DP = I915_READ(intel_dp->output_reg);
	intel_dp->attached_connector = intel_connector;

	if (intel_dp_is_edp(dev, port))
		type = DRM_MODE_CONNECTOR_eDP;
	else
		type = DRM_MODE_CONNECTOR_DisplayPort;

	/*
	 * For eDP we always set the encoder type to INTEL_OUTPUT_EDP, but
	 * for DP the encoder type can be set by the caller to
	 * INTEL_OUTPUT_UNKNOWN for DDI, so don't rewrite it.
	 */
	if (type == DRM_MODE_CONNECTOR_eDP)
		intel_encoder->type = INTEL_OUTPUT_EDP;

	/* eDP only on port B and/or C on vlv/chv */
	if (WARN_ON(IS_VALLEYVIEW(dev) && is_edp(intel_dp) &&
		    port != PORT_B && port != PORT_C))
		return false;

	DRM_DEBUG_KMS("Adding %s connector on port %c\n",
			type == DRM_MODE_CONNECTOR_eDP ? "eDP" : "DP",
			port_name(port));

	drm_connector_init(dev, connector, &intel_dp_connector_funcs, type);
	drm_connector_helper_add(connector, &intel_dp_connector_helper_funcs);

	connector->interlace_allowed = true;
	connector->doublescan_allowed = 0;

	/* Delayed work that drops the panel VDD force after a grace period. */
	INIT_DELAYED_WORK(&intel_dp->panel_vdd_work,
			  edp_panel_vdd_work);

	intel_connector_attach_encoder(intel_connector, intel_encoder);
	drm_connector_register(connector);

	/* DDI platforms read connector state through the DDI-specific path. */
	if (HAS_DDI(dev))
		intel_connector->get_hw_state = intel_ddi_connector_get_hw_state;
	else
		intel_connector->get_hw_state = intel_connector_get_hw_state;
	intel_connector->unregister = intel_dp_connector_unregister;

	/* Set up the hotplug pin. */
	switch (port) {
	case PORT_A:
		intel_encoder->hpd_pin = HPD_PORT_A;
		break;
	case PORT_B:
		intel_encoder->hpd_pin = HPD_PORT_B;
		break;
	case PORT_C:
		intel_encoder->hpd_pin = HPD_PORT_C;
		break;
	case PORT_D:
		intel_encoder->hpd_pin = HPD_PORT_D;
		break;
	default:
		BUG();
	}

	/* Panel power sequencer setup must happen under pps_lock. */
	if (is_edp(intel_dp)) {
		pps_lock(intel_dp);
		intel_dp_init_panel_power_timestamps(intel_dp);
		if (IS_VALLEYVIEW(dev))
			vlv_initial_power_sequencer_setup(intel_dp);
		else
			intel_dp_init_panel_power_sequencer(dev, intel_dp);
		pps_unlock(intel_dp);
	}

	intel_dp_aux_init(intel_dp, intel_connector);

	/* init MST on ports that can support it */
	if (IS_HASWELL(dev) || IS_BROADWELL(dev) || INTEL_INFO(dev)->gen >= 9) {
		if (port == PORT_B || port == PORT_C || port == PORT_D) {
			intel_dp_mst_encoder_init(intel_dig_port,
						  intel_connector->base.base.id);
		}
	}

	if (!intel_edp_init_connector(intel_dp, intel_connector)) {
		/* eDP panel probe failed: unwind everything set up above. */
		drm_dp_aux_unregister(&intel_dp->aux);
		if (is_edp(intel_dp)) {
			cancel_delayed_work_sync(&intel_dp->panel_vdd_work);
			/*
			 * vdd might still be enabled do to the delayed vdd off.
			 * Make sure vdd is actually turned off here.
			 */
			pps_lock(intel_dp);
			edp_panel_vdd_off_sync(intel_dp);
			pps_unlock(intel_dp);
		}
		drm_connector_unregister(connector);
		drm_connector_cleanup(connector);
		return false;
	}

	intel_dp_add_properties(intel_dp, connector);

	/* For G4X desktop chip, PEG_BAND_GAP_DATA 3:0 must first be written
	 * 0xd.  Failure to do so will result in spurious interrupts being
	 * generated on the port when a cable is not attached.
	 */
	if (IS_G4X(dev) && !IS_GM45(dev)) {
		u32 temp = I915_READ(PEG_BAND_GAP_DATA);
		I915_WRITE(PEG_BAND_GAP_DATA, (temp & ~0xf) | 0xd);
	}

	return true;
}
5579
5580 void
5581 intel_dp_init(struct drm_device *dev, int output_reg, enum port port)
5582 {
5583         struct drm_i915_private *dev_priv = dev->dev_private;
5584         struct intel_digital_port *intel_dig_port;
5585         struct intel_encoder *intel_encoder;
5586         struct drm_encoder *encoder;
5587         struct intel_connector *intel_connector;
5588
5589         intel_dig_port = kzalloc(sizeof(*intel_dig_port), GFP_KERNEL);
5590         if (!intel_dig_port)
5591                 return;
5592
5593         intel_connector = intel_connector_alloc();
5594         if (!intel_connector) {
5595                 kfree(intel_dig_port);
5596                 return;
5597         }
5598
5599         intel_encoder = &intel_dig_port->base;
5600         encoder = &intel_encoder->base;
5601
5602         drm_encoder_init(dev, &intel_encoder->base, &intel_dp_enc_funcs,
5603                          DRM_MODE_ENCODER_TMDS);
5604
5605         intel_encoder->compute_config = intel_dp_compute_config;
5606         intel_encoder->disable = intel_disable_dp;
5607         intel_encoder->get_hw_state = intel_dp_get_hw_state;
5608         intel_encoder->get_config = intel_dp_get_config;
5609         intel_encoder->suspend = intel_dp_encoder_suspend;
5610         if (IS_CHERRYVIEW(dev)) {
5611                 intel_encoder->pre_pll_enable = chv_dp_pre_pll_enable;
5612                 intel_encoder->pre_enable = chv_pre_enable_dp;
5613                 intel_encoder->enable = vlv_enable_dp;
5614                 intel_encoder->post_disable = chv_post_disable_dp;
5615         } else if (IS_VALLEYVIEW(dev)) {
5616                 intel_encoder->pre_pll_enable = vlv_dp_pre_pll_enable;
5617                 intel_encoder->pre_enable = vlv_pre_enable_dp;
5618                 intel_encoder->enable = vlv_enable_dp;
5619                 intel_encoder->post_disable = vlv_post_disable_dp;
5620         } else {
5621                 intel_encoder->pre_enable = g4x_pre_enable_dp;
5622                 intel_encoder->enable = g4x_enable_dp;
5623                 if (INTEL_INFO(dev)->gen >= 5)
5624                         intel_encoder->post_disable = ilk_post_disable_dp;
5625         }
5626
5627         intel_dig_port->port = port;
5628         intel_dig_port->dp.output_reg = output_reg;
5629
5630         intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT;
5631         if (IS_CHERRYVIEW(dev)) {
5632                 if (port == PORT_D)
5633                         intel_encoder->crtc_mask = 1 << 2;
5634                 else
5635                         intel_encoder->crtc_mask = (1 << 0) | (1 << 1);
5636         } else {
5637                 intel_encoder->crtc_mask = (1 << 0) | (1 << 1) | (1 << 2);
5638         }
5639         intel_encoder->cloneable = 0;
5640         intel_encoder->hot_plug = intel_dp_hot_plug;
5641
5642         intel_dig_port->hpd_pulse = intel_dp_hpd_pulse;
5643         dev_priv->hpd_irq_port[port] = intel_dig_port;
5644
5645         if (!intel_dp_init_connector(intel_dig_port, intel_connector)) {
5646                 drm_encoder_cleanup(encoder);
5647                 kfree(intel_dig_port);
5648                 kfree(intel_connector);
5649         }
5650 }
5651
5652 void intel_dp_mst_suspend(struct drm_device *dev)
5653 {
5654         struct drm_i915_private *dev_priv = dev->dev_private;
5655         int i;
5656
5657         /* disable MST */
5658         for (i = 0; i < I915_MAX_PORTS; i++) {
5659                 struct intel_digital_port *intel_dig_port = dev_priv->hpd_irq_port[i];
5660                 if (!intel_dig_port)
5661                         continue;
5662
5663                 if (intel_dig_port->base.type == INTEL_OUTPUT_DISPLAYPORT) {
5664                         if (!intel_dig_port->dp.can_mst)
5665                                 continue;
5666                         if (intel_dig_port->dp.is_mst)
5667                                 drm_dp_mst_topology_mgr_suspend(&intel_dig_port->dp.mst_mgr);
5668                 }
5669         }
5670 }
5671
5672 void intel_dp_mst_resume(struct drm_device *dev)
5673 {
5674         struct drm_i915_private *dev_priv = dev->dev_private;
5675         int i;
5676
5677         for (i = 0; i < I915_MAX_PORTS; i++) {
5678                 struct intel_digital_port *intel_dig_port = dev_priv->hpd_irq_port[i];
5679                 if (!intel_dig_port)
5680                         continue;
5681                 if (intel_dig_port->base.type == INTEL_OUTPUT_DISPLAYPORT) {
5682                         int ret;
5683
5684                         if (!intel_dig_port->dp.can_mst)
5685                                 continue;
5686
5687                         ret = drm_dp_mst_topology_mgr_resume(&intel_dig_port->dp.mst_mgr);
5688                         if (ret != 0) {
5689                                 intel_dp_check_mst_status(&intel_dig_port->dp);
5690                         }
5691                 }
5692         }
5693 }