kernel/drivers/gpu/drm/msm/mdp/mdp5/mdp5_crtc.c
/*
 * Copyright (c) 2014 The Linux Foundation. All rights reserved.
 * Copyright (C) 2013 Red Hat
 * Author: Rob Clark <robdclark@gmail.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published by
 * the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include "mdp5_kms.h"

#include <linux/sort.h>
#include <drm/drm_mode.h>
#include "drm_crtc.h"
#include "drm_crtc_helper.h"
#include "drm_flip_work.h"

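/* maximum cursor dimensions accepted by mdp5_crtc_cursor_set() below */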
#define CURSOR_WIDTH    64
#define CURSOR_HEIGHT   64

#define SSPP_MAX        (SSPP_RGB3 + 1) /* TODO: Add SSPP_MAX in mdp5.xml.h */

struct mdp5_crtc {
        struct drm_crtc base;
        char name[8];
        int id;
        bool enabled;

        /* layer mixer used for this CRTC (+ its lock): */
#define GET_LM_ID(crtc_id)      ((crtc_id == 3) ? 5 : crtc_id)
        int lm;
        spinlock_t lm_lock;     /* protect REG_MDP5_LM_* registers */

        /* CTL used for this CRTC: */
        struct mdp5_ctl *ctl;

        /* if there is a pending flip, these will be non-null: */
        struct drm_pending_vblank_event *event;

#define PENDING_CURSOR 0x1
#define PENDING_FLIP   0x2
        atomic_t pending;

        /* for unref'ing cursor bo's after scanout completes: */
        struct drm_flip_work unref_cursor_work;

        struct mdp_irq vblank;
        struct mdp_irq err;

        struct {
                /* protect REG_MDP5_LM_CURSOR* registers and cursor scanout_bo */
                spinlock_t lock;

                /* current cursor being scanned out: */
                struct drm_gem_object *scanout_bo;
                uint32_t width, height;
                uint32_t x, y;
        } cursor;
};
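
/*
 * Note on PENDING_CURSOR/PENDING_FLIP: request_pending() ORs a flag into
 * ->pending and registers the vblank irq; mdp5_crtc_vblank_irq() then
 * atomic_xchg()s the mask back to zero and completes the matching work
 * (sending the flip event, committing the cursor-bo unref work).
 */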
#define to_mdp5_crtc(x) container_of(x, struct mdp5_crtc, base)

static struct mdp5_kms *get_kms(struct drm_crtc *crtc)
{
        struct msm_drm_private *priv = crtc->dev->dev_private;
        return to_mdp5_kms(to_mdp_kms(priv->kms));
}

static void request_pending(struct drm_crtc *crtc, uint32_t pending)
{
        struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);

        atomic_or(pending, &mdp5_crtc->pending);
        mdp_irq_register(&get_kms(crtc)->base, &mdp5_crtc->vblank);
}

static void crtc_flush(struct drm_crtc *crtc, u32 flush_mask)
{
        struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);

        DBG("%s: flush=%08x", mdp5_crtc->name, flush_mask);
        mdp5_ctl_commit(mdp5_crtc->ctl, flush_mask);
}

/*
 * Flush updates to make sure the hw picks up the new scanout fb, so that
 * we can safely queue an unref of the current fb (i.e. by the next vblank
 * we know the hw is done with the previous scanout_fb).
 */
static void crtc_flush_all(struct drm_crtc *crtc)
{
        struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
        struct drm_plane *plane;
        uint32_t flush_mask = 0;

        /* this should not happen: */
        if (WARN_ON(!mdp5_crtc->ctl))
                return;

        drm_atomic_crtc_for_each_plane(plane, crtc) {
                flush_mask |= mdp5_plane_get_flush(plane);
        }

        flush_mask |= mdp_ctl_flush_mask_lm(mdp5_crtc->lm);

        crtc_flush(crtc, flush_mask);
}

/* if file != NULL, this is the preclose path and a potential cancel-flip */
static void complete_flip(struct drm_crtc *crtc, struct drm_file *file)
{
        struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
        struct drm_device *dev = crtc->dev;
        struct drm_pending_vblank_event *event;
        struct drm_plane *plane;
        unsigned long flags;

        spin_lock_irqsave(&dev->event_lock, flags);
        event = mdp5_crtc->event;
        if (event) {
                /* if regular vblank case (!file) or if cancel-flip from
                 * preclose on the file that requested the flip, then send
                 * the event:
                 */
                if (!file || (event->base.file_priv == file)) {
                        mdp5_crtc->event = NULL;
                        DBG("%s: send event: %p", mdp5_crtc->name, event);
                        drm_send_vblank_event(dev, mdp5_crtc->id, event);
                }
        }
        spin_unlock_irqrestore(&dev->event_lock, flags);

        drm_atomic_crtc_for_each_plane(plane, crtc) {
                mdp5_plane_complete_flip(plane);
        }

        if (mdp5_crtc->ctl && !crtc->state->enable) {
                mdp5_ctl_release(mdp5_crtc->ctl);
                mdp5_crtc->ctl = NULL;
        }
}

static void unref_cursor_worker(struct drm_flip_work *work, void *val)
{
        struct mdp5_crtc *mdp5_crtc =
                container_of(work, struct mdp5_crtc, unref_cursor_work);
        struct mdp5_kms *mdp5_kms = get_kms(&mdp5_crtc->base);

        msm_gem_put_iova(val, mdp5_kms->id);
        drm_gem_object_unreference_unlocked(val);
}

static void mdp5_crtc_destroy(struct drm_crtc *crtc)
{
        struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);

        drm_crtc_cleanup(crtc);
        drm_flip_work_cleanup(&mdp5_crtc->unref_cursor_work);

        kfree(mdp5_crtc);
}

static bool mdp5_crtc_mode_fixup(struct drm_crtc *crtc,
                const struct drm_display_mode *mode,
                struct drm_display_mode *adjusted_mode)
{
        return true;
}

/*
 * blend_setup() - blend all the planes of a CRTC
 *
 * When border is enabled, the border color will ALWAYS be the base layer.
 * Therefore, the first plane (private RGB pipe) will start at STAGE0.
 * If disabled, the first plane starts at STAGE_BASE.
 *
 * Note:
 * Border is not enabled here because the private plane is exactly
 * the CRTC resolution.
 */
static void blend_setup(struct drm_crtc *crtc)
{
        struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
        struct mdp5_kms *mdp5_kms = get_kms(crtc);
        struct drm_plane *plane;
        const struct mdp5_cfg_hw *hw_cfg;
        uint32_t lm = mdp5_crtc->lm, blend_cfg = 0;
        unsigned long flags;
#define blender(stage)  ((stage) - STAGE_BASE)
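/* illustrative note (assumption about enum mdp_mixer_stage_id ordering):
 * with STAGE_BASE immediately preceding STAGE0, blender(STAGE_BASE) == 0
 * and blender(STAGE0) == 1, i.e. stage ids map to zero-based blend units */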

        hw_cfg = mdp5_cfg_get_hw_config(mdp5_kms->cfg);

        spin_lock_irqsave(&mdp5_crtc->lm_lock, flags);

        /* ctl could be released already when we are shutting down: */
        if (!mdp5_crtc->ctl)
                goto out;

        drm_atomic_crtc_for_each_plane(plane, crtc) {
                enum mdp_mixer_stage_id stage =
                        to_mdp5_plane_state(plane->state)->stage;

                /*
                 * Note: this cannot happen with the current implementation,
                 * but we need to check this condition once a z property is
                 * added
                 */
                BUG_ON(stage > hw_cfg->lm.nb_stages);

                /* LM */
                mdp5_write(mdp5_kms,
                                REG_MDP5_LM_BLEND_OP_MODE(lm, blender(stage)),
                                MDP5_LM_BLEND_OP_MODE_FG_ALPHA(FG_CONST) |
                                MDP5_LM_BLEND_OP_MODE_BG_ALPHA(BG_CONST));
                mdp5_write(mdp5_kms, REG_MDP5_LM_BLEND_FG_ALPHA(lm,
                                blender(stage)), 0xff);
                mdp5_write(mdp5_kms, REG_MDP5_LM_BLEND_BG_ALPHA(lm,
                                blender(stage)), 0x00);
                /* CTL */
                blend_cfg |= mdp_ctl_blend_mask(mdp5_plane_pipe(plane), stage);
                DBG("%s: blending pipe %s on stage=%d", mdp5_crtc->name,
                                pipe2name(mdp5_plane_pipe(plane)), stage);
        }

        DBG("%s: lm%d: blend config = 0x%08x", mdp5_crtc->name, lm, blend_cfg);
        mdp5_ctl_blend(mdp5_crtc->ctl, lm, blend_cfg);

out:
        spin_unlock_irqrestore(&mdp5_crtc->lm_lock, flags);
}

static void mdp5_crtc_mode_set_nofb(struct drm_crtc *crtc)
{
        struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
        struct mdp5_kms *mdp5_kms = get_kms(crtc);
        unsigned long flags;
        struct drm_display_mode *mode;

        if (WARN_ON(!crtc->state))
                return;

        mode = &crtc->state->adjusted_mode;

        DBG("%s: set mode: %d:\"%s\" %d %d %d %d %d %d %d %d %d %d 0x%x 0x%x",
                        mdp5_crtc->name, mode->base.id, mode->name,
                        mode->vrefresh, mode->clock,
                        mode->hdisplay, mode->hsync_start,
                        mode->hsync_end, mode->htotal,
                        mode->vdisplay, mode->vsync_start,
                        mode->vsync_end, mode->vtotal,
                        mode->type, mode->flags);

        spin_lock_irqsave(&mdp5_crtc->lm_lock, flags);
        mdp5_write(mdp5_kms, REG_MDP5_LM_OUT_SIZE(mdp5_crtc->lm),
                        MDP5_LM_OUT_SIZE_WIDTH(mode->hdisplay) |
                        MDP5_LM_OUT_SIZE_HEIGHT(mode->vdisplay));
        spin_unlock_irqrestore(&mdp5_crtc->lm_lock, flags);
}

static void mdp5_crtc_disable(struct drm_crtc *crtc)
{
        struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
        struct mdp5_kms *mdp5_kms = get_kms(crtc);

        DBG("%s", mdp5_crtc->name);

        if (WARN_ON(!mdp5_crtc->enabled))
                return;

        /* set STAGE_UNUSED for all layers */
        mdp5_ctl_blend(mdp5_crtc->ctl, mdp5_crtc->lm, 0x00000000);

        mdp_irq_unregister(&mdp5_kms->base, &mdp5_crtc->err);
        mdp5_disable(mdp5_kms);

        mdp5_crtc->enabled = false;
}

static void mdp5_crtc_enable(struct drm_crtc *crtc)
{
        struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
        struct mdp5_kms *mdp5_kms = get_kms(crtc);

        DBG("%s", mdp5_crtc->name);

        if (WARN_ON(mdp5_crtc->enabled))
                return;

        mdp5_enable(mdp5_kms);
        mdp_irq_register(&mdp5_kms->base, &mdp5_crtc->err);

        mdp5_crtc->enabled = true;
}

struct plane_state {
        struct drm_plane *plane;
        struct mdp5_plane_state *state;
};

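/*
 * Comparator for sort() in mdp5_crtc_atomic_check(): orders planes by
 * ascending zpos so that mixer stages can be assigned bottom-up starting
 * from STAGE_BASE.
 */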
static int pstate_cmp(const void *a, const void *b)
{
        struct plane_state *pa = (struct plane_state *)a;
        struct plane_state *pb = (struct plane_state *)b;
        return pa->state->zpos - pb->state->zpos;
}

static int mdp5_crtc_atomic_check(struct drm_crtc *crtc,
                struct drm_crtc_state *state)
{
        struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
        struct mdp5_kms *mdp5_kms = get_kms(crtc);
        struct drm_plane *plane;
        struct drm_device *dev = crtc->dev;
        struct plane_state pstates[STAGE3 + 1];
        int cnt = 0, i;

        DBG("%s: check", mdp5_crtc->name);

        /* request a free CTL, if none is already allocated for this CRTC */
        if (state->enable && !mdp5_crtc->ctl) {
                mdp5_crtc->ctl = mdp5_ctlm_request(mdp5_kms->ctlm, crtc);
                if (WARN_ON(!mdp5_crtc->ctl))
                        return -EINVAL;
        }

        /* verify that there are not too many planes attached to crtc
         * and that we don't have conflicting mixer stages:
         */
        drm_atomic_crtc_state_for_each_plane(plane, state) {
                struct drm_plane_state *pstate;

                if (cnt >= ARRAY_SIZE(pstates)) {
                        dev_err(dev->dev, "too many planes!\n");
                        return -EINVAL;
                }

                pstate = state->state->plane_states[drm_plane_index(plane)];

                /* plane might not have changed, in which case take
                 * current state:
                 */
                if (!pstate)
                        pstate = plane->state;

                pstates[cnt].plane = plane;
                pstates[cnt].state = to_mdp5_plane_state(pstate);

                cnt++;
        }

        sort(pstates, cnt, sizeof(pstates[0]), pstate_cmp, NULL);

        for (i = 0; i < cnt; i++) {
                pstates[i].state->stage = STAGE_BASE + i;
                DBG("%s: assign pipe %s on stage=%d", mdp5_crtc->name,
                                pipe2name(mdp5_plane_pipe(pstates[i].plane)),
                                pstates[i].state->stage);
        }

        return 0;
}

static void mdp5_crtc_atomic_begin(struct drm_crtc *crtc)
{
        struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
        DBG("%s: begin", mdp5_crtc->name);
}

static void mdp5_crtc_atomic_flush(struct drm_crtc *crtc)
{
        struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
        struct drm_device *dev = crtc->dev;
        unsigned long flags;

        DBG("%s: event: %p", mdp5_crtc->name, crtc->state->event);

        WARN_ON(mdp5_crtc->event);

        spin_lock_irqsave(&dev->event_lock, flags);
        mdp5_crtc->event = crtc->state->event;
        spin_unlock_irqrestore(&dev->event_lock, flags);

        /*
         * If no CTL has been allocated in mdp5_crtc_atomic_check(),
         * it means we are trying to flush a CRTC whose state is disabled:
         * nothing else needs to be done.
         */
        if (unlikely(!mdp5_crtc->ctl))
                return;

        blend_setup(crtc);
        crtc_flush_all(crtc);
        request_pending(crtc, PENDING_FLIP);
}

static int mdp5_crtc_set_property(struct drm_crtc *crtc,
                struct drm_property *property, uint64_t val)
{
        // XXX
        return -EINVAL;
}

static void get_roi(struct drm_crtc *crtc, uint32_t *roi_w, uint32_t *roi_h)
{
        struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
        uint32_t xres = crtc->mode.hdisplay;
        uint32_t yres = crtc->mode.vdisplay;

        /*
         * The cursor Region Of Interest (ROI) is the part of the cursor
         * buffer that is read and rendered. The ROI is determined by the
         * visibility of the cursor point. In the default cursor image the
         * cursor point is at the top left of the cursor image, unless
         * specified otherwise via the hotspot feature.
         *
         * If the cursor point reaches the right (xres - x < cursor.width) or
         * bottom (yres - y < cursor.height) boundary of the screen, then the
         * ROI width and ROI height need to be evaluated to crop the cursor
         * image accordingly:
         * (xres - x) is the new cursor width when x > (xres - cursor.width)
         * (yres - y) is the new cursor height when y > (yres - cursor.height)
         */
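        /*
         * Worked example with hypothetical numbers: a 64x64 cursor at
         * x = 1900 on a 1920-pixel-wide mode gives
         * roi_w = min(64, 1920 - 1900) = 20, i.e. only the leftmost 20
         * columns of the cursor image are scanned out.
         */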
        *roi_w = min(mdp5_crtc->cursor.width, xres -
                        mdp5_crtc->cursor.x);
        *roi_h = min(mdp5_crtc->cursor.height, yres -
                        mdp5_crtc->cursor.y);
}

static int mdp5_crtc_cursor_set(struct drm_crtc *crtc,
                struct drm_file *file, uint32_t handle,
                uint32_t width, uint32_t height)
{
        struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
        struct drm_device *dev = crtc->dev;
        struct mdp5_kms *mdp5_kms = get_kms(crtc);
        struct drm_gem_object *cursor_bo, *old_bo = NULL;
        uint32_t blendcfg, cursor_addr, stride;
        int ret, bpp, lm;
        unsigned int depth;
        enum mdp5_cursor_alpha cur_alpha = CURSOR_ALPHA_PER_PIXEL;
        uint32_t flush_mask = mdp_ctl_flush_mask_cursor(0);
        uint32_t roi_w, roi_h;
        bool cursor_enable = true;
        unsigned long flags;

        if ((width > CURSOR_WIDTH) || (height > CURSOR_HEIGHT)) {
                dev_err(dev->dev, "bad cursor size: %dx%d\n", width, height);
                return -EINVAL;
        }

        if (!mdp5_crtc->ctl)
                return -EINVAL;

        if (!handle) {
                DBG("Cursor off");
                cursor_enable = false;
                goto set_cursor;
        }

        cursor_bo = drm_gem_object_lookup(dev, file, handle);
        if (!cursor_bo)
                return -ENOENT;

        ret = msm_gem_get_iova(cursor_bo, mdp5_kms->id, &cursor_addr);
        if (ret)
                return -EINVAL;

        lm = mdp5_crtc->lm;
        drm_fb_get_bpp_depth(DRM_FORMAT_ARGB8888, &depth, &bpp);
        stride = width * (bpp >> 3);
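        /* for ARGB8888, bpp is 32, so e.g. a 64-pixel-wide cursor has a
         * stride of 64 * (32 >> 3) = 256 bytes (illustrative note) */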

        spin_lock_irqsave(&mdp5_crtc->cursor.lock, flags);
        old_bo = mdp5_crtc->cursor.scanout_bo;

        mdp5_crtc->cursor.scanout_bo = cursor_bo;
        mdp5_crtc->cursor.width = width;
        mdp5_crtc->cursor.height = height;

        get_roi(crtc, &roi_w, &roi_h);

        mdp5_write(mdp5_kms, REG_MDP5_LM_CURSOR_STRIDE(lm), stride);
        mdp5_write(mdp5_kms, REG_MDP5_LM_CURSOR_FORMAT(lm),
                        MDP5_LM_CURSOR_FORMAT_FORMAT(CURSOR_FMT_ARGB8888));
        mdp5_write(mdp5_kms, REG_MDP5_LM_CURSOR_IMG_SIZE(lm),
                        MDP5_LM_CURSOR_IMG_SIZE_SRC_H(height) |
                        MDP5_LM_CURSOR_IMG_SIZE_SRC_W(width));
        mdp5_write(mdp5_kms, REG_MDP5_LM_CURSOR_SIZE(lm),
                        MDP5_LM_CURSOR_SIZE_ROI_H(roi_h) |
                        MDP5_LM_CURSOR_SIZE_ROI_W(roi_w));
        mdp5_write(mdp5_kms, REG_MDP5_LM_CURSOR_BASE_ADDR(lm), cursor_addr);

        blendcfg = MDP5_LM_CURSOR_BLEND_CONFIG_BLEND_EN;
        blendcfg |= MDP5_LM_CURSOR_BLEND_CONFIG_BLEND_ALPHA_SEL(cur_alpha);
        mdp5_write(mdp5_kms, REG_MDP5_LM_CURSOR_BLEND_CONFIG(lm), blendcfg);

        spin_unlock_irqrestore(&mdp5_crtc->cursor.lock, flags);

set_cursor:
        ret = mdp5_ctl_set_cursor(mdp5_crtc->ctl, 0, cursor_enable);
        if (ret) {
                dev_err(dev->dev, "failed to %sable cursor: %d\n",
                                cursor_enable ? "en" : "dis", ret);
                goto end;
        }

        crtc_flush(crtc, flush_mask);

end:
        if (old_bo) {
                drm_flip_work_queue(&mdp5_crtc->unref_cursor_work, old_bo);
                /* enable vblank to complete cursor work: */
                request_pending(crtc, PENDING_CURSOR);
        }
        return ret;
}

static int mdp5_crtc_cursor_move(struct drm_crtc *crtc, int x, int y)
{
        struct mdp5_kms *mdp5_kms = get_kms(crtc);
        struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
        uint32_t flush_mask = mdp_ctl_flush_mask_cursor(0);
        uint32_t roi_w;
        uint32_t roi_h;
        unsigned long flags;

        /* in case the CRTC is disabled, just drop the cursor update */
        if (unlikely(!crtc->state->enable))
                return 0;

        mdp5_crtc->cursor.x = x = max(x, 0);
        mdp5_crtc->cursor.y = y = max(y, 0);

        get_roi(crtc, &roi_w, &roi_h);

        spin_lock_irqsave(&mdp5_crtc->cursor.lock, flags);
        mdp5_write(mdp5_kms, REG_MDP5_LM_CURSOR_SIZE(mdp5_crtc->lm),
                        MDP5_LM_CURSOR_SIZE_ROI_H(roi_h) |
                        MDP5_LM_CURSOR_SIZE_ROI_W(roi_w));
        mdp5_write(mdp5_kms, REG_MDP5_LM_CURSOR_START_XY(mdp5_crtc->lm),
                        MDP5_LM_CURSOR_START_XY_Y_START(y) |
                        MDP5_LM_CURSOR_START_XY_X_START(x));
        spin_unlock_irqrestore(&mdp5_crtc->cursor.lock, flags);

        crtc_flush(crtc, flush_mask);

        return 0;
}

static const struct drm_crtc_funcs mdp5_crtc_funcs = {
        .set_config = drm_atomic_helper_set_config,
        .destroy = mdp5_crtc_destroy,
        .page_flip = drm_atomic_helper_page_flip,
        .set_property = mdp5_crtc_set_property,
        .reset = drm_atomic_helper_crtc_reset,
        .atomic_duplicate_state = drm_atomic_helper_crtc_duplicate_state,
        .atomic_destroy_state = drm_atomic_helper_crtc_destroy_state,
        .cursor_set = mdp5_crtc_cursor_set,
        .cursor_move = mdp5_crtc_cursor_move,
};

static const struct drm_crtc_helper_funcs mdp5_crtc_helper_funcs = {
        .mode_fixup = mdp5_crtc_mode_fixup,
        .mode_set_nofb = mdp5_crtc_mode_set_nofb,
        .disable = mdp5_crtc_disable,
        .enable = mdp5_crtc_enable,
        .atomic_check = mdp5_crtc_atomic_check,
        .atomic_begin = mdp5_crtc_atomic_begin,
        .atomic_flush = mdp5_crtc_atomic_flush,
};

static void mdp5_crtc_vblank_irq(struct mdp_irq *irq, uint32_t irqstatus)
{
        struct mdp5_crtc *mdp5_crtc = container_of(irq, struct mdp5_crtc, vblank);
        struct drm_crtc *crtc = &mdp5_crtc->base;
        struct msm_drm_private *priv = crtc->dev->dev_private;
        unsigned pending;

        mdp_irq_unregister(&get_kms(crtc)->base, &mdp5_crtc->vblank);

        pending = atomic_xchg(&mdp5_crtc->pending, 0);

        if (pending & PENDING_FLIP) {
                complete_flip(crtc, NULL);
        }

        if (pending & PENDING_CURSOR)
                drm_flip_work_commit(&mdp5_crtc->unref_cursor_work, priv->wq);
}

static void mdp5_crtc_err_irq(struct mdp_irq *irq, uint32_t irqstatus)
{
        struct mdp5_crtc *mdp5_crtc = container_of(irq, struct mdp5_crtc, err);

        DBG("%s: error: %08x", mdp5_crtc->name, irqstatus);
}

uint32_t mdp5_crtc_vblank(struct drm_crtc *crtc)
{
        struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
        return mdp5_crtc->vblank.irqmask;
}

void mdp5_crtc_cancel_pending_flip(struct drm_crtc *crtc, struct drm_file *file)
{
        DBG("cancel: %p", file);
        complete_flip(crtc, file);
}

/* set interface for routing crtc->encoder: */
void mdp5_crtc_set_intf(struct drm_crtc *crtc, struct mdp5_interface *intf)
{
        struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
        struct mdp5_kms *mdp5_kms = get_kms(crtc);
        int lm = mdp5_crtc_get_lm(crtc);

        /* now that we know which irqs we want: */
        mdp5_crtc->err.irqmask = intf2err(intf->num);

        /* Register command mode pingpong-done as vblank for now, so that
         * atomic commit will wait for it to finish. Ideally, in the
         * future, we should take rd_ptr-done as vblank and let atomic
         * commit wait for pingpong-done for command mode.
         */
        if (intf->mode == MDP5_INTF_DSI_MODE_COMMAND)
                mdp5_crtc->vblank.irqmask = lm2ppdone(lm);
        else
                mdp5_crtc->vblank.irqmask = intf2vblank(lm, intf);
        mdp_irq_update(&mdp5_kms->base);

        mdp5_ctl_set_intf(mdp5_crtc->ctl, intf);
}

int mdp5_crtc_get_lm(struct drm_crtc *crtc)
{
        struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
        return WARN_ON(!crtc) ? -EINVAL : mdp5_crtc->lm;
}

struct mdp5_ctl *mdp5_crtc_get_ctl(struct drm_crtc *crtc)
{
        struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
        return WARN_ON(!crtc) ? NULL : mdp5_crtc->ctl;
}

/* initialize crtc */
struct drm_crtc *mdp5_crtc_init(struct drm_device *dev,
                struct drm_plane *plane, int id)
{
        struct drm_crtc *crtc = NULL;
        struct mdp5_crtc *mdp5_crtc;

        mdp5_crtc = kzalloc(sizeof(*mdp5_crtc), GFP_KERNEL);
        if (!mdp5_crtc)
                return ERR_PTR(-ENOMEM);

        crtc = &mdp5_crtc->base;

        mdp5_crtc->id = id;
        mdp5_crtc->lm = GET_LM_ID(id);

        spin_lock_init(&mdp5_crtc->lm_lock);
        spin_lock_init(&mdp5_crtc->cursor.lock);

        mdp5_crtc->vblank.irq = mdp5_crtc_vblank_irq;
        mdp5_crtc->err.irq = mdp5_crtc_err_irq;

        snprintf(mdp5_crtc->name, sizeof(mdp5_crtc->name), "%s:%d",
                        pipe2name(mdp5_plane_pipe(plane)), id);

        drm_crtc_init_with_planes(dev, crtc, plane, NULL, &mdp5_crtc_funcs);

        drm_flip_work_init(&mdp5_crtc->unref_cursor_work,
                        "unref cursor", unref_cursor_worker);

        drm_crtc_helper_add(crtc, &mdp5_crtc_helper_funcs);
        plane->crtc = crtc;

        mdp5_plane_install_properties(plane, &crtc->base);

        return crtc;
}