These changes are the raw update to linux-4.4.6-rt14. Kernel sources
[kvmfornfv.git] / kernel / drivers / gpu / drm / msm / mdp / mdp5 / mdp5_crtc.c
index c153077..7f9f4ac 100644 (file)
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2014 The Linux Foundation. All rights reserved.
+ * Copyright (c) 2014-2015 The Linux Foundation. All rights reserved.
  * Copyright (C) 2013 Red Hat
  * Author: Rob Clark <robdclark@gmail.com>
  *
@@ -46,6 +46,11 @@ struct mdp5_crtc {
        /* if there is a pending flip, these will be non-null: */
        struct drm_pending_vblank_event *event;
 
+       /* Bits have been flushed at the last commit,
+        * used to decide if a vsync has happened since last commit.
+        */
+       u32 flushed_mask;
+
 #define PENDING_CURSOR 0x1
 #define PENDING_FLIP   0x2
        atomic_t pending;
@@ -55,6 +60,11 @@ struct mdp5_crtc {
 
        struct mdp_irq vblank;
        struct mdp_irq err;
+       struct mdp_irq pp_done;
+
+       struct completion pp_completion;
+
+       bool cmd_mode;
 
        struct {
                /* protect REG_MDP5_LM_CURSOR* registers and cursor scanout_bo*/
@@ -82,12 +92,18 @@ static void request_pending(struct drm_crtc *crtc, uint32_t pending)
        mdp_irq_register(&get_kms(crtc)->base, &mdp5_crtc->vblank);
 }
 
-static void crtc_flush(struct drm_crtc *crtc, u32 flush_mask)
+static void request_pp_done_pending(struct drm_crtc *crtc)
+{
+       struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
+       reinit_completion(&mdp5_crtc->pp_completion);
+}
+
+static u32 crtc_flush(struct drm_crtc *crtc, u32 flush_mask)
 {
        struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
 
        DBG("%s: flush=%08x", mdp5_crtc->name, flush_mask);
-       mdp5_ctl_commit(mdp5_crtc->ctl, flush_mask);
+       return mdp5_ctl_commit(mdp5_crtc->ctl, flush_mask);
 }
 
 /*
@@ -95,7 +111,7 @@ static void crtc_flush(struct drm_crtc *crtc, u32 flush_mask)
  * so that we can safely queue unref to current fb (ie. next
  * vblank we know hw is done w/ previous scanout_fb).
  */
-static void crtc_flush_all(struct drm_crtc *crtc)
+static u32 crtc_flush_all(struct drm_crtc *crtc)
 {
        struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
        struct drm_plane *plane;
@@ -103,7 +119,7 @@ static void crtc_flush_all(struct drm_crtc *crtc)
 
        /* this should not happen: */
        if (WARN_ON(!mdp5_crtc->ctl))
-               return;
+               return 0;
 
        drm_atomic_crtc_for_each_plane(plane, crtc) {
                flush_mask |= mdp5_plane_get_flush(plane);
@@ -111,7 +127,7 @@ static void crtc_flush_all(struct drm_crtc *crtc)
 
        flush_mask |= mdp_ctl_flush_mask_lm(mdp5_crtc->lm);
 
-       crtc_flush(crtc, flush_mask);
+       return crtc_flush(crtc, flush_mask);
 }
 
 /* if file!=NULL, this is preclose potential cancel-flip path */
@@ -143,7 +159,8 @@ static void complete_flip(struct drm_crtc *crtc, struct drm_file *file)
        }
 
        if (mdp5_crtc->ctl && !crtc->state->enable) {
-               mdp5_ctl_release(mdp5_crtc->ctl);
+               /* set STAGE_UNUSED for all layers */
+               mdp5_ctl_blend(mdp5_crtc->ctl, NULL, 0, 0);
                mdp5_crtc->ctl = NULL;
        }
 }
@@ -178,13 +195,9 @@ static bool mdp5_crtc_mode_fixup(struct drm_crtc *crtc,
 /*
  * blend_setup() - blend all the planes of a CRTC
  *
- * When border is enabled, the border color will ALWAYS be the base layer.
- * Therefore, the first plane (private RGB pipe) will start at STAGE0.
- * If disabled, the first plane starts at STAGE_BASE.
- *
- * Note:
- * Border is not enabled here because the private plane is exactly
- * the CRTC resolution.
+ * If no base layer is available, border will be enabled as the base layer.
+ * Otherwise all layers will be blended based on their stage calculated
+ * in mdp5_crtc_atomic_check.
  */
 static void blend_setup(struct drm_crtc *crtc)
 {
@@ -192,9 +205,14 @@ static void blend_setup(struct drm_crtc *crtc)
        struct mdp5_kms *mdp5_kms = get_kms(crtc);
        struct drm_plane *plane;
        const struct mdp5_cfg_hw *hw_cfg;
-       uint32_t lm = mdp5_crtc->lm, blend_cfg = 0;
+       struct mdp5_plane_state *pstate, *pstates[STAGE_MAX + 1] = {NULL};
+       const struct mdp_format *format;
+       uint32_t lm = mdp5_crtc->lm;
+       uint32_t blend_op, fg_alpha, bg_alpha, ctl_blend_flags = 0;
        unsigned long flags;
-#define blender(stage) ((stage) - STAGE_BASE)
+       uint8_t stage[STAGE_MAX + 1];
+       int i, plane_cnt = 0;
+#define blender(stage) ((stage) - STAGE0)
 
        hw_cfg = mdp5_cfg_get_hw_config(mdp5_kms->cfg);
 
@@ -204,33 +222,73 @@ static void blend_setup(struct drm_crtc *crtc)
        if (!mdp5_crtc->ctl)
                goto out;
 
+       /* Collect all plane information */
        drm_atomic_crtc_for_each_plane(plane, crtc) {
-               enum mdp_mixer_stage_id stage =
-                       to_mdp5_plane_state(plane->state)->stage;
+               pstate = to_mdp5_plane_state(plane->state);
+               pstates[pstate->stage] = pstate;
+               stage[pstate->stage] = mdp5_plane_pipe(plane);
+               plane_cnt++;
+       }
 
-               /*
-                * Note: This cannot happen with current implementation but
-                * we need to check this condition once z property is added
-                */
-               BUG_ON(stage > hw_cfg->lm.nb_stages);
+       /*
+       * If there is no base layer, enable border color.
+       * Although it's not possible in current blend logic,
+       * put it here as a reminder.
+       */
+       if (!pstates[STAGE_BASE] && plane_cnt) {
+               ctl_blend_flags |= MDP5_CTL_BLEND_OP_FLAG_BORDER_OUT;
+               DBG("Border Color is enabled");
+       }
+
+       /* The reset for blending */
+       for (i = STAGE0; i <= STAGE_MAX; i++) {
+               if (!pstates[i])
+                       continue;
+
+               format = to_mdp_format(
+                       msm_framebuffer_format(pstates[i]->base.fb));
+               plane = pstates[i]->base.plane;
+               blend_op = MDP5_LM_BLEND_OP_MODE_FG_ALPHA(FG_CONST) |
+                       MDP5_LM_BLEND_OP_MODE_BG_ALPHA(BG_CONST);
+               fg_alpha = pstates[i]->alpha;
+               bg_alpha = 0xFF - pstates[i]->alpha;
+               DBG("Stage %d fg_alpha %x bg_alpha %x", i, fg_alpha, bg_alpha);
+
+               if (format->alpha_enable && pstates[i]->premultiplied) {
+                       blend_op = MDP5_LM_BLEND_OP_MODE_FG_ALPHA(FG_CONST) |
+                               MDP5_LM_BLEND_OP_MODE_BG_ALPHA(FG_PIXEL);
+                       if (fg_alpha != 0xff) {
+                               bg_alpha = fg_alpha;
+                               blend_op |=
+                                       MDP5_LM_BLEND_OP_MODE_BG_MOD_ALPHA |
+                                       MDP5_LM_BLEND_OP_MODE_BG_INV_MOD_ALPHA;
+                       } else {
+                               blend_op |= MDP5_LM_BLEND_OP_MODE_BG_INV_ALPHA;
+                       }
+               } else if (format->alpha_enable) {
+                       blend_op = MDP5_LM_BLEND_OP_MODE_FG_ALPHA(FG_PIXEL) |
+                               MDP5_LM_BLEND_OP_MODE_BG_ALPHA(FG_PIXEL);
+                       if (fg_alpha != 0xff) {
+                               bg_alpha = fg_alpha;
+                               blend_op |=
+                                      MDP5_LM_BLEND_OP_MODE_FG_MOD_ALPHA |
+                                      MDP5_LM_BLEND_OP_MODE_FG_INV_MOD_ALPHA |
+                                      MDP5_LM_BLEND_OP_MODE_BG_MOD_ALPHA |
+                                      MDP5_LM_BLEND_OP_MODE_BG_INV_MOD_ALPHA;
+                       } else {
+                               blend_op |= MDP5_LM_BLEND_OP_MODE_BG_INV_ALPHA;
+                       }
+               }
 
-               /* LM */
-               mdp5_write(mdp5_kms,
-                               REG_MDP5_LM_BLEND_OP_MODE(lm, blender(stage)),
-                               MDP5_LM_BLEND_OP_MODE_FG_ALPHA(FG_CONST) |
-                               MDP5_LM_BLEND_OP_MODE_BG_ALPHA(BG_CONST));
+               mdp5_write(mdp5_kms, REG_MDP5_LM_BLEND_OP_MODE(lm,
+                               blender(i)), blend_op);
                mdp5_write(mdp5_kms, REG_MDP5_LM_BLEND_FG_ALPHA(lm,
-                               blender(stage)), 0xff);
+                               blender(i)), fg_alpha);
                mdp5_write(mdp5_kms, REG_MDP5_LM_BLEND_BG_ALPHA(lm,
-                               blender(stage)), 0x00);
-               /* CTL */
-               blend_cfg |= mdp_ctl_blend_mask(mdp5_plane_pipe(plane), stage);
-               DBG("%s: blending pipe %s on stage=%d", mdp5_crtc->name,
-                               pipe2name(mdp5_plane_pipe(plane)), stage);
+                               blender(i)), bg_alpha);
        }
 
-       DBG("%s: lm%d: blend config = 0x%08x", mdp5_crtc->name, lm, blend_cfg);
-       mdp5_ctl_blend(mdp5_crtc->ctl, lm, blend_cfg);
+       mdp5_ctl_blend(mdp5_crtc->ctl, stage, plane_cnt, ctl_blend_flags);
 
 out:
        spin_unlock_irqrestore(&mdp5_crtc->lm_lock, flags);
@@ -274,8 +332,8 @@ static void mdp5_crtc_disable(struct drm_crtc *crtc)
        if (WARN_ON(!mdp5_crtc->enabled))
                return;
 
-       /* set STAGE_UNUSED for all layers */
-       mdp5_ctl_blend(mdp5_crtc->ctl, mdp5_crtc->lm, 0x00000000);
+       if (mdp5_crtc->cmd_mode)
+               mdp_irq_unregister(&mdp5_kms->base, &mdp5_crtc->pp_done);
 
        mdp_irq_unregister(&mdp5_kms->base, &mdp5_crtc->err);
        mdp5_disable(mdp5_kms);
@@ -296,6 +354,9 @@ static void mdp5_crtc_enable(struct drm_crtc *crtc)
        mdp5_enable(mdp5_kms);
        mdp_irq_register(&mdp5_kms->base, &mdp5_crtc->err);
 
+       if (mdp5_crtc->cmd_mode)
+               mdp_irq_register(&mdp5_kms->base, &mdp5_crtc->pp_done);
+
        mdp5_crtc->enabled = true;
 }
 
@@ -318,25 +379,19 @@ static int mdp5_crtc_atomic_check(struct drm_crtc *crtc,
        struct mdp5_kms *mdp5_kms = get_kms(crtc);
        struct drm_plane *plane;
        struct drm_device *dev = crtc->dev;
-       struct plane_state pstates[STAGE3 + 1];
+       struct plane_state pstates[STAGE_MAX + 1];
+       const struct mdp5_cfg_hw *hw_cfg;
        int cnt = 0, i;
 
        DBG("%s: check", mdp5_crtc->name);
 
-       /* request a free CTL, if none is already allocated for this CRTC */
-       if (state->enable && !mdp5_crtc->ctl) {
-               mdp5_crtc->ctl = mdp5_ctlm_request(mdp5_kms->ctlm, crtc);
-               if (WARN_ON(!mdp5_crtc->ctl))
-                       return -EINVAL;
-       }
-
        /* verify that there are not too many planes attached to crtc
         * and that we don't have conflicting mixer stages:
         */
+       hw_cfg = mdp5_cfg_get_hw_config(mdp5_kms->cfg);
        drm_atomic_crtc_state_for_each_plane(plane, state) {
                struct drm_plane_state *pstate;
-
-               if (cnt >= ARRAY_SIZE(pstates)) {
+               if (cnt >= (hw_cfg->lm.nb_stages)) {
                        dev_err(dev->dev, "too many planes!\n");
                        return -EINVAL;
                }
@@ -348,13 +403,13 @@ static int mdp5_crtc_atomic_check(struct drm_crtc *crtc,
                 */
                if (!pstate)
                        pstate = plane->state;
-
                pstates[cnt].plane = plane;
                pstates[cnt].state = to_mdp5_plane_state(pstate);
 
                cnt++;
        }
 
+       /* assign a stage based on sorted zpos property */
        sort(pstates, cnt, sizeof(pstates[0]), pstate_cmp, NULL);
 
        for (i = 0; i < cnt; i++) {
@@ -367,13 +422,15 @@ static int mdp5_crtc_atomic_check(struct drm_crtc *crtc,
        return 0;
 }
 
-static void mdp5_crtc_atomic_begin(struct drm_crtc *crtc)
+static void mdp5_crtc_atomic_begin(struct drm_crtc *crtc,
+                                  struct drm_crtc_state *old_crtc_state)
 {
        struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
        DBG("%s: begin", mdp5_crtc->name);
 }
 
-static void mdp5_crtc_atomic_flush(struct drm_crtc *crtc)
+static void mdp5_crtc_atomic_flush(struct drm_crtc *crtc,
+                                  struct drm_crtc_state *old_crtc_state)
 {
        struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
        struct drm_device *dev = crtc->dev;
@@ -396,7 +453,18 @@ static void mdp5_crtc_atomic_flush(struct drm_crtc *crtc)
                return;
 
        blend_setup(crtc);
-       crtc_flush_all(crtc);
+
+       /* PP_DONE irq is only used by command mode for now.
+        * It is better to request pending before FLUSH and START trigger
+        * to make sure no pp_done irq is missed.
+        * This is safe because no pp_done will happen before SW trigger
+        * in command mode.
+        */
+       if (mdp5_crtc->cmd_mode)
+               request_pp_done_pending(crtc);
+
+       mdp5_crtc->flushed_mask = crtc_flush_all(crtc);
+
        request_pending(crtc, PENDING_FLIP);
 }
 
@@ -601,6 +669,52 @@ static void mdp5_crtc_err_irq(struct mdp_irq *irq, uint32_t irqstatus)
        DBG("%s: error: %08x", mdp5_crtc->name, irqstatus);
 }
 
+static void mdp5_crtc_pp_done_irq(struct mdp_irq *irq, uint32_t irqstatus)
+{
+       struct mdp5_crtc *mdp5_crtc = container_of(irq, struct mdp5_crtc,
+                                                               pp_done);
+
+       complete(&mdp5_crtc->pp_completion);
+}
+
+static void mdp5_crtc_wait_for_pp_done(struct drm_crtc *crtc)
+{
+       struct drm_device *dev = crtc->dev;
+       struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
+       int ret;
+
+       ret = wait_for_completion_timeout(&mdp5_crtc->pp_completion,
+                                               msecs_to_jiffies(50));
+       if (ret == 0)
+               dev_warn(dev->dev, "pp done time out, lm=%d\n", mdp5_crtc->lm);
+}
+
+static void mdp5_crtc_wait_for_flush_done(struct drm_crtc *crtc)
+{
+       struct drm_device *dev = crtc->dev;
+       struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
+       int ret;
+
+       /* Should not call this function if crtc is disabled. */
+       if (!mdp5_crtc->ctl)
+               return;
+
+       ret = drm_crtc_vblank_get(crtc);
+       if (ret)
+               return;
+
+       ret = wait_event_timeout(dev->vblank[drm_crtc_index(crtc)].queue,
+               ((mdp5_ctl_get_commit_status(mdp5_crtc->ctl) &
+               mdp5_crtc->flushed_mask) == 0),
+               msecs_to_jiffies(50));
+       if (ret <= 0)
+               dev_warn(dev->dev, "vblank time out, crtc=%d\n", mdp5_crtc->id);
+
+       mdp5_crtc->flushed_mask = 0;
+
+       drm_crtc_vblank_put(crtc);
+}
+
 uint32_t mdp5_crtc_vblank(struct drm_crtc *crtc)
 {
        struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
@@ -613,8 +727,8 @@ void mdp5_crtc_cancel_pending_flip(struct drm_crtc *crtc, struct drm_file *file)
        complete_flip(crtc, file);
 }
 
-/* set interface for routing crtc->encoder: */
-void mdp5_crtc_set_intf(struct drm_crtc *crtc, struct mdp5_interface *intf)
+void mdp5_crtc_set_pipeline(struct drm_crtc *crtc,
+               struct mdp5_interface *intf, struct mdp5_ctl *ctl)
 {
        struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
        struct mdp5_kms *mdp5_kms = get_kms(crtc);
@@ -622,19 +736,23 @@ void mdp5_crtc_set_intf(struct drm_crtc *crtc, struct mdp5_interface *intf)
 
        /* now that we know what irq's we want: */
        mdp5_crtc->err.irqmask = intf2err(intf->num);
+       mdp5_crtc->vblank.irqmask = intf2vblank(lm, intf);
+
+       if ((intf->type == INTF_DSI) &&
+               (intf->mode == MDP5_INTF_DSI_MODE_COMMAND)) {
+               mdp5_crtc->pp_done.irqmask = lm2ppdone(lm);
+               mdp5_crtc->pp_done.irq = mdp5_crtc_pp_done_irq;
+               mdp5_crtc->cmd_mode = true;
+       } else {
+               mdp5_crtc->pp_done.irqmask = 0;
+               mdp5_crtc->pp_done.irq = NULL;
+               mdp5_crtc->cmd_mode = false;
+       }
 
-       /* Register command mode Pingpong done as vblank for now,
-        * so that atomic commit should wait for it to finish.
-        * Ideally, in the future, we should take rd_ptr done as vblank,
-        * and let atomic commit wait for pingpong done for commond mode.
-        */
-       if (intf->mode == MDP5_INTF_DSI_MODE_COMMAND)
-               mdp5_crtc->vblank.irqmask = lm2ppdone(lm);
-       else
-               mdp5_crtc->vblank.irqmask = intf2vblank(lm, intf);
        mdp_irq_update(&mdp5_kms->base);
 
-       mdp5_ctl_set_intf(mdp5_crtc->ctl, intf);
+       mdp5_crtc->ctl = ctl;
+       mdp5_ctl_set_pipeline(ctl, intf, lm);
 }
 
 int mdp5_crtc_get_lm(struct drm_crtc *crtc)
@@ -643,10 +761,14 @@ int mdp5_crtc_get_lm(struct drm_crtc *crtc)
        return WARN_ON(!crtc) ? -EINVAL : mdp5_crtc->lm;
 }
 
-struct mdp5_ctl *mdp5_crtc_get_ctl(struct drm_crtc *crtc)
+void mdp5_crtc_wait_for_commit_done(struct drm_crtc *crtc)
 {
        struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
-       return WARN_ON(!crtc) ? NULL : mdp5_crtc->ctl;
+
+       if (mdp5_crtc->cmd_mode)
+               mdp5_crtc_wait_for_pp_done(crtc);
+       else
+               mdp5_crtc_wait_for_flush_done(crtc);
 }
 
 /* initialize crtc */
@@ -667,6 +789,7 @@ struct drm_crtc *mdp5_crtc_init(struct drm_device *dev,
 
        spin_lock_init(&mdp5_crtc->lm_lock);
        spin_lock_init(&mdp5_crtc->cursor.lock);
+       init_completion(&mdp5_crtc->pp_completion);
 
        mdp5_crtc->vblank.irq = mdp5_crtc_vblank_irq;
        mdp5_crtc->err.irq = mdp5_crtc_err_irq;
@@ -682,7 +805,5 @@ struct drm_crtc *mdp5_crtc_init(struct drm_device *dev,
        drm_crtc_helper_add(crtc, &mdp5_crtc_helper_funcs);
        plane->crtc = crtc;
 
-       mdp5_plane_install_properties(plane, &crtc->base);
-
        return crtc;
 }