These changes are the raw update to the linux-4.4.6-rt14 kernel sources.
[kvmfornfv.git] / kernel / drivers / gpu / drm / i915 / intel_atomic.c
index 3903b90..f1975f2 100644 (file)
 #include <drm/drm_plane_helper.h>
 #include "intel_drv.h"
 
-
-/**
- * intel_atomic_check - validate state object
- * @dev: drm device
- * @state: state to validate
- */
-int intel_atomic_check(struct drm_device *dev,
-                      struct drm_atomic_state *state)
-{
-       int nplanes = dev->mode_config.num_total_plane;
-       int ncrtcs = dev->mode_config.num_crtc;
-       int nconnectors = dev->mode_config.num_connector;
-       enum pipe nuclear_pipe = INVALID_PIPE;
-       int ret;
-       int i;
-       bool not_nuclear = false;
-
-       /*
-        * FIXME:  At the moment, we only support "nuclear pageflip" on a
-        * single CRTC.  Cross-crtc updates will be added later.
-        */
-       for (i = 0; i < nplanes; i++) {
-               struct intel_plane *plane = to_intel_plane(state->planes[i]);
-               if (!plane)
-                       continue;
-
-               if (nuclear_pipe == INVALID_PIPE) {
-                       nuclear_pipe = plane->pipe;
-               } else if (nuclear_pipe != plane->pipe) {
-                       DRM_DEBUG_KMS("i915 only support atomic plane operations on a single CRTC at the moment\n");
-                       return -EINVAL;
-               }
-       }
-
-       /*
-        * FIXME:  We only handle planes for now; make sure there are no CRTC's
-        * or connectors involved.
-        */
-       state->allow_modeset = false;
-       for (i = 0; i < ncrtcs; i++) {
-               struct intel_crtc *crtc = to_intel_crtc(state->crtcs[i]);
-               if (crtc && crtc->pipe != nuclear_pipe)
-                       not_nuclear = true;
-       }
-       for (i = 0; i < nconnectors; i++)
-               if (state->connectors[i] != NULL)
-                       not_nuclear = true;
-
-       if (not_nuclear) {
-               DRM_DEBUG_KMS("i915 only supports atomic plane operations at the moment\n");
-               return -EINVAL;
-       }
-
-       ret = drm_atomic_helper_check_planes(dev, state);
-       if (ret)
-               return ret;
-
-       return ret;
-}
-
-
-/**
- * intel_atomic_commit - commit validated state object
- * @dev: DRM device
- * @state: the top-level driver state object
- * @async: asynchronous commit
- *
- * This function commits a top-level state object that has been validated
- * with drm_atomic_helper_check().
- *
- * FIXME:  Atomic modeset support for i915 is not yet complete.  At the moment
- * we can only handle plane-related operations and do not yet support
- * asynchronous commit.
- *
- * RETURNS
- * Zero for success or -errno.
- */
-int intel_atomic_commit(struct drm_device *dev,
-                       struct drm_atomic_state *state,
-                       bool async)
-{
-       int ret;
-       int i;
-
-       if (async) {
-               DRM_DEBUG_KMS("i915 does not yet support async commit\n");
-               return -EINVAL;
-       }
-
-       ret = drm_atomic_helper_prepare_planes(dev, state);
-       if (ret)
-               return ret;
-
-       /* Point of no return */
-
-       /*
-        * FIXME:  The proper sequence here will eventually be:
-        *
-        * drm_atomic_helper_swap_state(dev, state)
-        * drm_atomic_helper_commit_modeset_disables(dev, state);
-        * drm_atomic_helper_commit_planes(dev, state);
-        * drm_atomic_helper_commit_modeset_enables(dev, state);
-        * drm_atomic_helper_wait_for_vblanks(dev, state);
-        * drm_atomic_helper_cleanup_planes(dev, state);
-        * drm_atomic_state_free(state);
-        *
-        * once we have full atomic modeset.  For now, just manually update
-        * plane states to avoid clobbering good states with dummy states
-        * while nuclear pageflipping.
-        */
-       for (i = 0; i < dev->mode_config.num_total_plane; i++) {
-               struct drm_plane *plane = state->planes[i];
-
-               if (!plane)
-                       continue;
-
-               plane->state->state = state;
-               swap(state->plane_states[i], plane->state);
-               plane->state->state = NULL;
-       }
-       drm_atomic_helper_commit_planes(dev, state);
-       drm_atomic_helper_wait_for_vblanks(dev, state);
-       drm_atomic_helper_cleanup_planes(dev, state);
-       drm_atomic_state_free(state);
-
-       return 0;
-}
-
 /**
  * intel_connector_atomic_get_property - fetch connector property value
  * @connector: connector to fetch property for
@@ -213,17 +85,15 @@ intel_connector_atomic_get_property(struct drm_connector *connector,
 struct drm_crtc_state *
 intel_crtc_duplicate_state(struct drm_crtc *crtc)
 {
-       struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
        struct intel_crtc_state *crtc_state;
 
-       if (WARN_ON(!intel_crtc->config))
-               crtc_state = kzalloc(sizeof(*crtc_state), GFP_KERNEL);
-       else
-               crtc_state = kmemdup(intel_crtc->config,
-                                    sizeof(*intel_crtc->config), GFP_KERNEL);
+       /* Duplicate the full intel_crtc_state, starting from crtc->state. */
+       crtc_state = kmemdup(crtc->state, sizeof(*crtc_state), GFP_KERNEL);
+       if (!crtc_state)
+               return NULL;
+
+       /*
+        * NOTE(review): presumably re-initialises the embedded drm base
+        * state for the copy — confirm against the drm atomic helpers.
+        */
+       __drm_atomic_helper_crtc_duplicate_state(crtc, &crtc_state->base);
 
-       if (crtc_state)
-               crtc_state->base.crtc = crtc;
+       /* NOTE(review): update_pipe looks per-commit; cleared for the copy. */
+       crtc_state->update_pipe = false;
 
        return &crtc_state->base;
 }
@@ -241,3 +111,201 @@ intel_crtc_destroy_state(struct drm_crtc *crtc,
 {
        drm_atomic_helper_crtc_destroy_state(crtc, state);
 }
+
+/**
+ * intel_atomic_setup_scalers() - setup scalers for crtc per staged requests
+ * @dev: DRM device
+ * @intel_crtc: intel crtc
+ * @crtc_state: incoming crtc_state to validate and setup scalers
+ *
+ * This function sets up scalers based on staged scaling requests for
+ * a @crtc and its planes. It is called from crtc level check path. If request
+ * is a supportable request, it attaches scalers to requested planes and crtc.
+ *
+ * This function takes into account the current scaler(s) in use by any planes
+ * not being part of this atomic state
+ *
+ *  Returns:
+ *         0 - scalers were setup successfully
+ *         error code - otherwise
+ */
+int intel_atomic_setup_scalers(struct drm_device *dev,
+       struct intel_crtc *intel_crtc,
+       struct intel_crtc_state *crtc_state)
+{
+       struct drm_plane *plane = NULL;
+       struct intel_plane *intel_plane;
+       struct intel_plane_state *plane_state = NULL;
+       struct intel_crtc_scaler_state *scaler_state =
+               &crtc_state->scaler_state;
+       struct drm_atomic_state *drm_state = crtc_state->base.state;
+       int num_scalers_need;
+       int i, j;
+
+       num_scalers_need = hweight32(scaler_state->scaler_users);
+
+       /*
+        * High level flow:
+        * - staged scaler requests are already in scaler_state->scaler_users
+        * - check whether staged scaling requests can be supported
+        * - add planes using scalers that aren't in current transaction
+        * - assign scalers to requested users
+        * - as part of plane commit, scalers will be committed
+        *   (i.e., either attached or detached) to respective planes in hw
+        * - as part of crtc_commit, scaler will be either attached or detached
+        *   to crtc in hw
+        */
+
+       /* fail if required scalers > available scalers */
+       if (num_scalers_need > intel_crtc->num_scalers) {
+               DRM_DEBUG_KMS("Too many scaling requests %d > %d\n",
+                       num_scalers_need, intel_crtc->num_scalers);
+               return -EINVAL;
+       }
+
+       /* walk through scaler_users bits and start assigning scalers */
+       for (i = 0; i < sizeof(scaler_state->scaler_users) * 8; i++) {
+               int *scaler_id;
+               const char *name;
+               int idx;
+
+               /* skip if scaler not required */
+               if (!(scaler_state->scaler_users & (1 << i)))
+                       continue;
+
+               if (i == SKL_CRTC_INDEX) {
+                       name = "CRTC";
+                       idx = intel_crtc->base.base.id;
+
+                       /* panel fitter case: assign as a crtc scaler */
+                       scaler_id = &scaler_state->scaler_id;
+               } else {
+                       name = "PLANE";
+
+                       /* plane scaler case: assign as a plane scaler */
+                       /* find the plane that set the bit as scaler_user */
+                       plane = drm_state->planes[i];
+
+                       /*
+                        * to enable/disable hq mode, add planes that are using scaler
+                        * into this transaction
+                        */
+                       if (!plane) {
+                               struct drm_plane_state *state;
+
+                               plane = drm_plane_from_index(dev, i);
+                               state = drm_atomic_get_plane_state(drm_state, plane);
+                               if (IS_ERR(state)) {
+                                       DRM_DEBUG_KMS("Failed to add [PLANE:%d] to drm_state\n",
+                                               plane->base.id);
+                                       return PTR_ERR(state);
+                               }
+
+                               /*
+                                * the plane is added after plane checks are run,
+                                * but since this plane is unchanged just do the
+                                * minimum required validation.
+                                */
+                               if (plane->type == DRM_PLANE_TYPE_PRIMARY)
+                                       intel_crtc->atomic.wait_for_flips = true;
+                               crtc_state->base.planes_changed = true;
+                       }
+
+                       intel_plane = to_intel_plane(plane);
+                       idx = plane->base.id;
+
+                       /* plane on different crtc cannot be a scaler user of this crtc */
+                       if (WARN_ON(intel_plane->pipe != intel_crtc->pipe))
+                               continue;
+
+                       plane_state = to_intel_plane_state(drm_state->plane_states[i]);
+                       scaler_id = &plane_state->scaler_id;
+               }
+
+               if (*scaler_id < 0) {
+                       /* find a free scaler */
+                       for (j = 0; j < intel_crtc->num_scalers; j++) {
+                               if (!scaler_state->scalers[j].in_use) {
+                                       scaler_state->scalers[j].in_use = 1;
+                                       *scaler_id = j;
+                                       DRM_DEBUG_KMS("Attached scaler id %u.%u to %s:%d\n",
+                                               intel_crtc->pipe, *scaler_id, name, idx);
+                                       break;
+                               }
+                       }
+               }
+
+               if (WARN_ON(*scaler_id < 0)) {
+                       DRM_DEBUG_KMS("Cannot find scaler for %s:%d\n", name, idx);
+                       continue;
+               }
+
+               /* set scaler mode */
+               if (num_scalers_need == 1 && intel_crtc->pipe != PIPE_C) {
+                       /*
+                        * when only 1 scaler is in use on either pipe A or B,
+                        * scaler 0 operates in high quality (HQ) mode.
+                        * In this case use scaler 0 to take advantage of HQ mode
+                        */
+                       *scaler_id = 0;
+                       scaler_state->scalers[0].in_use = 1;
+                       scaler_state->scalers[0].mode = PS_SCALER_MODE_HQ;
+                       scaler_state->scalers[1].in_use = 0;
+               } else {
+                       scaler_state->scalers[*scaler_id].mode = PS_SCALER_MODE_DYN;
+               }
+       }
+
+       return 0;
+}
+
+/* Snapshot the current config of every shared DPLL into @shared_dpll. */
+static void
+intel_atomic_duplicate_dpll_state(struct drm_i915_private *dev_priv,
+                                 struct intel_shared_dpll_config *shared_dpll)
+{
+       enum intel_dpll_id id;
+
+       for (id = 0; id < dev_priv->num_shared_dpll; id++)
+               shared_dpll[id] = dev_priv->shared_dplls[id].config;
+}
+
+/**
+ * intel_atomic_get_shared_dpll_state - get this state's shared DPLL copy
+ * @s: drm atomic state
+ *
+ * Lazily duplicates the current shared DPLL configuration into @s on the
+ * first call for this state (guarded by state->dpll_set) and returns that
+ * per-state copy.  Requires connection_mutex to be held (WARN_ON below).
+ */
+struct intel_shared_dpll_config *
+intel_atomic_get_shared_dpll_state(struct drm_atomic_state *s)
+{
+       struct intel_atomic_state *state = to_intel_atomic_state(s);
+
+       WARN_ON(!drm_modeset_is_locked(&s->dev->mode_config.connection_mutex));
+
+       if (!state->dpll_set) {
+               state->dpll_set = true;
+
+               /* One-time snapshot of the current shared DPLL configs. */
+               intel_atomic_duplicate_dpll_state(to_i915(s->dev),
+                                                 state->shared_dpll);
+       }
+
+       return state->shared_dpll;
+}
+
+/**
+ * intel_atomic_state_alloc - allocate an i915 atomic state
+ * @dev: DRM device
+ *
+ * Returns the embedded &drm_atomic_state on success, NULL on allocation
+ * or initialisation failure.
+ */
+struct drm_atomic_state *
+intel_atomic_state_alloc(struct drm_device *dev)
+{
+       struct intel_atomic_state *state;
+
+       state = kzalloc(sizeof(*state), GFP_KERNEL);
+       if (!state)
+               return NULL;
+
+       if (drm_atomic_state_init(dev, &state->base) < 0) {
+               kfree(state);
+               return NULL;
+       }
+
+       return &state->base;
+}
+
+/*
+ * Reset an intel_atomic_state for reuse: perform the drm core clear and
+ * drop the cached shared-DPLL snapshot (dpll_set) so it is duplicated
+ * again on the next intel_atomic_get_shared_dpll_state() call.
+ */
+void intel_atomic_state_clear(struct drm_atomic_state *s)
+{
+       struct intel_atomic_state *state = to_intel_atomic_state(s);
+       drm_atomic_state_default_clear(&state->base);
+       state->dpll_set = false;
+}