/*
 * rcar_du_kms.c  --  R-Car Display Unit Mode Setting
 *
 * Copyright (C) 2013-2014 Renesas Electronics Corporation
 *
 * Contact: Laurent Pinchart (laurent.pinchart@ideasonboard.com)
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 */

#include <drm/drmP.h>
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_crtc.h>
#include <drm/drm_crtc_helper.h>
#include <drm/drm_fb_cma_helper.h>
#include <drm/drm_gem_cma_helper.h>

#include <linux/of_graph.h>
#include <linux/wait.h>

#include "rcar_du_crtc.h"
#include "rcar_du_drv.h"
#include "rcar_du_encoder.h"
#include "rcar_du_kms.h"
#include "rcar_du_lvdsenc.h"
#include "rcar_du_regs.h"

/* -----------------------------------------------------------------------------
 * Format helpers
 */

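/*
 * Hardware format descriptors. For each supported DRM fourcc, pnmr and edf
 * hold the corresponding plane mode (PnMR) and extended data format
 * (PnDDCR4.EDF) register settings.
 */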
static const struct rcar_du_format_info rcar_du_format_infos[] = {
        {
                .fourcc = DRM_FORMAT_RGB565,
                .bpp = 16,
                .planes = 1,
                .pnmr = PnMR_SPIM_TP | PnMR_DDDF_16BPP,
                .edf = PnDDCR4_EDF_NONE,
        }, {
                .fourcc = DRM_FORMAT_ARGB1555,
                .bpp = 16,
                .planes = 1,
                .pnmr = PnMR_SPIM_ALP | PnMR_DDDF_ARGB,
                .edf = PnDDCR4_EDF_NONE,
        }, {
                .fourcc = DRM_FORMAT_XRGB1555,
                .bpp = 16,
                .planes = 1,
                .pnmr = PnMR_SPIM_ALP | PnMR_DDDF_ARGB,
                .edf = PnDDCR4_EDF_NONE,
        }, {
                .fourcc = DRM_FORMAT_XRGB8888,
                .bpp = 32,
                .planes = 1,
                .pnmr = PnMR_SPIM_TP | PnMR_DDDF_16BPP,
                .edf = PnDDCR4_EDF_RGB888,
        }, {
                .fourcc = DRM_FORMAT_ARGB8888,
                .bpp = 32,
                .planes = 1,
                .pnmr = PnMR_SPIM_ALP | PnMR_DDDF_16BPP,
                .edf = PnDDCR4_EDF_ARGB8888,
        }, {
                .fourcc = DRM_FORMAT_UYVY,
                .bpp = 16,
                .planes = 1,
                .pnmr = PnMR_SPIM_TP_OFF | PnMR_DDDF_YC,
                .edf = PnDDCR4_EDF_NONE,
        }, {
                .fourcc = DRM_FORMAT_YUYV,
                .bpp = 16,
                .planes = 1,
                .pnmr = PnMR_SPIM_TP_OFF | PnMR_DDDF_YC,
                .edf = PnDDCR4_EDF_NONE,
        }, {
                .fourcc = DRM_FORMAT_NV12,
                .bpp = 12,
                .planes = 2,
                .pnmr = PnMR_SPIM_TP_OFF | PnMR_DDDF_YC,
                .edf = PnDDCR4_EDF_NONE,
        }, {
                .fourcc = DRM_FORMAT_NV21,
                .bpp = 12,
                .planes = 2,
                .pnmr = PnMR_SPIM_TP_OFF | PnMR_DDDF_YC,
                .edf = PnDDCR4_EDF_NONE,
        }, {
                /* In YUV 4:2:2, only NV16 is supported (NV61 isn't) */
                .fourcc = DRM_FORMAT_NV16,
                .bpp = 16,
                .planes = 2,
                .pnmr = PnMR_SPIM_TP_OFF | PnMR_DDDF_YC,
                .edf = PnDDCR4_EDF_NONE,
        },
};

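/*
 * Look up the format descriptor for a DRM fourcc. Returns NULL if the format
 * isn't supported by the DU.
 */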
const struct rcar_du_format_info *rcar_du_format_info(u32 fourcc)
{
        unsigned int i;

        for (i = 0; i < ARRAY_SIZE(rcar_du_format_infos); ++i) {
                if (rcar_du_format_infos[i].fourcc == fourcc)
                        return &rcar_du_format_infos[i];
        }

        return NULL;
}

/* -----------------------------------------------------------------------------
 * Frame buffer
 */

int rcar_du_dumb_create(struct drm_file *file, struct drm_device *dev,
                        struct drm_mode_create_dumb *args)
{
        struct rcar_du_device *rcdu = dev->dev_private;
        unsigned int min_pitch = DIV_ROUND_UP(args->width * args->bpp, 8);
        unsigned int align;

        /* The R8A7779 DU requires a 16-pixel pitch alignment as documented,
         * but the R8A7790 DU seems to require a 128-byte pitch alignment.
         */
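        /* For example, a 1360 pixels wide XRGB8888 (32 bpp) buffer has a
         * minimum pitch of 5440 bytes. The default 16-pixel alignment (64
         * bytes at 32 bpp) leaves that value unchanged, while the 128-byte
         * quirk rounds it up to 5504 bytes.
         */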
        if (rcar_du_needs(rcdu, RCAR_DU_QUIRK_ALIGN_128B))
                align = 128;
        else
                align = 16 * args->bpp / 8;

        args->pitch = roundup(min_pitch, align);

        return drm_gem_cma_dumb_create_internal(file, dev, args);
}

static struct drm_framebuffer *
rcar_du_fb_create(struct drm_device *dev, struct drm_file *file_priv,
                  struct drm_mode_fb_cmd2 *mode_cmd)
{
        struct rcar_du_device *rcdu = dev->dev_private;
        const struct rcar_du_format_info *format;
        unsigned int max_pitch;
        unsigned int align;
        unsigned int bpp;

        format = rcar_du_format_info(mode_cmd->pixel_format);
        if (format == NULL) {
                dev_dbg(dev->dev, "unsupported pixel format %08x\n",
                        mode_cmd->pixel_format);
                return ERR_PTR(-EINVAL);
        }

        /*
         * The pitch and alignment constraints are expressed in pixels on the
         * hardware side and in bytes in the DRM API.
         */
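        /*
         * For packed formats the conversion factor is the full number of bytes
         * per pixel. For the two-plane YUV formats the pitch constraint applies
         * to the luma plane, which stores one byte per pixel.
         */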
        bpp = format->planes == 2 ? 1 : format->bpp / 8;
        max_pitch = 4096 * bpp;

        if (rcar_du_needs(rcdu, RCAR_DU_QUIRK_ALIGN_128B))
                align = 128;
        else
                align = 16 * bpp;

        if (mode_cmd->pitches[0] & (align - 1) ||
            mode_cmd->pitches[0] >= max_pitch) {
                dev_dbg(dev->dev, "invalid pitch value %u\n",
                        mode_cmd->pitches[0]);
                return ERR_PTR(-EINVAL);
        }

        if (format->planes == 2) {
                if (mode_cmd->pitches[1] != mode_cmd->pitches[0]) {
                        dev_dbg(dev->dev,
                                "luma and chroma pitches do not match\n");
                        return ERR_PTR(-EINVAL);
                }
        }

        return drm_fb_cma_create(dev, file_priv, mode_cmd);
}

static void rcar_du_output_poll_changed(struct drm_device *dev)
{
        struct rcar_du_device *rcdu = dev->dev_private;

        drm_fbdev_cma_hotplug_event(rcdu->fbdev);
}

/* -----------------------------------------------------------------------------
 * Atomic Check and Update
 */

/*
 * Atomic hardware plane allocator
 *
 * The hardware plane allocator is solely based on the atomic plane states
 * without keeping any external state to avoid races between .atomic_check()
 * and .atomic_commit().
 *
 * The core idea is to avoid a free planes bitmask shared between the check and
 * commit handlers: the knowledge of which hardware plane(s) are allocated to
 * each KMS plane is kept in the plane states themselves. The allocator loops
 * over all plane states to compute the free planes bitmask, allocates hardware
 * planes based on that bitmask, and stores the result back in the plane states.
 *
 * For this to work we need to access the current state of planes not touched by
 * the atomic update. To ensure that it won't be modified, we need to lock all
 * planes using drm_atomic_get_plane_state(). This effectively serializes atomic
 * updates from .atomic_check() up to completion (when swapping the states if
 * the check step has succeeded) or rollback (when freeing the states if the
 * check step has failed).
 *
 * Allocation is performed in the .atomic_check() handler and applied
 * automatically when the core swaps the old and new states.
 */

static bool rcar_du_plane_needs_realloc(struct rcar_du_plane *plane,
                                        struct rcar_du_plane_state *state)
{
        const struct rcar_du_format_info *cur_format;

        cur_format = to_rcar_du_plane_state(plane->plane.state)->format;

        /* Lowering the number of planes doesn't strictly require reallocation
         * as the extra hardware plane will be freed when committing, but doing
         * so could lead to more fragmentation.
         */
        return !cur_format || cur_format->planes != state->format->planes;
}

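/*
 * Return the bitmask of hardware planes used by the KMS plane state. Two-plane
 * formats use two consecutive hardware planes, with the index wrapping around
 * from 7 to 0 (a two-plane format at hwindex 7 thus yields the mask 0x81).
 */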
static unsigned int rcar_du_plane_hwmask(struct rcar_du_plane_state *state)
{
        unsigned int mask;

        if (state->hwindex == -1)
                return 0;

        mask = 1 << state->hwindex;
        if (state->format->planes == 2)
                mask |= 1 << ((state->hwindex + 1) % 8);

        return mask;
}

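/*
 * Find a free hardware plane for a KMS plane that needs num_planes hardware
 * planes. For two-plane formats the next hardware plane (modulo 8) must be
 * free as well. Return the allocated hardware plane index, or -EBUSY if no
 * suitable plane is available.
 */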
static int rcar_du_plane_hwalloc(unsigned int num_planes, unsigned int free)
{
        unsigned int i;

        for (i = 0; i < RCAR_DU_NUM_HW_PLANES; ++i) {
                if (!(free & (1 << i)))
                        continue;

                if (num_planes == 1 || free & (1 << ((i + 1) % 8)))
                        break;
        }

        return i == RCAR_DU_NUM_HW_PLANES ? -EBUSY : i;
}

static int rcar_du_atomic_check(struct drm_device *dev,
                                struct drm_atomic_state *state)
{
        struct rcar_du_device *rcdu = dev->dev_private;
        unsigned int group_freed_planes[RCAR_DU_MAX_GROUPS] = { 0, };
        unsigned int group_free_planes[RCAR_DU_MAX_GROUPS] = { 0, };
        bool needs_realloc = false;
        unsigned int groups = 0;
        unsigned int i;
        int ret;

        ret = drm_atomic_helper_check(dev, state);
        if (ret < 0)
                return ret;

        /* Check if hardware planes need to be reallocated. */
        for (i = 0; i < dev->mode_config.num_total_plane; ++i) {
                struct rcar_du_plane_state *plane_state;
                struct rcar_du_plane *plane;
                unsigned int index;

                if (!state->planes[i])
                        continue;

                plane = to_rcar_plane(state->planes[i]);
                plane_state = to_rcar_du_plane_state(state->plane_states[i]);

                /* If the plane is being disabled we don't need to go through
                 * the full reallocation procedure. Just mark the hardware
                 * plane(s) as freed.
                 */
                if (!plane_state->format) {
                        index = plane - plane->group->planes.planes;
                        group_freed_planes[plane->group->index] |= 1 << index;
                        plane_state->hwindex = -1;
                        continue;
                }

                /* If the plane needs to be reallocated mark it as such, and
                 * mark the hardware plane(s) as free.
                 */
                if (rcar_du_plane_needs_realloc(plane, plane_state)) {
                        groups |= 1 << plane->group->index;
                        needs_realloc = true;

                        index = plane - plane->group->planes.planes;
                        group_freed_planes[plane->group->index] |= 1 << index;
                        plane_state->hwindex = -1;
                }
        }

        if (!needs_realloc)
                return 0;

        /* Grab all plane states for the groups that need reallocation to ensure
         * locking and avoid racy updates. This serializes the update operation,
         * but there's not much we can do about it as that's the hardware
         * design.
         *
         * Compute the used planes mask for each group at the same time to avoid
         * looping over the planes separately later.
         */
        while (groups) {
                unsigned int index = ffs(groups) - 1;
                struct rcar_du_group *group = &rcdu->groups[index];
                unsigned int used_planes = 0;

                for (i = 0; i < RCAR_DU_NUM_KMS_PLANES; ++i) {
                        struct rcar_du_plane *plane = &group->planes.planes[i];
                        struct rcar_du_plane_state *plane_state;
                        struct drm_plane_state *s;

                        s = drm_atomic_get_plane_state(state, &plane->plane);
                        if (IS_ERR(s))
                                return PTR_ERR(s);

                        /* If the plane has been freed in the above loop its
                         * hardware planes must not be added to the used planes
                         * bitmask. However, the current state doesn't reflect
                         * the free state yet, as we've modified the new state
                         * above. Use the local freed planes list to check for
                         * that condition instead.
                         */
                        if (group_freed_planes[index] & (1 << i))
                                continue;

                        plane_state = to_rcar_du_plane_state(plane->plane.state);
                        used_planes |= rcar_du_plane_hwmask(plane_state);
                }

                group_free_planes[index] = 0xff & ~used_planes;
                groups &= ~(1 << index);
        }

        /* Reallocate hardware planes for each plane that needs it. */
        for (i = 0; i < dev->mode_config.num_total_plane; ++i) {
                struct rcar_du_plane_state *plane_state;
                struct rcar_du_plane *plane;
                int idx;

                if (!state->planes[i])
                        continue;

                plane = to_rcar_plane(state->planes[i]);
                plane_state = to_rcar_du_plane_state(state->plane_states[i]);

                /* Skip planes that are being disabled or don't need to be
                 * reallocated.
                 */
                if (!plane_state->format ||
                    !rcar_du_plane_needs_realloc(plane, plane_state))
                        continue;

                idx = rcar_du_plane_hwalloc(plane_state->format->planes,
                                        group_free_planes[plane->group->index]);
                if (idx < 0) {
                        dev_dbg(rcdu->dev, "%s: no available hardware plane\n",
                                __func__);
                        return idx;
                }

                plane_state->hwindex = idx;

                group_free_planes[plane->group->index] &=
                        ~rcar_du_plane_hwmask(plane_state);
        }

        return 0;
}

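/*
 * Commit context. Asynchronous commits are completed from a workqueue through
 * the embedded work item; crtcs is the bitmask of CRTCs affected by the
 * commit.
 */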
struct rcar_du_commit {
        struct work_struct work;
        struct drm_device *dev;
        struct drm_atomic_state *state;
        u32 crtcs;
};

static void rcar_du_atomic_complete(struct rcar_du_commit *commit)
{
        struct drm_device *dev = commit->dev;
        struct rcar_du_device *rcdu = dev->dev_private;
        struct drm_atomic_state *old_state = commit->state;

        /* Apply the atomic update. */
        drm_atomic_helper_commit_modeset_disables(dev, old_state);
        drm_atomic_helper_commit_modeset_enables(dev, old_state);
        drm_atomic_helper_commit_planes(dev, old_state);

        drm_atomic_helper_wait_for_vblanks(dev, old_state);

        drm_atomic_helper_cleanup_planes(dev, old_state);

        drm_atomic_state_free(old_state);

        /* Complete the commit, wake up any waiter. */
        spin_lock(&rcdu->commit.wait.lock);
        rcdu->commit.pending &= ~commit->crtcs;
        wake_up_all_locked(&rcdu->commit.wait);
        spin_unlock(&rcdu->commit.wait.lock);

        kfree(commit);
}

static void rcar_du_atomic_work(struct work_struct *work)
{
        struct rcar_du_commit *commit =
                container_of(work, struct rcar_du_commit, work);

        rcar_du_atomic_complete(commit);
}

static int rcar_du_atomic_commit(struct drm_device *dev,
                                 struct drm_atomic_state *state, bool async)
{
        struct rcar_du_device *rcdu = dev->dev_private;
        struct rcar_du_commit *commit;
        unsigned int i;
        int ret;

        ret = drm_atomic_helper_prepare_planes(dev, state);
        if (ret)
                return ret;

        /* Allocate the commit object. */
        commit = kzalloc(sizeof(*commit), GFP_KERNEL);
        if (commit == NULL)
                return -ENOMEM;

        INIT_WORK(&commit->work, rcar_du_atomic_work);
        commit->dev = dev;
        commit->state = state;

        /* Wait until all affected CRTCs have completed previous commits and
         * mark them as pending.
         */
        for (i = 0; i < dev->mode_config.num_crtc; ++i) {
                if (state->crtcs[i])
                        commit->crtcs |= 1 << drm_crtc_index(state->crtcs[i]);
        }

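        /* The pending CRTCs bitmask is protected by the wait queue lock, which
         * wait_event_interruptible_locked() drops while sleeping and reacquires
         * before checking the condition.
         */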
        spin_lock(&rcdu->commit.wait.lock);
        ret = wait_event_interruptible_locked(rcdu->commit.wait,
                        !(rcdu->commit.pending & commit->crtcs));
        if (ret == 0)
                rcdu->commit.pending |= commit->crtcs;
        spin_unlock(&rcdu->commit.wait.lock);

        if (ret) {
                kfree(commit);
                return ret;
        }

        /* Swap the state, this is the point of no return. */
        drm_atomic_helper_swap_state(dev, state);

        if (async)
                schedule_work(&commit->work);
        else
                rcar_du_atomic_complete(commit);

        return 0;
}

/* -----------------------------------------------------------------------------
 * Initialization
 */

static const struct drm_mode_config_funcs rcar_du_mode_config_funcs = {
        .fb_create = rcar_du_fb_create,
        .output_poll_changed = rcar_du_output_poll_changed,
        .atomic_check = rcar_du_atomic_check,
        .atomic_commit = rcar_du_atomic_commit,
};

static int rcar_du_encoders_init_one(struct rcar_du_device *rcdu,
                                     enum rcar_du_output output,
                                     struct of_endpoint *ep)
{
        static const struct {
                const char *compatible;
                enum rcar_du_encoder_type type;
        } encoders[] = {
                { "adi,adv7123", RCAR_DU_ENCODER_VGA },
                { "adi,adv7511w", RCAR_DU_ENCODER_HDMI },
                { "thine,thc63lvdm83d", RCAR_DU_ENCODER_LVDS },
        };

        enum rcar_du_encoder_type enc_type = RCAR_DU_ENCODER_NONE;
        struct device_node *connector = NULL;
        struct device_node *encoder = NULL;
        struct device_node *ep_node = NULL;
        struct device_node *entity_ep_node;
        struct device_node *entity;
        int ret;

        /*
         * Locate the connected entity and infer its type from the number of
         * endpoints.
         */
        entity = of_graph_get_remote_port_parent(ep->local_node);
        if (!entity) {
                dev_dbg(rcdu->dev, "unconnected endpoint %s, skipping\n",
                        ep->local_node->full_name);
                return 0;
        }

        entity_ep_node = of_parse_phandle(ep->local_node, "remote-endpoint", 0);

        for_each_endpoint_of_node(entity, ep_node) {
                if (ep_node == entity_ep_node)
                        continue;

                /*
                 * We've found one endpoint other than the input, this must
                 * be an encoder. Locate the connector.
                 */
                encoder = entity;
                connector = of_graph_get_remote_port_parent(ep_node);
                of_node_put(ep_node);

                if (!connector) {
                        dev_warn(rcdu->dev,
                                 "no connector for encoder %s, skipping\n",
                                 encoder->full_name);
                        of_node_put(entity_ep_node);
                        of_node_put(encoder);
                        return 0;
                }

                break;
        }

        of_node_put(entity_ep_node);

        if (encoder) {
                /*
                 * If an encoder has been found, get its type based on its
                 * compatible string.
                 */
                unsigned int i;

                for (i = 0; i < ARRAY_SIZE(encoders); ++i) {
                        if (of_device_is_compatible(encoder,
                                                    encoders[i].compatible)) {
                                enc_type = encoders[i].type;
                                break;
                        }
                }

                if (i == ARRAY_SIZE(encoders)) {
                        dev_warn(rcdu->dev,
                                 "unknown encoder type for %s, skipping\n",
                                 encoder->full_name);
                        of_node_put(encoder);
                        of_node_put(connector);
                        return 0;
                }
        } else {
                /*
                 * If no encoder has been found the entity must be the
                 * connector.
                 */
                connector = entity;
        }

        ret = rcar_du_encoder_init(rcdu, enc_type, output, encoder, connector);
        of_node_put(encoder);
        of_node_put(connector);

        return ret < 0 ? ret : 1;
}

static int rcar_du_encoders_init(struct rcar_du_device *rcdu)
{
        struct device_node *np = rcdu->dev->of_node;
        struct device_node *ep_node;
        unsigned int num_encoders = 0;

        /*
         * Iterate over the endpoints and create one encoder for each output
         * pipeline.
         */
        for_each_endpoint_of_node(np, ep_node) {
                enum rcar_du_output output;
                struct of_endpoint ep;
                unsigned int i;
                int ret;

                ret = of_graph_parse_endpoint(ep_node, &ep);
                if (ret < 0) {
                        of_node_put(ep_node);
                        return ret;
                }

                /* Find the output route corresponding to the port number. */
                for (i = 0; i < RCAR_DU_OUTPUT_MAX; ++i) {
                        if (rcdu->info->routes[i].possible_crtcs &&
                            rcdu->info->routes[i].port == ep.port) {
                                output = i;
                                break;
                        }
                }

                if (i == RCAR_DU_OUTPUT_MAX) {
                        dev_warn(rcdu->dev,
                                 "port %u references nonexistent output, skipping\n",
                                 ep.port);
                        continue;
                }

                /* Process the output pipeline. */
                ret = rcar_du_encoders_init_one(rcdu, output, &ep);
                if (ret < 0) {
                        if (ret == -EPROBE_DEFER) {
                                of_node_put(ep_node);
                                return ret;
                        }

                        dev_info(rcdu->dev,
                                 "encoder initialization failed, skipping\n");
                        continue;
                }

                num_encoders += ret;
        }

        return num_encoders;
}

int rcar_du_modeset_init(struct rcar_du_device *rcdu)
{
        static const unsigned int mmio_offsets[] = {
                DU0_REG_OFFSET, DU2_REG_OFFSET
        };

        struct drm_device *dev = rcdu->ddev;
        struct drm_encoder *encoder;
        struct drm_fbdev_cma *fbdev;
        unsigned int num_encoders;
        unsigned int num_groups;
        unsigned int i;
        int ret;

        drm_mode_config_init(dev);

        dev->mode_config.min_width = 0;
        dev->mode_config.min_height = 0;
        dev->mode_config.max_width = 4095;
        dev->mode_config.max_height = 2047;
        dev->mode_config.funcs = &rcar_du_mode_config_funcs;

        rcdu->num_crtcs = rcdu->info->num_crtcs;

        /* Initialize the groups. */
        num_groups = DIV_ROUND_UP(rcdu->num_crtcs, 2);

        for (i = 0; i < num_groups; ++i) {
                struct rcar_du_group *rgrp = &rcdu->groups[i];

                mutex_init(&rgrp->lock);

                rgrp->dev = rcdu;
                rgrp->mmio_offset = mmio_offsets[i];
                rgrp->index = i;

                ret = rcar_du_planes_init(rgrp);
                if (ret < 0)
                        return ret;
        }

        /* Create the CRTCs. */
        for (i = 0; i < rcdu->num_crtcs; ++i) {
                struct rcar_du_group *rgrp = &rcdu->groups[i / 2];

                ret = rcar_du_crtc_create(rgrp, i);
                if (ret < 0)
                        return ret;
        }

        /* Initialize the encoders. */
        ret = rcar_du_lvdsenc_init(rcdu);
        if (ret < 0)
                return ret;

        ret = rcar_du_encoders_init(rcdu);
        if (ret < 0)
                return ret;

        if (ret == 0) {
                dev_err(rcdu->dev, "error: no encoder could be initialized\n");
                return -EINVAL;
        }

        num_encoders = ret;

        /* Set the possible CRTCs and possible clones. There's always at least
         * one way for all encoders to clone each other, so set all bits in the
         * possible clones field.
         */
        list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
                struct rcar_du_encoder *renc = to_rcar_encoder(encoder);
                const struct rcar_du_output_routing *route =
                        &rcdu->info->routes[renc->output];

                encoder->possible_crtcs = route->possible_crtcs;
                encoder->possible_clones = (1 << num_encoders) - 1;
        }

        drm_mode_config_reset(dev);

        drm_kms_helper_poll_init(dev);

        if (dev->mode_config.num_connector) {
                fbdev = drm_fbdev_cma_init(dev, 32, dev->mode_config.num_crtc,
                                           dev->mode_config.num_connector);
                if (IS_ERR(fbdev))
                        return PTR_ERR(fbdev);

                rcdu->fbdev = fbdev;
        } else {
                dev_info(rcdu->dev,
                         "no connector found, disabling fbdev emulation\n");
        }

        return 0;
}