/*
 * Copyright 2013 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */
#include <core/notify.h>
#include <subdev/bios.h>
#include <subdev/bios/dcb.h>

#include <nvif/class.h>
#include <nvif/event.h>
#include <nvif/unpack.h>
37 nvkm_disp_vblank_ctor(struct nvkm_object *object, void *data, u32 size,
38 struct nvkm_notify *notify)
40 struct nvkm_disp *disp =
41 container_of(notify->event, typeof(*disp), vblank);
43 struct nvif_notify_head_req_v0 v0;
47 if (nvif_unpack(req->v0, 0, 0, false)) {
48 notify->size = sizeof(struct nvif_notify_head_rep_v0);
49 if (ret = -ENXIO, req->v0.head <= disp->vblank.index_nr) {
51 notify->index = req->v0.head;
60 nvkm_disp_vblank(struct nvkm_disp *disp, int head)
62 struct nvif_notify_head_rep_v0 rep = {};
63 nvkm_event_send(&disp->vblank, 1, head, &rep, sizeof(rep));
67 nvkm_disp_hpd_ctor(struct nvkm_object *object, void *data, u32 size,
68 struct nvkm_notify *notify)
70 struct nvkm_disp *disp =
71 container_of(notify->event, typeof(*disp), hpd);
73 struct nvif_notify_conn_req_v0 v0;
75 struct nvkm_output *outp;
78 if (nvif_unpack(req->v0, 0, 0, false)) {
79 notify->size = sizeof(struct nvif_notify_conn_rep_v0);
80 list_for_each_entry(outp, &disp->outp, head) {
81 if (ret = -ENXIO, outp->conn->index == req->v0.conn) {
82 if (ret = -ENODEV, outp->conn->hpd.event) {
83 notify->types = req->v0.mask;
84 notify->index = req->v0.conn;
95 static const struct nvkm_event_func
96 nvkm_disp_hpd_func = {
97 .ctor = nvkm_disp_hpd_ctor
101 nvkm_disp_ntfy(struct nvkm_object *object, u32 type, struct nvkm_event **event)
103 struct nvkm_disp *disp = (void *)object->engine;
105 case NV04_DISP_NTFY_VBLANK:
106 *event = &disp->vblank;
108 case NV04_DISP_NTFY_CONN:
118 _nvkm_disp_fini(struct nvkm_object *object, bool suspend)
120 struct nvkm_disp *disp = (void *)object;
121 struct nvkm_output *outp;
124 list_for_each_entry(outp, &disp->outp, head) {
125 ret = nv_ofuncs(outp)->fini(nv_object(outp), suspend);
130 return nvkm_engine_fini(&disp->base, suspend);
133 list_for_each_entry_continue_reverse(outp, &disp->outp, head) {
134 nv_ofuncs(outp)->init(nv_object(outp));
141 _nvkm_disp_init(struct nvkm_object *object)
143 struct nvkm_disp *disp = (void *)object;
144 struct nvkm_output *outp;
147 ret = nvkm_engine_init(&disp->base);
151 list_for_each_entry(outp, &disp->outp, head) {
152 ret = nv_ofuncs(outp)->init(nv_object(outp));
160 list_for_each_entry_continue_reverse(outp, &disp->outp, head) {
161 nv_ofuncs(outp)->fini(nv_object(outp), false);
168 _nvkm_disp_dtor(struct nvkm_object *object)
170 struct nvkm_disp *disp = (void *)object;
171 struct nvkm_output *outp, *outt;
173 nvkm_event_fini(&disp->vblank);
174 nvkm_event_fini(&disp->hpd);
176 if (disp->outp.next) {
177 list_for_each_entry_safe(outp, outt, &disp->outp, head) {
178 nvkm_object_ref(NULL, (struct nvkm_object **)&outp);
182 nvkm_engine_destroy(&disp->base);
186 nvkm_disp_create_(struct nvkm_object *parent, struct nvkm_object *engine,
187 struct nvkm_oclass *oclass, int heads, const char *intname,
188 const char *extname, int length, void **pobject)
190 struct nvkm_disp_impl *impl = (void *)oclass;
191 struct nvkm_bios *bios = nvkm_bios(parent);
192 struct nvkm_disp *disp;
193 struct nvkm_oclass **sclass;
194 struct nvkm_object *object;
195 struct dcb_output dcbE;
196 u8 hpd = 0, ver, hdr;
200 ret = nvkm_engine_create_(parent, engine, oclass, true, intname,
201 extname, length, pobject);
206 INIT_LIST_HEAD(&disp->outp);
208 /* create output objects for each display path in the vbios */
210 while ((data = dcb_outp_parse(bios, ++i, &ver, &hdr, &dcbE))) {
211 if (dcbE.type == DCB_OUTPUT_UNUSED)
213 if (dcbE.type == DCB_OUTPUT_EOL)
215 data = dcbE.location << 4 | dcbE.type;
217 oclass = nvkm_output_oclass;
219 while (sclass && sclass[0]) {
220 if (sclass[0]->handle == data) {
227 nvkm_object_ctor(*pobject, NULL, oclass, &dcbE, i, &object);
228 hpd = max(hpd, (u8)(dcbE.connector + 1));
231 ret = nvkm_event_init(&nvkm_disp_hpd_func, 3, hpd, &disp->hpd);
235 ret = nvkm_event_init(impl->vblank, 1, heads, &disp->vblank);