These changes are the raw update to the linux-4.4.6-rt14 kernel sources.
[kvmfornfv.git] / kernel / drivers / gpu / drm / nouveau / nvkm / subdev / clk / mcp77.c
1 /*
2  * Copyright 2012 Red Hat Inc.
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice shall be included in
12  * all copies or substantial portions of the Software.
13  *
14  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
17  * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20  * OTHER DEALINGS IN THE SOFTWARE.
21  *
22  * Authors: Ben Skeggs
23  */
24 #define mcp77_clk(p) container_of((p), struct mcp77_clk, base)
25 #include "gt215.h"
26 #include "pll.h"
27
28 #include <subdev/bios.h>
29 #include <subdev/bios/pll.h>
30 #include <subdev/timer.h>
31
/* Per-device clock state for MCP77/MCP79 (nv_clk).  The calc() hook fills
 * these fields in and prog() writes them to hardware. */
struct mcp77_clk {
	struct nvkm_clk base;
	enum nv_clk_src csrc, ssrc, vsrc;	/* chosen core/shader/vdec sources */
	u32 cctrl, sctrl;	/* PLL control words (written to 0x4028 / 0x4020) */
	u32 ccoef, scoef;	/* PLL N/M coefficients (written to 0x402c / 0x4024) */
	u32 cpost, spost;	/* post dividers (written to 0x4040 / 0x4070) */
	u32 vdiv;		/* vdec divider (written to 0x4600) */
};
40
41 static u32
42 read_div(struct mcp77_clk *clk)
43 {
44         struct nvkm_device *device = clk->base.subdev.device;
45         return nvkm_rd32(device, 0x004600);
46 }
47
/* Read back the current output frequency of the PLL whose control register
 * is at @base (0x4020 = SPLL, 0x4028 = NVPLL).  Returns 0 when the PLL is
 * disabled (bit 31 of ctrl clear) or M is zero.
 *
 * Note the asymmetry: for 0x4020 the post-divider field is a shift amount
 * (power of two), while for 0x4028 it is a plain integer divider — this
 * matches the NVPOST comment in mcp77_clk_calc().
 */
static u32
read_pll(struct mcp77_clk *clk, u32 base)
{
	struct nvkm_device *device = clk->base.subdev.device;
	u32 ctrl = nvkm_rd32(device, base + 0);
	u32 coef = nvkm_rd32(device, base + 4);
	u32 ref = nvkm_clk_read(&clk->base, nv_clk_src_href);
	u32 post_div = 0;
	u32 clock = 0;
	int N1, M1;

	switch (base){
	case 0x4020:
		/* shift-style post divider, stored in 0x4070 bits 19:16 */
		post_div = 1 << ((nvkm_rd32(device, 0x4070) & 0x000f0000) >> 16);
		break;
	case 0x4028:
		/* integer post divider, stored in 0x4040 bits 19:16 */
		post_div = (nvkm_rd32(device, 0x4040) & 0x000f0000) >> 16;
		break;
	default:
		/* NOTE(review): post_div stays 0 here; an enabled PLL at any
		 * other base would divide by zero — only ever called with
		 * 0x4020/0x4028 in this file. */
		break;
	}

	N1 = (coef & 0x0000ff00) >> 8;
	M1 = (coef & 0x000000ff);
	if ((ctrl & 0x80000000) && M1) {
		clock = ref * N1 / M1;
		clock = clock / post_div;
	}

	return clock;
}
79
/* Report the current rate (kHz) of clock source @src, decoded from the
 * clock-source mux at 0x00c054 and the PLL registers.  Returns 0 for
 * unknown, parked, or unimplemented sources (e.g. mem).
 */
static int
mcp77_clk_read(struct nvkm_clk *base, enum nv_clk_src src)
{
	struct mcp77_clk *clk = mcp77_clk(base);
	struct nvkm_subdev *subdev = &clk->base.subdev;
	struct nvkm_device *device = subdev->device;
	u32 mast = nvkm_rd32(device, 0x00c054);	/* clock-source mux */
	u32 P = 0;				/* post divider (right shift) */

	switch (src) {
	case nv_clk_src_crystal:
		return device->crystal;
	case nv_clk_src_href:
		return 100000; /* PCIE reference clock */
	case nv_clk_src_hclkm4:
		/* href * 4 */
		return nvkm_clk_read(&clk->base, nv_clk_src_href) * 4;
	case nv_clk_src_hclkm2d3:
		/* href * 2 / 3 */
		return nvkm_clk_read(&clk->base, nv_clk_src_href) * 2 / 3;
	case nv_clk_src_host:
		switch (mast & 0x000c0000) {
		case 0x00000000: return nvkm_clk_read(&clk->base, nv_clk_src_hclkm2d3);
		case 0x00040000: break;	/* unhandled mux setting: report unknown */
		case 0x00080000: return nvkm_clk_read(&clk->base, nv_clk_src_hclkm4);
		case 0x000c0000: return nvkm_clk_read(&clk->base, nv_clk_src_cclk);
		}
		break;
	case nv_clk_src_core:
		/* post divider from the NVPLL control register */
		P = (nvkm_rd32(device, 0x004028) & 0x00070000) >> 16;

		switch (mast & 0x00000003) {
		case 0x00000000: return nvkm_clk_read(&clk->base, nv_clk_src_crystal) >> P;
		case 0x00000001: return 0;
		case 0x00000002: return nvkm_clk_read(&clk->base, nv_clk_src_hclkm4) >> P;
		case 0x00000003: return read_pll(clk, 0x004028) >> P;
		}
		break;
	case nv_clk_src_cclk:
		/* cclk follows core unless the mux selects an alternate path */
		if ((mast & 0x03000000) != 0x03000000)
			return nvkm_clk_read(&clk->base, nv_clk_src_core);

		if ((mast & 0x00000200) == 0x00000000)
			return nvkm_clk_read(&clk->base, nv_clk_src_core);

		switch (mast & 0x00000c00) {
		case 0x00000000: return nvkm_clk_read(&clk->base, nv_clk_src_href);
		case 0x00000400: return nvkm_clk_read(&clk->base, nv_clk_src_hclkm4);
		case 0x00000800: return nvkm_clk_read(&clk->base, nv_clk_src_hclkm2d3);
		default: return 0;
		}
	case nv_clk_src_shader:
		/* post divider from the SPLL control register */
		P = (nvkm_rd32(device, 0x004020) & 0x00070000) >> 16;
		switch (mast & 0x00000030) {
		case 0x00000000:
			if (mast & 0x00000040)
				return nvkm_clk_read(&clk->base, nv_clk_src_href) >> P;
			return nvkm_clk_read(&clk->base, nv_clk_src_crystal) >> P;
		case 0x00000010: break;	/* unhandled mux setting: report unknown */
		case 0x00000020: return read_pll(clk, 0x004028) >> P; /* shader off NVPLL */
		case 0x00000030: return read_pll(clk, 0x004020) >> P; /* shader off SPLL */
		}
		break;
	case nv_clk_src_mem:
		/* memory reclocking not supported on this chipset */
		return 0;
		break;
	case nv_clk_src_vdec:
		P = (read_div(clk) & 0x00000700) >> 8;

		switch (mast & 0x00400000) {
		case 0x00400000:
			/* vdec derived from core clock */
			return nvkm_clk_read(&clk->base, nv_clk_src_core) >> P;
			break;
		default:
			/* fixed 500 MHz source */
			return 500000 >> P;
			break;
		}
		break;
	default:
		break;
	}

	nvkm_debug(subdev, "unknown clock source %d %08x\n", src, mast);
	return 0;
}
163
164 static u32
165 calc_pll(struct mcp77_clk *clk, u32 reg,
166          u32 clock, int *N, int *M, int *P)
167 {
168         struct nvkm_subdev *subdev = &clk->base.subdev;
169         struct nvbios_pll pll;
170         int ret;
171
172         ret = nvbios_pll_parse(subdev->device->bios, reg, &pll);
173         if (ret)
174                 return 0;
175
176         pll.vco2.max_freq = 0;
177         pll.refclk = nvkm_clk_read(&clk->base, nv_clk_src_href);
178         if (!pll.refclk)
179                 return 0;
180
181         return nv04_pll_calc(subdev, &pll, clock, N, M, NULL, NULL, P);
182 }
183
/* Find the power-of-two divider (0..7) of @src closest to @target.
 * Stores the shift amount in *div and returns the resulting frequency,
 * choosing whichever of the two bracketing values is nearer to @target.
 */
static inline u32
calc_P(u32 src, u32 target, int *div)
{
	u32 below = src;	/* first src >> *div that is <= target */
	u32 above = src;	/* one shift less, i.e. nearest value above */

	for (*div = 0; *div <= 7; (*div)++) {
		if (below <= target) {
			above = *div ? below << 1 : below;
			break;
		}
		below >>= 1;
	}

	if (target - below <= above - target)
		return below;

	/* the larger value is closer: undo one shift */
	(*div)--;
	return above;
}
201
/* Pick clock sources and PLL settings for the requested cstate.  For each
 * of core (cclk), shader (sclk) and vdec, compare the nearest divided
 * reference against a PLL solution and keep whichever lands closer to the
 * target; results are stashed in *clk for mcp77_clk_prog() to write out.
 * Always returns 0.
 */
static int
mcp77_clk_calc(struct nvkm_clk *base, struct nvkm_cstate *cstate)
{
	struct mcp77_clk *clk = mcp77_clk(base);
	const int shader = cstate->domain[nv_clk_src_shader];
	const int core = cstate->domain[nv_clk_src_core];
	const int vdec = cstate->domain[nv_clk_src_vdec];
	struct nvkm_subdev *subdev = &clk->base.subdev;
	u32 out = 0, clock = 0;
	int N, M, P1, P2 = 0;
	int divs = 0;

	/* cclk: find suitable source, disable PLL if we can */
	if (core < nvkm_clk_read(&clk->base, nv_clk_src_hclkm4))
		out = calc_P(nvkm_clk_read(&clk->base, nv_clk_src_hclkm4), core, &divs);

	/* Calculate clock * 2, so shader clock can use it too */
	clock = calc_pll(clk, 0x4028, (core << 1), &N, &M, &P1);

	if (abs(core - out) <= abs(core - (clock >> 1))) {
		/* divided hclkm4 is at least as close: skip the PLL */
		clk->csrc = nv_clk_src_hclkm4;
		clk->cctrl = divs << 16;
	} else {
		/* NVCTRL is actually used _after_ NVPOST, and after what we
		 * call NVPLL. To make matters worse, NVPOST is an integer
		 * divider instead of a right-shift number. */
		if(P1 > 2) {
			P2 = P1 - 2;
			P1 = 2;
		}

		clk->csrc = nv_clk_src_core;
		clk->ccoef = (N << 8) | M;

		clk->cctrl = (P2 + 1) << 16;
		clk->cpost = (1 << P1) << 16;
	}

	/* sclk: nvpll + divisor, href or spll */
	out = 0;
	if (shader == nvkm_clk_read(&clk->base, nv_clk_src_href)) {
		clk->ssrc = nv_clk_src_href;
	} else {
		clock = calc_pll(clk, 0x4020, shader, &N, &M, &P1);
		/* NVPLL runs at core*2, so shader may reuse it via a divider */
		if (clk->csrc == nv_clk_src_core)
			out = calc_P((core << 1), shader, &divs);

		if (abs(shader - out) <=
		    abs(shader - clock) &&
		   (divs + P2) <= 7) {
			clk->ssrc = nv_clk_src_core;
			clk->sctrl = (divs + P2) << 16;
		} else {
			clk->ssrc = nv_clk_src_shader;
			clk->scoef = (N << 8) | M;
			clk->sctrl = P1 << 16;
		}
	}

	/* vclk: divided core clock vs divided fixed 500MHz source */
	out = calc_P(core, vdec, &divs);
	clock = calc_P(500000, vdec, &P1);
	if(abs(vdec - out) <= abs(vdec - clock)) {
		clk->vsrc = nv_clk_src_cclk;
		clk->vdiv = divs << 16;
	} else {
		clk->vsrc = nv_clk_src_vdec;
		clk->vdiv = P1 << 16;
	}

	/* Print strategy! */
	nvkm_debug(subdev, "nvpll: %08x %08x %08x\n",
		   clk->ccoef, clk->cpost, clk->cctrl);
	nvkm_debug(subdev, " spll: %08x %08x %08x\n",
		   clk->scoef, clk->spost, clk->sctrl);
	nvkm_debug(subdev, " vdiv: %08x\n", clk->vdiv);
	if (clk->csrc == nv_clk_src_hclkm4)
		nvkm_debug(subdev, "core: hrefm4\n");
	else
		nvkm_debug(subdev, "core: nvpll\n");

	if (clk->ssrc == nv_clk_src_hclkm4)
		nvkm_debug(subdev, "shader: hrefm4\n");
	else if (clk->ssrc == nv_clk_src_core)
		nvkm_debug(subdev, "shader: nvpll\n");
	else
		nvkm_debug(subdev, "shader: spll\n");

	if (clk->vsrc == nv_clk_src_hclkm4)
		nvkm_debug(subdev, "vdec: 500MHz\n");
	else
		nvkm_debug(subdev, "vdec: core\n");

	return 0;
}
297
/* Program the hardware with the settings computed by mcp77_clk_calc():
 * switch everything to safe (href-derived) clocks, reprogram the PLLs,
 * wait for lock, then switch the mux at 0xc054 to the new sources.
 * On any failure after gt215_clk_pre() we jump to "resume" so unused
 * PLLs/dividers are still disabled and gt215_clk_post() runs.
 */
static int
mcp77_clk_prog(struct nvkm_clk *base)
{
	struct mcp77_clk *clk = mcp77_clk(base);
	struct nvkm_subdev *subdev = &clk->base.subdev;
	struct nvkm_device *device = subdev->device;
	u32 pllmask = 0, mast;
	unsigned long flags;
	unsigned long *f = &flags;
	int ret = 0;

	ret = gt215_clk_pre(&clk->base, f);
	if (ret)
		goto out;

	/* First switch to safe clocks: href */
	mast = nvkm_mask(device, 0xc054, 0x03400e70, 0x03400640);
	mast &= ~0x00400e73;
	mast |= 0x03000000;

	switch (clk->csrc) {
	case nv_clk_src_hclkm4:
		/* PLL bypassed: only the post divider is needed */
		nvkm_mask(device, 0x4028, 0x00070000, clk->cctrl);
		mast |= 0x00000002;
		break;
	case nv_clk_src_core:
		nvkm_wr32(device, 0x402c, clk->ccoef);
		nvkm_wr32(device, 0x4028, 0x80000000 | clk->cctrl);
		nvkm_wr32(device, 0x4040, clk->cpost);
		pllmask |= (0x3 << 8);	/* NVPLL lock bits to wait on */
		mast |= 0x00000003;
		break;
	default:
		nvkm_warn(subdev, "Reclocking failed: unknown core clock\n");
		goto resume;
	}

	switch (clk->ssrc) {
	case nv_clk_src_href:
		nvkm_mask(device, 0x4020, 0x00070000, 0x00000000);
		/* mast |= 0x00000000; */
		break;
	case nv_clk_src_core:
		/* shader derived from NVPLL via divider */
		nvkm_mask(device, 0x4020, 0x00070000, clk->sctrl);
		mast |= 0x00000020;
		break;
	case nv_clk_src_shader:
		nvkm_wr32(device, 0x4024, clk->scoef);
		nvkm_wr32(device, 0x4020, 0x80000000 | clk->sctrl);
		nvkm_wr32(device, 0x4070, clk->spost);
		pllmask |= (0x3 << 12);	/* SPLL lock bits to wait on */
		mast |= 0x00000030;
		break;
	default:
		nvkm_warn(subdev, "Reclocking failed: unknown sclk clock\n");
		goto resume;
	}

	/* wait up to 2ms for all programmed PLLs to report lock */
	if (nvkm_msec(device, 2000,
		u32 tmp = nvkm_rd32(device, 0x004080) & pllmask;
		if (tmp == pllmask)
			break;
	) < 0)
		goto resume;

	switch (clk->vsrc) {
	case nv_clk_src_cclk:
		mast |= 0x00400000;
		/* fallthrough: the divider is written for either source */
	default:
		nvkm_wr32(device, 0x4600, clk->vdiv);
	}

	/* commit the new source selection */
	nvkm_wr32(device, 0xc054, mast);

resume:
	/* Disable some PLLs and dividers when unused */
	if (clk->csrc != nv_clk_src_core) {
		nvkm_wr32(device, 0x4040, 0x00000000);
		nvkm_mask(device, 0x4028, 0x80000000, 0x00000000);
	}

	if (clk->ssrc != nv_clk_src_shader) {
		nvkm_wr32(device, 0x4070, 0x00000000);
		nvkm_mask(device, 0x4020, 0x80000000, 0x00000000);
	}

out:
	/* NOTE(review): on -EBUSY the pre() path appears not to hold the
	 * lock, so pass NULL flags to post() — confirm against gt215.c */
	if (ret == -EBUSY)
		f = NULL;

	gt215_clk_post(&clk->base, f);
	return ret;
}
391
/* No cleanup required after reclocking on this chipset; hook kept so the
 * nvkm_clk_func table is fully populated. */
static void
mcp77_clk_tidy(struct nvkm_clk *base)
{
}
396
/* Hook table and reclockable domains for the MCP77/MCP79 clock subdev.
 * Frequencies are in units of 1000 (kHz) for core/shader/vdec. */
static const struct nvkm_clk_func
mcp77_clk = {
	.read = mcp77_clk_read,
	.calc = mcp77_clk_calc,
	.prog = mcp77_clk_prog,
	.tidy = mcp77_clk_tidy,
	.domains = {
		{ nv_clk_src_crystal, 0xff },
		{ nv_clk_src_href   , 0xff },
		{ nv_clk_src_core   , 0xff, 0, "core", 1000 },
		{ nv_clk_src_shader , 0xff, 0, "shader", 1000 },
		{ nv_clk_src_vdec   , 0xff, 0, "vdec", 1000 },
		{ nv_clk_src_max }
	}
};
412
413 int
414 mcp77_clk_new(struct nvkm_device *device, int index, struct nvkm_clk **pclk)
415 {
416         struct mcp77_clk *clk;
417
418         if (!(clk = kzalloc(sizeof(*clk), GFP_KERNEL)))
419                 return -ENOMEM;
420         *pclk = &clk->base;
421
422         return nvkm_clk_ctor(&mcp77_clk, device, index, true, &clk->base);
423 }