2 * Copyright 2012 Red Hat Inc.
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
27 #include <core/device.h>
28 #include <subdev/bios.h>
29 #include <subdev/bios/pll.h>
30 #include <subdev/timer.h>
32 struct mcp77_clk_priv {
/* Clock sources chosen by mcp77_clk_calc() for the core (csrc),
 * shader (ssrc) and video decoder (vsrc) domains, consumed later by
 * mcp77_clk_prog().  (Remaining members — cctrl/ccoef/cpost,
 * sctrl/scoef/spost, vdiv — are elided in this view.) */
34 	enum nv_clk_src csrc, ssrc, vsrc;
/* Read the vdec clock divider register; callers extract the post-shift
 * from bits 8..10 (see mcp77_clk_read, nv_clk_src_vdec case). */
42 read_div(struct nvkm_clk *clk)
44 return nv_rd32(clk, 0x004600);
/* Read back the current output frequency of the PLL at register 'base'
 * (0x4020 = shader SPLL, 0x4028 = core NVPLL): href * N1 / M1 /
 * post_div, valid only while the enable bit (ctrl bit 31) is set and
 * M1 is non-zero.  The post-divider lives in a separate register:
 * 0x4070 holds a power-of-two shift, 0x4040 an integer divisor — the
 * selection logic between them is elided in this view. */
48 read_pll(struct nvkm_clk *clk, u32 base)
50 u32 ctrl = nv_rd32(clk, base + 0);
51 u32 coef = nv_rd32(clk, base + 4);
/* Both PLLs are referenced from the host reference clock. */
52 u32 ref = clk->read(clk, nv_clk_src_href);
/* Shader post-divider: 0x4070 bits 16..19 are a shift count. */
59 post_div = 1 << ((nv_rd32(clk, 0x4070) & 0x000f0000) >> 16);
/* Core post-divider: 0x4040 bits 16..19 are an integer divisor
 * (mcp77_clk_calc stores (1 << P1) there). */
62 post_div = (nv_rd32(clk, 0x4040) & 0x000f0000) >> 16;
/* Feedback (N1) and reference (M1) dividers from the coefficient reg. */
68 N1 = (coef & 0x0000ff00) >> 8;
69 M1 = (coef & 0x000000ff);
70 if ((ctrl & 0x80000000) && M1) {
71 clock = ref * N1 / M1;
72 clock = clock / post_div;
/* Report the current frequency of clock source 'src', decoding the
 * hardware mux state from the clock source-select register 0x00c054. */
79 mcp77_clk_read(struct nvkm_clk *clk, enum nv_clk_src src)
81 struct mcp77_clk_priv *priv = (void *)clk;
82 u32 mast = nv_rd32(clk, 0x00c054);
86 case nv_clk_src_crystal:
87 return nv_device(priv)->crystal;
89 return 100000; /* PCIE reference clock */
/* Fixed ratios derived from the host reference clock. */
90 case nv_clk_src_hclkm4:
91 return clk->read(clk, nv_clk_src_href) * 4;
92 case nv_clk_src_hclkm2d3:
93 return clk->read(clk, nv_clk_src_href) * 2 / 3;
/* mast bits 18:19 select the source for this (elided) case label. */
95 switch (mast & 0x000c0000) {
96 case 0x00000000: return clk->read(clk, nv_clk_src_hclkm2d3);
97 case 0x00040000: break;
98 case 0x00080000: return clk->read(clk, nv_clk_src_hclkm4);
99 case 0x000c0000: return clk->read(clk, nv_clk_src_cclk);
/* Core clock: post-shift P from NVPLL ctrl bits 16:18, source mux
 * from mast bits 0:1 (crystal / off / hclkm4 / NVPLL). */
102 case nv_clk_src_core:
103 P = (nv_rd32(clk, 0x004028) & 0x00070000) >> 16;
105 switch (mast & 0x00000003) {
106 case 0x00000000: return clk->read(clk, nv_clk_src_crystal) >> P;
107 case 0x00000001: return 0;
108 case 0x00000002: return clk->read(clk, nv_clk_src_hclkm4) >> P;
109 case 0x00000003: return read_pll(clk, 0x004028) >> P;
/* cclk follows the core clock unless mast redirects it elsewhere. */
112 case nv_clk_src_cclk:
113 if ((mast & 0x03000000) != 0x03000000)
114 return clk->read(clk, nv_clk_src_core);
116 if ((mast & 0x00000200) == 0x00000000)
117 return clk->read(clk, nv_clk_src_core);
119 switch (mast & 0x00000c00) {
120 case 0x00000000: return clk->read(clk, nv_clk_src_href);
121 case 0x00000400: return clk->read(clk, nv_clk_src_hclkm4);
122 case 0x00000800: return clk->read(clk, nv_clk_src_hclkm2d3);
/* Shader clock: post-shift P from SPLL ctrl bits 16:18, source mux
 * from mast bits 4:5; 0x20 borrows the core NVPLL, 0x30 uses SPLL. */
125 case nv_clk_src_shader:
126 P = (nv_rd32(clk, 0x004020) & 0x00070000) >> 16;
127 switch (mast & 0x00000030) {
129 if (mast & 0x00000040)
130 return clk->read(clk, nv_clk_src_href) >> P;
131 return clk->read(clk, nv_clk_src_crystal) >> P;
132 case 0x00000010: break;
133 case 0x00000020: return read_pll(clk, 0x004028) >> P;
134 case 0x00000030: return read_pll(clk, 0x004020) >> P;
/* Video decoder clock: post-shift from reg 0x4600 bits 8:10. */
140 case nv_clk_src_vdec:
141 P = (read_div(clk) & 0x00000700) >> 8;
143 switch (mast & 0x00400000) {
145 return clk->read(clk, nv_clk_src_core) >> P;
/* Fallthrough for sources this chipset cannot report. */
156 nv_debug(priv, "unknown clock source %d 0x%08x\n", src, mast);
/* Compute PLL coefficients (N, M and log2 post-divider P) bringing the
 * PLL at 'reg' as close as possible to 'clock'.  Limits come from the
 * VBIOS PLL-limits table for that register; the second VCO stage is
 * disabled and the reference is forced to the host reference clock. */
161 calc_pll(struct mcp77_clk_priv *priv, u32 reg,
162 u32 clock, int *N, int *M, int *P)
164 struct nvkm_bios *bios = nvkm_bios(priv);
165 struct nvbios_pll pll;
166 struct nvkm_clk *clk = &priv->base;
169 ret = nvbios_pll_parse(bios, reg, &pll);
/* Single-stage operation only: zero the second VCO's frequency cap. */
173 pll.vco2.max_freq = 0;
174 pll.refclk = clk->read(clk, nv_clk_src_href);
178 return nv04_pll_calc(nv_subdev(priv), &pll, clock, N, M, NULL, NULL, P);
/* Find the power-of-two divider (0..7, returned via *div) that brings
 * 'src' closest to 'target'; presumably returns the resulting divided
 * frequency (return statement elided in this view — confirm). */
182 calc_P(u32 src, u32 target, int *div)
184 u32 clk0 = src, clk1 = src;
185 for (*div = 0; *div <= 7; (*div)++) {
186 if (clk0 <= target) {
187 clk1 = clk0 << (*div ? 1 : 0);
/* clk0/clk1 bracket the target; keep whichever is nearer. */
193 if (target - clk0 <= clk1 - target)
/* Decide how to reach the requested cstate frequencies for the core,
 * shader and vdec domains, recording the chosen sources, dividers and
 * PLL coefficients in 'priv' for mcp77_clk_prog() to apply. */
200 mcp77_clk_calc(struct nvkm_clk *clk, struct nvkm_cstate *cstate)
202 struct mcp77_clk_priv *priv = (void *)clk;
203 const int shader = cstate->domain[nv_clk_src_shader];
204 const int core = cstate->domain[nv_clk_src_core];
205 const int vdec = cstate->domain[nv_clk_src_vdec];
206 u32 out = 0, clock = 0;
207 int N, M, P1, P2 = 0;
210 /* cclk: find suitable source, disable PLL if we can */
211 if (core < clk->read(clk, nv_clk_src_hclkm4))
212 out = calc_P(clk->read(clk, nv_clk_src_hclkm4), core, &divs);
214 /* Calculate clock * 2, so shader clock can use it too */
215 clock = calc_pll(priv, 0x4028, (core << 1), &N, &M, &P1);
/* Use whichever gets closer: divided hclkm4 or halved NVPLL output. */
217 if (abs(core - out) <= abs(core - (clock >> 1))) {
218 priv->csrc = nv_clk_src_hclkm4;
219 priv->cctrl = divs << 16;
221 /* NVCTRL is actually used _after_ NVPOST, and after what we
222 * call NVPLL. To make matters worse, NVPOST is an integer
223 * divider instead of a right-shift number. */
/* P2 is the NVCTRL divider; its computation is elided in this view. */
229 priv->csrc = nv_clk_src_core;
230 priv->ccoef = (N << 8) | M;
232 priv->cctrl = (P2 + 1) << 16;
233 priv->cpost = (1 << P1) << 16;
236 /* sclk: nvpll + divisor, href or spll */
238 if (shader == clk->read(clk, nv_clk_src_href)) {
239 priv->ssrc = nv_clk_src_href;
241 clock = calc_pll(priv, 0x4020, shader, &N, &M, &P1);
/* If core runs off NVPLL, the shader may reuse it via a divider. */
242 if (priv->csrc == nv_clk_src_core)
243 out = calc_P((core << 1), shader, &divs);
245 if (abs(shader - out) <=
246 abs(shader - clock) &&
248 priv->ssrc = nv_clk_src_core;
249 priv->sctrl = (divs + P2) << 16;
251 priv->ssrc = nv_clk_src_shader;
252 priv->scoef = (N << 8) | M;
253 priv->sctrl = P1 << 16;
/* vdec: divided core clock versus divided fixed 500000 source. */
258 out = calc_P(core, vdec, &divs);
259 clock = calc_P(500000, vdec, &P1);
260 if(abs(vdec - out) <= abs(vdec - clock)) {
261 priv->vsrc = nv_clk_src_cclk;
262 priv->vdiv = divs << 16;
264 priv->vsrc = nv_clk_src_vdec;
265 priv->vdiv = P1 << 16;
268 /* Print strategy! */
269 nv_debug(priv, "nvpll: %08x %08x %08x\n",
270 priv->ccoef, priv->cpost, priv->cctrl);
271 nv_debug(priv, " spll: %08x %08x %08x\n",
272 priv->scoef, priv->spost, priv->sctrl);
273 nv_debug(priv, " vdiv: %08x\n", priv->vdiv);
274 if (priv->csrc == nv_clk_src_hclkm4)
275 nv_debug(priv, "core: hrefm4\n");
277 nv_debug(priv, "core: nvpll\n");
/* NOTE(review): ssrc is only ever set to href/core/shader above, and
 * vsrc only to cclk/vdec, yet both debug checks below compare against
 * nv_clk_src_hclkm4 — these branches look unreachable.  Confirm
 * against the full (unsampled) source before changing. */
279 if (priv->ssrc == nv_clk_src_hclkm4)
280 nv_debug(priv, "shader: hrefm4\n");
281 else if (priv->ssrc == nv_clk_src_core)
282 nv_debug(priv, "shader: nvpll\n");
284 nv_debug(priv, "shader: spll\n");
286 if (priv->vsrc == nv_clk_src_hclkm4)
287 nv_debug(priv, "vdec: 500MHz\n");
289 nv_debug(priv, "vdec: core\n");
/* Apply the strategy computed by mcp77_clk_calc() to the hardware,
 * first parking everything on safe href-derived clocks, then
 * reprogramming the PLLs, waiting for lock, and switching over. */
295 mcp77_clk_prog(struct nvkm_clk *clk)
297 struct mcp77_clk_priv *priv = (void *)clk;
298 u32 pllmask = 0, mast;
300 unsigned long *f = &flags;
/* Shared pre-reclock hook (pauses clients / takes locks upstream). */
303 ret = gt215_clk_pre(clk, f);
307 /* First switch to safe clocks: href */
308 mast = nv_mask(clk, 0xc054, 0x03400e70, 0x03400640);
/* Core: either a plain divider off hclkm4, or full NVPLL programming
 * (coefficients, enable bit, integer post-divider in 0x4040). */
312 switch (priv->csrc) {
313 case nv_clk_src_hclkm4:
314 nv_mask(clk, 0x4028, 0x00070000, priv->cctrl);
317 case nv_clk_src_core:
318 nv_wr32(clk, 0x402c, priv->ccoef);
319 nv_wr32(clk, 0x4028, 0x80000000 | priv->cctrl);
320 nv_wr32(clk, 0x4040, priv->cpost);
/* NVPLL lock bits to poll in 0x004080 below. */
321 pllmask |= (0x3 << 8);
325 nv_warn(priv,"Reclocking failed: unknown core clock\n");
/* Shader: href passthrough, divider off the core PLL, or SPLL. */
329 switch (priv->ssrc) {
330 case nv_clk_src_href:
331 nv_mask(clk, 0x4020, 0x00070000, 0x00000000);
332 /* mast |= 0x00000000; */
334 case nv_clk_src_core:
335 nv_mask(clk, 0x4020, 0x00070000, priv->sctrl);
338 case nv_clk_src_shader:
339 nv_wr32(clk, 0x4024, priv->scoef);
340 nv_wr32(clk, 0x4020, 0x80000000 | priv->sctrl);
341 nv_wr32(clk, 0x4070, priv->spost);
/* SPLL lock bits to poll in 0x004080 below. */
342 pllmask |= (0x3 << 12);
346 nv_warn(priv,"Reclocking failed: unknown sclk clock\n");
/* Wait for every PLL enabled above to report lock. */
350 if (!nv_wait(clk, 0x004080, pllmask, pllmask)) {
351 nv_warn(priv,"Reclocking failed: unstable PLLs\n");
/* vdec divider, then restore the updated source-select register. */
355 switch (priv->vsrc) {
356 case nv_clk_src_cclk:
359 nv_wr32(clk, 0x4600, priv->vdiv);
362 nv_wr32(clk, 0xc054, mast);
365 /* Disable some PLLs and dividers when unused */
366 if (priv->csrc != nv_clk_src_core) {
367 nv_wr32(clk, 0x4040, 0x00000000);
368 nv_mask(clk, 0x4028, 0x80000000, 0x00000000);
371 if (priv->ssrc != nv_clk_src_shader) {
372 nv_wr32(clk, 0x4070, 0x00000000);
373 nv_mask(clk, 0x4020, 0x80000000, 0x00000000);
/* Shared post-reclock hook: resumes whatever gt215_clk_pre paused. */
380 gt215_clk_post(clk, f);
/* Post-reclock tidy hook registered in mcp77_clk_ctor(); its body is
 * elided in this view. */
385 mcp77_clk_tidy(struct nvkm_clk *clk)
389 static struct nvkm_domain
/* Clock domains exposed by this chipset.  The trailing fields on the
 * core/shader/vdec entries are a user-visible name and a multiplier
 * (presumably a frequency granularity — confirm against the
 * struct nvkm_domain definition).  0xff appears to be a "no perflvl
 * field" sentinel — confirm likewise. */
391 { nv_clk_src_crystal, 0xff },
392 { nv_clk_src_href , 0xff },
393 { nv_clk_src_core , 0xff, 0, "core", 1000 },
394 { nv_clk_src_shader , 0xff, 0, "shader", 1000 },
395 { nv_clk_src_vdec , 0xff, 0, "vdec", 1000 },
/* Subdev constructor: allocate the base clock object with our domain
 * list and wire up the mcp77 read/calc/prog/tidy implementations. */
400 mcp77_clk_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
401 struct nvkm_oclass *oclass, void *data, u32 size,
402 struct nvkm_object **pobject)
404 struct mcp77_clk_priv *priv;
407 ret = nvkm_clk_create(parent, engine, oclass, mcp77_domains,
408 NULL, 0, true, &priv);
/* Publish the object before the (elided) error check on ret. */
409 *pobject = nv_object(priv);
413 priv->base.read = mcp77_clk_read;
414 priv->base.calc = mcp77_clk_calc;
415 priv->base.prog = mcp77_clk_prog;
416 priv->base.tidy = mcp77_clk_tidy;
/* nvkm object class for the clock subdev of chipset 0xaa (MCP77/79);
 * only the constructor is chip-specific — destruction, init and fini
 * reuse the generic clk helpers.  (Initializer continues past this
 * view.) */
421 mcp77_clk_oclass = &(struct nvkm_oclass) {
422 .handle = NV_SUBDEV(CLK, 0xaa),
423 .ofuncs = &(struct nvkm_ofuncs) {
424 .ctor = mcp77_clk_ctor,
425 .dtor = _nvkm_clk_dtor,
426 .init = _nvkm_clk_init,
427 .fini = _nvkm_clk_fini,