/*
 * Copyright 2013 Advanced Micro Devices, Inc.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * Authors: Christian König <christian.koenig@amd.com>
 */
#include <linux/firmware.h>

#include "radeon.h"
#include "radeon_asic.h"
#include "sid.h"	/* assumed register header for the VCE1 (TN/SI) registers */

#define VCE_V1_0_FW_SIZE	(256 * 1024)
#define VCE_V1_0_STACK_SIZE	(64 * 1024)
#define VCE_V1_0_DATA_SIZE	(7808 * (RADEON_MAX_VCE_HANDLES + 1))

/* signature header layout, inferred from the accesses in vce_v1_0_load_fw() */
struct vce_v1_0_fw_signature
{
	int32_t off;
	uint32_t len;
	int32_t num;
	struct {
		uint32_t chip_id;
		uint32_t keyselect;
		uint32_t nonce[4];
		uint32_t sigval[4];
	} val[8];
};
/**
 * vce_v1_0_get_rptr - get read pointer
 *
 * @rdev: radeon_device pointer
 * @ring: radeon_ring pointer
 *
 * Returns the current hardware read pointer
 */
uint32_t vce_v1_0_get_rptr(struct radeon_device *rdev,
			   struct radeon_ring *ring)
{
	if (ring->idx == TN_RING_TYPE_VCE1_INDEX)
		return RREG32(VCE_RB_RPTR);
	else
		return RREG32(VCE_RB_RPTR2);
}
/**
 * vce_v1_0_get_wptr - get write pointer
 *
 * @rdev: radeon_device pointer
 * @ring: radeon_ring pointer
 *
 * Returns the current hardware write pointer
 */
uint32_t vce_v1_0_get_wptr(struct radeon_device *rdev,
			   struct radeon_ring *ring)
{
	if (ring->idx == TN_RING_TYPE_VCE1_INDEX)
		return RREG32(VCE_RB_WPTR);
	else
		return RREG32(VCE_RB_WPTR2);
}
/**
 * vce_v1_0_set_wptr - set write pointer
 *
 * @rdev: radeon_device pointer
 * @ring: radeon_ring pointer
 *
 * Commits the write pointer to the hardware
 */
void vce_v1_0_set_wptr(struct radeon_device *rdev,
		       struct radeon_ring *ring)
{
	if (ring->idx == TN_RING_TYPE_VCE1_INDEX)
		WREG32(VCE_RB_WPTR, ring->wptr);
	else
		WREG32(VCE_RB_WPTR2, ring->wptr);
}
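
/**
 * vce_v1_0_enable_mgcg - enable/disable medium grain clock gating
 *
 * @rdev: radeon_device pointer
 * @enable: enable or disable MGCG
 *
 * Switch the VCE block between dynamic and static clock gating,
 * honoring the RADEON_CG_SUPPORT_VCE_MGCG flag.
 */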
void vce_v1_0_enable_mgcg(struct radeon_device *rdev, bool enable)
{
	u32 tmp;

	if (enable && (rdev->cg_flags & RADEON_CG_SUPPORT_VCE_MGCG)) {
		tmp = RREG32(VCE_CLOCK_GATING_A);
		tmp |= CGC_DYN_CLOCK_MODE;
		WREG32(VCE_CLOCK_GATING_A, tmp);

		/* UENC gating field masks below are assumed values */
		tmp = RREG32(VCE_UENC_CLOCK_GATING);
		tmp &= ~0x1ff000;
		tmp |= 0xff800000;
		WREG32(VCE_UENC_CLOCK_GATING, tmp);

		tmp = RREG32(VCE_UENC_REG_CLOCK_GATING);
		tmp &= ~0x3ff;
		WREG32(VCE_UENC_REG_CLOCK_GATING, tmp);
	} else {
		tmp = RREG32(VCE_CLOCK_GATING_A);
		tmp &= ~CGC_DYN_CLOCK_MODE;
		WREG32(VCE_CLOCK_GATING_A, tmp);

		tmp = RREG32(VCE_UENC_CLOCK_GATING);
		tmp |= 0x1ff000;
		tmp &= ~0xff800000;
		WREG32(VCE_UENC_CLOCK_GATING, tmp);

		tmp = RREG32(VCE_UENC_REG_CLOCK_GATING);
		tmp |= 0x3ff;
		WREG32(VCE_UENC_REG_CLOCK_GATING, tmp);
	}
}
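
/**
 * vce_v1_0_init_cg - initialize clock gating
 *
 * @rdev: radeon_device pointer
 *
 * Program the default clock gating state; called once the firmware
 * has been validated in vce_v1_0_resume().
 */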
static void vce_v1_0_init_cg(struct radeon_device *rdev)
{
	u32 tmp;

	tmp = RREG32(VCE_CLOCK_GATING_A);
	tmp |= CGC_DYN_CLOCK_MODE;
	WREG32(VCE_CLOCK_GATING_A, tmp);

	/* gating field masks below are assumed values */
	tmp = RREG32(VCE_CLOCK_GATING_B);
	tmp |= 0x1e;
	tmp &= ~0xe100e1;
	WREG32(VCE_CLOCK_GATING_B, tmp);

	tmp = RREG32(VCE_UENC_CLOCK_GATING);
	tmp &= ~0xff9ff000;
	WREG32(VCE_UENC_CLOCK_GATING, tmp);

	tmp = RREG32(VCE_UENC_REG_CLOCK_GATING);
	tmp &= ~0x3ff;
	WREG32(VCE_UENC_REG_CLOCK_GATING, tmp);
}
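
/**
 * vce_v1_0_load_fw - prepare the firmware image for the VCE BO
 *
 * @rdev: radeon_device pointer
 * @data: CPU pointer to the firmware area of the VCE BO
 *
 * Pick the signature entry matching the chip id, copy the firmware body
 * into the BO, patch in the nonce, image length and signature words around
 * it, and remember the key selection value for vce_v1_0_resume().
 */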
int vce_v1_0_load_fw(struct radeon_device *rdev, uint32_t *data)
{
	struct vce_v1_0_fw_signature *sign = (void*)rdev->vce_fw->data;
	uint32_t chip_id;
	int i;

	/* family-to-chip_id mapping below is assumed */
	switch (rdev->family) {
	case CHIP_TAHITI:
		chip_id = 0x01000014;
		break;
	case CHIP_VERDE:
		chip_id = 0x01000015;
		break;
	case CHIP_PITCAIRN:
	case CHIP_OLAND:
		chip_id = 0x01000016;
		break;
	case CHIP_ARUBA:
		chip_id = 0x01000017;
		break;
	default:
		return -EINVAL;
	}

	for (i = 0; i < le32_to_cpu(sign->num); ++i) {
		if (le32_to_cpu(sign->val[i].chip_id) == chip_id)
			break;
	}

	if (i == le32_to_cpu(sign->num))
		return -EINVAL;

	data += (256 - 64) / 4;
	data[0] = sign->val[i].nonce[0];
	data[1] = sign->val[i].nonce[1];
	data[2] = sign->val[i].nonce[2];
	data[3] = sign->val[i].nonce[3];
	data[4] = cpu_to_le32(le32_to_cpu(sign->len) + 64);

	memset(&data[5], 0, 44);
	memcpy(&data[16], &sign[1], rdev->vce_fw->size - sizeof(*sign));

	data += le32_to_cpu(data[4]) / 4;
	data[0] = sign->val[i].sigval[0];
	data[1] = sign->val[i].sigval[1];
	data[2] = sign->val[i].sigval[2];
	data[3] = sign->val[i].sigval[3];

	rdev->vce.keyselect = le32_to_cpu(sign->val[i].keyselect);

	return 0;
}
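
/**
 * vce_v1_0_bo_size - VCE buffer object size
 *
 * @rdev: radeon_device pointer
 *
 * Returns the combined firmware, stack and data size reserved for VCE.
 */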
unsigned vce_v1_0_bo_size(struct radeon_device *rdev)
{
	WARN_ON(VCE_V1_0_FW_SIZE < rdev->vce_fw->size);
	return VCE_V1_0_FW_SIZE + VCE_V1_0_STACK_SIZE + VCE_V1_0_DATA_SIZE;
}
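
/**
 * vce_v1_0_resume - program the VCE memory interface and boot the firmware
 *
 * @rdev: radeon_device pointer
 *
 * Set up the LMI and VCPU cache windows, start the keyed firmware load
 * and wait for the signature check to complete.
 */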
int vce_v1_0_resume(struct radeon_device *rdev)
{
	uint64_t addr = rdev->vce.gpu_addr;
	uint32_t size;
	int i;

	WREG32_P(VCE_CLOCK_GATING_A, 0, ~(1 << 16));
	WREG32_P(VCE_UENC_CLOCK_GATING, 0x1FF000, ~0xFF9FF000);
	WREG32_P(VCE_UENC_REG_CLOCK_GATING, 0x3F, ~0x3F);
	WREG32(VCE_CLOCK_GATING_B, 0);

	WREG32_P(VCE_LMI_FW_PERIODIC_CTRL, 0x4, ~0x4);

	WREG32(VCE_LMI_CTRL, 0x00398000);
	WREG32_P(VCE_LMI_CACHE_CTRL, 0x0, ~0x1);
	WREG32(VCE_LMI_SWAP_CNTL, 0);
	WREG32(VCE_LMI_SWAP_CNTL1, 0);
	WREG32(VCE_LMI_VM_CTRL, 0);

	WREG32(VCE_VCPU_SCRATCH7, RADEON_MAX_VCE_HANDLES);

	addr += 256;
	size = VCE_V1_0_FW_SIZE;
	WREG32(VCE_VCPU_CACHE_OFFSET0, addr & 0x7fffffff);
	WREG32(VCE_VCPU_CACHE_SIZE0, size);

	addr += size;
	size = VCE_V1_0_STACK_SIZE;
	WREG32(VCE_VCPU_CACHE_OFFSET1, addr & 0x7fffffff);
	WREG32(VCE_VCPU_CACHE_SIZE1, size);

	addr += size;
	size = VCE_V1_0_DATA_SIZE;
	WREG32(VCE_VCPU_CACHE_OFFSET2, addr & 0x7fffffff);
	WREG32(VCE_VCPU_CACHE_SIZE2, size);

	WREG32_P(VCE_LMI_CTRL2, 0x0, ~0x100);

	WREG32(VCE_LMI_FW_START_KEYSEL, rdev->vce.keyselect);

	for (i = 0; i < 10; ++i) {
		mdelay(10);
		if (RREG32(VCE_FW_REG_STATUS) & VCE_FW_REG_STATUS_DONE)
			break;
	}

	if (i == 10)
		return -ETIMEDOUT;

	if (!(RREG32(VCE_FW_REG_STATUS) & VCE_FW_REG_STATUS_PASS))
		return -EINVAL;

	for (i = 0; i < 10; ++i) {
		mdelay(10);
		if (!(RREG32(VCE_FW_REG_STATUS) & VCE_FW_REG_STATUS_BUSY))
			break;
	}

	if (i == 10)
		return -ETIMEDOUT;

	vce_v1_0_init_cg(rdev);

	return 0;
}
/**
 * vce_v1_0_start - start VCE block
 *
 * @rdev: radeon_device pointer
 *
 * Setup and start the VCE block
 */
int vce_v1_0_start(struct radeon_device *rdev)
{
	struct radeon_ring *ring;
	int i, j, r;

	/* set BUSY flag */
	WREG32_P(VCE_STATUS, 1, ~1);

	ring = &rdev->ring[TN_RING_TYPE_VCE1_INDEX];
	WREG32(VCE_RB_RPTR, ring->wptr);
	WREG32(VCE_RB_WPTR, ring->wptr);
	WREG32(VCE_RB_BASE_LO, ring->gpu_addr);
	WREG32(VCE_RB_BASE_HI, upper_32_bits(ring->gpu_addr));
	WREG32(VCE_RB_SIZE, ring->ring_size / 4);

	ring = &rdev->ring[TN_RING_TYPE_VCE2_INDEX];
	WREG32(VCE_RB_RPTR2, ring->wptr);
	WREG32(VCE_RB_WPTR2, ring->wptr);
	WREG32(VCE_RB_BASE_LO2, ring->gpu_addr);
	WREG32(VCE_RB_BASE_HI2, upper_32_bits(ring->gpu_addr));
	WREG32(VCE_RB_SIZE2, ring->ring_size / 4);

	WREG32_P(VCE_VCPU_CNTL, VCE_CLK_EN, ~VCE_CLK_EN);

	WREG32_P(VCE_SOFT_RESET,
		 VCE_ECPU_SOFT_RESET |
		 VCE_FME_SOFT_RESET, ~(
		 VCE_ECPU_SOFT_RESET |
		 VCE_FME_SOFT_RESET));

	mdelay(100);

	WREG32_P(VCE_SOFT_RESET, 0, ~(
		 VCE_ECPU_SOFT_RESET |
		 VCE_FME_SOFT_RESET));

	for (i = 0; i < 10; ++i) {
		uint32_t status = 0;

		for (j = 0; j < 100; ++j) {
			status = RREG32(VCE_STATUS);
			if (status & 2)
				break;
			mdelay(10);
		}
		r = 0;
		if (status & 2)
			break;

		DRM_ERROR("VCE not responding, trying to reset the ECPU!!!\n");
		WREG32_P(VCE_SOFT_RESET, VCE_ECPU_SOFT_RESET, ~VCE_ECPU_SOFT_RESET);
		mdelay(10);
		WREG32_P(VCE_SOFT_RESET, 0, ~VCE_ECPU_SOFT_RESET);
		mdelay(10);
		r = -1;
	}

	/* clear BUSY flag */
	WREG32_P(VCE_STATUS, 0, ~1);

	if (r) {
		DRM_ERROR("VCE not responding, giving up!!!\n");
		return r;
	}

	return 0;
}
int vce_v1_0_init(struct radeon_device *rdev)
{
	struct radeon_ring *ring;
	int r;

	r = vce_v1_0_start(rdev);
	if (r)
		return r;

	ring = &rdev->ring[TN_RING_TYPE_VCE1_INDEX];
	ring->ready = true;
	r = radeon_ring_test(rdev, TN_RING_TYPE_VCE1_INDEX, ring);
	if (r) {
		ring->ready = false;
		return r;
	}

	ring = &rdev->ring[TN_RING_TYPE_VCE2_INDEX];
	ring->ready = true;
	r = radeon_ring_test(rdev, TN_RING_TYPE_VCE2_INDEX, ring);
	if (r) {
		ring->ready = false;
		return r;
	}

	DRM_INFO("VCE initialized successfully.\n");

	return 0;
}