X-Git-Url: https://gerrit.opnfv.org/gerrit/gitweb?p=kvmfornfv.git;a=blobdiff_plain;f=kernel%2Fdrivers%2Fgpu%2Fdrm%2Fradeon%2Fradeon.h;h=5580568088bb2ff2d78f85c78a4b4f45fbd8243a;hp=46eb0fa75a614307286446a99d7c1c2037973ab4;hb=e09b41010ba33a20a87472ee821fa407a5b8da36;hpb=f93b97fd65072de626c074dbe099a1fff05ce060 diff --git a/kernel/drivers/gpu/drm/radeon/radeon.h b/kernel/drivers/gpu/drm/radeon/radeon.h index 46eb0fa75..558056808 100644 --- a/kernel/drivers/gpu/drm/radeon/radeon.h +++ b/kernel/drivers/gpu/drm/radeon/radeon.h @@ -268,6 +268,7 @@ struct radeon_clock { uint32_t current_dispclk; uint32_t dp_extclk; uint32_t max_pixel_clock; + uint32_t vco_freq; }; /* @@ -467,7 +468,6 @@ struct radeon_bo_va { /* protected by bo being reserved */ struct list_head bo_list; uint32_t flags; - uint64_t addr; struct radeon_fence *last_pt_update; unsigned ref_count; @@ -719,7 +719,7 @@ struct radeon_doorbell { resource_size_t size; u32 __iomem *ptr; u32 num_doorbells; /* Number of doorbells actually reserved for radeon. */ - unsigned long used[DIV_ROUND_UP(RADEON_MAX_DOORBELLS, BITS_PER_LONG)]; + DECLARE_BITMAP(used, RADEON_MAX_DOORBELLS); }; int radeon_doorbell_get(struct radeon_device *rdev, u32 *page); @@ -941,6 +941,9 @@ struct radeon_vm { /* BOs freed, but not yet updated in the PT */ struct list_head freed; + /* BOs cleared in the PT */ + struct list_head cleared; + /* contains the page directory */ struct radeon_bo *page_directory; unsigned max_pde_used; @@ -1656,6 +1659,7 @@ struct radeon_pm { u8 fan_max_rpm; /* dpm */ bool dpm_enabled; + bool sysfs_initialized; struct radeon_dpm dpm; }; @@ -1709,8 +1713,6 @@ int radeon_uvd_send_upll_ctlreq(struct radeon_device *rdev, * VCE */ #define RADEON_MAX_VCE_HANDLES 16 -#define RADEON_VCE_STACK_SIZE (1024*1024) -#define RADEON_VCE_HEAP_SIZE (4*1024*1024) struct radeon_vce { struct radeon_bo *vcpu_bo; @@ -1721,6 +1723,7 @@ struct radeon_vce { struct drm_file *filp[RADEON_MAX_VCE_HANDLES]; unsigned img_size[RADEON_MAX_VCE_HANDLES]; struct delayed_work idle_work; + uint32_t keyselect; }; int radeon_vce_init(struct radeon_device *rdev); @@ -2412,7 +2415,7 @@ struct radeon_device { struct r600_ih ih; /* r6/700 interrupt ring */ struct radeon_rlc rlc; struct radeon_mec mec; - struct work_struct hotplug_work; + struct delayed_work hotplug_work; struct work_struct dp_work; struct work_struct audio_work; int num_crtc; /* number of crtcs */ @@ -2435,6 +2438,7 @@ struct radeon_device { atomic64_t vram_usage; atomic64_t gtt_usage; atomic64_t num_bytes_moved; + atomic_t gpu_reset_counter; /* ACPI interface */ struct radeon_atif atif; struct radeon_atcs atcs; @@ -2456,7 +2460,6 @@ struct radeon_device { /* amdkfd interface */ struct kfd_dev *kfd; - struct radeon_sa_manager kfd_bo; struct mutex mn_lock; DECLARE_HASHTABLE(mn_hash, 7); @@ -2472,38 +2475,24 @@ int radeon_gpu_wait_for_idle(struct radeon_device *rdev); #define RADEON_MIN_MMIO_SIZE 0x10000 +uint32_t r100_mm_rreg_slow(struct radeon_device *rdev, uint32_t reg); +void r100_mm_wreg_slow(struct radeon_device *rdev, uint32_t reg, uint32_t v); static inline uint32_t r100_mm_rreg(struct radeon_device *rdev, uint32_t reg, bool always_indirect) { /* The mmio size is 64kb at minimum. Allows the if to be optimized out. 
*/ if ((reg < rdev->rmmio_size || reg < RADEON_MIN_MMIO_SIZE) && !always_indirect) return readl(((void __iomem *)rdev->rmmio) + reg); - else { - unsigned long flags; - uint32_t ret; - - spin_lock_irqsave(&rdev->mmio_idx_lock, flags); - writel(reg, ((void __iomem *)rdev->rmmio) + RADEON_MM_INDEX); - ret = readl(((void __iomem *)rdev->rmmio) + RADEON_MM_DATA); - spin_unlock_irqrestore(&rdev->mmio_idx_lock, flags); - - return ret; - } + else + return r100_mm_rreg_slow(rdev, reg); } - static inline void r100_mm_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v, bool always_indirect) { if ((reg < rdev->rmmio_size || reg < RADEON_MIN_MMIO_SIZE) && !always_indirect) writel(v, ((void __iomem *)rdev->rmmio) + reg); - else { - unsigned long flags; - - spin_lock_irqsave(&rdev->mmio_idx_lock, flags); - writel(reg, ((void __iomem *)rdev->rmmio) + RADEON_MM_INDEX); - writel(v, ((void __iomem *)rdev->rmmio) + RADEON_MM_DATA); - spin_unlock_irqrestore(&rdev->mmio_idx_lock, flags); - } + else + r100_mm_wreg_slow(rdev, reg, v); } u32 r100_io_rreg(struct radeon_device *rdev, u32 reg); @@ -2579,6 +2568,13 @@ static inline struct radeon_fence *to_radeon_fence(struct fence *f) tmp_ |= ((val) & ~(mask)); \ WREG32_PLL(reg, tmp_); \ } while (0) +#define WREG32_SMC_P(reg, val, mask) \ + do { \ + uint32_t tmp_ = RREG32_SMC(reg); \ + tmp_ &= (mask); \ + tmp_ |= ((val) & ~(mask)); \ + WREG32_SMC(reg, tmp_); \ + } while (0) #define DREG32_SYS(sqf, rdev, reg) seq_printf((sqf), #reg " : 0x%08X\n", r100_mm_rreg((rdev), (reg), false)) #define RREG32_IO(reg) r100_io_rreg(rdev, (reg)) #define WREG32_IO(reg, v) r100_io_wreg(rdev, (reg), (v)) @@ -2587,184 +2583,29 @@ static inline struct radeon_fence *to_radeon_fence(struct fence *f) #define WDOORBELL32(index, v) cik_mm_wdoorbell(rdev, (index), (v)) /* - * Indirect registers accessor + * Indirect registers accessors. + * They used to be inlined, but this increases code size by ~65 kbytes. + * Since each performs a pair of MMIO ops + * within a spin_lock_irqsave/spin_unlock_irqrestore region, + * the cost of call+ret is almost negligible. MMIO and locking + * costs several dozens of cycles each at best, call+ret is ~5 cycles. 
*/ -static inline uint32_t rv370_pcie_rreg(struct radeon_device *rdev, uint32_t reg) -{ - unsigned long flags; - uint32_t r; - - spin_lock_irqsave(&rdev->pcie_idx_lock, flags); - WREG32(RADEON_PCIE_INDEX, ((reg) & rdev->pcie_reg_mask)); - r = RREG32(RADEON_PCIE_DATA); - spin_unlock_irqrestore(&rdev->pcie_idx_lock, flags); - return r; -} - -static inline void rv370_pcie_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v) -{ - unsigned long flags; - - spin_lock_irqsave(&rdev->pcie_idx_lock, flags); - WREG32(RADEON_PCIE_INDEX, ((reg) & rdev->pcie_reg_mask)); - WREG32(RADEON_PCIE_DATA, (v)); - spin_unlock_irqrestore(&rdev->pcie_idx_lock, flags); -} - -static inline u32 tn_smc_rreg(struct radeon_device *rdev, u32 reg) -{ - unsigned long flags; - u32 r; - - spin_lock_irqsave(&rdev->smc_idx_lock, flags); - WREG32(TN_SMC_IND_INDEX_0, (reg)); - r = RREG32(TN_SMC_IND_DATA_0); - spin_unlock_irqrestore(&rdev->smc_idx_lock, flags); - return r; -} - -static inline void tn_smc_wreg(struct radeon_device *rdev, u32 reg, u32 v) -{ - unsigned long flags; - - spin_lock_irqsave(&rdev->smc_idx_lock, flags); - WREG32(TN_SMC_IND_INDEX_0, (reg)); - WREG32(TN_SMC_IND_DATA_0, (v)); - spin_unlock_irqrestore(&rdev->smc_idx_lock, flags); -} - -static inline u32 r600_rcu_rreg(struct radeon_device *rdev, u32 reg) -{ - unsigned long flags; - u32 r; - - spin_lock_irqsave(&rdev->rcu_idx_lock, flags); - WREG32(R600_RCU_INDEX, ((reg) & 0x1fff)); - r = RREG32(R600_RCU_DATA); - spin_unlock_irqrestore(&rdev->rcu_idx_lock, flags); - return r; -} - -static inline void r600_rcu_wreg(struct radeon_device *rdev, u32 reg, u32 v) -{ - unsigned long flags; - - spin_lock_irqsave(&rdev->rcu_idx_lock, flags); - WREG32(R600_RCU_INDEX, ((reg) & 0x1fff)); - WREG32(R600_RCU_DATA, (v)); - spin_unlock_irqrestore(&rdev->rcu_idx_lock, flags); -} - -static inline u32 eg_cg_rreg(struct radeon_device *rdev, u32 reg) -{ - unsigned long flags; - u32 r; - - spin_lock_irqsave(&rdev->cg_idx_lock, flags); - WREG32(EVERGREEN_CG_IND_ADDR, ((reg) & 0xffff)); - r = RREG32(EVERGREEN_CG_IND_DATA); - spin_unlock_irqrestore(&rdev->cg_idx_lock, flags); - return r; -} - -static inline void eg_cg_wreg(struct radeon_device *rdev, u32 reg, u32 v) -{ - unsigned long flags; - - spin_lock_irqsave(&rdev->cg_idx_lock, flags); - WREG32(EVERGREEN_CG_IND_ADDR, ((reg) & 0xffff)); - WREG32(EVERGREEN_CG_IND_DATA, (v)); - spin_unlock_irqrestore(&rdev->cg_idx_lock, flags); -} - -static inline u32 eg_pif_phy0_rreg(struct radeon_device *rdev, u32 reg) -{ - unsigned long flags; - u32 r; - - spin_lock_irqsave(&rdev->pif_idx_lock, flags); - WREG32(EVERGREEN_PIF_PHY0_INDEX, ((reg) & 0xffff)); - r = RREG32(EVERGREEN_PIF_PHY0_DATA); - spin_unlock_irqrestore(&rdev->pif_idx_lock, flags); - return r; -} - -static inline void eg_pif_phy0_wreg(struct radeon_device *rdev, u32 reg, u32 v) -{ - unsigned long flags; - - spin_lock_irqsave(&rdev->pif_idx_lock, flags); - WREG32(EVERGREEN_PIF_PHY0_INDEX, ((reg) & 0xffff)); - WREG32(EVERGREEN_PIF_PHY0_DATA, (v)); - spin_unlock_irqrestore(&rdev->pif_idx_lock, flags); -} - -static inline u32 eg_pif_phy1_rreg(struct radeon_device *rdev, u32 reg) -{ - unsigned long flags; - u32 r; - - spin_lock_irqsave(&rdev->pif_idx_lock, flags); - WREG32(EVERGREEN_PIF_PHY1_INDEX, ((reg) & 0xffff)); - r = RREG32(EVERGREEN_PIF_PHY1_DATA); - spin_unlock_irqrestore(&rdev->pif_idx_lock, flags); - return r; -} - -static inline void eg_pif_phy1_wreg(struct radeon_device *rdev, u32 reg, u32 v) -{ - unsigned long flags; - - spin_lock_irqsave(&rdev->pif_idx_lock, flags); - 
WREG32(EVERGREEN_PIF_PHY1_INDEX, ((reg) & 0xffff)); - WREG32(EVERGREEN_PIF_PHY1_DATA, (v)); - spin_unlock_irqrestore(&rdev->pif_idx_lock, flags); -} - -static inline u32 r600_uvd_ctx_rreg(struct radeon_device *rdev, u32 reg) -{ - unsigned long flags; - u32 r; - - spin_lock_irqsave(&rdev->uvd_idx_lock, flags); - WREG32(R600_UVD_CTX_INDEX, ((reg) & 0x1ff)); - r = RREG32(R600_UVD_CTX_DATA); - spin_unlock_irqrestore(&rdev->uvd_idx_lock, flags); - return r; -} - -static inline void r600_uvd_ctx_wreg(struct radeon_device *rdev, u32 reg, u32 v) -{ - unsigned long flags; - - spin_lock_irqsave(&rdev->uvd_idx_lock, flags); - WREG32(R600_UVD_CTX_INDEX, ((reg) & 0x1ff)); - WREG32(R600_UVD_CTX_DATA, (v)); - spin_unlock_irqrestore(&rdev->uvd_idx_lock, flags); -} - - -static inline u32 cik_didt_rreg(struct radeon_device *rdev, u32 reg) -{ - unsigned long flags; - u32 r; - - spin_lock_irqsave(&rdev->didt_idx_lock, flags); - WREG32(CIK_DIDT_IND_INDEX, (reg)); - r = RREG32(CIK_DIDT_IND_DATA); - spin_unlock_irqrestore(&rdev->didt_idx_lock, flags); - return r; -} - -static inline void cik_didt_wreg(struct radeon_device *rdev, u32 reg, u32 v) -{ - unsigned long flags; - - spin_lock_irqsave(&rdev->didt_idx_lock, flags); - WREG32(CIK_DIDT_IND_INDEX, (reg)); - WREG32(CIK_DIDT_IND_DATA, (v)); - spin_unlock_irqrestore(&rdev->didt_idx_lock, flags); -} +uint32_t rv370_pcie_rreg(struct radeon_device *rdev, uint32_t reg); +void rv370_pcie_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v); +u32 tn_smc_rreg(struct radeon_device *rdev, u32 reg); +void tn_smc_wreg(struct radeon_device *rdev, u32 reg, u32 v); +u32 r600_rcu_rreg(struct radeon_device *rdev, u32 reg); +void r600_rcu_wreg(struct radeon_device *rdev, u32 reg, u32 v); +u32 eg_cg_rreg(struct radeon_device *rdev, u32 reg); +void eg_cg_wreg(struct radeon_device *rdev, u32 reg, u32 v); +u32 eg_pif_phy0_rreg(struct radeon_device *rdev, u32 reg); +void eg_pif_phy0_wreg(struct radeon_device *rdev, u32 reg, u32 v); +u32 eg_pif_phy1_rreg(struct radeon_device *rdev, u32 reg); +void eg_pif_phy1_wreg(struct radeon_device *rdev, u32 reg, u32 v); +u32 r600_uvd_ctx_rreg(struct radeon_device *rdev, u32 reg); +void r600_uvd_ctx_wreg(struct radeon_device *rdev, u32 reg, u32 v); +u32 cik_didt_rreg(struct radeon_device *rdev, u32 reg); +void cik_didt_wreg(struct radeon_device *rdev, u32 reg, u32 v); void r100_pll_errata_after_index(struct radeon_device *rdev);
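
Notes on the techniques used above, in diff order.

struct radeon_doorbell: DECLARE_BITMAP(used, RADEON_MAX_DOORBELLS) expands to
unsigned long used[BITS_TO_LONGS(RADEON_MAX_DOORBELLS)], the same storage as
the open-coded DIV_ROUND_UP(RADEON_MAX_DOORBELLS, BITS_PER_LONG) array it
replaces, but it marks the field as a bitmap for the <linux/bitmap.h> helpers.
A minimal sketch of an allocator over such a field; doorbell_alloc() is a
hypothetical name for illustration, not a function from this diff (the
driver's real radeon_doorbell_get() works along these lines):

static int doorbell_alloc(struct radeon_doorbell *db, u32 *index)
{
        /* Scan only the doorbells actually reserved for radeon. */
        unsigned long offset = find_first_zero_bit(db->used, db->num_doorbells);

        if (offset >= db->num_doorbells)
                return -EINVAL;
        __set_bit(offset, db->used);    /* claim the slot */
        *index = offset;
        return 0;
}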
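struct radeon_device: hotplug_work becomes a struct delayed_work, which lets
the hotplug handler be queued with a debounce delay instead of running as soon
as the IRQ fires. A sketch of the pattern; the delay value is illustrative
(the actual delay, if any, is not visible in this header diff), and the
handler name matches the driver's existing radeon_hotplug_work_func():

        INIT_DELAYED_WORK(&rdev->hotplug_work, radeon_hotplug_work_func);

        /* from the interrupt path: coalesce bursts of hotplug IRQs */
        schedule_delayed_work(&rdev->hotplug_work, msecs_to_jiffies(1000));

        /* on teardown: wait for any queued instance to finish */
        cancel_delayed_work_sync(&rdev->hotplug_work);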
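r100_mm_rreg()/r100_mm_wreg(): only the direct readl()/writel() fast path
stays inline; the locked indirect path moves behind the new
r100_mm_rreg_slow() and r100_mm_wreg_slow() prototypes. Their bodies are not
part of this header diff, but they follow directly from the inline code
deleted above; a reconstruction:

uint32_t r100_mm_rreg_slow(struct radeon_device *rdev, uint32_t reg)
{
        unsigned long flags;
        uint32_t ret;

        /* Indirect access: post the offset through the index register,
         * then read the value back through the data register. The lock
         * keeps the index/data pair atomic against other CPUs.
         */
        spin_lock_irqsave(&rdev->mmio_idx_lock, flags);
        writel(reg, ((void __iomem *)rdev->rmmio) + RADEON_MM_INDEX);
        ret = readl(((void __iomem *)rdev->rmmio) + RADEON_MM_DATA);
        spin_unlock_irqrestore(&rdev->mmio_idx_lock, flags);
        return ret;
}

void r100_mm_wreg_slow(struct radeon_device *rdev, uint32_t reg, uint32_t v)
{
        unsigned long flags;

        spin_lock_irqsave(&rdev->mmio_idx_lock, flags);
        writel(reg, ((void __iomem *)rdev->rmmio) + RADEON_MM_INDEX);
        writel(v, ((void __iomem *)rdev->rmmio) + RADEON_MM_DATA);
        spin_unlock_irqrestore(&rdev->mmio_idx_lock, flags);
}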
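WREG32_SMC_P() follows the same convention as the existing WREG32_P() and
WREG32_PLL_P() macros, which is easy to misread: mask selects the bits to
PRESERVE from the current register value, and val supplies the bits outside
the mask. A worked example with invented values:

        /* current register value: 0xAABBCCDD                        */
        WREG32_SMC_P(reg, 0x00000011, 0xFFFFFF00);
        /* tmp_  = 0xAABBCCDD &  0xFFFFFF00  ->  0xAABBCC00           */
        /* tmp_ |= 0x00000011 & ~0xFFFFFF00  ->  0xAABBCC11 (written) */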
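The sixteen indirect register accessors (eight read/write pairs) lose their
static inline bodies in favor of plain declarations; per the comment above,
inlining them at every call site cost ~65 kbytes of code for no measurable
win: two MMIO accesses plus a spin_lock_irqsave/spin_unlock_irqrestore pair
run on the order of a hundred cycles, so the ~5-cycle call+ret adds a few
percent at worst. Each out-of-line definition is just the deleted inline body
minus the static inline; reconstructed here for the PCIE pair (the .c file
they land in is not visible in this header diff):

uint32_t rv370_pcie_rreg(struct radeon_device *rdev, uint32_t reg)
{
        unsigned long flags;
        uint32_t r;

        /* Same index/data dance as the MMIO slow path, behind the
         * dedicated PCIE index lock.
         */
        spin_lock_irqsave(&rdev->pcie_idx_lock, flags);
        WREG32(RADEON_PCIE_INDEX, ((reg) & rdev->pcie_reg_mask));
        r = RREG32(RADEON_PCIE_DATA);
        spin_unlock_irqrestore(&rdev->pcie_idx_lock, flags);
        return r;
}

void rv370_pcie_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v)
{
        unsigned long flags;

        spin_lock_irqsave(&rdev->pcie_idx_lock, flags);
        WREG32(RADEON_PCIE_INDEX, ((reg) & rdev->pcie_reg_mask));
        WREG32(RADEON_PCIE_DATA, (v));
        spin_unlock_irqrestore(&rdev->pcie_idx_lock, flags);
}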