2 * Copyright 2008 Advanced Micro Devices, Inc.
3 * Copyright 2008 Red Hat Inc.
4 * Copyright 2009 Jerome Glisse.
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the "Software"),
8 * to deal in the Software without restriction, including without limitation
9 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
10 * and/or sell copies of the Software, and to permit persons to whom the
11 * Software is furnished to do so, subject to the following conditions:
13 * The above copyright notice and this permission notice shall be included in
14 * all copies or substantial portions of the Software.
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
19 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
20 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
21 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
22 * OTHER DEALINGS IN THE SOFTWARE.
24 * Authors: Dave Airlie
29 #include <linux/slab.h>
30 #include <linux/seq_file.h>
31 #include <linux/firmware.h>
32 #include <linux/module.h>
34 #include <drm/drm_debugfs.h>
35 #include <drm/drm_device.h>
36 #include <drm/drm_pci.h>
37 #include <drm/drm_vblank.h>
38 #include <drm/radeon_drm.h>
44 #include "radeon_asic.h"
45 #include "radeon_audio.h"
46 #include "radeon_mode.h"
47 #include "radeon_ucode.h"
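/* per-CRTC register block offsets, relative to the D1 (first) CRTC;
 * indexed by crtc id, see e.g. r600_is_display_hung() below
 */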
52 static const u32 crtc_offsets[2] =
53 {
54 	0,
55 	AVIVO_D2CRTC_H_TOTAL - AVIVO_D1CRTC_H_TOTAL
56 };
58 int r600_debugfs_mc_info_init(struct radeon_device *rdev);
60 /* r600,rv610,rv630,rv620,rv635,rv670 */
61 int r600_mc_wait_for_idle(struct radeon_device *rdev);
62 static void r600_gpu_init(struct radeon_device *rdev);
63 void r600_fini(struct radeon_device *rdev);
64 void r600_irq_disable(struct radeon_device *rdev);
65 static void r600_pcie_gen2_enable(struct radeon_device *rdev);
66 extern int evergreen_rlc_resume(struct radeon_device *rdev);
67 extern void rv770_set_clk_bypass_mode(struct radeon_device *rdev);
70 * Indirect register accessors
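 *
 * The RCU and UVD context register blocks are reached through an
 * index/data pair: the register offset is written to the *_INDEX
 * register and the value is read or written through the matching
 * *_DATA register.  The spinlocks keep each index/data sequence
 * atomic with respect to other accessors.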
72 u32 r600_rcu_rreg(struct radeon_device *rdev, u32 reg)
77 spin_lock_irqsave(&rdev->rcu_idx_lock, flags);
78 WREG32(R600_RCU_INDEX, ((reg) & 0x1fff));
79 r = RREG32(R600_RCU_DATA);
80 spin_unlock_irqrestore(&rdev->rcu_idx_lock, flags);
84 void r600_rcu_wreg(struct radeon_device *rdev, u32 reg, u32 v)
88 spin_lock_irqsave(&rdev->rcu_idx_lock, flags);
89 WREG32(R600_RCU_INDEX, ((reg) & 0x1fff));
90 WREG32(R600_RCU_DATA, (v));
91 spin_unlock_irqrestore(&rdev->rcu_idx_lock, flags);
94 u32 r600_uvd_ctx_rreg(struct radeon_device *rdev, u32 reg)
99 spin_lock_irqsave(&rdev->uvd_idx_lock, flags);
100 WREG32(R600_UVD_CTX_INDEX, ((reg) & 0x1ff));
101 r = RREG32(R600_UVD_CTX_DATA);
102 spin_unlock_irqrestore(&rdev->uvd_idx_lock, flags);
106 void r600_uvd_ctx_wreg(struct radeon_device *rdev, u32 reg, u32 v)
110 spin_lock_irqsave(&rdev->uvd_idx_lock, flags);
111 WREG32(R600_UVD_CTX_INDEX, ((reg) & 0x1ff));
112 WREG32(R600_UVD_CTX_DATA, (v));
113 spin_unlock_irqrestore(&rdev->uvd_idx_lock, flags);
117 * r600_get_allowed_info_register - fetch the register for the info ioctl
119 * @rdev: radeon_device pointer
120 * @reg: register offset in bytes
121 * @val: register value
123 * Returns 0 for success or -EINVAL for an invalid register
126 int r600_get_allowed_info_register(struct radeon_device *rdev,
132 case R_000E50_SRBM_STATUS:
143 * r600_get_xclk - get the xclk
145 * @rdev: radeon_device pointer
147 * Returns the reference clock used by the gfx engine
148 * (r6xx, IGPs, APUs).
150 u32 r600_get_xclk(struct radeon_device *rdev)
152 return rdev->clock.spll.reference_freq;
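/* r600_set_uvd_clocks() reprograms the UPLL feeding the UVD vclk/dclk:
 * it switches both clocks to bypass, puts the PLL into reset, programs
 * the feedback, reference and post dividers computed by
 * radeon_uvd_calc_upll_dividers(), issues the PLL control request, and
 * finally switches vclk/dclk back to the PLL outputs.
 */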
155 int r600_set_uvd_clocks(struct radeon_device *rdev, u32 vclk, u32 dclk)
157 unsigned fb_div = 0, ref_div, vclk_div = 0, dclk_div = 0;
160 /* bypass vclk and dclk with bclk */
161 WREG32_P(CG_UPLL_FUNC_CNTL_2,
162 VCLK_SRC_SEL(1) | DCLK_SRC_SEL(1),
163 ~(VCLK_SRC_SEL_MASK | DCLK_SRC_SEL_MASK));
165 /* assert BYPASS_EN, deassert UPLL_RESET, UPLL_SLEEP and UPLL_CTLREQ */
166 WREG32_P(CG_UPLL_FUNC_CNTL, UPLL_BYPASS_EN_MASK, ~(
167 UPLL_RESET_MASK | UPLL_SLEEP_MASK | UPLL_CTLREQ_MASK));
169 if (rdev->family >= CHIP_RS780)
170 WREG32_P(GFX_MACRO_BYPASS_CNTL, UPLL_BYPASS_CNTL,
173 if (!vclk || !dclk) {
174 /* keep the Bypass mode, put PLL to sleep */
175 WREG32_P(CG_UPLL_FUNC_CNTL, UPLL_SLEEP_MASK, ~UPLL_SLEEP_MASK);
179 if (rdev->clock.spll.reference_freq == 10000)
184 r = radeon_uvd_calc_upll_dividers(rdev, vclk, dclk, 50000, 160000,
185 ref_div + 1, 0xFFF, 2, 30, ~0,
186 &fb_div, &vclk_div, &dclk_div);
190 if (rdev->family >= CHIP_RV670 && rdev->family < CHIP_RS780)
195 r = radeon_uvd_send_upll_ctlreq(rdev, CG_UPLL_FUNC_CNTL);
199 /* assert PLL_RESET */
200 WREG32_P(CG_UPLL_FUNC_CNTL, UPLL_RESET_MASK, ~UPLL_RESET_MASK);
202 /* For RS780 we have to choose ref clk */
203 if (rdev->family >= CHIP_RS780)
204 WREG32_P(CG_UPLL_FUNC_CNTL, UPLL_REFCLK_SRC_SEL_MASK,
205 ~UPLL_REFCLK_SRC_SEL_MASK);
207 /* set the required fb, ref and post divider values */
208 WREG32_P(CG_UPLL_FUNC_CNTL,
209 UPLL_FB_DIV(fb_div) |
210 UPLL_REF_DIV(ref_div),
211 ~(UPLL_FB_DIV_MASK | UPLL_REF_DIV_MASK));
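	/* each post divider is split into the lengths of its high and low
	 * phases; odd dividers give the low phase the extra cycle,
	 * e.g. vclk_div = 5 -> SW_HILEN = 2, SW_LOLEN = 3
	 */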
212 WREG32_P(CG_UPLL_FUNC_CNTL_2,
213 UPLL_SW_HILEN(vclk_div >> 1) |
214 UPLL_SW_LOLEN((vclk_div >> 1) + (vclk_div & 1)) |
215 UPLL_SW_HILEN2(dclk_div >> 1) |
216 UPLL_SW_LOLEN2((dclk_div >> 1) + (dclk_div & 1)) |
217 UPLL_DIVEN_MASK | UPLL_DIVEN2_MASK,
220 /* give the PLL some time to settle */
223 /* deassert PLL_RESET */
224 WREG32_P(CG_UPLL_FUNC_CNTL, 0, ~UPLL_RESET_MASK);
228 /* deassert BYPASS EN */
229 WREG32_P(CG_UPLL_FUNC_CNTL, 0, ~UPLL_BYPASS_EN_MASK);
231 if (rdev->family >= CHIP_RS780)
232 WREG32_P(GFX_MACRO_BYPASS_CNTL, 0, ~UPLL_BYPASS_CNTL);
234 r = radeon_uvd_send_upll_ctlreq(rdev, CG_UPLL_FUNC_CNTL);
238 /* switch VCLK and DCLK selection */
239 WREG32_P(CG_UPLL_FUNC_CNTL_2,
240 VCLK_SRC_SEL(2) | DCLK_SRC_SEL(2),
241 ~(VCLK_SRC_SEL_MASK | DCLK_SRC_SEL_MASK));
248 void dce3_program_fmt(struct drm_encoder *encoder)
250 struct drm_device *dev = encoder->dev;
251 struct radeon_device *rdev = dev->dev_private;
252 struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
253 struct radeon_crtc *radeon_crtc = to_radeon_crtc(encoder->crtc);
254 struct drm_connector *connector = radeon_get_connector_for_encoder(encoder);
257 enum radeon_connector_dither dither = RADEON_FMT_DITHER_DISABLE;
260 struct radeon_connector *radeon_connector = to_radeon_connector(connector);
261 bpc = radeon_get_monitor_bpc(connector);
262 dither = radeon_connector->dither;
265 /* LVDS FMT is set up by atom */
266 if (radeon_encoder->devices & ATOM_DEVICE_LCD_SUPPORT)
269 /* not needed for analog */
270 if ((radeon_encoder->encoder_id == ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC1) ||
271 (radeon_encoder->encoder_id == ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC2))
279 if (dither == RADEON_FMT_DITHER_ENABLE)
280 /* XXX sort out optimal dither settings */
281 tmp |= FMT_SPATIAL_DITHER_EN;
283 tmp |= FMT_TRUNCATE_EN;
286 if (dither == RADEON_FMT_DITHER_ENABLE)
287 /* XXX sort out optimal dither settings */
288 tmp |= (FMT_SPATIAL_DITHER_EN | FMT_SPATIAL_DITHER_DEPTH);
290 tmp |= (FMT_TRUNCATE_EN | FMT_TRUNCATE_DEPTH);
298 WREG32(FMT_BIT_DEPTH_CONTROL + radeon_crtc->crtc_offset, tmp);
301 /* get temperature in millidegrees */
302 int rv6xx_get_temp(struct radeon_device *rdev)
304 u32 temp = (RREG32(CG_THERMAL_STATUS) & ASIC_T_MASK) >>
306 int actual_temp = temp & 0xff;
311 return actual_temp * 1000;
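/* r600_pm_get_dynpm_state() selects rdev->pm.requested_power_state_index
 * and requested_clock_mode_index for the planned dynpm action.  On IGPs
 * and R600 the driver steps between whole power states (ordered low to
 * high); on other asics it keeps one power state and steps between its
 * clock modes.  SINGLE_DISPLAY_ONLY states are skipped when more than one
 * crtc is active, and NO_DISPLAY modes are skipped while displays are on.
 */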
314 void r600_pm_get_dynpm_state(struct radeon_device *rdev)
318 rdev->pm.dynpm_can_upclock = true;
319 rdev->pm.dynpm_can_downclock = true;
321 /* power state array is low to high, default is first */
322 if ((rdev->flags & RADEON_IS_IGP) || (rdev->family == CHIP_R600)) {
323 int min_power_state_index = 0;
325 if (rdev->pm.num_power_states > 2)
326 min_power_state_index = 1;
328 switch (rdev->pm.dynpm_planned_action) {
329 case DYNPM_ACTION_MINIMUM:
330 rdev->pm.requested_power_state_index = min_power_state_index;
331 rdev->pm.requested_clock_mode_index = 0;
332 rdev->pm.dynpm_can_downclock = false;
334 case DYNPM_ACTION_DOWNCLOCK:
335 if (rdev->pm.current_power_state_index == min_power_state_index) {
336 rdev->pm.requested_power_state_index = rdev->pm.current_power_state_index;
337 rdev->pm.dynpm_can_downclock = false;
339 if (rdev->pm.active_crtc_count > 1) {
340 for (i = 0; i < rdev->pm.num_power_states; i++) {
341 if (rdev->pm.power_state[i].flags & RADEON_PM_STATE_SINGLE_DISPLAY_ONLY)
343 else if (i >= rdev->pm.current_power_state_index) {
344 rdev->pm.requested_power_state_index =
345 rdev->pm.current_power_state_index;
348 rdev->pm.requested_power_state_index = i;
353 if (rdev->pm.current_power_state_index == 0)
354 rdev->pm.requested_power_state_index =
355 rdev->pm.num_power_states - 1;
357 rdev->pm.requested_power_state_index =
358 rdev->pm.current_power_state_index - 1;
361 rdev->pm.requested_clock_mode_index = 0;
362 /* don't use the power state if crtcs are active and no display flag is set */
363 if ((rdev->pm.active_crtc_count > 0) &&
364 (rdev->pm.power_state[rdev->pm.requested_power_state_index].
365 clock_info[rdev->pm.requested_clock_mode_index].flags &
366 RADEON_PM_MODE_NO_DISPLAY)) {
367 rdev->pm.requested_power_state_index++;
370 case DYNPM_ACTION_UPCLOCK:
371 if (rdev->pm.current_power_state_index == (rdev->pm.num_power_states - 1)) {
372 rdev->pm.requested_power_state_index = rdev->pm.current_power_state_index;
373 rdev->pm.dynpm_can_upclock = false;
375 if (rdev->pm.active_crtc_count > 1) {
376 for (i = (rdev->pm.num_power_states - 1); i >= 0; i--) {
377 if (rdev->pm.power_state[i].flags & RADEON_PM_STATE_SINGLE_DISPLAY_ONLY)
379 else if (i <= rdev->pm.current_power_state_index) {
380 rdev->pm.requested_power_state_index =
381 rdev->pm.current_power_state_index;
384 rdev->pm.requested_power_state_index = i;
389 rdev->pm.requested_power_state_index =
390 rdev->pm.current_power_state_index + 1;
392 rdev->pm.requested_clock_mode_index = 0;
394 case DYNPM_ACTION_DEFAULT:
395 rdev->pm.requested_power_state_index = rdev->pm.default_power_state_index;
396 rdev->pm.requested_clock_mode_index = 0;
397 rdev->pm.dynpm_can_upclock = false;
399 case DYNPM_ACTION_NONE:
401 DRM_ERROR("Requested mode for undefined action\n");
405 /* XXX select a power state based on AC/DC, single/dualhead, etc. */
406 /* for now just select the first power state and switch between clock modes */
407 /* power state array is low to high, default is first (0) */
408 if (rdev->pm.active_crtc_count > 1) {
409 rdev->pm.requested_power_state_index = -1;
410 /* start at 1 as we don't want the default mode */
411 for (i = 1; i < rdev->pm.num_power_states; i++) {
412 if (rdev->pm.power_state[i].flags & RADEON_PM_STATE_SINGLE_DISPLAY_ONLY)
414 else if ((rdev->pm.power_state[i].type == POWER_STATE_TYPE_PERFORMANCE) ||
415 (rdev->pm.power_state[i].type == POWER_STATE_TYPE_BATTERY)) {
416 rdev->pm.requested_power_state_index = i;
420 /* if nothing selected, grab the default state. */
421 if (rdev->pm.requested_power_state_index == -1)
422 rdev->pm.requested_power_state_index = 0;
424 rdev->pm.requested_power_state_index = 1;
426 switch (rdev->pm.dynpm_planned_action) {
427 case DYNPM_ACTION_MINIMUM:
428 rdev->pm.requested_clock_mode_index = 0;
429 rdev->pm.dynpm_can_downclock = false;
431 case DYNPM_ACTION_DOWNCLOCK:
432 if (rdev->pm.requested_power_state_index == rdev->pm.current_power_state_index) {
433 if (rdev->pm.current_clock_mode_index == 0) {
434 rdev->pm.requested_clock_mode_index = 0;
435 rdev->pm.dynpm_can_downclock = false;
437 rdev->pm.requested_clock_mode_index =
438 rdev->pm.current_clock_mode_index - 1;
440 rdev->pm.requested_clock_mode_index = 0;
441 rdev->pm.dynpm_can_downclock = false;
443 /* don't use the power state if crtcs are active and no display flag is set */
444 if ((rdev->pm.active_crtc_count > 0) &&
445 (rdev->pm.power_state[rdev->pm.requested_power_state_index].
446 clock_info[rdev->pm.requested_clock_mode_index].flags &
447 RADEON_PM_MODE_NO_DISPLAY)) {
448 rdev->pm.requested_clock_mode_index++;
451 case DYNPM_ACTION_UPCLOCK:
452 if (rdev->pm.requested_power_state_index == rdev->pm.current_power_state_index) {
453 if (rdev->pm.current_clock_mode_index ==
454 (rdev->pm.power_state[rdev->pm.requested_power_state_index].num_clock_modes - 1)) {
455 rdev->pm.requested_clock_mode_index = rdev->pm.current_clock_mode_index;
456 rdev->pm.dynpm_can_upclock = false;
458 rdev->pm.requested_clock_mode_index =
459 rdev->pm.current_clock_mode_index + 1;
461 rdev->pm.requested_clock_mode_index =
462 rdev->pm.power_state[rdev->pm.requested_power_state_index].num_clock_modes - 1;
463 rdev->pm.dynpm_can_upclock = false;
466 case DYNPM_ACTION_DEFAULT:
467 rdev->pm.requested_power_state_index = rdev->pm.default_power_state_index;
468 rdev->pm.requested_clock_mode_index = 0;
469 rdev->pm.dynpm_can_upclock = false;
471 case DYNPM_ACTION_NONE:
473 DRM_ERROR("Requested mode for undefined action\n");
478 DRM_DEBUG_DRIVER("Requested: e: %d m: %d p: %d\n",
479 rdev->pm.power_state[rdev->pm.requested_power_state_index].
480 clock_info[rdev->pm.requested_clock_mode_index].sclk,
481 rdev->pm.power_state[rdev->pm.requested_power_state_index].
482 clock_info[rdev->pm.requested_clock_mode_index].mclk,
483 rdev->pm.power_state[rdev->pm.requested_power_state_index].
487 void rs780_pm_init_profile(struct radeon_device *rdev)
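	/* each profile entry picks the power state (ps_idx) and clock mode
	 * (cm_idx) to use with displays off and on, for single-head (SH) and
	 * multi-head (MH) setups at low/mid/high performance levels; the
	 * indices below depend on how many power states the BIOS exposes
	 */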
489 if (rdev->pm.num_power_states == 2) {
491 rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index;
492 rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
493 rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_cm_idx = 0;
494 rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_cm_idx = 0;
496 rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_ps_idx = 0;
497 rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_ps_idx = 0;
498 rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_cm_idx = 0;
499 rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_cm_idx = 0;
501 rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_ps_idx = 0;
502 rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_ps_idx = 0;
503 rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_cm_idx = 0;
504 rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_cm_idx = 0;
506 rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_ps_idx = 0;
507 rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_ps_idx = 1;
508 rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_cm_idx = 0;
509 rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_cm_idx = 0;
511 rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_ps_idx = 0;
512 rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_ps_idx = 0;
513 rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_cm_idx = 0;
514 rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_cm_idx = 0;
516 rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_ps_idx = 0;
517 rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_ps_idx = 0;
518 rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_cm_idx = 0;
519 rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_cm_idx = 0;
521 rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_ps_idx = 0;
522 rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_ps_idx = 1;
523 rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_cm_idx = 0;
524 rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_cm_idx = 0;
525 } else if (rdev->pm.num_power_states == 3) {
527 rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index;
528 rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
529 rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_cm_idx = 0;
530 rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_cm_idx = 0;
532 rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_ps_idx = 1;
533 rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_ps_idx = 1;
534 rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_cm_idx = 0;
535 rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_cm_idx = 0;
537 rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_ps_idx = 1;
538 rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_ps_idx = 1;
539 rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_cm_idx = 0;
540 rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_cm_idx = 0;
542 rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_ps_idx = 1;
543 rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_ps_idx = 2;
544 rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_cm_idx = 0;
545 rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_cm_idx = 0;
547 rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_ps_idx = 1;
548 rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_ps_idx = 1;
549 rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_cm_idx = 0;
550 rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_cm_idx = 0;
552 rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_ps_idx = 1;
553 rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_ps_idx = 1;
554 rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_cm_idx = 0;
555 rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_cm_idx = 0;
557 rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_ps_idx = 1;
558 rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_ps_idx = 2;
559 rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_cm_idx = 0;
560 rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_cm_idx = 0;
563 rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index;
564 rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
565 rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_cm_idx = 0;
566 rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_cm_idx = 0;
568 rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_ps_idx = 2;
569 rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_ps_idx = 2;
570 rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_cm_idx = 0;
571 rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_cm_idx = 0;
573 rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_ps_idx = 2;
574 rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_ps_idx = 2;
575 rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_cm_idx = 0;
576 rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_cm_idx = 0;
578 rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_ps_idx = 2;
579 rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_ps_idx = 3;
580 rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_cm_idx = 0;
581 rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_cm_idx = 0;
583 rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_ps_idx = 2;
584 rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_ps_idx = 0;
585 rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_cm_idx = 0;
586 rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_cm_idx = 0;
588 rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_ps_idx = 2;
589 rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_ps_idx = 0;
590 rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_cm_idx = 0;
591 rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_cm_idx = 0;
593 rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_ps_idx = 2;
594 rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_ps_idx = 3;
595 rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_cm_idx = 0;
596 rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_cm_idx = 0;
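/* r600_pm_init_profile() fills the same profile table for the discrete
 * r6xx/r7xx parts: R600 itself uses the default state everywhere, asics
 * with fewer than four power states use fixed indices, and the rest look
 * up battery vs performance states via radeon_pm_get_type_index().
 */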
600 void r600_pm_init_profile(struct radeon_device *rdev)
604 if (rdev->family == CHIP_R600) {
607 rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index;
608 rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
609 rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_cm_idx = 0;
610 rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_cm_idx = 0;
612 rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index;
613 rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
614 rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_cm_idx = 0;
615 rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_cm_idx = 0;
617 rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index;
618 rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
619 rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_cm_idx = 0;
620 rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_cm_idx = 0;
622 rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index;
623 rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
624 rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_cm_idx = 0;
625 rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_cm_idx = 0;
627 rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index;
628 rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
629 rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_cm_idx = 0;
630 rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_cm_idx = 0;
632 rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index;
633 rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
634 rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_cm_idx = 0;
635 rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_cm_idx = 0;
637 rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index;
638 rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
639 rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_cm_idx = 0;
640 rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_cm_idx = 0;
642 if (rdev->pm.num_power_states < 4) {
644 rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index;
645 rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
646 rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_cm_idx = 0;
647 rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_cm_idx = 2;
649 rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_ps_idx = 1;
650 rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_ps_idx = 1;
651 rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_cm_idx = 0;
652 rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_cm_idx = 0;
654 rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_ps_idx = 1;
655 rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_ps_idx = 1;
656 rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_cm_idx = 0;
657 rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_cm_idx = 1;
659 rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_ps_idx = 1;
660 rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_ps_idx = 1;
661 rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_cm_idx = 0;
662 rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_cm_idx = 2;
664 rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_ps_idx = 2;
665 rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_ps_idx = 2;
666 rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_cm_idx = 0;
667 rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_cm_idx = 0;
669 rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_ps_idx = 2;
670 rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_ps_idx = 2;
671 rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_cm_idx = 0;
672 rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_cm_idx = 1;
674 rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_ps_idx = 2;
675 rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_ps_idx = 2;
676 rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_cm_idx = 0;
677 rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_cm_idx = 2;
680 rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index;
681 rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
682 rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_cm_idx = 0;
683 rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_cm_idx = 2;
685 if (rdev->flags & RADEON_IS_MOBILITY)
686 idx = radeon_pm_get_type_index(rdev, POWER_STATE_TYPE_BATTERY, 0);
688 idx = radeon_pm_get_type_index(rdev, POWER_STATE_TYPE_PERFORMANCE, 0);
689 rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_ps_idx = idx;
690 rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_ps_idx = idx;
691 rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_cm_idx = 0;
692 rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_cm_idx = 0;
694 rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_ps_idx = idx;
695 rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_ps_idx = idx;
696 rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_cm_idx = 0;
697 rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_cm_idx = 1;
699 idx = radeon_pm_get_type_index(rdev, POWER_STATE_TYPE_PERFORMANCE, 0);
700 rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_ps_idx = idx;
701 rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_ps_idx = idx;
702 rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_cm_idx = 0;
703 rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_cm_idx = 2;
705 if (rdev->flags & RADEON_IS_MOBILITY)
706 idx = radeon_pm_get_type_index(rdev, POWER_STATE_TYPE_BATTERY, 1);
708 idx = radeon_pm_get_type_index(rdev, POWER_STATE_TYPE_PERFORMANCE, 1);
709 rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_ps_idx = idx;
710 rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_ps_idx = idx;
711 rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_cm_idx = 0;
712 rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_cm_idx = 0;
714 rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_ps_idx = idx;
715 rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_ps_idx = idx;
716 rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_cm_idx = 0;
717 rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_cm_idx = 1;
719 idx = radeon_pm_get_type_index(rdev, POWER_STATE_TYPE_PERFORMANCE, 1);
720 rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_ps_idx = idx;
721 rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_ps_idx = idx;
722 rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_cm_idx = 0;
723 rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_cm_idx = 2;
728 void r600_pm_misc(struct radeon_device *rdev)
730 int req_ps_idx = rdev->pm.requested_power_state_index;
731 int req_cm_idx = rdev->pm.requested_clock_mode_index;
732 struct radeon_power_state *ps = &rdev->pm.power_state[req_ps_idx];
733 struct radeon_voltage *voltage = &ps->clock_info[req_cm_idx].voltage;
735 if ((voltage->type == VOLTAGE_SW) && voltage->voltage) {
736 /* 0xff01 is a flag rather than an actual voltage */
737 if (voltage->voltage == 0xff01)
739 if (voltage->voltage != rdev->pm.current_vddc) {
740 radeon_atom_set_voltage(rdev, voltage->voltage, SET_VOLTAGE_TYPE_ASIC_VDDC);
741 rdev->pm.current_vddc = voltage->voltage;
742 DRM_DEBUG_DRIVER("Setting: v: %d\n", voltage->voltage);
747 bool r600_gui_idle(struct radeon_device *rdev)
749 if (RREG32(GRBM_STATUS) & GUI_ACTIVE)
755 /* hpd for digital panel detect/disconnect */
756 bool r600_hpd_sense(struct radeon_device *rdev, enum radeon_hpd_id hpd)
758 bool connected = false;
760 if (ASIC_IS_DCE3(rdev)) {
763 if (RREG32(DC_HPD1_INT_STATUS) & DC_HPDx_SENSE)
767 if (RREG32(DC_HPD2_INT_STATUS) & DC_HPDx_SENSE)
771 if (RREG32(DC_HPD3_INT_STATUS) & DC_HPDx_SENSE)
775 if (RREG32(DC_HPD4_INT_STATUS) & DC_HPDx_SENSE)
780 if (RREG32(DC_HPD5_INT_STATUS) & DC_HPDx_SENSE)
784 if (RREG32(DC_HPD6_INT_STATUS) & DC_HPDx_SENSE)
793 if (RREG32(DC_HOT_PLUG_DETECT1_INT_STATUS) & DC_HOT_PLUG_DETECTx_SENSE)
797 if (RREG32(DC_HOT_PLUG_DETECT2_INT_STATUS) & DC_HOT_PLUG_DETECTx_SENSE)
801 if (RREG32(DC_HOT_PLUG_DETECT3_INT_STATUS) & DC_HOT_PLUG_DETECTx_SENSE)
811 void r600_hpd_set_polarity(struct radeon_device *rdev,
812 enum radeon_hpd_id hpd)
815 bool connected = r600_hpd_sense(rdev, hpd);
817 if (ASIC_IS_DCE3(rdev)) {
820 tmp = RREG32(DC_HPD1_INT_CONTROL);
822 tmp &= ~DC_HPDx_INT_POLARITY;
824 tmp |= DC_HPDx_INT_POLARITY;
825 WREG32(DC_HPD1_INT_CONTROL, tmp);
828 tmp = RREG32(DC_HPD2_INT_CONTROL);
830 tmp &= ~DC_HPDx_INT_POLARITY;
832 tmp |= DC_HPDx_INT_POLARITY;
833 WREG32(DC_HPD2_INT_CONTROL, tmp);
836 tmp = RREG32(DC_HPD3_INT_CONTROL);
838 tmp &= ~DC_HPDx_INT_POLARITY;
840 tmp |= DC_HPDx_INT_POLARITY;
841 WREG32(DC_HPD3_INT_CONTROL, tmp);
844 tmp = RREG32(DC_HPD4_INT_CONTROL);
846 tmp &= ~DC_HPDx_INT_POLARITY;
848 tmp |= DC_HPDx_INT_POLARITY;
849 WREG32(DC_HPD4_INT_CONTROL, tmp);
852 tmp = RREG32(DC_HPD5_INT_CONTROL);
854 tmp &= ~DC_HPDx_INT_POLARITY;
856 tmp |= DC_HPDx_INT_POLARITY;
857 WREG32(DC_HPD5_INT_CONTROL, tmp);
861 tmp = RREG32(DC_HPD6_INT_CONTROL);
863 tmp &= ~DC_HPDx_INT_POLARITY;
865 tmp |= DC_HPDx_INT_POLARITY;
866 WREG32(DC_HPD6_INT_CONTROL, tmp);
874 tmp = RREG32(DC_HOT_PLUG_DETECT1_INT_CONTROL);
876 tmp &= ~DC_HOT_PLUG_DETECTx_INT_POLARITY;
878 tmp |= DC_HOT_PLUG_DETECTx_INT_POLARITY;
879 WREG32(DC_HOT_PLUG_DETECT1_INT_CONTROL, tmp);
882 tmp = RREG32(DC_HOT_PLUG_DETECT2_INT_CONTROL);
884 tmp &= ~DC_HOT_PLUG_DETECTx_INT_POLARITY;
886 tmp |= DC_HOT_PLUG_DETECTx_INT_POLARITY;
887 WREG32(DC_HOT_PLUG_DETECT2_INT_CONTROL, tmp);
890 tmp = RREG32(DC_HOT_PLUG_DETECT3_INT_CONTROL);
892 tmp &= ~DC_HOT_PLUG_DETECTx_INT_POLARITY;
894 tmp |= DC_HOT_PLUG_DETECTx_INT_POLARITY;
895 WREG32(DC_HOT_PLUG_DETECT3_INT_CONTROL, tmp);
903 void r600_hpd_init(struct radeon_device *rdev)
905 struct drm_device *dev = rdev->ddev;
906 struct drm_connector *connector;
909 list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
910 struct radeon_connector *radeon_connector = to_radeon_connector(connector);
912 if (connector->connector_type == DRM_MODE_CONNECTOR_eDP ||
913 connector->connector_type == DRM_MODE_CONNECTOR_LVDS) {
914 /* don't try to enable hpd on eDP or LVDS to avoid breaking the
915 * aux dp channel on iMacs; this helps (but does not completely fix)
916 * https://bugzilla.redhat.com/show_bug.cgi?id=726143
920 if (ASIC_IS_DCE3(rdev)) {
921 u32 tmp = DC_HPDx_CONNECTION_TIMER(0x9c4) | DC_HPDx_RX_INT_TIMER(0xfa);
922 if (ASIC_IS_DCE32(rdev))
925 switch (radeon_connector->hpd.hpd) {
927 WREG32(DC_HPD1_CONTROL, tmp);
930 WREG32(DC_HPD2_CONTROL, tmp);
933 WREG32(DC_HPD3_CONTROL, tmp);
936 WREG32(DC_HPD4_CONTROL, tmp);
940 WREG32(DC_HPD5_CONTROL, tmp);
943 WREG32(DC_HPD6_CONTROL, tmp);
949 switch (radeon_connector->hpd.hpd) {
951 WREG32(DC_HOT_PLUG_DETECT1_CONTROL, DC_HOT_PLUG_DETECTx_EN);
954 WREG32(DC_HOT_PLUG_DETECT2_CONTROL, DC_HOT_PLUG_DETECTx_EN);
957 WREG32(DC_HOT_PLUG_DETECT3_CONTROL, DC_HOT_PLUG_DETECTx_EN);
963 if (radeon_connector->hpd.hpd != RADEON_HPD_NONE)
964 enable |= 1 << radeon_connector->hpd.hpd;
965 radeon_hpd_set_polarity(rdev, radeon_connector->hpd.hpd);
967 radeon_irq_kms_enable_hpd(rdev, enable);
970 void r600_hpd_fini(struct radeon_device *rdev)
972 struct drm_device *dev = rdev->ddev;
973 struct drm_connector *connector;
974 unsigned disable = 0;
976 list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
977 struct radeon_connector *radeon_connector = to_radeon_connector(connector);
978 if (ASIC_IS_DCE3(rdev)) {
979 switch (radeon_connector->hpd.hpd) {
981 WREG32(DC_HPD1_CONTROL, 0);
984 WREG32(DC_HPD2_CONTROL, 0);
987 WREG32(DC_HPD3_CONTROL, 0);
990 WREG32(DC_HPD4_CONTROL, 0);
994 WREG32(DC_HPD5_CONTROL, 0);
997 WREG32(DC_HPD6_CONTROL, 0);
1003 switch (radeon_connector->hpd.hpd) {
1005 WREG32(DC_HOT_PLUG_DETECT1_CONTROL, 0);
1008 WREG32(DC_HOT_PLUG_DETECT2_CONTROL, 0);
1011 WREG32(DC_HOT_PLUG_DETECT3_CONTROL, 0);
1017 if (radeon_connector->hpd.hpd != RADEON_HPD_NONE)
1018 disable |= 1 << radeon_connector->hpd.hpd;
1020 radeon_irq_kms_disable_hpd(rdev, disable);
1026 void r600_pcie_gart_tlb_flush(struct radeon_device *rdev)
1031 /* flush hdp cache so updates hit vram */
1032 if ((rdev->family >= CHIP_RV770) && (rdev->family <= CHIP_RV740) &&
1033 !(rdev->flags & RADEON_IS_AGP)) {
1034 void __iomem *ptr = (void *)rdev->gart.ptr;
1037 /* r7xx hw bug. write to HDP_DEBUG1 followed by fb read
1038 * rather than write to HDP_REG_COHERENCY_FLUSH_CNTL.
1039 * This seems to cause problems on some AGP cards. Just use the old
1040 * method for them.
1041 */
1042 WREG32(HDP_DEBUG1, 0);
1043 tmp = readl((void __iomem *)ptr);
1045 WREG32(R_005480_HDP_MEM_COHERENCY_FLUSH_CNTL, 0x1);
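	/* invalidate the VM context 0 TLB over the whole GART range:
	 * program the low/high invalidation addresses, issue the request,
	 * then poll the response below until it completes
	 */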
1047 WREG32(VM_CONTEXT0_INVALIDATION_LOW_ADDR, rdev->mc.gtt_start >> 12);
1048 WREG32(VM_CONTEXT0_INVALIDATION_HIGH_ADDR, (rdev->mc.gtt_end - 1) >> 12);
1049 WREG32(VM_CONTEXT0_REQUEST_RESPONSE, REQUEST_TYPE(1));
1050 for (i = 0; i < rdev->usec_timeout; i++) {
1051 /* read the invalidation request response */
1052 tmp = RREG32(VM_CONTEXT0_REQUEST_RESPONSE);
1053 tmp = (tmp & RESPONSE_TYPE_MASK) >> RESPONSE_TYPE_SHIFT;
1055 pr_warn("[drm] r600 flush TLB failed\n");
1065 int r600_pcie_gart_init(struct radeon_device *rdev)
1069 if (rdev->gart.robj) {
1070 WARN(1, "R600 PCIE GART already initialized\n");
1073 /* Initialize common gart structure */
1074 r = radeon_gart_init(rdev);
1077 rdev->gart.table_size = rdev->gart.num_gpu_pages * 8;
1078 return radeon_gart_table_vram_alloc(rdev);
1081 static int r600_pcie_gart_enable(struct radeon_device *rdev)
1086 if (rdev->gart.robj == NULL) {
1087 dev_err(rdev->dev, "No VRAM object for PCIE GART.\n");
1090 r = radeon_gart_table_vram_pin(rdev);
1094 /* Setup L2 cache */
1095 WREG32(VM_L2_CNTL, ENABLE_L2_CACHE | ENABLE_L2_FRAGMENT_PROCESSING |
1096 ENABLE_L2_PTE_CACHE_LRU_UPDATE_BY_WRITE |
1097 EFFECTIVE_L2_QUEUE_SIZE(7));
1098 WREG32(VM_L2_CNTL2, 0);
1099 WREG32(VM_L2_CNTL3, BANK_SELECT_0(0) | BANK_SELECT_1(1));
1100 /* Setup TLB control */
1101 tmp = ENABLE_L1_TLB | ENABLE_L1_FRAGMENT_PROCESSING |
1102 SYSTEM_ACCESS_MODE_NOT_IN_SYS |
1103 EFFECTIVE_L1_TLB_SIZE(5) | EFFECTIVE_L1_QUEUE_SIZE(5) |
1104 ENABLE_WAIT_L2_QUERY;
1105 WREG32(MC_VM_L1_TLB_MCB_RD_SYS_CNTL, tmp);
1106 WREG32(MC_VM_L1_TLB_MCB_WR_SYS_CNTL, tmp);
1107 WREG32(MC_VM_L1_TLB_MCB_RD_HDP_CNTL, tmp | ENABLE_L1_STRICT_ORDERING);
1108 WREG32(MC_VM_L1_TLB_MCB_WR_HDP_CNTL, tmp);
1109 WREG32(MC_VM_L1_TLB_MCD_RD_A_CNTL, tmp);
1110 WREG32(MC_VM_L1_TLB_MCD_WR_A_CNTL, tmp);
1111 WREG32(MC_VM_L1_TLB_MCD_RD_B_CNTL, tmp);
1112 WREG32(MC_VM_L1_TLB_MCD_WR_B_CNTL, tmp);
1113 WREG32(MC_VM_L1_TLB_MCB_RD_GFX_CNTL, tmp);
1114 WREG32(MC_VM_L1_TLB_MCB_WR_GFX_CNTL, tmp);
1115 WREG32(MC_VM_L1_TLB_MCB_RD_PDMA_CNTL, tmp);
1116 WREG32(MC_VM_L1_TLB_MCB_WR_PDMA_CNTL, tmp);
1117 WREG32(MC_VM_L1_TLB_MCB_RD_UVD_CNTL, tmp);
1118 WREG32(MC_VM_L1_TLB_MCB_WR_UVD_CNTL, tmp);
1119 WREG32(MC_VM_L1_TLB_MCB_RD_SEM_CNTL, tmp | ENABLE_SEMAPHORE_MODE);
1120 WREG32(MC_VM_L1_TLB_MCB_WR_SEM_CNTL, tmp | ENABLE_SEMAPHORE_MODE);
1121 WREG32(VM_CONTEXT0_PAGE_TABLE_START_ADDR, rdev->mc.gtt_start >> 12);
1122 WREG32(VM_CONTEXT0_PAGE_TABLE_END_ADDR, rdev->mc.gtt_end >> 12);
1123 WREG32(VM_CONTEXT0_PAGE_TABLE_BASE_ADDR, rdev->gart.table_addr >> 12);
1124 WREG32(VM_CONTEXT0_CNTL, ENABLE_CONTEXT | PAGE_TABLE_DEPTH(0) |
1125 RANGE_PROTECTION_FAULT_ENABLE_DEFAULT);
1126 WREG32(VM_CONTEXT0_PROTECTION_FAULT_DEFAULT_ADDR,
1127 (u32)(rdev->dummy_page.addr >> 12));
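	/* only VM context 0 backs the GART; keep the remaining contexts disabled */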
1128 for (i = 1; i < 7; i++)
1129 WREG32(VM_CONTEXT0_CNTL + (i * 4), 0);
1131 r600_pcie_gart_tlb_flush(rdev);
1132 DRM_INFO("PCIE GART of %uM enabled (table at 0x%016llX).\n",
1133 (unsigned)(rdev->mc.gtt_size >> 20),
1134 (unsigned long long)rdev->gart.table_addr);
1135 rdev->gart.ready = true;
1139 static void r600_pcie_gart_disable(struct radeon_device *rdev)
1144 /* Disable all tables */
1145 for (i = 0; i < 7; i++)
1146 WREG32(VM_CONTEXT0_CNTL + (i * 4), 0);
1148 /* Disable L2 cache */
1149 WREG32(VM_L2_CNTL, ENABLE_L2_FRAGMENT_PROCESSING |
1150 EFFECTIVE_L2_QUEUE_SIZE(7));
1151 WREG32(VM_L2_CNTL3, BANK_SELECT_0(0) | BANK_SELECT_1(1));
1152 /* Setup L1 TLB control */
1153 tmp = EFFECTIVE_L1_TLB_SIZE(5) | EFFECTIVE_L1_QUEUE_SIZE(5) |
1154 ENABLE_WAIT_L2_QUERY;
1155 WREG32(MC_VM_L1_TLB_MCD_RD_A_CNTL, tmp);
1156 WREG32(MC_VM_L1_TLB_MCD_WR_A_CNTL, tmp);
1157 WREG32(MC_VM_L1_TLB_MCD_RD_B_CNTL, tmp);
1158 WREG32(MC_VM_L1_TLB_MCD_WR_B_CNTL, tmp);
1159 WREG32(MC_VM_L1_TLB_MCB_RD_GFX_CNTL, tmp);
1160 WREG32(MC_VM_L1_TLB_MCB_WR_GFX_CNTL, tmp);
1161 WREG32(MC_VM_L1_TLB_MCB_RD_PDMA_CNTL, tmp);
1162 WREG32(MC_VM_L1_TLB_MCB_WR_PDMA_CNTL, tmp);
1163 WREG32(MC_VM_L1_TLB_MCB_RD_SEM_CNTL, tmp);
1164 WREG32(MC_VM_L1_TLB_MCB_WR_SEM_CNTL, tmp);
1165 WREG32(MC_VM_L1_TLB_MCB_RD_SYS_CNTL, tmp);
1166 WREG32(MC_VM_L1_TLB_MCB_WR_SYS_CNTL, tmp);
1167 WREG32(MC_VM_L1_TLB_MCB_RD_HDP_CNTL, tmp);
1168 WREG32(MC_VM_L1_TLB_MCB_WR_HDP_CNTL, tmp);
1169 WREG32(MC_VM_L1_TLB_MCB_RD_UVD_CNTL, tmp);
1170 WREG32(MC_VM_L1_TLB_MCB_WR_UVD_CNTL, tmp);
1171 radeon_gart_table_vram_unpin(rdev);
1174 static void r600_pcie_gart_fini(struct radeon_device *rdev)
1176 radeon_gart_fini(rdev);
1177 r600_pcie_gart_disable(rdev);
1178 radeon_gart_table_vram_free(rdev);
1181 static void r600_agp_enable(struct radeon_device *rdev)
1186 /* Setup L2 cache */
1187 WREG32(VM_L2_CNTL, ENABLE_L2_CACHE | ENABLE_L2_FRAGMENT_PROCESSING |
1188 ENABLE_L2_PTE_CACHE_LRU_UPDATE_BY_WRITE |
1189 EFFECTIVE_L2_QUEUE_SIZE(7));
1190 WREG32(VM_L2_CNTL2, 0);
1191 WREG32(VM_L2_CNTL3, BANK_SELECT_0(0) | BANK_SELECT_1(1));
1192 /* Setup TLB control */
1193 tmp = ENABLE_L1_TLB | ENABLE_L1_FRAGMENT_PROCESSING |
1194 SYSTEM_ACCESS_MODE_NOT_IN_SYS |
1195 EFFECTIVE_L1_TLB_SIZE(5) | EFFECTIVE_L1_QUEUE_SIZE(5) |
1196 ENABLE_WAIT_L2_QUERY;
1197 WREG32(MC_VM_L1_TLB_MCB_RD_SYS_CNTL, tmp);
1198 WREG32(MC_VM_L1_TLB_MCB_WR_SYS_CNTL, tmp);
1199 WREG32(MC_VM_L1_TLB_MCB_RD_HDP_CNTL, tmp | ENABLE_L1_STRICT_ORDERING);
1200 WREG32(MC_VM_L1_TLB_MCB_WR_HDP_CNTL, tmp);
1201 WREG32(MC_VM_L1_TLB_MCD_RD_A_CNTL, tmp);
1202 WREG32(MC_VM_L1_TLB_MCD_WR_A_CNTL, tmp);
1203 WREG32(MC_VM_L1_TLB_MCD_RD_B_CNTL, tmp);
1204 WREG32(MC_VM_L1_TLB_MCD_WR_B_CNTL, tmp);
1205 WREG32(MC_VM_L1_TLB_MCB_RD_GFX_CNTL, tmp);
1206 WREG32(MC_VM_L1_TLB_MCB_WR_GFX_CNTL, tmp);
1207 WREG32(MC_VM_L1_TLB_MCB_RD_PDMA_CNTL, tmp);
1208 WREG32(MC_VM_L1_TLB_MCB_WR_PDMA_CNTL, tmp);
1209 WREG32(MC_VM_L1_TLB_MCB_RD_SEM_CNTL, tmp | ENABLE_SEMAPHORE_MODE);
1210 WREG32(MC_VM_L1_TLB_MCB_WR_SEM_CNTL, tmp | ENABLE_SEMAPHORE_MODE);
1211 for (i = 0; i < 7; i++)
1212 WREG32(VM_CONTEXT0_CNTL + (i * 4), 0);
1215 int r600_mc_wait_for_idle(struct radeon_device *rdev)
1220 for (i = 0; i < rdev->usec_timeout; i++) {
1221 /* check the MC busy bits in SRBM_STATUS */
1222 tmp = RREG32(R_000E50_SRBM_STATUS) & 0x3F00;
1230 uint32_t rs780_mc_rreg(struct radeon_device *rdev, uint32_t reg)
1232 unsigned long flags;
1235 spin_lock_irqsave(&rdev->mc_idx_lock, flags);
1236 WREG32(R_0028F8_MC_INDEX, S_0028F8_MC_IND_ADDR(reg));
1237 r = RREG32(R_0028FC_MC_DATA);
1238 WREG32(R_0028F8_MC_INDEX, ~C_0028F8_MC_IND_ADDR);
1239 spin_unlock_irqrestore(&rdev->mc_idx_lock, flags);
1243 void rs780_mc_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v)
1245 unsigned long flags;
1247 spin_lock_irqsave(&rdev->mc_idx_lock, flags);
1248 WREG32(R_0028F8_MC_INDEX, S_0028F8_MC_IND_ADDR(reg) |
1249 S_0028F8_MC_IND_WR_EN(1));
1250 WREG32(R_0028FC_MC_DATA, v);
1251 WREG32(R_0028F8_MC_INDEX, 0x7F);
1252 spin_unlock_irqrestore(&rdev->mc_idx_lock, flags);
1255 static void r600_mc_program(struct radeon_device *rdev)
1257 struct rv515_mc_save save;
1261 /* Initialize HDP */
1262 for (i = 0, j = 0; i < 32; i++, j += 0x18) {
1263 WREG32((0x2c14 + j), 0x00000000);
1264 WREG32((0x2c18 + j), 0x00000000);
1265 WREG32((0x2c1c + j), 0x00000000);
1266 WREG32((0x2c20 + j), 0x00000000);
1267 WREG32((0x2c24 + j), 0x00000000);
1269 WREG32(HDP_REG_COHERENCY_FLUSH_CNTL, 0);
1271 rv515_mc_stop(rdev, &save);
1272 if (r600_mc_wait_for_idle(rdev)) {
1273 dev_warn(rdev->dev, "Wait for MC idle timed out!\n");
1275 /* Lockout access through VGA aperture (doesn't exist before R600) */
1276 WREG32(VGA_HDP_CONTROL, VGA_MEMORY_DISABLE);
1277 /* Update configuration */
1278 if (rdev->flags & RADEON_IS_AGP) {
1279 if (rdev->mc.vram_start < rdev->mc.gtt_start) {
1280 /* VRAM before AGP */
1281 WREG32(MC_VM_SYSTEM_APERTURE_LOW_ADDR,
1282 rdev->mc.vram_start >> 12);
1283 WREG32(MC_VM_SYSTEM_APERTURE_HIGH_ADDR,
1284 rdev->mc.gtt_end >> 12);
1286 /* VRAM after AGP */
1287 WREG32(MC_VM_SYSTEM_APERTURE_LOW_ADDR,
1288 rdev->mc.gtt_start >> 12);
1289 WREG32(MC_VM_SYSTEM_APERTURE_HIGH_ADDR,
1290 rdev->mc.vram_end >> 12);
1293 WREG32(MC_VM_SYSTEM_APERTURE_LOW_ADDR, rdev->mc.vram_start >> 12);
1294 WREG32(MC_VM_SYSTEM_APERTURE_HIGH_ADDR, rdev->mc.vram_end >> 12);
1296 WREG32(MC_VM_SYSTEM_APERTURE_DEFAULT_ADDR, rdev->vram_scratch.gpu_addr >> 12);
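	/* MC_VM_FB_LOCATION packs the FB top (bits 31:16) and base (bits 15:0)
	 * in 16MB units, e.g. a 256MB FB at address 0 gives 0x000F0000
	 */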
1297 tmp = ((rdev->mc.vram_end >> 24) & 0xFFFF) << 16;
1298 tmp |= ((rdev->mc.vram_start >> 24) & 0xFFFF);
1299 WREG32(MC_VM_FB_LOCATION, tmp);
1300 WREG32(HDP_NONSURFACE_BASE, (rdev->mc.vram_start >> 8));
1301 WREG32(HDP_NONSURFACE_INFO, (2 << 7));
1302 WREG32(HDP_NONSURFACE_SIZE, 0x3FFFFFFF);
1303 if (rdev->flags & RADEON_IS_AGP) {
1304 WREG32(MC_VM_AGP_TOP, rdev->mc.gtt_end >> 22);
1305 WREG32(MC_VM_AGP_BOT, rdev->mc.gtt_start >> 22);
1306 WREG32(MC_VM_AGP_BASE, rdev->mc.agp_base >> 22);
1308 WREG32(MC_VM_AGP_BASE, 0);
1309 WREG32(MC_VM_AGP_TOP, 0x0FFFFFFF);
1310 WREG32(MC_VM_AGP_BOT, 0x0FFFFFFF);
1312 if (r600_mc_wait_for_idle(rdev)) {
1313 dev_warn(rdev->dev, "Wait for MC idle timed out!\n");
1315 rv515_mc_resume(rdev, &save);
1316 /* we need to own VRAM, so turn off the VGA renderer here
1317 * to stop it overwriting our objects */
1318 rv515_vga_render_disable(rdev);
1322 * r600_vram_gtt_location - try to find VRAM & GTT location
1323 * @rdev: radeon device structure holding all necessary information
1324 * @mc: memory controller structure holding memory information
1326 * This function tries to place VRAM at the same address as in the CPU (PCI)
1327 * address space, as some GPUs seem to have issues when VRAM is reprogrammed
1328 * to a different address space.
1330 * If there is not enough space to fit the non-visible VRAM after the
1331 * aperture, then the VRAM size is limited to the aperture.
1333 * If we are using AGP, then place VRAM adjacent to the AGP aperture, as we
1334 * need them to be contiguous from the GPU's point of view so that we can
1335 * program the GPU to catch accesses outside of them.
1337 * This function never fails; in the worst case it limits the VRAM or GTT size.
1339 * Note: GTT start, end and size should be initialized before calling this
1340 * function on AGP platforms.
1342 static void r600_vram_gtt_location(struct radeon_device *rdev, struct radeon_mc *mc)
1344 u64 size_bf, size_af;
1346 if (mc->mc_vram_size > 0xE0000000) {
1347 /* leave room for at least 512M GTT */
1348 dev_warn(rdev->dev, "limiting VRAM\n");
1349 mc->real_vram_size = 0xE0000000;
1350 mc->mc_vram_size = 0xE0000000;
1352 if (rdev->flags & RADEON_IS_AGP) {
1353 size_bf = mc->gtt_start;
1354 size_af = mc->mc_mask - mc->gtt_end;
1355 if (size_bf > size_af) {
1356 if (mc->mc_vram_size > size_bf) {
1357 dev_warn(rdev->dev, "limiting VRAM\n");
1358 mc->real_vram_size = size_bf;
1359 mc->mc_vram_size = size_bf;
1361 mc->vram_start = mc->gtt_start - mc->mc_vram_size;
1363 if (mc->mc_vram_size > size_af) {
1364 dev_warn(rdev->dev, "limiting VRAM\n");
1365 mc->real_vram_size = size_af;
1366 mc->mc_vram_size = size_af;
1368 mc->vram_start = mc->gtt_end + 1;
1370 mc->vram_end = mc->vram_start + mc->mc_vram_size - 1;
1371 dev_info(rdev->dev, "VRAM: %lluM 0x%08llX - 0x%08llX (%lluM used)\n",
1372 mc->mc_vram_size >> 20, mc->vram_start,
1373 mc->vram_end, mc->real_vram_size >> 20);
1376 if (rdev->flags & RADEON_IS_IGP) {
1377 base = RREG32(MC_VM_FB_LOCATION) & 0xFFFF;
1380 radeon_vram_location(rdev, &rdev->mc, base);
1381 rdev->mc.gtt_base_align = 0;
1382 radeon_gtt_location(rdev, mc);
1386 static int r600_mc_init(struct radeon_device *rdev)
1389 int chansize, numchan;
1390 uint32_t h_addr, l_addr;
1391 unsigned long long k8_addr;
1393 /* Get VRAM information */
1394 rdev->mc.vram_is_ddr = true;
1395 tmp = RREG32(RAMCFG);
1396 if (tmp & CHANSIZE_OVERRIDE) {
1398 } else if (tmp & CHANSIZE_MASK) {
1403 tmp = RREG32(CHMAP);
1404 switch ((tmp & NOOFCHAN_MASK) >> NOOFCHAN_SHIFT) {
1419 rdev->mc.vram_width = numchan * chansize;
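	/* bus width is channel count times channel size,
	 * e.g. four 64-bit channels give a 256-bit interface
	 */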
1420 /* Could the aperture size report 0? */
1421 rdev->mc.aper_base = pci_resource_start(rdev->pdev, 0);
1422 rdev->mc.aper_size = pci_resource_len(rdev->pdev, 0);
1423 /* Setup GPU memory space */
1424 rdev->mc.mc_vram_size = RREG32(CONFIG_MEMSIZE);
1425 rdev->mc.real_vram_size = RREG32(CONFIG_MEMSIZE);
1426 rdev->mc.visible_vram_size = rdev->mc.aper_size;
1427 r600_vram_gtt_location(rdev, &rdev->mc);
1429 if (rdev->flags & RADEON_IS_IGP) {
1430 rs690_pm_info(rdev);
1431 rdev->mc.igp_sideport_enabled = radeon_atombios_sideport_present(rdev);
1433 if (rdev->family == CHIP_RS780 || rdev->family == CHIP_RS880) {
1434 /* Use K8 direct mapping for fast fb access. */
1435 rdev->fastfb_working = false;
1436 h_addr = G_000012_K8_ADDR_EXT(RREG32_MC(R_000012_MC_MISC_UMA_CNTL));
1437 l_addr = RREG32_MC(R_000011_K8_FB_LOCATION);
1438 k8_addr = ((unsigned long long)h_addr) << 32 | l_addr;
1439 #if defined(CONFIG_X86_32) && !defined(CONFIG_X86_PAE)
1440 if (k8_addr + rdev->mc.visible_vram_size < 0x100000000ULL)
1443 /* FastFB shall be used with UMA memory. Here it is simply disabled when sideport
1444 * memory is present.
1446 if (rdev->mc.igp_sideport_enabled == false && radeon_fastfb == 1) {
1447 DRM_INFO("Direct mapping: aper base at 0x%llx, replaced by direct mapping base 0x%llx.\n",
1448 (unsigned long long)rdev->mc.aper_base, k8_addr);
1449 rdev->mc.aper_base = (resource_size_t)k8_addr;
1450 rdev->fastfb_working = true;
1456 radeon_update_bandwidth_info(rdev);
1460 int r600_vram_scratch_init(struct radeon_device *rdev)
1464 if (rdev->vram_scratch.robj == NULL) {
1465 r = radeon_bo_create(rdev, RADEON_GPU_PAGE_SIZE,
1466 PAGE_SIZE, true, RADEON_GEM_DOMAIN_VRAM,
1467 0, NULL, NULL, &rdev->vram_scratch.robj);
1473 r = radeon_bo_reserve(rdev->vram_scratch.robj, false);
1474 if (unlikely(r != 0))
1476 r = radeon_bo_pin(rdev->vram_scratch.robj,
1477 RADEON_GEM_DOMAIN_VRAM, &rdev->vram_scratch.gpu_addr);
1479 radeon_bo_unreserve(rdev->vram_scratch.robj);
1482 r = radeon_bo_kmap(rdev->vram_scratch.robj,
1483 (void **)&rdev->vram_scratch.ptr);
1485 radeon_bo_unpin(rdev->vram_scratch.robj);
1486 radeon_bo_unreserve(rdev->vram_scratch.robj);
1491 void r600_vram_scratch_fini(struct radeon_device *rdev)
1495 if (rdev->vram_scratch.robj == NULL) {
1498 r = radeon_bo_reserve(rdev->vram_scratch.robj, false);
1499 if (likely(r == 0)) {
1500 radeon_bo_kunmap(rdev->vram_scratch.robj);
1501 radeon_bo_unpin(rdev->vram_scratch.robj);
1502 radeon_bo_unreserve(rdev->vram_scratch.robj);
1504 radeon_bo_unref(&rdev->vram_scratch.robj);
1507 void r600_set_bios_scratch_engine_hung(struct radeon_device *rdev, bool hung)
1509 u32 tmp = RREG32(R600_BIOS_3_SCRATCH);
1512 tmp |= ATOM_S3_ASIC_GUI_ENGINE_HUNG;
1514 tmp &= ~ATOM_S3_ASIC_GUI_ENGINE_HUNG;
1516 WREG32(R600_BIOS_3_SCRATCH, tmp);
1519 static void r600_print_gpu_status_regs(struct radeon_device *rdev)
1521 dev_info(rdev->dev, " R_008010_GRBM_STATUS = 0x%08X\n",
1522 RREG32(R_008010_GRBM_STATUS));
1523 dev_info(rdev->dev, " R_008014_GRBM_STATUS2 = 0x%08X\n",
1524 RREG32(R_008014_GRBM_STATUS2));
1525 dev_info(rdev->dev, " R_000E50_SRBM_STATUS = 0x%08X\n",
1526 RREG32(R_000E50_SRBM_STATUS));
1527 dev_info(rdev->dev, " R_008674_CP_STALLED_STAT1 = 0x%08X\n",
1528 RREG32(CP_STALLED_STAT1));
1529 dev_info(rdev->dev, " R_008678_CP_STALLED_STAT2 = 0x%08X\n",
1530 RREG32(CP_STALLED_STAT2));
1531 dev_info(rdev->dev, " R_00867C_CP_BUSY_STAT = 0x%08X\n",
1532 RREG32(CP_BUSY_STAT));
1533 dev_info(rdev->dev, " R_008680_CP_STAT = 0x%08X\n",
1535 dev_info(rdev->dev, " R_00D034_DMA_STATUS_REG = 0x%08X\n",
1536 RREG32(DMA_STATUS_REG));
1539 static bool r600_is_display_hung(struct radeon_device *rdev)
1545 for (i = 0; i < rdev->num_crtc; i++) {
1546 if (RREG32(AVIVO_D1CRTC_CONTROL + crtc_offsets[i]) & AVIVO_CRTC_EN) {
1547 crtc_status[i] = RREG32(AVIVO_D1CRTC_STATUS_HV_COUNT + crtc_offsets[i]);
1548 crtc_hung |= (1 << i);
1552 for (j = 0; j < 10; j++) {
1553 for (i = 0; i < rdev->num_crtc; i++) {
1554 if (crtc_hung & (1 << i)) {
1555 tmp = RREG32(AVIVO_D1CRTC_STATUS_HV_COUNT + crtc_offsets[i]);
1556 if (tmp != crtc_status[i])
1557 crtc_hung &= ~(1 << i);
1568 u32 r600_gpu_check_soft_reset(struct radeon_device *rdev)
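	/* build a mask of blocks that look hung: GRBM_STATUS covers the gfx
	 * pipeline and CP, DMA_STATUS_REG covers the DMA engine, and
	 * SRBM_STATUS covers RLC/IH/SEM/GRBM/VMC/MC; display hangs are
	 * checked separately via r600_is_display_hung()
	 */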
1574 tmp = RREG32(R_008010_GRBM_STATUS);
1575 if (rdev->family >= CHIP_RV770) {
1576 if (G_008010_PA_BUSY(tmp) | G_008010_SC_BUSY(tmp) |
1577 G_008010_SH_BUSY(tmp) | G_008010_SX_BUSY(tmp) |
1578 G_008010_TA_BUSY(tmp) | G_008010_VGT_BUSY(tmp) |
1579 G_008010_DB03_BUSY(tmp) | G_008010_CB03_BUSY(tmp) |
1580 G_008010_SPI03_BUSY(tmp) | G_008010_VGT_BUSY_NO_DMA(tmp))
1581 reset_mask |= RADEON_RESET_GFX;
1583 if (G_008010_PA_BUSY(tmp) | G_008010_SC_BUSY(tmp) |
1584 G_008010_SH_BUSY(tmp) | G_008010_SX_BUSY(tmp) |
1585 G_008010_TA03_BUSY(tmp) | G_008010_VGT_BUSY(tmp) |
1586 G_008010_DB03_BUSY(tmp) | G_008010_CB03_BUSY(tmp) |
1587 G_008010_SPI03_BUSY(tmp) | G_008010_VGT_BUSY_NO_DMA(tmp))
1588 reset_mask |= RADEON_RESET_GFX;
1591 if (G_008010_CF_RQ_PENDING(tmp) | G_008010_PF_RQ_PENDING(tmp) |
1592 G_008010_CP_BUSY(tmp) | G_008010_CP_COHERENCY_BUSY(tmp))
1593 reset_mask |= RADEON_RESET_CP;
1595 if (G_008010_GRBM_EE_BUSY(tmp))
1596 reset_mask |= RADEON_RESET_GRBM | RADEON_RESET_GFX | RADEON_RESET_CP;
1598 /* DMA_STATUS_REG */
1599 tmp = RREG32(DMA_STATUS_REG);
1600 if (!(tmp & DMA_IDLE))
1601 reset_mask |= RADEON_RESET_DMA;
1604 tmp = RREG32(R_000E50_SRBM_STATUS);
1605 if (G_000E50_RLC_RQ_PENDING(tmp) | G_000E50_RLC_BUSY(tmp))
1606 reset_mask |= RADEON_RESET_RLC;
1608 if (G_000E50_IH_BUSY(tmp))
1609 reset_mask |= RADEON_RESET_IH;
1611 if (G_000E50_SEM_BUSY(tmp))
1612 reset_mask |= RADEON_RESET_SEM;
1614 if (G_000E50_GRBM_RQ_PENDING(tmp))
1615 reset_mask |= RADEON_RESET_GRBM;
1617 if (G_000E50_VMC_BUSY(tmp))
1618 reset_mask |= RADEON_RESET_VMC;
1620 if (G_000E50_MCB_BUSY(tmp) | G_000E50_MCDZ_BUSY(tmp) |
1621 G_000E50_MCDY_BUSY(tmp) | G_000E50_MCDX_BUSY(tmp) |
1622 G_000E50_MCDW_BUSY(tmp))
1623 reset_mask |= RADEON_RESET_MC;
1625 if (r600_is_display_hung(rdev))
1626 reset_mask |= RADEON_RESET_DISPLAY;
1628 /* Skip MC reset as it's most likely not hung, just busy */
1629 if (reset_mask & RADEON_RESET_MC) {
1630 DRM_DEBUG("MC busy: 0x%08X, clearing.\n", reset_mask);
1631 reset_mask &= ~RADEON_RESET_MC;
1637 static void r600_gpu_soft_reset(struct radeon_device *rdev, u32 reset_mask)
1639 struct rv515_mc_save save;
1640 u32 grbm_soft_reset = 0, srbm_soft_reset = 0;
1643 if (reset_mask == 0)
1646 dev_info(rdev->dev, "GPU softreset: 0x%08X\n", reset_mask);
1648 r600_print_gpu_status_regs(rdev);
1650 /* Disable CP parsing/prefetching */
1651 if (rdev->family >= CHIP_RV770)
1652 WREG32(R_0086D8_CP_ME_CNTL, S_0086D8_CP_ME_HALT(1) | S_0086D8_CP_PFP_HALT(1));
1654 WREG32(R_0086D8_CP_ME_CNTL, S_0086D8_CP_ME_HALT(1));
1656 /* disable the RLC */
1657 WREG32(RLC_CNTL, 0);
1659 if (reset_mask & RADEON_RESET_DMA) {
1661 tmp = RREG32(DMA_RB_CNTL);
1662 tmp &= ~DMA_RB_ENABLE;
1663 WREG32(DMA_RB_CNTL, tmp);
1668 rv515_mc_stop(rdev, &save);
1669 if (r600_mc_wait_for_idle(rdev)) {
1670 dev_warn(rdev->dev, "Wait for MC idle timed out!\n");
1673 if (reset_mask & (RADEON_RESET_GFX | RADEON_RESET_COMPUTE)) {
1674 if (rdev->family >= CHIP_RV770)
1675 grbm_soft_reset |= S_008020_SOFT_RESET_DB(1) |
1676 S_008020_SOFT_RESET_CB(1) |
1677 S_008020_SOFT_RESET_PA(1) |
1678 S_008020_SOFT_RESET_SC(1) |
1679 S_008020_SOFT_RESET_SPI(1) |
1680 S_008020_SOFT_RESET_SX(1) |
1681 S_008020_SOFT_RESET_SH(1) |
1682 S_008020_SOFT_RESET_TC(1) |
1683 S_008020_SOFT_RESET_TA(1) |
1684 S_008020_SOFT_RESET_VC(1) |
1685 S_008020_SOFT_RESET_VGT(1);
1687 grbm_soft_reset |= S_008020_SOFT_RESET_CR(1) |
1688 S_008020_SOFT_RESET_DB(1) |
1689 S_008020_SOFT_RESET_CB(1) |
1690 S_008020_SOFT_RESET_PA(1) |
1691 S_008020_SOFT_RESET_SC(1) |
1692 S_008020_SOFT_RESET_SMX(1) |
1693 S_008020_SOFT_RESET_SPI(1) |
1694 S_008020_SOFT_RESET_SX(1) |
1695 S_008020_SOFT_RESET_SH(1) |
1696 S_008020_SOFT_RESET_TC(1) |
1697 S_008020_SOFT_RESET_TA(1) |
1698 S_008020_SOFT_RESET_VC(1) |
1699 S_008020_SOFT_RESET_VGT(1);
1702 if (reset_mask & RADEON_RESET_CP) {
1703 grbm_soft_reset |= S_008020_SOFT_RESET_CP(1) |
1704 S_008020_SOFT_RESET_VGT(1);
1706 srbm_soft_reset |= S_000E60_SOFT_RESET_GRBM(1);
1709 if (reset_mask & RADEON_RESET_DMA) {
1710 if (rdev->family >= CHIP_RV770)
1711 srbm_soft_reset |= RV770_SOFT_RESET_DMA;
1713 srbm_soft_reset |= SOFT_RESET_DMA;
1716 if (reset_mask & RADEON_RESET_RLC)
1717 srbm_soft_reset |= S_000E60_SOFT_RESET_RLC(1);
1719 if (reset_mask & RADEON_RESET_SEM)
1720 srbm_soft_reset |= S_000E60_SOFT_RESET_SEM(1);
1722 if (reset_mask & RADEON_RESET_IH)
1723 srbm_soft_reset |= S_000E60_SOFT_RESET_IH(1);
1725 if (reset_mask & RADEON_RESET_GRBM)
1726 srbm_soft_reset |= S_000E60_SOFT_RESET_GRBM(1);
1728 if (!(rdev->flags & RADEON_IS_IGP)) {
1729 if (reset_mask & RADEON_RESET_MC)
1730 srbm_soft_reset |= S_000E60_SOFT_RESET_MC(1);
1733 if (reset_mask & RADEON_RESET_VMC)
1734 srbm_soft_reset |= S_000E60_SOFT_RESET_VMC(1);
1736 if (grbm_soft_reset) {
1737 tmp = RREG32(R_008020_GRBM_SOFT_RESET);
1738 tmp |= grbm_soft_reset;
1739 dev_info(rdev->dev, "R_008020_GRBM_SOFT_RESET=0x%08X\n", tmp);
1740 WREG32(R_008020_GRBM_SOFT_RESET, tmp);
1741 tmp = RREG32(R_008020_GRBM_SOFT_RESET);
1745 tmp &= ~grbm_soft_reset;
1746 WREG32(R_008020_GRBM_SOFT_RESET, tmp);
1747 tmp = RREG32(R_008020_GRBM_SOFT_RESET);
1750 if (srbm_soft_reset) {
1751 tmp = RREG32(SRBM_SOFT_RESET);
1752 tmp |= srbm_soft_reset;
1753 dev_info(rdev->dev, "SRBM_SOFT_RESET=0x%08X\n", tmp);
1754 WREG32(SRBM_SOFT_RESET, tmp);
1755 tmp = RREG32(SRBM_SOFT_RESET);
1759 tmp &= ~srbm_soft_reset;
1760 WREG32(SRBM_SOFT_RESET, tmp);
1761 tmp = RREG32(SRBM_SOFT_RESET);
1764 /* Wait a little for things to settle down */
1767 rv515_mc_resume(rdev, &save);
1770 r600_print_gpu_status_regs(rdev);
1773 static void r600_gpu_pci_config_reset(struct radeon_device *rdev)
1775 struct rv515_mc_save save;
1778 dev_info(rdev->dev, "GPU pci config reset\n");
1782 /* Disable CP parsing/prefetching */
1783 if (rdev->family >= CHIP_RV770)
1784 WREG32(R_0086D8_CP_ME_CNTL, S_0086D8_CP_ME_HALT(1) | S_0086D8_CP_PFP_HALT(1));
1786 WREG32(R_0086D8_CP_ME_CNTL, S_0086D8_CP_ME_HALT(1));
1788 /* disable the RLC */
1789 WREG32(RLC_CNTL, 0);
1792 tmp = RREG32(DMA_RB_CNTL);
1793 tmp &= ~DMA_RB_ENABLE;
1794 WREG32(DMA_RB_CNTL, tmp);
1798 /* set mclk/sclk to bypass */
1799 if (rdev->family >= CHIP_RV770)
1800 rv770_set_clk_bypass_mode(rdev);
1802 pci_clear_master(rdev->pdev);
1803 /* disable mem access */
1804 rv515_mc_stop(rdev, &save);
1805 if (r600_mc_wait_for_idle(rdev)) {
1806 dev_warn(rdev->dev, "Wait for MC idle timed out!\n");
1809 /* BIF reset workaround. Not sure if this is needed on 6xx */
1810 tmp = RREG32(BUS_CNTL);
1811 tmp |= VGA_COHE_SPEC_TIMER_DIS;
1812 WREG32(BUS_CNTL, tmp);
1814 tmp = RREG32(BIF_SCRATCH0);
1817 radeon_pci_config_reset(rdev);
1820 /* BIF reset workaround. Not sure if this is needed on 6xx */
1821 tmp = SOFT_RESET_BIF;
1822 WREG32(SRBM_SOFT_RESET, tmp);
1824 WREG32(SRBM_SOFT_RESET, 0);
1826 /* wait for asic to come out of reset */
1827 for (i = 0; i < rdev->usec_timeout; i++) {
1828 if (RREG32(CONFIG_MEMSIZE) != 0xffffffff)
1834 int r600_asic_reset(struct radeon_device *rdev, bool hard)
1839 r600_gpu_pci_config_reset(rdev);
1843 reset_mask = r600_gpu_check_soft_reset(rdev);
1846 r600_set_bios_scratch_engine_hung(rdev, true);
1848 /* try soft reset */
1849 r600_gpu_soft_reset(rdev, reset_mask);
1851 reset_mask = r600_gpu_check_soft_reset(rdev);
1853 /* try pci config reset */
1854 if (reset_mask && radeon_hard_reset)
1855 r600_gpu_pci_config_reset(rdev);
1857 reset_mask = r600_gpu_check_soft_reset(rdev);
1860 r600_set_bios_scratch_engine_hung(rdev, false);
1866 * r600_gfx_is_lockup - Check if the GFX engine is locked up
1868 * @rdev: radeon_device pointer
1869 * @ring: radeon_ring structure holding ring information
1871 * Check if the GFX engine is locked up.
1872 * Returns true if the engine appears to be locked up, false if not.
1874 bool r600_gfx_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring)
1876 u32 reset_mask = r600_gpu_check_soft_reset(rdev);
1878 if (!(reset_mask & (RADEON_RESET_GFX |
1879 RADEON_RESET_COMPUTE |
1880 RADEON_RESET_CP))) {
1881 radeon_ring_lockup_update(rdev, ring);
1884 return radeon_ring_test_lockup(rdev, ring);
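/*
 * r6xx_remap_render_backend - build the render backend map
 *
 * Spreads the enabled render backends across the rendering pipes:
 * backends set in disabled_rb_mask (or beyond max_rb_num) are skipped,
 * and every remaining backend index is packed into the returned map
 * rb_num_width bits at a time, pipe_rb_ratio slots per backend plus one
 * extra slot while pipe_rb_remain is non-zero.
 */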
1887 u32 r6xx_remap_render_backend(struct radeon_device *rdev,
1888 u32 tiling_pipe_num,
1890 u32 total_max_rb_num,
1891 u32 disabled_rb_mask)
1893 u32 rendering_pipe_num, rb_num_width, req_rb_num;
1894 u32 pipe_rb_ratio, pipe_rb_remain, tmp;
1895 u32 data = 0, mask = 1 << (max_rb_num - 1);
1898 /* mask out the RBs that don't exist on that asic */
1899 tmp = disabled_rb_mask | ((0xff << max_rb_num) & 0xff);
1900 /* make sure at least one RB is available */
1901 if ((tmp & 0xff) != 0xff)
1902 disabled_rb_mask = tmp;
1904 rendering_pipe_num = 1 << tiling_pipe_num;
1905 req_rb_num = total_max_rb_num - r600_count_pipe_bits(disabled_rb_mask);
1906 BUG_ON(rendering_pipe_num < req_rb_num);
1908 pipe_rb_ratio = rendering_pipe_num / req_rb_num;
1909 pipe_rb_remain = rendering_pipe_num - pipe_rb_ratio * req_rb_num;
1911 if (rdev->family <= CHIP_RV740) {
1919 for (i = 0; i < max_rb_num; i++) {
1920 if (!(mask & disabled_rb_mask)) {
1921 for (j = 0; j < pipe_rb_ratio; j++) {
1922 data <<= rb_num_width;
1923 data |= max_rb_num - i - 1;
1925 if (pipe_rb_remain) {
1926 data <<= rb_num_width;
1927 data |= max_rb_num - i - 1;
1937 int r600_count_pipe_bits(uint32_t val)
1939 return hweight32(val);
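/*
 * r600_gpu_init - set up the core GFX block
 *
 * Fills in the per-family limits (pipes, SIMDs, backends, GPRs, threads,
 * stack entries), derives the tiling configuration from RAMCFG and the
 * disabled backend mask, and programs default SQ/SPI/VGT/CB state that the
 * 2D/3D drivers are expected to adjust later.
 */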
1942 static void r600_gpu_init(struct radeon_device *rdev)
1946 u32 cc_gc_shader_pipe_config;
1950 u32 sq_gpr_resource_mgmt_1 = 0;
1951 u32 sq_gpr_resource_mgmt_2 = 0;
1952 u32 sq_thread_resource_mgmt = 0;
1953 u32 sq_stack_resource_mgmt_1 = 0;
1954 u32 sq_stack_resource_mgmt_2 = 0;
1955 u32 disabled_rb_mask;
1957 rdev->config.r600.tiling_group_size = 256;
1958 switch (rdev->family) {
1960 rdev->config.r600.max_pipes = 4;
1961 rdev->config.r600.max_tile_pipes = 8;
1962 rdev->config.r600.max_simds = 4;
1963 rdev->config.r600.max_backends = 4;
1964 rdev->config.r600.max_gprs = 256;
1965 rdev->config.r600.max_threads = 192;
1966 rdev->config.r600.max_stack_entries = 256;
1967 rdev->config.r600.max_hw_contexts = 8;
1968 rdev->config.r600.max_gs_threads = 16;
1969 rdev->config.r600.sx_max_export_size = 128;
1970 rdev->config.r600.sx_max_export_pos_size = 16;
1971 rdev->config.r600.sx_max_export_smx_size = 128;
1972 rdev->config.r600.sq_num_cf_insts = 2;
1976 rdev->config.r600.max_pipes = 2;
1977 rdev->config.r600.max_tile_pipes = 2;
1978 rdev->config.r600.max_simds = 3;
1979 rdev->config.r600.max_backends = 1;
1980 rdev->config.r600.max_gprs = 128;
1981 rdev->config.r600.max_threads = 192;
1982 rdev->config.r600.max_stack_entries = 128;
1983 rdev->config.r600.max_hw_contexts = 8;
1984 rdev->config.r600.max_gs_threads = 4;
1985 rdev->config.r600.sx_max_export_size = 128;
1986 rdev->config.r600.sx_max_export_pos_size = 16;
1987 rdev->config.r600.sx_max_export_smx_size = 128;
1988 rdev->config.r600.sq_num_cf_insts = 2;
1994 rdev->config.r600.max_pipes = 1;
1995 rdev->config.r600.max_tile_pipes = 1;
1996 rdev->config.r600.max_simds = 2;
1997 rdev->config.r600.max_backends = 1;
1998 rdev->config.r600.max_gprs = 128;
1999 rdev->config.r600.max_threads = 192;
2000 rdev->config.r600.max_stack_entries = 128;
2001 rdev->config.r600.max_hw_contexts = 4;
2002 rdev->config.r600.max_gs_threads = 4;
2003 rdev->config.r600.sx_max_export_size = 128;
2004 rdev->config.r600.sx_max_export_pos_size = 16;
2005 rdev->config.r600.sx_max_export_smx_size = 128;
2006 rdev->config.r600.sq_num_cf_insts = 1;
2009 rdev->config.r600.max_pipes = 4;
2010 rdev->config.r600.max_tile_pipes = 4;
2011 rdev->config.r600.max_simds = 4;
2012 rdev->config.r600.max_backends = 4;
2013 rdev->config.r600.max_gprs = 192;
2014 rdev->config.r600.max_threads = 192;
2015 rdev->config.r600.max_stack_entries = 256;
2016 rdev->config.r600.max_hw_contexts = 8;
2017 rdev->config.r600.max_gs_threads = 16;
2018 rdev->config.r600.sx_max_export_size = 128;
2019 rdev->config.r600.sx_max_export_pos_size = 16;
2020 rdev->config.r600.sx_max_export_smx_size = 128;
2021 rdev->config.r600.sq_num_cf_insts = 2;
2027 /* Initialize HDP */
2028 for (i = 0, j = 0; i < 32; i++, j += 0x18) {
2029 WREG32((0x2c14 + j), 0x00000000);
2030 WREG32((0x2c18 + j), 0x00000000);
2031 WREG32((0x2c1c + j), 0x00000000);
2032 WREG32((0x2c20 + j), 0x00000000);
2033 WREG32((0x2c24 + j), 0x00000000);
2036 WREG32(GRBM_CNTL, GRBM_READ_TIMEOUT(0xff));
2040 ramcfg = RREG32(RAMCFG);
2041 switch (rdev->config.r600.max_tile_pipes) {
2043 tiling_config |= PIPE_TILING(0);
2046 tiling_config |= PIPE_TILING(1);
2049 tiling_config |= PIPE_TILING(2);
2052 tiling_config |= PIPE_TILING(3);
2057 rdev->config.r600.tiling_npipes = rdev->config.r600.max_tile_pipes;
2058 rdev->config.r600.tiling_nbanks = 4 << ((ramcfg & NOOFBANK_MASK) >> NOOFBANK_SHIFT);
2059 tiling_config |= BANK_TILING((ramcfg & NOOFBANK_MASK) >> NOOFBANK_SHIFT);
2060 tiling_config |= GROUP_SIZE((ramcfg & BURSTLENGTH_MASK) >> BURSTLENGTH_SHIFT);
2062 tmp = (ramcfg & NOOFROWS_MASK) >> NOOFROWS_SHIFT;
2064 tiling_config |= ROW_TILING(3);
2065 tiling_config |= SAMPLE_SPLIT(3);
2067 tiling_config |= ROW_TILING(tmp);
2068 tiling_config |= SAMPLE_SPLIT(tmp);
2070 tiling_config |= BANK_SWAPS(1);
2072 cc_gc_shader_pipe_config = RREG32(CC_GC_SHADER_PIPE_CONFIG) & 0x00ffff00;
2073 tmp = rdev->config.r600.max_simds -
2074 r600_count_pipe_bits((cc_gc_shader_pipe_config >> 16) & R6XX_MAX_SIMDS_MASK);
2075 rdev->config.r600.active_simds = tmp;
2077 disabled_rb_mask = (RREG32(CC_RB_BACKEND_DISABLE) >> 16) & R6XX_MAX_BACKENDS_MASK;
2079 for (i = 0; i < rdev->config.r600.max_backends; i++)
2081 /* if all the backends are disabled, fix it up here */
2082 if ((disabled_rb_mask & tmp) == tmp) {
2083 for (i = 0; i < rdev->config.r600.max_backends; i++)
2084 disabled_rb_mask &= ~(1 << i);
2086 tmp = (tiling_config & PIPE_TILING__MASK) >> PIPE_TILING__SHIFT;
2087 tmp = r6xx_remap_render_backend(rdev, tmp, rdev->config.r600.max_backends,
2088 R6XX_MAX_BACKENDS, disabled_rb_mask);
2089 tiling_config |= tmp << 16;
2090 rdev->config.r600.backend_map = tmp;
2092 rdev->config.r600.tile_config = tiling_config;
2093 WREG32(GB_TILING_CONFIG, tiling_config);
2094 WREG32(DCP_TILING_CONFIG, tiling_config & 0xffff);
2095 WREG32(HDP_TILING_CONFIG, tiling_config & 0xffff);
2096 WREG32(DMA_TILING_CONFIG, tiling_config & 0xffff);
2098 tmp = R6XX_MAX_PIPES - r600_count_pipe_bits((cc_gc_shader_pipe_config & INACTIVE_QD_PIPES_MASK) >> 8);
2099 WREG32(VGT_OUT_DEALLOC_CNTL, (tmp * 4) & DEALLOC_DIST_MASK);
2100 WREG32(VGT_VERTEX_REUSE_BLOCK_CNTL, ((tmp * 4) - 2) & VTX_REUSE_DEPTH_MASK);
2102 /* Setup some CP states */
2103 WREG32(CP_QUEUE_THRESHOLDS, (ROQ_IB1_START(0x16) | ROQ_IB2_START(0x2b)));
2104 WREG32(CP_MEQ_THRESHOLDS, (MEQ_END(0x40) | ROQ_END(0x40)));
2106 WREG32(TA_CNTL_AUX, (DISABLE_CUBE_ANISO | SYNC_GRADIENT |
2107 SYNC_WALKER | SYNC_ALIGNER));
2108 /* Setup various GPU states */
2109 if (rdev->family == CHIP_RV670)
2110 WREG32(ARB_GDEC_RD_CNTL, 0x00000021);
2112 tmp = RREG32(SX_DEBUG_1);
2113 tmp |= SMX_EVENT_RELEASE;
2114 if ((rdev->family > CHIP_R600))
2115 tmp |= ENABLE_NEW_SMX_ADDRESS;
2116 WREG32(SX_DEBUG_1, tmp);
2118 if (((rdev->family) == CHIP_R600) ||
2119 ((rdev->family) == CHIP_RV630) ||
2120 ((rdev->family) == CHIP_RV610) ||
2121 ((rdev->family) == CHIP_RV620) ||
2122 ((rdev->family) == CHIP_RS780) ||
2123 ((rdev->family) == CHIP_RS880)) {
2124 WREG32(DB_DEBUG, PREZ_MUST_WAIT_FOR_POSTZ_DONE);
2126 WREG32(DB_DEBUG, 0);
2128 WREG32(DB_WATERMARKS, (DEPTH_FREE(4) | DEPTH_CACHELINE_FREE(16) |
2129 DEPTH_FLUSH(16) | DEPTH_PENDING_FREE(4)));
2131 WREG32(PA_SC_MULTI_CHIP_CNTL, 0);
2132 WREG32(VGT_NUM_INSTANCES, 0);
2134 WREG32(SPI_CONFIG_CNTL, GPR_WRITE_PRIORITY(0));
2135 WREG32(SPI_CONFIG_CNTL_1, VTX_DONE_DELAY(0));
2137 tmp = RREG32(SQ_MS_FIFO_SIZES);
2138 if (((rdev->family) == CHIP_RV610) ||
2139 ((rdev->family) == CHIP_RV620) ||
2140 ((rdev->family) == CHIP_RS780) ||
2141 ((rdev->family) == CHIP_RS880)) {
2142 tmp = (CACHE_FIFO_SIZE(0xa) |
2143 FETCH_FIFO_HIWATER(0xa) |
2144 DONE_FIFO_HIWATER(0xe0) |
2145 ALU_UPDATE_FIFO_HIWATER(0x8));
2146 } else if (((rdev->family) == CHIP_R600) ||
2147 ((rdev->family) == CHIP_RV630)) {
2148 tmp &= ~DONE_FIFO_HIWATER(0xff);
2149 tmp |= DONE_FIFO_HIWATER(0x4);
2151 WREG32(SQ_MS_FIFO_SIZES, tmp);
2153 /* SQ_CONFIG, SQ_GPR_RESOURCE_MGMT, SQ_THREAD_RESOURCE_MGMT, SQ_STACK_RESOURCE_MGMT
2154 * should be adjusted as needed by the 2D/3D drivers. This just sets default values
2156 sq_config = RREG32(SQ_CONFIG);
2157 sq_config &= ~(PS_PRIO(3) |
2161 sq_config |= (DX9_CONSTS |
2168 if ((rdev->family) == CHIP_R600) {
2169 sq_gpr_resource_mgmt_1 = (NUM_PS_GPRS(124) |
2171 NUM_CLAUSE_TEMP_GPRS(4));
2172 sq_gpr_resource_mgmt_2 = (NUM_GS_GPRS(0) |
2174 sq_thread_resource_mgmt = (NUM_PS_THREADS(136) |
2175 NUM_VS_THREADS(48) |
2178 sq_stack_resource_mgmt_1 = (NUM_PS_STACK_ENTRIES(128) |
2179 NUM_VS_STACK_ENTRIES(128));
2180 sq_stack_resource_mgmt_2 = (NUM_GS_STACK_ENTRIES(0) |
2181 NUM_ES_STACK_ENTRIES(0));
2182 } else if (((rdev->family) == CHIP_RV610) ||
2183 ((rdev->family) == CHIP_RV620) ||
2184 ((rdev->family) == CHIP_RS780) ||
2185 ((rdev->family) == CHIP_RS880)) {
2186 /* no vertex cache */
2187 sq_config &= ~VC_ENABLE;
2189 sq_gpr_resource_mgmt_1 = (NUM_PS_GPRS(44) |
2191 NUM_CLAUSE_TEMP_GPRS(2));
2192 sq_gpr_resource_mgmt_2 = (NUM_GS_GPRS(17) |
2194 sq_thread_resource_mgmt = (NUM_PS_THREADS(79) |
2195 NUM_VS_THREADS(78) |
2197 NUM_ES_THREADS(31));
2198 sq_stack_resource_mgmt_1 = (NUM_PS_STACK_ENTRIES(40) |
2199 NUM_VS_STACK_ENTRIES(40));
2200 sq_stack_resource_mgmt_2 = (NUM_GS_STACK_ENTRIES(32) |
2201 NUM_ES_STACK_ENTRIES(16));
2202 } else if (((rdev->family) == CHIP_RV630) ||
2203 ((rdev->family) == CHIP_RV635)) {
2204 sq_gpr_resource_mgmt_1 = (NUM_PS_GPRS(44) |
2206 NUM_CLAUSE_TEMP_GPRS(2));
2207 sq_gpr_resource_mgmt_2 = (NUM_GS_GPRS(18) |
2209 sq_thread_resource_mgmt = (NUM_PS_THREADS(79) |
2210 NUM_VS_THREADS(78) |
2212 NUM_ES_THREADS(31));
2213 sq_stack_resource_mgmt_1 = (NUM_PS_STACK_ENTRIES(40) |
2214 NUM_VS_STACK_ENTRIES(40));
2215 sq_stack_resource_mgmt_2 = (NUM_GS_STACK_ENTRIES(32) |
2216 NUM_ES_STACK_ENTRIES(16));
2217 } else if ((rdev->family) == CHIP_RV670) {
2218 sq_gpr_resource_mgmt_1 = (NUM_PS_GPRS(44) |
2220 NUM_CLAUSE_TEMP_GPRS(2));
2221 sq_gpr_resource_mgmt_2 = (NUM_GS_GPRS(17) |
2223 sq_thread_resource_mgmt = (NUM_PS_THREADS(79) |
2224 NUM_VS_THREADS(78) |
2226 NUM_ES_THREADS(31));
2227 sq_stack_resource_mgmt_1 = (NUM_PS_STACK_ENTRIES(64) |
2228 NUM_VS_STACK_ENTRIES(64));
2229 sq_stack_resource_mgmt_2 = (NUM_GS_STACK_ENTRIES(64) |
2230 NUM_ES_STACK_ENTRIES(64));
2233 WREG32(SQ_CONFIG, sq_config);
2234 WREG32(SQ_GPR_RESOURCE_MGMT_1, sq_gpr_resource_mgmt_1);
2235 WREG32(SQ_GPR_RESOURCE_MGMT_2, sq_gpr_resource_mgmt_2);
2236 WREG32(SQ_THREAD_RESOURCE_MGMT, sq_thread_resource_mgmt);
2237 WREG32(SQ_STACK_RESOURCE_MGMT_1, sq_stack_resource_mgmt_1);
2238 WREG32(SQ_STACK_RESOURCE_MGMT_2, sq_stack_resource_mgmt_2);
2240 if (((rdev->family) == CHIP_RV610) ||
2241 ((rdev->family) == CHIP_RV620) ||
2242 ((rdev->family) == CHIP_RS780) ||
2243 ((rdev->family) == CHIP_RS880)) {
2244 WREG32(VGT_CACHE_INVALIDATION, CACHE_INVALIDATION(TC_ONLY));
2246 WREG32(VGT_CACHE_INVALIDATION, CACHE_INVALIDATION(VC_AND_TC));
2249 /* More default values. 2D/3D driver should adjust as needed */
2250 WREG32(PA_SC_AA_SAMPLE_LOCS_2S, (S0_X(0xc) | S0_Y(0x4) |
2251 S1_X(0x4) | S1_Y(0xc)));
2252 WREG32(PA_SC_AA_SAMPLE_LOCS_4S, (S0_X(0xe) | S0_Y(0xe) |
2253 S1_X(0x2) | S1_Y(0x2) |
2254 S2_X(0xa) | S2_Y(0x6) |
2255 S3_X(0x6) | S3_Y(0xa)));
2256 WREG32(PA_SC_AA_SAMPLE_LOCS_8S_WD0, (S0_X(0xe) | S0_Y(0xb) |
2257 S1_X(0x4) | S1_Y(0xc) |
2258 S2_X(0x1) | S2_Y(0x6) |
2259 S3_X(0xa) | S3_Y(0xe)));
2260 WREG32(PA_SC_AA_SAMPLE_LOCS_8S_WD1, (S4_X(0x6) | S4_Y(0x1) |
2261 S5_X(0x0) | S5_Y(0x0) |
2262 S6_X(0xb) | S6_Y(0x4) |
2263 S7_X(0x7) | S7_Y(0x8)));
2265 WREG32(VGT_STRMOUT_EN, 0);
2266 tmp = rdev->config.r600.max_pipes * 16;
2267 switch (rdev->family) {
2283 WREG32(VGT_ES_PER_GS, 128);
2284 WREG32(VGT_GS_PER_ES, tmp);
2285 WREG32(VGT_GS_PER_VS, 2);
2286 WREG32(VGT_GS_VERTEX_REUSE, 16);
2288 /* more default values. 2D/3D driver should adjust as needed */
2289 WREG32(PA_SC_LINE_STIPPLE_STATE, 0);
2290 WREG32(VGT_STRMOUT_EN, 0);
2292 WREG32(PA_SC_MODE_CNTL, 0);
2293 WREG32(PA_SC_AA_CONFIG, 0);
2294 WREG32(PA_SC_LINE_STIPPLE, 0);
2295 WREG32(SPI_INPUT_Z, 0);
2296 WREG32(SPI_PS_IN_CONTROL_0, NUM_INTERP(2));
2297 WREG32(CB_COLOR7_FRAG, 0);
2299 /* Clear render buffer base addresses */
2300 WREG32(CB_COLOR0_BASE, 0);
2301 WREG32(CB_COLOR1_BASE, 0);
2302 WREG32(CB_COLOR2_BASE, 0);
2303 WREG32(CB_COLOR3_BASE, 0);
2304 WREG32(CB_COLOR4_BASE, 0);
2305 WREG32(CB_COLOR5_BASE, 0);
2306 WREG32(CB_COLOR6_BASE, 0);
2307 WREG32(CB_COLOR7_BASE, 0);
2308 WREG32(CB_COLOR7_FRAG, 0);
2310 switch (rdev->family) {
2315 tmp = TC_L2_SIZE(8);
2319 tmp = TC_L2_SIZE(4);
2322 tmp = TC_L2_SIZE(0) | L2_DISABLE_LATE_HIT;
2325 tmp = TC_L2_SIZE(0);
2328 WREG32(TC_CNTL, tmp);
2330 tmp = RREG32(HDP_HOST_PATH_CNTL);
2331 WREG32(HDP_HOST_PATH_CNTL, tmp);
2333 tmp = RREG32(ARB_POP);
2334 tmp |= ENABLE_TC128;
2335 WREG32(ARB_POP, tmp);
2337 WREG32(PA_SC_MULTI_CHIP_CNTL, 0);
2338 WREG32(PA_CL_ENHANCE, (CLIP_VTX_REORDER_ENA |
2340 WREG32(PA_SC_ENHANCE, FORCE_EOV_MAX_CLK_CNT(4095));
2341 WREG32(VC_ENHANCE, 0);
2346 * Indirect registers accessor
2348 u32 r600_pciep_rreg(struct radeon_device *rdev, u32 reg)
2350 unsigned long flags;
2353 spin_lock_irqsave(&rdev->pciep_idx_lock, flags);
2354 WREG32(PCIE_PORT_INDEX, ((reg) & 0xff));
2355 (void)RREG32(PCIE_PORT_INDEX);
2356 r = RREG32(PCIE_PORT_DATA);
2357 spin_unlock_irqrestore(&rdev->pciep_idx_lock, flags);
2361 void r600_pciep_wreg(struct radeon_device *rdev, u32 reg, u32 v)
2363 unsigned long flags;
2365 spin_lock_irqsave(&rdev->pciep_idx_lock, flags);
2366 WREG32(PCIE_PORT_INDEX, ((reg) & 0xff));
2367 (void)RREG32(PCIE_PORT_INDEX);
2368 WREG32(PCIE_PORT_DATA, (v));
2369 (void)RREG32(PCIE_PORT_DATA);
2370 spin_unlock_irqrestore(&rdev->pciep_idx_lock, flags);
2376 void r600_cp_stop(struct radeon_device *rdev)
2378 if (rdev->asic->copy.copy_ring_index == RADEON_RING_TYPE_GFX_INDEX)
2379 radeon_ttm_set_active_vram_size(rdev, rdev->mc.visible_vram_size);
2380 WREG32(R_0086D8_CP_ME_CNTL, S_0086D8_CP_ME_HALT(1));
2381 WREG32(SCRATCH_UMSK, 0);
2382 rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ready = false;
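/*
 * r600_init_microcode - fetch the PFP/ME/RLC (and SMC) firmware images
 *
 * Picks the firmware names for the current chip family, validates that each
 * image has the expected size, and releases everything on a hard failure.
 * An SMC load failure on rv770..hemlock is tolerated; the pointer is simply
 * cleared instead of failing the whole init.
 */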
2385 int r600_init_microcode(struct radeon_device *rdev)
2387 const char *chip_name;
2388 const char *rlc_chip_name;
2389 const char *smc_chip_name = "RV770";
2390 size_t pfp_req_size, me_req_size, rlc_req_size, smc_req_size = 0;
2396 switch (rdev->family) {
2399 rlc_chip_name = "R600";
2402 chip_name = "RV610";
2403 rlc_chip_name = "R600";
2406 chip_name = "RV630";
2407 rlc_chip_name = "R600";
2410 chip_name = "RV620";
2411 rlc_chip_name = "R600";
2414 chip_name = "RV635";
2415 rlc_chip_name = "R600";
2418 chip_name = "RV670";
2419 rlc_chip_name = "R600";
2423 chip_name = "RS780";
2424 rlc_chip_name = "R600";
2427 chip_name = "RV770";
2428 rlc_chip_name = "R700";
2429 smc_chip_name = "RV770";
2430 smc_req_size = ALIGN(RV770_SMC_UCODE_SIZE, 4);
2433 chip_name = "RV730";
2434 rlc_chip_name = "R700";
2435 smc_chip_name = "RV730";
2436 smc_req_size = ALIGN(RV730_SMC_UCODE_SIZE, 4);
2439 chip_name = "RV710";
2440 rlc_chip_name = "R700";
2441 smc_chip_name = "RV710";
2442 smc_req_size = ALIGN(RV710_SMC_UCODE_SIZE, 4);
2445 chip_name = "RV730";
2446 rlc_chip_name = "R700";
2447 smc_chip_name = "RV740";
2448 smc_req_size = ALIGN(RV740_SMC_UCODE_SIZE, 4);
2451 chip_name = "CEDAR";
2452 rlc_chip_name = "CEDAR";
2453 smc_chip_name = "CEDAR";
2454 smc_req_size = ALIGN(CEDAR_SMC_UCODE_SIZE, 4);
2457 chip_name = "REDWOOD";
2458 rlc_chip_name = "REDWOOD";
2459 smc_chip_name = "REDWOOD";
2460 smc_req_size = ALIGN(REDWOOD_SMC_UCODE_SIZE, 4);
2463 chip_name = "JUNIPER";
2464 rlc_chip_name = "JUNIPER";
2465 smc_chip_name = "JUNIPER";
2466 smc_req_size = ALIGN(JUNIPER_SMC_UCODE_SIZE, 4);
2470 chip_name = "CYPRESS";
2471 rlc_chip_name = "CYPRESS";
2472 smc_chip_name = "CYPRESS";
2473 smc_req_size = ALIGN(CYPRESS_SMC_UCODE_SIZE, 4);
2477 rlc_chip_name = "SUMO";
2481 rlc_chip_name = "SUMO";
2484 chip_name = "SUMO2";
2485 rlc_chip_name = "SUMO";
2490 if (rdev->family >= CHIP_CEDAR) {
2491 pfp_req_size = EVERGREEN_PFP_UCODE_SIZE * 4;
2492 me_req_size = EVERGREEN_PM4_UCODE_SIZE * 4;
2493 rlc_req_size = EVERGREEN_RLC_UCODE_SIZE * 4;
2494 } else if (rdev->family >= CHIP_RV770) {
2495 pfp_req_size = R700_PFP_UCODE_SIZE * 4;
2496 me_req_size = R700_PM4_UCODE_SIZE * 4;
2497 rlc_req_size = R700_RLC_UCODE_SIZE * 4;
2499 pfp_req_size = R600_PFP_UCODE_SIZE * 4;
2500 me_req_size = R600_PM4_UCODE_SIZE * 12;
2501 rlc_req_size = R600_RLC_UCODE_SIZE * 4;
2504 DRM_INFO("Loading %s Microcode\n", chip_name);
2506 snprintf(fw_name, sizeof(fw_name), "/*(DEBLOBBED)*/", chip_name);
2507 err = reject_firmware(&rdev->pfp_fw, fw_name, rdev->dev);
2510 if (rdev->pfp_fw->size != pfp_req_size) {
2511 pr_err("r600_cp: Bogus length %zu in firmware \"%s\"\n",
2512 rdev->pfp_fw->size, fw_name);
2517 snprintf(fw_name, sizeof(fw_name), "/*(DEBLOBBED)*/", chip_name);
2518 err = reject_firmware(&rdev->me_fw, fw_name, rdev->dev);
2521 if (rdev->me_fw->size != me_req_size) {
2522 pr_err("r600_cp: Bogus length %zu in firmware \"%s\"\n",
2523 rdev->me_fw->size, fw_name);
2527 snprintf(fw_name, sizeof(fw_name), "/*(DEBLOBBED)*/", rlc_chip_name);
2528 err = reject_firmware(&rdev->rlc_fw, fw_name, rdev->dev);
2531 if (rdev->rlc_fw->size != rlc_req_size) {
2532 pr_err("r600_rlc: Bogus length %zu in firmware \"%s\"\n",
2533 rdev->rlc_fw->size, fw_name);
2537 if ((rdev->family >= CHIP_RV770) && (rdev->family <= CHIP_HEMLOCK)) {
2538 snprintf(fw_name, sizeof(fw_name), "/*(DEBLOBBED)*/", smc_chip_name);
2539 err = reject_firmware(&rdev->smc_fw, fw_name, rdev->dev);
2541 pr_err("smc: error loading firmware \"%s\"\n", fw_name);
2542 release_firmware(rdev->smc_fw);
2543 rdev->smc_fw = NULL;
2545 } else if (rdev->smc_fw->size != smc_req_size) {
2546 pr_err("smc: Bogus length %zu in firmware \"%s\"\n",
2547 rdev->smc_fw->size, fw_name);
2555 pr_err("r600_cp: Failed to load firmware \"%s\"\n",
2557 release_firmware(rdev->pfp_fw);
2558 rdev->pfp_fw = NULL;
2559 release_firmware(rdev->me_fw);
2561 release_firmware(rdev->rlc_fw);
2562 rdev->rlc_fw = NULL;
2563 release_firmware(rdev->smc_fw);
2564 rdev->smc_fw = NULL;
2569 u32 r600_gfx_get_rptr(struct radeon_device *rdev,
2570 struct radeon_ring *ring)
2574 if (rdev->wb.enabled)
2575 rptr = rdev->wb.wb[ring->rptr_offs/4];
2577 rptr = RREG32(R600_CP_RB_RPTR);
2582 u32 r600_gfx_get_wptr(struct radeon_device *rdev,
2583 struct radeon_ring *ring)
2585 return RREG32(R600_CP_RB_WPTR);
2588 void r600_gfx_set_wptr(struct radeon_device *rdev,
2589 struct radeon_ring *ring)
2591 WREG32(R600_CP_RB_WPTR, ring->wptr);
2592 (void)RREG32(R600_CP_RB_WPTR);
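/*
 * r600_cp_load_microcode - upload the ME and PFP microcode to the CP
 *
 * Requires the firmware fetched by r600_init_microcode; soft-resets the CP,
 * then writes the big-endian ucode words into CP_ME_RAM_DATA and
 * CP_PFP_UCODE_DATA and resets the load addresses.
 */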
2595 static int r600_cp_load_microcode(struct radeon_device *rdev)
2597 const __be32 *fw_data;
2600 if (!rdev->me_fw || !rdev->pfp_fw)
2609 RB_NO_UPDATE | RB_BLKSZ(15) | RB_BUFSZ(3));
2612 WREG32(GRBM_SOFT_RESET, SOFT_RESET_CP);
2613 RREG32(GRBM_SOFT_RESET);
2615 WREG32(GRBM_SOFT_RESET, 0);
2617 WREG32(CP_ME_RAM_WADDR, 0);
2619 fw_data = (const __be32 *)rdev->me_fw->data;
2620 WREG32(CP_ME_RAM_WADDR, 0);
2621 for (i = 0; i < R600_PM4_UCODE_SIZE * 3; i++)
2622 WREG32(CP_ME_RAM_DATA,
2623 be32_to_cpup(fw_data++));
2625 fw_data = (const __be32 *)rdev->pfp_fw->data;
2626 WREG32(CP_PFP_UCODE_ADDR, 0);
2627 for (i = 0; i < R600_PFP_UCODE_SIZE; i++)
2628 WREG32(CP_PFP_UCODE_DATA,
2629 be32_to_cpup(fw_data++));
2631 WREG32(CP_PFP_UCODE_ADDR, 0);
2632 WREG32(CP_ME_RAM_WADDR, 0);
2633 WREG32(CP_ME_RAM_RADDR, 0);
2637 int r600_cp_start(struct radeon_device *rdev)
2639 struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
2643 r = radeon_ring_lock(rdev, ring, 7);
2645 DRM_ERROR("radeon: cp failed to lock ring (%d).\n", r);
2648 radeon_ring_write(ring, PACKET3(PACKET3_ME_INITIALIZE, 5));
2649 radeon_ring_write(ring, 0x1);
2650 if (rdev->family >= CHIP_RV770) {
2651 radeon_ring_write(ring, 0x0);
2652 radeon_ring_write(ring, rdev->config.rv770.max_hw_contexts - 1);
2654 radeon_ring_write(ring, 0x3);
2655 radeon_ring_write(ring, rdev->config.r600.max_hw_contexts - 1);
2657 radeon_ring_write(ring, PACKET3_ME_INITIALIZE_DEVICE_ID(1));
2658 radeon_ring_write(ring, 0);
2659 radeon_ring_write(ring, 0);
2660 radeon_ring_unlock_commit(rdev, ring, false);
2663 WREG32(R_0086D8_CP_ME_CNTL, cp_me);
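/*
 * r600_cp_resume - program and start the gfx ring
 *
 * Soft-resets the CP, programs the ring buffer size, read/write pointers,
 * writeback addresses and ring base, then calls r600_cp_start() and
 * verifies the ring with radeon_ring_test() before reporting success.
 */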
2667 int r600_cp_resume(struct radeon_device *rdev)
2669 struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
2675 WREG32(GRBM_SOFT_RESET, SOFT_RESET_CP);
2676 RREG32(GRBM_SOFT_RESET);
2678 WREG32(GRBM_SOFT_RESET, 0);
2680 /* Set ring buffer size */
2681 rb_bufsz = order_base_2(ring->ring_size / 8);
2682 tmp = (order_base_2(RADEON_GPU_PAGE_SIZE/8) << 8) | rb_bufsz;
2684 tmp |= BUF_SWAP_32BIT;
2686 WREG32(CP_RB_CNTL, tmp);
2687 WREG32(CP_SEM_WAIT_TIMER, 0x0);
2689 /* Set the write pointer delay */
2690 WREG32(CP_RB_WPTR_DELAY, 0);
2692 /* Initialize the ring buffer's read and write pointers */
2693 WREG32(CP_RB_CNTL, tmp | RB_RPTR_WR_ENA);
2694 WREG32(CP_RB_RPTR_WR, 0);
2696 WREG32(CP_RB_WPTR, ring->wptr);
2698 /* set the wb address whether it's enabled or not */
2699 WREG32(CP_RB_RPTR_ADDR,
2700 ((rdev->wb.gpu_addr + RADEON_WB_CP_RPTR_OFFSET) & 0xFFFFFFFC));
2701 WREG32(CP_RB_RPTR_ADDR_HI, upper_32_bits(rdev->wb.gpu_addr + RADEON_WB_CP_RPTR_OFFSET) & 0xFF);
2702 WREG32(SCRATCH_ADDR, ((rdev->wb.gpu_addr + RADEON_WB_SCRATCH_OFFSET) >> 8) & 0xFFFFFFFF);
2704 if (rdev->wb.enabled)
2705 WREG32(SCRATCH_UMSK, 0xff);
2707 tmp |= RB_NO_UPDATE;
2708 WREG32(SCRATCH_UMSK, 0);
2712 WREG32(CP_RB_CNTL, tmp);
2714 WREG32(CP_RB_BASE, ring->gpu_addr >> 8);
2715 WREG32(CP_DEBUG, (1 << 27) | (1 << 28));
2717 r600_cp_start(rdev);
2719 r = radeon_ring_test(rdev, RADEON_RING_TYPE_GFX_INDEX, ring);
2721 ring->ready = false;
2725 if (rdev->asic->copy.copy_ring_index == RADEON_RING_TYPE_GFX_INDEX)
2726 radeon_ttm_set_active_vram_size(rdev, rdev->mc.real_vram_size);
2731 void r600_ring_init(struct radeon_device *rdev, struct radeon_ring *ring, unsigned ring_size)
2736 /* Align ring size */
2737 rb_bufsz = order_base_2(ring_size / 8);
2738 ring_size = (1 << (rb_bufsz + 1)) * 4;
2739 ring->ring_size = ring_size;
2740 ring->align_mask = 16 - 1;
2742 if (radeon_ring_supports_scratch_reg(rdev, ring)) {
2743 r = radeon_scratch_get(rdev, &ring->rptr_save_reg);
2745 DRM_ERROR("failed to get scratch reg for rptr save (%d).\n", r);
2746 ring->rptr_save_reg = 0;
2751 void r600_cp_fini(struct radeon_device *rdev)
2753 struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
2755 radeon_ring_fini(rdev, ring);
2756 radeon_scratch_free(rdev, ring->rptr_save_reg);
2760 * GPU scratch registers helpers function.
2762 void r600_scratch_init(struct radeon_device *rdev)
2766 rdev->scratch.num_reg = 7;
2767 rdev->scratch.reg_base = SCRATCH_REG0;
2768 for (i = 0; i < rdev->scratch.num_reg; i++) {
2769 rdev->scratch.free[i] = true;
2770 rdev->scratch.reg[i] = rdev->scratch.reg_base + (i * 4);
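/*
 * r600_ring_test - basic sanity check of the gfx ring
 *
 * Grabs a scratch register, emits a SET_CONFIG_REG packet that writes
 * 0xDEADBEEF to it, then polls the register until the value shows up or
 * the usec timeout expires.
 */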
2774 int r600_ring_test(struct radeon_device *rdev, struct radeon_ring *ring)
2781 r = radeon_scratch_get(rdev, &scratch);
2783 DRM_ERROR("radeon: cp failed to get scratch reg (%d).\n", r);
2786 WREG32(scratch, 0xCAFEDEAD);
2787 r = radeon_ring_lock(rdev, ring, 3);
2789 DRM_ERROR("radeon: cp failed to lock ring %d (%d).\n", ring->idx, r);
2790 radeon_scratch_free(rdev, scratch);
2793 radeon_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 1));
2794 radeon_ring_write(ring, ((scratch - PACKET3_SET_CONFIG_REG_OFFSET) >> 2));
2795 radeon_ring_write(ring, 0xDEADBEEF);
2796 radeon_ring_unlock_commit(rdev, ring, false);
2797 for (i = 0; i < rdev->usec_timeout; i++) {
2798 tmp = RREG32(scratch);
2799 if (tmp == 0xDEADBEEF)
2803 if (i < rdev->usec_timeout) {
2804 DRM_INFO("ring test on %d succeeded in %d usecs\n", ring->idx, i);
2806 DRM_ERROR("radeon: ring %d test failed (scratch(0x%04X)=0x%08X)\n",
2807 ring->idx, scratch, tmp);
2810 radeon_scratch_free(rdev, scratch);
2815 * CP fences/semaphores
2818 void r600_fence_ring_emit(struct radeon_device *rdev,
2819 struct radeon_fence *fence)
2821 struct radeon_ring *ring = &rdev->ring[fence->ring];
2822 u32 cp_coher_cntl = PACKET3_TC_ACTION_ENA | PACKET3_VC_ACTION_ENA |
2823 PACKET3_SH_ACTION_ENA;
2825 if (rdev->family >= CHIP_RV770)
2826 cp_coher_cntl |= PACKET3_FULL_CACHE_ENA;
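/*
 * Two fence paths: with write-back events the fence value is written by an
 * EVENT_WRITE_EOP packet (which also raises the interrupt); without them
 * the sequence is written to the fence scratch register and the interrupt
 * is poked via a packet0 write to CP_INT_STATUS.
 */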
2828 if (rdev->wb.use_event) {
2829 u64 addr = rdev->fence_drv[fence->ring].gpu_addr;
2830 /* flush read cache over gart */
2831 radeon_ring_write(ring, PACKET3(PACKET3_SURFACE_SYNC, 3));
2832 radeon_ring_write(ring, cp_coher_cntl);
2833 radeon_ring_write(ring, 0xFFFFFFFF);
2834 radeon_ring_write(ring, 0);
2835 radeon_ring_write(ring, 10); /* poll interval */
2836 /* EVENT_WRITE_EOP - flush caches, send int */
2837 radeon_ring_write(ring, PACKET3(PACKET3_EVENT_WRITE_EOP, 4));
2838 radeon_ring_write(ring, EVENT_TYPE(CACHE_FLUSH_AND_INV_EVENT_TS) | EVENT_INDEX(5));
2839 radeon_ring_write(ring, lower_32_bits(addr));
2840 radeon_ring_write(ring, (upper_32_bits(addr) & 0xff) | DATA_SEL(1) | INT_SEL(2));
2841 radeon_ring_write(ring, fence->seq);
2842 radeon_ring_write(ring, 0);
2844 /* flush read cache over gart */
2845 radeon_ring_write(ring, PACKET3(PACKET3_SURFACE_SYNC, 3));
2846 radeon_ring_write(ring, cp_coher_cntl);
2847 radeon_ring_write(ring, 0xFFFFFFFF);
2848 radeon_ring_write(ring, 0);
2849 radeon_ring_write(ring, 10); /* poll interval */
2850 radeon_ring_write(ring, PACKET3(PACKET3_EVENT_WRITE, 0));
2851 radeon_ring_write(ring, EVENT_TYPE(CACHE_FLUSH_AND_INV_EVENT) | EVENT_INDEX(0));
2852 /* wait for 3D idle clean */
2853 radeon_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 1));
2854 radeon_ring_write(ring, (WAIT_UNTIL - PACKET3_SET_CONFIG_REG_OFFSET) >> 2);
2855 radeon_ring_write(ring, WAIT_3D_IDLE_bit | WAIT_3D_IDLECLEAN_bit);
2856 /* Emit fence sequence & fire IRQ */
2857 radeon_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 1));
2858 radeon_ring_write(ring, ((rdev->fence_drv[fence->ring].scratch_reg - PACKET3_SET_CONFIG_REG_OFFSET) >> 2));
2859 radeon_ring_write(ring, fence->seq);
2860 /* CP_INTERRUPT packet 3 no longer exists, use packet 0 */
2861 radeon_ring_write(ring, PACKET0(CP_INT_STATUS, 0));
2862 radeon_ring_write(ring, RB_INT_STAT);
2867 * r600_semaphore_ring_emit - emit a semaphore on the CP ring
2869 * @rdev: radeon_device pointer
2870 * @ring: radeon ring buffer object
2871 * @semaphore: radeon semaphore object
2872 * @emit_wait: Is this a semaphore wait?
2874 * Emits a semaphore signal/wait packet to the CP ring and prevents the PFP
2875 * from running ahead of semaphore waits.
2877 bool r600_semaphore_ring_emit(struct radeon_device *rdev,
2878 struct radeon_ring *ring,
2879 struct radeon_semaphore *semaphore,
2882 uint64_t addr = semaphore->gpu_addr;
2883 unsigned sel = emit_wait ? PACKET3_SEM_SEL_WAIT : PACKET3_SEM_SEL_SIGNAL;
2885 if (rdev->family < CHIP_CAYMAN)
2886 sel |= PACKET3_SEM_WAIT_ON_SIGNAL;
2888 radeon_ring_write(ring, PACKET3(PACKET3_MEM_SEMAPHORE, 1));
2889 radeon_ring_write(ring, lower_32_bits(addr));
2890 radeon_ring_write(ring, (upper_32_bits(addr) & 0xff) | sel);
2892 /* PFP_SYNC_ME packet only exists on 7xx+, only enable it on eg+ */
2893 if (emit_wait && (rdev->family >= CHIP_CEDAR)) {
2894 /* Prevent the PFP from running ahead of the semaphore wait */
2895 radeon_ring_write(ring, PACKET3(PACKET3_PFP_SYNC_ME, 0));
2896 radeon_ring_write(ring, 0x0);
2903 * r600_copy_cpdma - copy pages using the CP DMA engine
2905 * @rdev: radeon_device pointer
2906 * @src_offset: src GPU address
2907 * @dst_offset: dst GPU address
2908 * @num_gpu_pages: number of GPU pages to xfer
2909 * @resv: reservation object to sync to
2911 * Copy GPU pages using the CP DMA engine (r6xx+).
2912 * Used by the radeon ttm implementation to move pages if
2913 * registered as the asic copy callback.
2915 struct radeon_fence *r600_copy_cpdma(struct radeon_device *rdev,
2916 uint64_t src_offset, uint64_t dst_offset,
2917 unsigned num_gpu_pages,
2918 struct reservation_object *resv)
2920 struct radeon_fence *fence;
2921 struct radeon_sync sync;
2922 int ring_index = rdev->asic->copy.blit_ring_index;
2923 struct radeon_ring *ring = &rdev->ring[ring_index];
2924 u32 size_in_bytes, cur_size_in_bytes, tmp;
2928 radeon_sync_create(&sync);
2930 size_in_bytes = (num_gpu_pages << RADEON_GPU_PAGE_SHIFT);
2931 num_loops = DIV_ROUND_UP(size_in_bytes, 0x1fffff);
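/* CP DMA packets are limited to 0x1fffff bytes each, so split the copy */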
2932 r = radeon_ring_lock(rdev, ring, num_loops * 6 + 24);
2934 DRM_ERROR("radeon: moving bo (%d).\n", r);
2935 radeon_sync_free(rdev, &sync, NULL);
2939 radeon_sync_resv(rdev, &sync, resv, false);
2940 radeon_sync_rings(rdev, &sync, ring->idx);
2942 radeon_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 1));
2943 radeon_ring_write(ring, (WAIT_UNTIL - PACKET3_SET_CONFIG_REG_OFFSET) >> 2);
2944 radeon_ring_write(ring, WAIT_3D_IDLE_bit);
2945 for (i = 0; i < num_loops; i++) {
2946 cur_size_in_bytes = size_in_bytes;
2947 if (cur_size_in_bytes > 0x1fffff)
2948 cur_size_in_bytes = 0x1fffff;
2949 size_in_bytes -= cur_size_in_bytes;
2950 tmp = upper_32_bits(src_offset) & 0xff;
2951 if (size_in_bytes == 0)
2952 tmp |= PACKET3_CP_DMA_CP_SYNC;
2953 radeon_ring_write(ring, PACKET3(PACKET3_CP_DMA, 4));
2954 radeon_ring_write(ring, lower_32_bits(src_offset));
2955 radeon_ring_write(ring, tmp);
2956 radeon_ring_write(ring, lower_32_bits(dst_offset));
2957 radeon_ring_write(ring, upper_32_bits(dst_offset) & 0xff);
2958 radeon_ring_write(ring, cur_size_in_bytes);
2959 src_offset += cur_size_in_bytes;
2960 dst_offset += cur_size_in_bytes;
2962 radeon_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 1));
2963 radeon_ring_write(ring, (WAIT_UNTIL - PACKET3_SET_CONFIG_REG_OFFSET) >> 2);
2964 radeon_ring_write(ring, WAIT_CP_DMA_IDLE_bit);
2966 r = radeon_fence_emit(rdev, &fence, ring->idx);
2968 radeon_ring_unlock_undo(rdev, ring);
2969 radeon_sync_free(rdev, &sync, NULL);
2973 radeon_ring_unlock_commit(rdev, ring, false);
2974 radeon_sync_free(rdev, &sync, fence);
2979 int r600_set_surface_reg(struct radeon_device *rdev, int reg,
2980 uint32_t tiling_flags, uint32_t pitch,
2981 uint32_t offset, uint32_t obj_size)
2983 /* FIXME: implement */
2987 void r600_clear_surface_reg(struct radeon_device *rdev, int reg)
2989 /* FIXME: implement */
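/*
 * UVD bring-up is split in three stages: r600_uvd_init() creates the UVD
 * block and its ring, r600_uvd_start() resumes the VCPU and starts the
 * fence ring, and r600_uvd_resume() initializes the ring itself. Each
 * stage bails out on failure so the later ones become no-ops
 * (r600_uvd_resume() checks has_uvd and the ring size before touching hw).
 */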
2992 static void r600_uvd_init(struct radeon_device *rdev)
2999 r = radeon_uvd_init(rdev);
3001 dev_err(rdev->dev, "failed UVD (%d) init.\n", r);
3003 * At this point rdev->uvd.vcpu_bo is NULL, which makes
3004 * uvd_v1_0_resume() fail early, so nothing happens there.
3005 * Going through that code path is therefore pointless,
3006 * which is why we disable UVD here.
3011 rdev->ring[R600_RING_TYPE_UVD_INDEX].ring_obj = NULL;
3012 r600_ring_init(rdev, &rdev->ring[R600_RING_TYPE_UVD_INDEX], 4096);
3015 static void r600_uvd_start(struct radeon_device *rdev)
3022 r = uvd_v1_0_resume(rdev);
3024 dev_err(rdev->dev, "failed UVD resume (%d).\n", r);
3027 r = radeon_fence_driver_start_ring(rdev, R600_RING_TYPE_UVD_INDEX);
3029 dev_err(rdev->dev, "failed initializing UVD fences (%d).\n", r);
3035 rdev->ring[R600_RING_TYPE_UVD_INDEX].ring_size = 0;
3038 static void r600_uvd_resume(struct radeon_device *rdev)
3040 struct radeon_ring *ring;
3043 if (!rdev->has_uvd || !rdev->ring[R600_RING_TYPE_UVD_INDEX].ring_size)
3046 ring = &rdev->ring[R600_RING_TYPE_UVD_INDEX];
3047 r = radeon_ring_init(rdev, ring, ring->ring_size, 0, PACKET0(UVD_NO_OP, 0));
3049 dev_err(rdev->dev, "failed initializing UVD ring (%d).\n", r);
3052 r = uvd_v1_0_init(rdev);
3054 dev_err(rdev->dev, "failed initializing UVD (%d).\n", r);
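/*
 * r600_startup - bring the asic up to an operational state
 *
 * Ordering matters here: the VRAM scratch page and MC are programmed
 * before the GART, the write-back buffer and fence rings come before the
 * IRQ/IH setup, and the CP microcode and ring are started before the IB
 * pool and audio are initialized.
 */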
3059 static int r600_startup(struct radeon_device *rdev)
3061 struct radeon_ring *ring;
3064 /* enable pcie gen2 link */
3065 r600_pcie_gen2_enable(rdev);
3067 /* scratch needs to be initialized before MC */
3068 r = r600_vram_scratch_init(rdev);
3072 r600_mc_program(rdev);
3074 if (rdev->flags & RADEON_IS_AGP) {
3075 r600_agp_enable(rdev);
3077 r = r600_pcie_gart_enable(rdev);
3081 r600_gpu_init(rdev);
3083 /* allocate wb buffer */
3084 r = radeon_wb_init(rdev);
3088 r = radeon_fence_driver_start_ring(rdev, RADEON_RING_TYPE_GFX_INDEX);
3090 dev_err(rdev->dev, "failed initializing CP fences (%d).\n", r);
3094 r600_uvd_start(rdev);
3097 if (!rdev->irq.installed) {
3098 r = radeon_irq_kms_init(rdev);
3103 r = r600_irq_init(rdev);
3105 DRM_ERROR("radeon: IH init failed (%d).\n", r);
3106 radeon_irq_kms_fini(rdev);
3111 ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
3112 r = radeon_ring_init(rdev, ring, ring->ring_size, RADEON_WB_CP_RPTR_OFFSET,
3117 r = r600_cp_load_microcode(rdev);
3120 r = r600_cp_resume(rdev);
3124 r600_uvd_resume(rdev);
3126 r = radeon_ib_pool_init(rdev);
3128 dev_err(rdev->dev, "IB initialization failed (%d).\n", r);
3132 r = radeon_audio_init(rdev);
3134 DRM_ERROR("radeon: audio init failed\n");
3141 void r600_vga_set_state(struct radeon_device *rdev, bool state)
3145 temp = RREG32(CONFIG_CNTL);
3146 if (state == false) {
3152 WREG32(CONFIG_CNTL, temp);
3155 int r600_resume(struct radeon_device *rdev)
3159 /* Do not reset the GPU before posting; on r600 hw, unlike r500 hw,
3160 * posting performs the tasks needed to bring the GPU back into a good
3164 atom_asic_init(rdev->mode_info.atom_context);
3166 if (rdev->pm.pm_method == PM_METHOD_DPM)
3167 radeon_pm_resume(rdev);
3169 rdev->accel_working = true;
3170 r = r600_startup(rdev);
3172 DRM_ERROR("r600 startup failed on resume\n");
3173 rdev->accel_working = false;
3180 int r600_suspend(struct radeon_device *rdev)
3182 radeon_pm_suspend(rdev);
3183 radeon_audio_fini(rdev);
3185 if (rdev->has_uvd) {
3186 uvd_v1_0_fini(rdev);
3187 radeon_uvd_suspend(rdev);
3189 r600_irq_suspend(rdev);
3190 radeon_wb_disable(rdev);
3191 r600_pcie_gart_disable(rdev);
3196 /* The plan is to move initialization into this function and use
3197 * helper functions so that radeon_device_init does little more
3198 * than call the asic specific functions. This should also allow
3199 * us to remove a bunch of the callback functions
3202 int r600_init(struct radeon_device *rdev)
3206 if (r600_debugfs_mc_info_init(rdev)) {
3207 DRM_ERROR("Failed to register debugfs file for mc !\n");
3210 if (!radeon_get_bios(rdev)) {
3211 if (ASIC_IS_AVIVO(rdev))
3214 /* Must be an ATOMBIOS */
3215 if (!rdev->is_atom_bios) {
3216 dev_err(rdev->dev, "Expecting atombios for R600 GPU\n");
3219 r = radeon_atombios_init(rdev);
3222 /* Post card if necessary */
3223 if (!radeon_card_posted(rdev)) {
3225 dev_err(rdev->dev, "Card not posted and no BIOS - ignoring\n");
3228 DRM_INFO("GPU not posted. posting now...\n");
3229 atom_asic_init(rdev->mode_info.atom_context);
3231 /* Initialize scratch registers */
3232 r600_scratch_init(rdev);
3233 /* Initialize surface registers */
3234 radeon_surface_init(rdev);
3235 /* Initialize clocks */
3236 radeon_get_clock_info(rdev->ddev);
3238 r = radeon_fence_driver_init(rdev);
3241 if (rdev->flags & RADEON_IS_AGP) {
3242 r = radeon_agp_init(rdev);
3244 radeon_agp_disable(rdev);
3246 r = r600_mc_init(rdev);
3249 /* Memory manager */
3250 r = radeon_bo_init(rdev);
3254 if (!rdev->me_fw || !rdev->pfp_fw || !rdev->rlc_fw) {
3255 r = r600_init_microcode(rdev);
3257 DRM_ERROR("Failed to load firmware!\n");
3262 /* Initialize power management */
3263 radeon_pm_init(rdev);
3265 rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ring_obj = NULL;
3266 r600_ring_init(rdev, &rdev->ring[RADEON_RING_TYPE_GFX_INDEX], 1024 * 1024);
3268 r600_uvd_init(rdev);
3270 rdev->ih.ring_obj = NULL;
3271 r600_ih_ring_init(rdev, 64 * 1024);
3273 r = r600_pcie_gart_init(rdev);
3277 rdev->accel_working = true;
3278 r = r600_startup(rdev);
3280 dev_err(rdev->dev, "disabling GPU acceleration\n");
3282 r600_irq_fini(rdev);
3283 radeon_wb_fini(rdev);
3284 radeon_ib_pool_fini(rdev);
3285 radeon_irq_kms_fini(rdev);
3286 r600_pcie_gart_fini(rdev);
3287 rdev->accel_working = false;
3293 void r600_fini(struct radeon_device *rdev)
3295 radeon_pm_fini(rdev);
3296 radeon_audio_fini(rdev);
3298 r600_irq_fini(rdev);
3299 if (rdev->has_uvd) {
3300 uvd_v1_0_fini(rdev);
3301 radeon_uvd_fini(rdev);
3303 radeon_wb_fini(rdev);
3304 radeon_ib_pool_fini(rdev);
3305 radeon_irq_kms_fini(rdev);
3306 r600_pcie_gart_fini(rdev);
3307 r600_vram_scratch_fini(rdev);
3308 radeon_agp_fini(rdev);
3309 radeon_gem_fini(rdev);
3310 radeon_fence_driver_fini(rdev);
3311 radeon_bo_fini(rdev);
3312 radeon_atombios_fini(rdev);
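/*
 * r600_ring_ib_execute - emit an indirect buffer on the gfx ring
 *
 * First records where the rptr will be after this IB (either via the
 * rptr-save scratch register or a MEM_WRITE to the writeback buffer), then
 * emits the PACKET3_INDIRECT_BUFFER pointing at the IB's GPU address.
 */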
3321 void r600_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib)
3323 struct radeon_ring *ring = &rdev->ring[ib->ring];
3326 if (ring->rptr_save_reg) {
3327 next_rptr = ring->wptr + 3 + 4;
3328 radeon_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 1));
3329 radeon_ring_write(ring, ((ring->rptr_save_reg -
3330 PACKET3_SET_CONFIG_REG_OFFSET) >> 2));
3331 radeon_ring_write(ring, next_rptr);
3332 } else if (rdev->wb.enabled) {
3333 next_rptr = ring->wptr + 5 + 4;
3334 radeon_ring_write(ring, PACKET3(PACKET3_MEM_WRITE, 3));
3335 radeon_ring_write(ring, ring->next_rptr_gpu_addr & 0xfffffffc);
3336 radeon_ring_write(ring, (upper_32_bits(ring->next_rptr_gpu_addr) & 0xff) | (1 << 18));
3337 radeon_ring_write(ring, next_rptr);
3338 radeon_ring_write(ring, 0);
3341 radeon_ring_write(ring, PACKET3(PACKET3_INDIRECT_BUFFER, 2));
3342 radeon_ring_write(ring,
3346 (ib->gpu_addr & 0xFFFFFFFC));
3347 radeon_ring_write(ring, upper_32_bits(ib->gpu_addr) & 0xFF);
3348 radeon_ring_write(ring, ib->length_dw);
3351 int r600_ib_test(struct radeon_device *rdev, struct radeon_ring *ring)
3353 struct radeon_ib ib;
3359 r = radeon_scratch_get(rdev, &scratch);
3361 DRM_ERROR("radeon: failed to get scratch reg (%d).\n", r);
3364 WREG32(scratch, 0xCAFEDEAD);
3365 r = radeon_ib_get(rdev, ring->idx, &ib, NULL, 256);
3367 DRM_ERROR("radeon: failed to get ib (%d).\n", r);
3370 ib.ptr[0] = PACKET3(PACKET3_SET_CONFIG_REG, 1);
3371 ib.ptr[1] = ((scratch - PACKET3_SET_CONFIG_REG_OFFSET) >> 2);
3372 ib.ptr[2] = 0xDEADBEEF;
3374 r = radeon_ib_schedule(rdev, &ib, NULL, false);
3376 DRM_ERROR("radeon: failed to schedule ib (%d).\n", r);
3379 r = radeon_fence_wait_timeout(ib.fence, false, usecs_to_jiffies(
3380 RADEON_USEC_IB_TEST_TIMEOUT));
3382 DRM_ERROR("radeon: fence wait failed (%d).\n", r);
3384 } else if (r == 0) {
3385 DRM_ERROR("radeon: fence wait timed out.\n");
3390 for (i = 0; i < rdev->usec_timeout; i++) {
3391 tmp = RREG32(scratch);
3392 if (tmp == 0xDEADBEEF)
3396 if (i < rdev->usec_timeout) {
3397 DRM_INFO("ib test on ring %d succeeded in %u usecs\n", ib.fence->ring, i);
3399 DRM_ERROR("radeon: ib test failed (scratch(0x%04X)=0x%08X)\n",
3404 radeon_ib_free(rdev, &ib);
3406 radeon_scratch_free(rdev, scratch);
3413 * Interrupts use a ring buffer on r6xx/r7xx hardware. It works much
3414 * the same as the CP ring buffer, but in reverse. Rather than the CPU
3415 * writing to the ring and the GPU consuming, the GPU writes to the ring
3416 * and host consumes. As the host irq handler processes interrupts, it
3417 * increments the rptr. When the rptr catches up with the wptr, all the
3418 * current interrupts have been processed.
3421 void r600_ih_ring_init(struct radeon_device *rdev, unsigned ring_size)
3425 /* Align ring size */
3426 rb_bufsz = order_base_2(ring_size / 4);
3427 ring_size = (1 << rb_bufsz) * 4;
3428 rdev->ih.ring_size = ring_size;
3429 rdev->ih.ptr_mask = rdev->ih.ring_size - 1;
3433 int r600_ih_ring_alloc(struct radeon_device *rdev)
3437 /* Allocate ring buffer */
3438 if (rdev->ih.ring_obj == NULL) {
3439 r = radeon_bo_create(rdev, rdev->ih.ring_size,
3441 RADEON_GEM_DOMAIN_GTT, 0,
3442 NULL, NULL, &rdev->ih.ring_obj);
3444 DRM_ERROR("radeon: failed to create ih ring buffer (%d).\n", r);
3447 r = radeon_bo_reserve(rdev->ih.ring_obj, false);
3448 if (unlikely(r != 0))
3450 r = radeon_bo_pin(rdev->ih.ring_obj,
3451 RADEON_GEM_DOMAIN_GTT,
3452 &rdev->ih.gpu_addr);
3454 radeon_bo_unreserve(rdev->ih.ring_obj);
3455 DRM_ERROR("radeon: failed to pin ih ring buffer (%d).\n", r);
3458 r = radeon_bo_kmap(rdev->ih.ring_obj,
3459 (void **)&rdev->ih.ring);
3460 radeon_bo_unreserve(rdev->ih.ring_obj);
3462 DRM_ERROR("radeon: failed to map ih ring buffer (%d).\n", r);
3469 void r600_ih_ring_fini(struct radeon_device *rdev)
3472 if (rdev->ih.ring_obj) {
3473 r = radeon_bo_reserve(rdev->ih.ring_obj, false);
3474 if (likely(r == 0)) {
3475 radeon_bo_kunmap(rdev->ih.ring_obj);
3476 radeon_bo_unpin(rdev->ih.ring_obj);
3477 radeon_bo_unreserve(rdev->ih.ring_obj);
3479 radeon_bo_unref(&rdev->ih.ring_obj);
3480 rdev->ih.ring = NULL;
3481 rdev->ih.ring_obj = NULL;
3485 void r600_rlc_stop(struct radeon_device *rdev)
3488 if ((rdev->family >= CHIP_RV770) &&
3489 (rdev->family <= CHIP_RV740)) {
3490 /* r7xx asics need to soft reset RLC before halting */
3491 WREG32(SRBM_SOFT_RESET, SOFT_RESET_RLC);
3492 RREG32(SRBM_SOFT_RESET);
3494 WREG32(SRBM_SOFT_RESET, 0);
3495 RREG32(SRBM_SOFT_RESET);
3498 WREG32(RLC_CNTL, 0);
3501 static void r600_rlc_start(struct radeon_device *rdev)
3503 WREG32(RLC_CNTL, RLC_ENABLE);
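/*
 * r600_rlc_resume - (re)load and start the RLC
 *
 * Stops the RLC, clears its heartbeat and MC registers, writes the RLC
 * microcode words one at a time through RLC_UCODE_ADDR/DATA (R700-sized
 * image on rv770+, R600-sized otherwise) and then re-enables the RLC.
 */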
3506 static int r600_rlc_resume(struct radeon_device *rdev)
3509 const __be32 *fw_data;
3514 r600_rlc_stop(rdev);
3516 WREG32(RLC_HB_CNTL, 0);
3518 WREG32(RLC_HB_BASE, 0);
3519 WREG32(RLC_HB_RPTR, 0);
3520 WREG32(RLC_HB_WPTR, 0);
3521 WREG32(RLC_HB_WPTR_LSB_ADDR, 0);
3522 WREG32(RLC_HB_WPTR_MSB_ADDR, 0);
3523 WREG32(RLC_MC_CNTL, 0);
3524 WREG32(RLC_UCODE_CNTL, 0);
3526 fw_data = (const __be32 *)rdev->rlc_fw->data;
3527 if (rdev->family >= CHIP_RV770) {
3528 for (i = 0; i < R700_RLC_UCODE_SIZE; i++) {
3529 WREG32(RLC_UCODE_ADDR, i);
3530 WREG32(RLC_UCODE_DATA, be32_to_cpup(fw_data++));
3533 for (i = 0; i < R600_RLC_UCODE_SIZE; i++) {
3534 WREG32(RLC_UCODE_ADDR, i);
3535 WREG32(RLC_UCODE_DATA, be32_to_cpup(fw_data++));
3538 WREG32(RLC_UCODE_ADDR, 0);
3540 r600_rlc_start(rdev);
3545 static void r600_enable_interrupts(struct radeon_device *rdev)
3547 u32 ih_cntl = RREG32(IH_CNTL);
3548 u32 ih_rb_cntl = RREG32(IH_RB_CNTL);
3550 ih_cntl |= ENABLE_INTR;
3551 ih_rb_cntl |= IH_RB_ENABLE;
3552 WREG32(IH_CNTL, ih_cntl);
3553 WREG32(IH_RB_CNTL, ih_rb_cntl);
3554 rdev->ih.enabled = true;
3557 void r600_disable_interrupts(struct radeon_device *rdev)
3559 u32 ih_rb_cntl = RREG32(IH_RB_CNTL);
3560 u32 ih_cntl = RREG32(IH_CNTL);
3562 ih_rb_cntl &= ~IH_RB_ENABLE;
3563 ih_cntl &= ~ENABLE_INTR;
3564 WREG32(IH_RB_CNTL, ih_rb_cntl);
3565 WREG32(IH_CNTL, ih_cntl);
3566 /* set rptr, wptr to 0 */
3567 WREG32(IH_RB_RPTR, 0);
3568 WREG32(IH_RB_WPTR, 0);
3569 rdev->ih.enabled = false;
3573 static void r600_disable_interrupt_state(struct radeon_device *rdev)
3577 WREG32(CP_INT_CNTL, CNTX_BUSY_INT_ENABLE | CNTX_EMPTY_INT_ENABLE);
3578 tmp = RREG32(DMA_CNTL) & ~TRAP_ENABLE;
3579 WREG32(DMA_CNTL, tmp);
3580 WREG32(GRBM_INT_CNTL, 0);
3581 WREG32(DxMODE_INT_MASK, 0);
3582 WREG32(D1GRPH_INTERRUPT_CONTROL, 0);
3583 WREG32(D2GRPH_INTERRUPT_CONTROL, 0);
3584 if (ASIC_IS_DCE3(rdev)) {
3585 WREG32(DCE3_DACA_AUTODETECT_INT_CONTROL, 0);
3586 WREG32(DCE3_DACB_AUTODETECT_INT_CONTROL, 0);
3587 tmp = RREG32(DC_HPD1_INT_CONTROL) & DC_HPDx_INT_POLARITY;
3588 WREG32(DC_HPD1_INT_CONTROL, tmp);
3589 tmp = RREG32(DC_HPD2_INT_CONTROL) & DC_HPDx_INT_POLARITY;
3590 WREG32(DC_HPD2_INT_CONTROL, tmp);
3591 tmp = RREG32(DC_HPD3_INT_CONTROL) & DC_HPDx_INT_POLARITY;
3592 WREG32(DC_HPD3_INT_CONTROL, tmp);
3593 tmp = RREG32(DC_HPD4_INT_CONTROL) & DC_HPDx_INT_POLARITY;
3594 WREG32(DC_HPD4_INT_CONTROL, tmp);
3595 if (ASIC_IS_DCE32(rdev)) {
3596 tmp = RREG32(DC_HPD5_INT_CONTROL) & DC_HPDx_INT_POLARITY;
3597 WREG32(DC_HPD5_INT_CONTROL, tmp);
3598 tmp = RREG32(DC_HPD6_INT_CONTROL) & DC_HPDx_INT_POLARITY;
3599 WREG32(DC_HPD6_INT_CONTROL, tmp);
3600 tmp = RREG32(AFMT_AUDIO_PACKET_CONTROL + DCE3_HDMI_OFFSET0) & ~HDMI0_AZ_FORMAT_WTRIG_MASK;
3601 WREG32(AFMT_AUDIO_PACKET_CONTROL + DCE3_HDMI_OFFSET0, tmp);
3602 tmp = RREG32(AFMT_AUDIO_PACKET_CONTROL + DCE3_HDMI_OFFSET1) & ~HDMI0_AZ_FORMAT_WTRIG_MASK;
3603 WREG32(AFMT_AUDIO_PACKET_CONTROL + DCE3_HDMI_OFFSET1, tmp);
3605 tmp = RREG32(HDMI0_AUDIO_PACKET_CONTROL) & ~HDMI0_AZ_FORMAT_WTRIG_MASK;
3606 WREG32(HDMI0_AUDIO_PACKET_CONTROL, tmp);
3607 tmp = RREG32(DCE3_HDMI1_AUDIO_PACKET_CONTROL) & ~HDMI0_AZ_FORMAT_WTRIG_MASK;
3608 WREG32(DCE3_HDMI1_AUDIO_PACKET_CONTROL, tmp);
3611 WREG32(DACA_AUTODETECT_INT_CONTROL, 0);
3612 WREG32(DACB_AUTODETECT_INT_CONTROL, 0);
3613 tmp = RREG32(DC_HOT_PLUG_DETECT1_INT_CONTROL) & DC_HOT_PLUG_DETECTx_INT_POLARITY;
3614 WREG32(DC_HOT_PLUG_DETECT1_INT_CONTROL, tmp);
3615 tmp = RREG32(DC_HOT_PLUG_DETECT2_INT_CONTROL) & DC_HOT_PLUG_DETECTx_INT_POLARITY;
3616 WREG32(DC_HOT_PLUG_DETECT2_INT_CONTROL, tmp);
3617 tmp = RREG32(DC_HOT_PLUG_DETECT3_INT_CONTROL) & DC_HOT_PLUG_DETECTx_INT_POLARITY;
3618 WREG32(DC_HOT_PLUG_DETECT3_INT_CONTROL, tmp);
3619 tmp = RREG32(HDMI0_AUDIO_PACKET_CONTROL) & ~HDMI0_AZ_FORMAT_WTRIG_MASK;
3620 WREG32(HDMI0_AUDIO_PACKET_CONTROL, tmp);
3621 tmp = RREG32(HDMI1_AUDIO_PACKET_CONTROL) & ~HDMI0_AZ_FORMAT_WTRIG_MASK;
3622 WREG32(HDMI1_AUDIO_PACKET_CONTROL, tmp);
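/*
 * r600_irq_init - set up the interrupt handler (IH) ring
 *
 * Allocates and pins the IH ring, resumes the RLC, programs the ring base,
 * size and writeback address, sets IH_CNTL (RPTR_REARM only works with
 * MSIs), forces all interrupt sources off and finally enables the IH.
 */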
3626 int r600_irq_init(struct radeon_device *rdev)
3630 u32 interrupt_cntl, ih_cntl, ih_rb_cntl;
3633 ret = r600_ih_ring_alloc(rdev);
3638 r600_disable_interrupts(rdev);
3641 if (rdev->family >= CHIP_CEDAR)
3642 ret = evergreen_rlc_resume(rdev);
3644 ret = r600_rlc_resume(rdev);
3646 r600_ih_ring_fini(rdev);
3650 /* setup interrupt control */
3651 /* set dummy read address to ring address */
3652 WREG32(INTERRUPT_CNTL2, rdev->ih.gpu_addr >> 8);
3653 interrupt_cntl = RREG32(INTERRUPT_CNTL);
3654 /* IH_DUMMY_RD_OVERRIDE=0 - dummy read disabled with msi, enabled without msi
3655 * IH_DUMMY_RD_OVERRIDE=1 - dummy read controlled by IH_DUMMY_RD_EN
3657 interrupt_cntl &= ~IH_DUMMY_RD_OVERRIDE;
3658 /* IH_REQ_NONSNOOP_EN=1 if ring is in non-cacheable memory, e.g., vram */
3659 interrupt_cntl &= ~IH_REQ_NONSNOOP_EN;
3660 WREG32(INTERRUPT_CNTL, interrupt_cntl);
3662 WREG32(IH_RB_BASE, rdev->ih.gpu_addr >> 8);
3663 rb_bufsz = order_base_2(rdev->ih.ring_size / 4);
3665 ih_rb_cntl = (IH_WPTR_OVERFLOW_ENABLE |
3666 IH_WPTR_OVERFLOW_CLEAR |
3669 if (rdev->wb.enabled)
3670 ih_rb_cntl |= IH_WPTR_WRITEBACK_ENABLE;
3672 /* set the writeback address whether it's enabled or not */
3673 WREG32(IH_RB_WPTR_ADDR_LO, (rdev->wb.gpu_addr + R600_WB_IH_WPTR_OFFSET) & 0xFFFFFFFC);
3674 WREG32(IH_RB_WPTR_ADDR_HI, upper_32_bits(rdev->wb.gpu_addr + R600_WB_IH_WPTR_OFFSET) & 0xFF);
3676 WREG32(IH_RB_CNTL, ih_rb_cntl);
3678 /* set rptr, wptr to 0 */
3679 WREG32(IH_RB_RPTR, 0);
3680 WREG32(IH_RB_WPTR, 0);
3682 /* Default settings for IH_CNTL (disabled at first) */
3683 ih_cntl = MC_WRREQ_CREDIT(0x10) | MC_WR_CLEAN_CNT(0x10);
3684 /* RPTR_REARM only works if msi's are enabled */
3685 if (rdev->msi_enabled)
3686 ih_cntl |= RPTR_REARM;
3687 WREG32(IH_CNTL, ih_cntl);
3689 /* force the active interrupt state to all disabled */
3690 if (rdev->family >= CHIP_CEDAR)
3691 evergreen_disable_interrupt_state(rdev);
3693 r600_disable_interrupt_state(rdev);
3695 /* at this point everything should be setup correctly to enable master */
3696 pci_set_master(rdev->pdev);
3699 r600_enable_interrupts(rdev);
3704 void r600_irq_suspend(struct radeon_device *rdev)
3706 r600_irq_disable(rdev);
3707 r600_rlc_stop(rdev);
3710 void r600_irq_fini(struct radeon_device *rdev)
3712 r600_irq_suspend(rdev);
3713 r600_ih_ring_fini(rdev);
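/*
 * r600_irq_set - program the currently requested interrupt sources
 *
 * Builds enable masks for the CP, DMA, vblank, hotplug, HDMI audio and
 * thermal interrupts from the rdev->irq state and writes them out, using
 * the DCE3/DCE32 register layout where applicable.
 */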
3716 int r600_irq_set(struct radeon_device *rdev)
3718 u32 cp_int_cntl = CNTX_BUSY_INT_ENABLE | CNTX_EMPTY_INT_ENABLE;
3720 u32 hpd1, hpd2, hpd3, hpd4 = 0, hpd5 = 0, hpd6 = 0;
3721 u32 grbm_int_cntl = 0;
3724 u32 thermal_int = 0;
3726 if (!rdev->irq.installed) {
3727 WARN(1, "Can't enable IRQ/MSI because no handler is installed\n");
3730 /* don't enable anything if the ih is disabled */
3731 if (!rdev->ih.enabled) {
3732 r600_disable_interrupts(rdev);
3733 /* force the active interrupt state to all disabled */
3734 r600_disable_interrupt_state(rdev);
3738 if (ASIC_IS_DCE3(rdev)) {
3739 hpd1 = RREG32(DC_HPD1_INT_CONTROL) & ~DC_HPDx_INT_EN;
3740 hpd2 = RREG32(DC_HPD2_INT_CONTROL) & ~DC_HPDx_INT_EN;
3741 hpd3 = RREG32(DC_HPD3_INT_CONTROL) & ~DC_HPDx_INT_EN;
3742 hpd4 = RREG32(DC_HPD4_INT_CONTROL) & ~DC_HPDx_INT_EN;
3743 if (ASIC_IS_DCE32(rdev)) {
3744 hpd5 = RREG32(DC_HPD5_INT_CONTROL) & ~DC_HPDx_INT_EN;
3745 hpd6 = RREG32(DC_HPD6_INT_CONTROL) & ~DC_HPDx_INT_EN;
3746 hdmi0 = RREG32(AFMT_AUDIO_PACKET_CONTROL + DCE3_HDMI_OFFSET0) & ~AFMT_AZ_FORMAT_WTRIG_MASK;
3747 hdmi1 = RREG32(AFMT_AUDIO_PACKET_CONTROL + DCE3_HDMI_OFFSET1) & ~AFMT_AZ_FORMAT_WTRIG_MASK;
3749 hdmi0 = RREG32(HDMI0_AUDIO_PACKET_CONTROL) & ~HDMI0_AZ_FORMAT_WTRIG_MASK;
3750 hdmi1 = RREG32(DCE3_HDMI1_AUDIO_PACKET_CONTROL) & ~HDMI0_AZ_FORMAT_WTRIG_MASK;
3753 hpd1 = RREG32(DC_HOT_PLUG_DETECT1_INT_CONTROL) & ~DC_HPDx_INT_EN;
3754 hpd2 = RREG32(DC_HOT_PLUG_DETECT2_INT_CONTROL) & ~DC_HPDx_INT_EN;
3755 hpd3 = RREG32(DC_HOT_PLUG_DETECT3_INT_CONTROL) & ~DC_HPDx_INT_EN;
3756 hdmi0 = RREG32(HDMI0_AUDIO_PACKET_CONTROL) & ~HDMI0_AZ_FORMAT_WTRIG_MASK;
3757 hdmi1 = RREG32(HDMI1_AUDIO_PACKET_CONTROL) & ~HDMI0_AZ_FORMAT_WTRIG_MASK;
3760 dma_cntl = RREG32(DMA_CNTL) & ~TRAP_ENABLE;
3762 if ((rdev->family > CHIP_R600) && (rdev->family < CHIP_RV770)) {
3763 thermal_int = RREG32(CG_THERMAL_INT) &
3764 ~(THERM_INT_MASK_HIGH | THERM_INT_MASK_LOW);
3765 } else if (rdev->family >= CHIP_RV770) {
3766 thermal_int = RREG32(RV770_CG_THERMAL_INT) &
3767 ~(THERM_INT_MASK_HIGH | THERM_INT_MASK_LOW);
3769 if (rdev->irq.dpm_thermal) {
3770 DRM_DEBUG("dpm thermal\n");
3771 thermal_int |= THERM_INT_MASK_HIGH | THERM_INT_MASK_LOW;
3774 if (atomic_read(&rdev->irq.ring_int[RADEON_RING_TYPE_GFX_INDEX])) {
3775 DRM_DEBUG("r600_irq_set: sw int\n");
3776 cp_int_cntl |= RB_INT_ENABLE;
3777 cp_int_cntl |= TIME_STAMP_INT_ENABLE;
3780 if (atomic_read(&rdev->irq.ring_int[R600_RING_TYPE_DMA_INDEX])) {
3781 DRM_DEBUG("r600_irq_set: sw int dma\n");
3782 dma_cntl |= TRAP_ENABLE;
3785 if (rdev->irq.crtc_vblank_int[0] ||
3786 atomic_read(&rdev->irq.pflip[0])) {
3787 DRM_DEBUG("r600_irq_set: vblank 0\n");
3788 mode_int |= D1MODE_VBLANK_INT_MASK;
3790 if (rdev->irq.crtc_vblank_int[1] ||
3791 atomic_read(&rdev->irq.pflip[1])) {
3792 DRM_DEBUG("r600_irq_set: vblank 1\n");
3793 mode_int |= D2MODE_VBLANK_INT_MASK;
3795 if (rdev->irq.hpd[0]) {
3796 DRM_DEBUG("r600_irq_set: hpd 1\n");
3797 hpd1 |= DC_HPDx_INT_EN;
3799 if (rdev->irq.hpd[1]) {
3800 DRM_DEBUG("r600_irq_set: hpd 2\n");
3801 hpd2 |= DC_HPDx_INT_EN;
3803 if (rdev->irq.hpd[2]) {
3804 DRM_DEBUG("r600_irq_set: hpd 3\n");
3805 hpd3 |= DC_HPDx_INT_EN;
3807 if (rdev->irq.hpd[3]) {
3808 DRM_DEBUG("r600_irq_set: hpd 4\n");
3809 hpd4 |= DC_HPDx_INT_EN;
3811 if (rdev->irq.hpd[4]) {
3812 DRM_DEBUG("r600_irq_set: hpd 5\n");
3813 hpd5 |= DC_HPDx_INT_EN;
3815 if (rdev->irq.hpd[5]) {
3816 DRM_DEBUG("r600_irq_set: hpd 6\n");
3817 hpd6 |= DC_HPDx_INT_EN;
3819 if (rdev->irq.afmt[0]) {
3820 DRM_DEBUG("r600_irq_set: hdmi 0\n");
3821 hdmi0 |= HDMI0_AZ_FORMAT_WTRIG_MASK;
3823 if (rdev->irq.afmt[1]) {
3824 DRM_DEBUG("r600_irq_set: hdmi 1\n");
3825 hdmi1 |= HDMI0_AZ_FORMAT_WTRIG_MASK;
3828 WREG32(CP_INT_CNTL, cp_int_cntl);
3829 WREG32(DMA_CNTL, dma_cntl);
3830 WREG32(DxMODE_INT_MASK, mode_int);
3831 WREG32(D1GRPH_INTERRUPT_CONTROL, DxGRPH_PFLIP_INT_MASK);
3832 WREG32(D2GRPH_INTERRUPT_CONTROL, DxGRPH_PFLIP_INT_MASK);
3833 WREG32(GRBM_INT_CNTL, grbm_int_cntl);
3834 if (ASIC_IS_DCE3(rdev)) {
3835 WREG32(DC_HPD1_INT_CONTROL, hpd1);
3836 WREG32(DC_HPD2_INT_CONTROL, hpd2);
3837 WREG32(DC_HPD3_INT_CONTROL, hpd3);
3838 WREG32(DC_HPD4_INT_CONTROL, hpd4);
3839 if (ASIC_IS_DCE32(rdev)) {
3840 WREG32(DC_HPD5_INT_CONTROL, hpd5);
3841 WREG32(DC_HPD6_INT_CONTROL, hpd6);
3842 WREG32(AFMT_AUDIO_PACKET_CONTROL + DCE3_HDMI_OFFSET0, hdmi0);
3843 WREG32(AFMT_AUDIO_PACKET_CONTROL + DCE3_HDMI_OFFSET1, hdmi1);
3845 WREG32(HDMI0_AUDIO_PACKET_CONTROL, hdmi0);
3846 WREG32(DCE3_HDMI1_AUDIO_PACKET_CONTROL, hdmi1);
3849 WREG32(DC_HOT_PLUG_DETECT1_INT_CONTROL, hpd1);
3850 WREG32(DC_HOT_PLUG_DETECT2_INT_CONTROL, hpd2);
3851 WREG32(DC_HOT_PLUG_DETECT3_INT_CONTROL, hpd3);
3852 WREG32(HDMI0_AUDIO_PACKET_CONTROL, hdmi0);
3853 WREG32(HDMI1_AUDIO_PACKET_CONTROL, hdmi1);
3855 if ((rdev->family > CHIP_R600) && (rdev->family < CHIP_RV770)) {
3856 WREG32(CG_THERMAL_INT, thermal_int);
3857 } else if (rdev->family >= CHIP_RV770) {
3858 WREG32(RV770_CG_THERMAL_INT, thermal_int);
3862 RREG32(R_000E50_SRBM_STATUS);
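/*
 * r600_irq_ack - latch and acknowledge pending display interrupts
 *
 * Snapshots the display interrupt status registers into
 * rdev->irq.stat_regs.r600 and writes the ack bits for any pending
 * pageflip, vblank, vline, hotplug and HDMI audio sources.
 */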
3867 static void r600_irq_ack(struct radeon_device *rdev)
3871 if (ASIC_IS_DCE3(rdev)) {
3872 rdev->irq.stat_regs.r600.disp_int = RREG32(DCE3_DISP_INTERRUPT_STATUS);
3873 rdev->irq.stat_regs.r600.disp_int_cont = RREG32(DCE3_DISP_INTERRUPT_STATUS_CONTINUE);
3874 rdev->irq.stat_regs.r600.disp_int_cont2 = RREG32(DCE3_DISP_INTERRUPT_STATUS_CONTINUE2);
3875 if (ASIC_IS_DCE32(rdev)) {
3876 rdev->irq.stat_regs.r600.hdmi0_status = RREG32(AFMT_STATUS + DCE3_HDMI_OFFSET0);
3877 rdev->irq.stat_regs.r600.hdmi1_status = RREG32(AFMT_STATUS + DCE3_HDMI_OFFSET1);
3879 rdev->irq.stat_regs.r600.hdmi0_status = RREG32(HDMI0_STATUS);
3880 rdev->irq.stat_regs.r600.hdmi1_status = RREG32(DCE3_HDMI1_STATUS);
3883 rdev->irq.stat_regs.r600.disp_int = RREG32(DISP_INTERRUPT_STATUS);
3884 rdev->irq.stat_regs.r600.disp_int_cont = RREG32(DISP_INTERRUPT_STATUS_CONTINUE);
3885 rdev->irq.stat_regs.r600.disp_int_cont2 = 0;
3886 rdev->irq.stat_regs.r600.hdmi0_status = RREG32(HDMI0_STATUS);
3887 rdev->irq.stat_regs.r600.hdmi1_status = RREG32(HDMI1_STATUS);
3889 rdev->irq.stat_regs.r600.d1grph_int = RREG32(D1GRPH_INTERRUPT_STATUS);
3890 rdev->irq.stat_regs.r600.d2grph_int = RREG32(D2GRPH_INTERRUPT_STATUS);
3892 if (rdev->irq.stat_regs.r600.d1grph_int & DxGRPH_PFLIP_INT_OCCURRED)
3893 WREG32(D1GRPH_INTERRUPT_STATUS, DxGRPH_PFLIP_INT_CLEAR);
3894 if (rdev->irq.stat_regs.r600.d2grph_int & DxGRPH_PFLIP_INT_OCCURRED)
3895 WREG32(D2GRPH_INTERRUPT_STATUS, DxGRPH_PFLIP_INT_CLEAR);
3896 if (rdev->irq.stat_regs.r600.disp_int & LB_D1_VBLANK_INTERRUPT)
3897 WREG32(D1MODE_VBLANK_STATUS, DxMODE_VBLANK_ACK);
3898 if (rdev->irq.stat_regs.r600.disp_int & LB_D1_VLINE_INTERRUPT)
3899 WREG32(D1MODE_VLINE_STATUS, DxMODE_VLINE_ACK);
3900 if (rdev->irq.stat_regs.r600.disp_int & LB_D2_VBLANK_INTERRUPT)
3901 WREG32(D2MODE_VBLANK_STATUS, DxMODE_VBLANK_ACK);
3902 if (rdev->irq.stat_regs.r600.disp_int & LB_D2_VLINE_INTERRUPT)
3903 WREG32(D2MODE_VLINE_STATUS, DxMODE_VLINE_ACK);
3904 if (rdev->irq.stat_regs.r600.disp_int & DC_HPD1_INTERRUPT) {
3905 if (ASIC_IS_DCE3(rdev)) {
3906 tmp = RREG32(DC_HPD1_INT_CONTROL);
3907 tmp |= DC_HPDx_INT_ACK;
3908 WREG32(DC_HPD1_INT_CONTROL, tmp);
3910 tmp = RREG32(DC_HOT_PLUG_DETECT1_INT_CONTROL);
3911 tmp |= DC_HPDx_INT_ACK;
3912 WREG32(DC_HOT_PLUG_DETECT1_INT_CONTROL, tmp);
3915 if (rdev->irq.stat_regs.r600.disp_int & DC_HPD2_INTERRUPT) {
3916 if (ASIC_IS_DCE3(rdev)) {
3917 tmp = RREG32(DC_HPD2_INT_CONTROL);
3918 tmp |= DC_HPDx_INT_ACK;
3919 WREG32(DC_HPD2_INT_CONTROL, tmp);
3921 tmp = RREG32(DC_HOT_PLUG_DETECT2_INT_CONTROL);
3922 tmp |= DC_HPDx_INT_ACK;
3923 WREG32(DC_HOT_PLUG_DETECT2_INT_CONTROL, tmp);
3926 if (rdev->irq.stat_regs.r600.disp_int_cont & DC_HPD3_INTERRUPT) {
3927 if (ASIC_IS_DCE3(rdev)) {
3928 tmp = RREG32(DC_HPD3_INT_CONTROL);
3929 tmp |= DC_HPDx_INT_ACK;
3930 WREG32(DC_HPD3_INT_CONTROL, tmp);
3932 tmp = RREG32(DC_HOT_PLUG_DETECT3_INT_CONTROL);
3933 tmp |= DC_HPDx_INT_ACK;
3934 WREG32(DC_HOT_PLUG_DETECT3_INT_CONTROL, tmp);
3937 if (rdev->irq.stat_regs.r600.disp_int_cont & DC_HPD4_INTERRUPT) {
3938 tmp = RREG32(DC_HPD4_INT_CONTROL);
3939 tmp |= DC_HPDx_INT_ACK;
3940 WREG32(DC_HPD4_INT_CONTROL, tmp);
3942 if (ASIC_IS_DCE32(rdev)) {
3943 if (rdev->irq.stat_regs.r600.disp_int_cont2 & DC_HPD5_INTERRUPT) {
3944 tmp = RREG32(DC_HPD5_INT_CONTROL);
3945 tmp |= DC_HPDx_INT_ACK;
3946 WREG32(DC_HPD5_INT_CONTROL, tmp);
3948 if (rdev->irq.stat_regs.r600.disp_int_cont2 & DC_HPD6_INTERRUPT) {
3949 tmp = RREG32(DC_HPD6_INT_CONTROL);
3950 tmp |= DC_HPDx_INT_ACK;
3951 WREG32(DC_HPD6_INT_CONTROL, tmp);
3953 if (rdev->irq.stat_regs.r600.hdmi0_status & AFMT_AZ_FORMAT_WTRIG) {
3954 tmp = RREG32(AFMT_AUDIO_PACKET_CONTROL + DCE3_HDMI_OFFSET0);
3955 tmp |= AFMT_AZ_FORMAT_WTRIG_ACK;
3956 WREG32(AFMT_AUDIO_PACKET_CONTROL + DCE3_HDMI_OFFSET0, tmp);
3958 if (rdev->irq.stat_regs.r600.hdmi1_status & AFMT_AZ_FORMAT_WTRIG) {
3959 tmp = RREG32(AFMT_AUDIO_PACKET_CONTROL + DCE3_HDMI_OFFSET1);
3960 tmp |= AFMT_AZ_FORMAT_WTRIG_ACK;
3961 WREG32(AFMT_AUDIO_PACKET_CONTROL + DCE3_HDMI_OFFSET1, tmp);
3964 if (rdev->irq.stat_regs.r600.hdmi0_status & HDMI0_AZ_FORMAT_WTRIG) {
3965 tmp = RREG32(HDMI0_AUDIO_PACKET_CONTROL);
3966 tmp |= HDMI0_AZ_FORMAT_WTRIG_ACK;
3967 WREG32(HDMI0_AUDIO_PACKET_CONTROL, tmp);
3969 if (rdev->irq.stat_regs.r600.hdmi1_status & HDMI0_AZ_FORMAT_WTRIG) {
3970 if (ASIC_IS_DCE3(rdev)) {
3971 tmp = RREG32(DCE3_HDMI1_AUDIO_PACKET_CONTROL);
3972 tmp |= HDMI0_AZ_FORMAT_WTRIG_ACK;
3973 WREG32(DCE3_HDMI1_AUDIO_PACKET_CONTROL, tmp);
3975 tmp = RREG32(HDMI1_AUDIO_PACKET_CONTROL);
3976 tmp |= HDMI0_AZ_FORMAT_WTRIG_ACK;
3977 WREG32(HDMI1_AUDIO_PACKET_CONTROL, tmp);
3983 void r600_irq_disable(struct radeon_device *rdev)
3985 r600_disable_interrupts(rdev);
3986 /* Wait and acknowledge irq */
3989 r600_disable_interrupt_state(rdev);
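/*
 * r600_get_ih_wptr - fetch the current IH write pointer
 *
 * Reads the wptr from the writeback buffer when enabled (register read
 * otherwise) and, on ring overflow, clears the overflow flag and skips the
 * rptr ahead past the overwritten entries.
 */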
3992 static u32 r600_get_ih_wptr(struct radeon_device *rdev)
3996 if (rdev->wb.enabled)
3997 wptr = le32_to_cpu(rdev->wb.wb[R600_WB_IH_WPTR_OFFSET/4]);
3999 wptr = RREG32(IH_RB_WPTR);
4001 if (wptr & RB_OVERFLOW) {
4002 wptr &= ~RB_OVERFLOW;
4003 /* When a ring buffer overflow happens, start parsing interrupts
4004 * from the last vector that was not overwritten (wptr + 16).
4005 * Hopefully this allows us to catch up.
4007 dev_warn(rdev->dev, "IH ring buffer overflow (0x%08X, 0x%08X, 0x%08X)\n",
4008 wptr, rdev->ih.rptr, (wptr + 16) & rdev->ih.ptr_mask);
4009 rdev->ih.rptr = (wptr + 16) & rdev->ih.ptr_mask;
4010 tmp = RREG32(IH_RB_CNTL);
4011 tmp |= IH_WPTR_OVERFLOW_CLEAR;
4012 WREG32(IH_RB_CNTL, tmp);
4013 }
4014 return (wptr & rdev->ih.ptr_mask);
4015 }
4017 /*
4018 * Each IV ring entry is 128 bits:
4019 * [7:0] - interrupt source id
4020 * [31:8] - reserved
4021 * [59:32] - interrupt source data
4022 * [127:60] - reserved
4024 * The basic interrupt vector entries
4025 * are decoded as follows:
4026 * src_id src_data description
4031 * 19 0 FP Hot plug detection A
4032 * 19 1 FP Hot plug detection B
4033 * 19 2 DAC A auto-detection
4034 * 19 3 DAC B auto-detection
4040 * 181 - EOP Interrupt
4043 * Note, these are based on r600 and may need to be
4044 * adjusted or added to on newer asics
4045 */
4047 int r600_irq_process(struct radeon_device *rdev)
4048 {
4049 u32 wptr;
4050 u32 rptr;
4051 u32 src_id, src_data;
4052 u32 ring_index;
4053 bool queue_hotplug = false;
4054 bool queue_hdmi = false;
4055 bool queue_thermal = false;
4057 if (!rdev->ih.enabled || rdev->shutdown)
4058 return IRQ_NONE;
4060 /* No MSIs, need a dummy read to flush PCI DMAs */
4061 if (!rdev->msi_enabled)
4062 RREG32(IH_RB_WPTR);
4064 wptr = r600_get_ih_wptr(rdev);
4066 restart_ih:
4067 /* is somebody else already processing irqs? */
4068 if (atomic_xchg(&rdev->ih.lock, 1))
4069 return IRQ_NONE;
4071 rptr = rdev->ih.rptr;
4072 DRM_DEBUG("r600_irq_process start: rptr %d, wptr %d\n", rptr, wptr);
4074 /* Order reading of wptr vs. reading of IH ring data */
4075 rmb();
4077 /* display interrupts */
4078 r600_irq_ack(rdev);
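/* Each IH vector is 128 bits (16 bytes), so rptr and wptr advance in
 * steps of 16 bytes as vectors are consumed.
 */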
4080 while (rptr != wptr) {
4081 /* wptr/rptr are in bytes! */
4082 ring_index = rptr / 4;
4083 src_id = le32_to_cpu(rdev->ih.ring[ring_index]) & 0xff;
4084 src_data = le32_to_cpu(rdev->ih.ring[ring_index + 1]) & 0xfffffff;
4086 switch (src_id) {
4087 case 1: /* D1 vblank/vline */
4088 switch (src_data) {
4089 case 0: /* D1 vblank */
4090 if (!(rdev->irq.stat_regs.r600.disp_int & LB_D1_VBLANK_INTERRUPT))
4091 DRM_DEBUG("IH: D1 vblank - IH event w/o asserted irq bit?\n");
4093 if (rdev->irq.crtc_vblank_int[0]) {
4094 drm_handle_vblank(rdev->ddev, 0);
4095 rdev->pm.vblank_sync = true;
4096 wake_up(&rdev->irq.vblank_queue);
4097 }
4098 if (atomic_read(&rdev->irq.pflip[0]))
4099 radeon_crtc_handle_vblank(rdev, 0);
4100 rdev->irq.stat_regs.r600.disp_int &= ~LB_D1_VBLANK_INTERRUPT;
4101 DRM_DEBUG("IH: D1 vblank\n");
4104 case 1: /* D1 vline */
4105 if (!(rdev->irq.stat_regs.r600.disp_int & LB_D1_VLINE_INTERRUPT))
4106 DRM_DEBUG("IH: D1 vline - IH event w/o asserted irq bit?\n");
4108 rdev->irq.stat_regs.r600.disp_int &= ~LB_D1_VLINE_INTERRUPT;
4109 DRM_DEBUG("IH: D1 vline\n");
4113 DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
4117 case 5: /* D2 vblank/vline */
4119 case 0: /* D2 vblank */
4120 if (!(rdev->irq.stat_regs.r600.disp_int & LB_D2_VBLANK_INTERRUPT))
4121 DRM_DEBUG("IH: D2 vblank - IH event w/o asserted irq bit?\n");
4123 if (rdev->irq.crtc_vblank_int[1]) {
4124 drm_handle_vblank(rdev->ddev, 1);
4125 rdev->pm.vblank_sync = true;
4126 wake_up(&rdev->irq.vblank_queue);
4127 }
4128 if (atomic_read(&rdev->irq.pflip[1]))
4129 radeon_crtc_handle_vblank(rdev, 1);
4130 rdev->irq.stat_regs.r600.disp_int &= ~LB_D2_VBLANK_INTERRUPT;
4131 DRM_DEBUG("IH: D2 vblank\n");
4134 case 1: /* D2 vline */
4135 if (!(rdev->irq.stat_regs.r600.disp_int & LB_D2_VLINE_INTERRUPT))
4136 DRM_DEBUG("IH: D2 vline - IH event w/o asserted irq bit?\n");
4138 rdev->irq.stat_regs.r600.disp_int &= ~LB_D2_VLINE_INTERRUPT;
4139 DRM_DEBUG("IH: D2 vline\n");
4143 DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
4144 break;
4145 }
4146 break;
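/* Page flip completion events; these are only forwarded to
 * radeon_crtc_handle_flip() when radeon_use_pflipirq > 0.
 */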
4147 case 9: /* D1 pflip */
4148 DRM_DEBUG("IH: D1 flip\n");
4149 if (radeon_use_pflipirq > 0)
4150 radeon_crtc_handle_flip(rdev, 0);
4151 break;
4152 case 11: /* D2 pflip */
4153 DRM_DEBUG("IH: D2 flip\n");
4154 if (radeon_use_pflipirq > 0)
4155 radeon_crtc_handle_flip(rdev, 1);
4156 break;
4157 case 19: /* HPD/DAC hotplug */
4158 switch (src_data) {
4159 case 0:
4160 if (!(rdev->irq.stat_regs.r600.disp_int & DC_HPD1_INTERRUPT))
4161 DRM_DEBUG("IH: HPD1 - IH event w/o asserted irq bit?\n");
4163 rdev->irq.stat_regs.r600.disp_int &= ~DC_HPD1_INTERRUPT;
4164 queue_hotplug = true;
4165 DRM_DEBUG("IH: HPD1\n");
4168 if (!(rdev->irq.stat_regs.r600.disp_int & DC_HPD2_INTERRUPT))
4169 DRM_DEBUG("IH: HPD2 - IH event w/o asserted irq bit?\n");
4171 rdev->irq.stat_regs.r600.disp_int &= ~DC_HPD2_INTERRUPT;
4172 queue_hotplug = true;
4173 DRM_DEBUG("IH: HPD2\n");
4176 if (!(rdev->irq.stat_regs.r600.disp_int_cont & DC_HPD3_INTERRUPT))
4177 DRM_DEBUG("IH: HPD3 - IH event w/o asserted irq bit?\n");
4179 rdev->irq.stat_regs.r600.disp_int_cont &= ~DC_HPD3_INTERRUPT;
4180 queue_hotplug = true;
4181 DRM_DEBUG("IH: HPD3\n");
4184 if (!(rdev->irq.stat_regs.r600.disp_int_cont & DC_HPD4_INTERRUPT))
4185 DRM_DEBUG("IH: HPD4 - IH event w/o asserted irq bit?\n");
4187 rdev->irq.stat_regs.r600.disp_int_cont &= ~DC_HPD4_INTERRUPT;
4188 queue_hotplug = true;
4189 DRM_DEBUG("IH: HPD4\n");
4192 if (!(rdev->irq.stat_regs.r600.disp_int_cont2 & DC_HPD5_INTERRUPT))
4193 DRM_DEBUG("IH: HPD5 - IH event w/o asserted irq bit?\n");
4195 rdev->irq.stat_regs.r600.disp_int_cont2 &= ~DC_HPD5_INTERRUPT;
4196 queue_hotplug = true;
4197 DRM_DEBUG("IH: HPD5\n");
4200 if (!(rdev->irq.stat_regs.r600.disp_int_cont2 & DC_HPD6_INTERRUPT))
4201 DRM_DEBUG("IH: HPD6 - IH event w/o asserted irq bit?\n");
4203 rdev->irq.stat_regs.r600.disp_int_cont2 &= ~DC_HPD6_INTERRUPT;
4204 queue_hotplug = true;
4205 DRM_DEBUG("IH: HPD6\n");
4207 break;
4208 default:
4209 DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
4210 break;
4211 }
4212 break;
4213 case 21: /* hdmi */
4214 switch (src_data) {
4215 case 4:
4216 if (!(rdev->irq.stat_regs.r600.hdmi0_status & HDMI0_AZ_FORMAT_WTRIG))
4217 DRM_DEBUG("IH: HDMI0 - IH event w/o asserted irq bit?\n");
4219 rdev->irq.stat_regs.r600.hdmi0_status &= ~HDMI0_AZ_FORMAT_WTRIG;
4220 queue_hdmi = true;
4221 DRM_DEBUG("IH: HDMI0\n");
4223 break;
4224 case 5:
4225 if (!(rdev->irq.stat_regs.r600.hdmi1_status & HDMI0_AZ_FORMAT_WTRIG))
4226 DRM_DEBUG("IH: HDMI1 - IH event w/o asserted irq bit?\n");
4228 rdev->irq.stat_regs.r600.hdmi1_status &= ~HDMI0_AZ_FORMAT_WTRIG;
4229 queue_hdmi = true;
4230 DRM_DEBUG("IH: HDMI1\n");
4232 break;
4233 default:
4234 DRM_ERROR("Unhandled interrupt: %d %d\n", src_id, src_data);
4235 break;
4236 }
4237 break;
4238 case 124: /* UVD */
4239 DRM_DEBUG("IH: UVD int: 0x%08x\n", src_data);
4240 radeon_fence_process(rdev, R600_RING_TYPE_UVD_INDEX);
4241 break;
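/* CP interrupts from the ring buffer or either indirect buffer, as
 * well as the EOP event, all signal fence completion on the GFX ring.
 */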
4242 case 176: /* CP_INT in ring buffer */
4243 case 177: /* CP_INT in IB1 */
4244 case 178: /* CP_INT in IB2 */
4245 DRM_DEBUG("IH: CP int: 0x%08x\n", src_data);
4246 radeon_fence_process(rdev, RADEON_RING_TYPE_GFX_INDEX);
4248 case 181: /* CP EOP event */
4249 DRM_DEBUG("IH: CP EOP\n");
4250 radeon_fence_process(rdev, RADEON_RING_TYPE_GFX_INDEX);
4252 case 224: /* DMA trap event */
4253 DRM_DEBUG("IH: DMA trap\n");
4254 radeon_fence_process(rdev, R600_RING_TYPE_DMA_INDEX);
4255 break;
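/* Thermal threshold crossings record the direction of the transition;
 * the dpm thermal worker is scheduled once the IH ring has been drained.
 */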
4256 case 230: /* thermal low to high */
4257 DRM_DEBUG("IH: thermal low to high\n");
4258 rdev->pm.dpm.thermal.high_to_low = false;
4259 queue_thermal = true;
4261 case 231: /* thermal high to low */
4262 DRM_DEBUG("IH: thermal high to low\n");
4263 rdev->pm.dpm.thermal.high_to_low = true;
4264 queue_thermal = true;
4266 case 233: /* GUI IDLE */
4267 DRM_DEBUG("IH: GUI idle\n");
4270 DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
4271 break;
4272 }
4274 /* wptr/rptr are in bytes! */
4275 rptr += 16;
4276 rptr &= rdev->ih.ptr_mask;
4277 WREG32(IH_RB_RPTR, rptr);
4278 }
4279 if (queue_hotplug)
4280 schedule_delayed_work(&rdev->hotplug_work, 0);
4281 if (queue_hdmi)
4282 schedule_work(&rdev->audio_work);
4283 if (queue_thermal && rdev->pm.dpm_enabled)
4284 schedule_work(&rdev->pm.dpm.thermal.work);
4285 rdev->ih.rptr = rptr;
4286 atomic_set(&rdev->ih.lock, 0);
4288 /* make sure wptr hasn't changed while processing */
4289 wptr = r600_get_ih_wptr(rdev);
4290 if (wptr != rptr)
4291 goto restart_ih;
4293 return IRQ_HANDLED;
4294 }
4299 #if defined(CONFIG_DEBUG_FS)
4301 static int r600_debugfs_mc_info(struct seq_file *m, void *data)
4302 {
4303 struct drm_info_node *node = (struct drm_info_node *) m->private;
4304 struct drm_device *dev = node->minor->dev;
4305 struct radeon_device *rdev = dev->dev_private;
4307 DREG32_SYS(m, rdev, R_000E50_SRBM_STATUS);
4308 DREG32_SYS(m, rdev, VM_L2_STATUS);
4309 return 0;
4310 }
4312 static struct drm_info_list r600_mc_info_list[] = {
4313 {"r600_mc_info", r600_debugfs_mc_info, 0, NULL},
4314 };
4315 #endif
4317 int r600_debugfs_mc_info_init(struct radeon_device *rdev)
4318 {
4319 #if defined(CONFIG_DEBUG_FS)
4320 return radeon_debugfs_add_files(rdev, r600_mc_info_list, ARRAY_SIZE(r600_mc_info_list));
4321 #else
4322 return 0;
4323 #endif
4324 }
4326 /**
4327 * r600_mmio_hdp_flush - flush Host Data Path cache via MMIO
4328 * @rdev: radeon device structure
4329 *
4330 * Some R6XX/R7XX don't seem to take into account HDP flushes performed
4331 * through the ring buffer. This leads to corruption in rendering, see
4332 * http://bugzilla.kernel.org/show_bug.cgi?id=15186 . To avoid this, we
4333 * directly perform the HDP flush by writing the register through MMIO.
4334 */
4335 void r600_mmio_hdp_flush(struct radeon_device *rdev)
4336 {
4337 /* r7xx hw bug. write to HDP_DEBUG1 followed by fb read
4338 * rather than write to HDP_REG_COHERENCY_FLUSH_CNTL.
4339 * This seems to cause problems on some AGP cards. Just use the old
4340 * way.
4341 */
4342 if ((rdev->family >= CHIP_RV770) && (rdev->family <= CHIP_RV740) &&
4343 rdev->vram_scratch.ptr && !(rdev->flags & RADEON_IS_AGP)) {
4344 void __iomem *ptr = (void *)rdev->vram_scratch.ptr;
4345 u32 tmp;
4347 WREG32(HDP_DEBUG1, 0);
4348 tmp = readl((void __iomem *)ptr);
4349 } else
4350 WREG32(R_005480_HDP_MEM_COHERENCY_FLUSH_CNTL, 0x1);
4351 }
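/* r600_set_pcie_lanes() requests a new PCIE link width: the lane count
 * is translated into a LINK_WIDTH field value and a link reconfiguration
 * is triggered through RADEON_PCIE_LC_LINK_WIDTH_CNTL. IGP, non-PCIE
 * and X2 boards are left untouched.
 */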
4353 void r600_set_pcie_lanes(struct radeon_device *rdev, int lanes)
4354 {
4355 u32 link_width_cntl, mask;
4357 if (rdev->flags & RADEON_IS_IGP)
4358 return;
4360 if (!(rdev->flags & RADEON_IS_PCIE))
4361 return;
4363 /* x2 cards have a special sequence */
4364 if (ASIC_IS_X2(rdev))
4365 return;
4367 radeon_gui_idle(rdev);
4369 switch (lanes) {
4370 case 0:
4371 mask = RADEON_PCIE_LC_LINK_WIDTH_X0;
4372 break;
4373 case 1:
4374 mask = RADEON_PCIE_LC_LINK_WIDTH_X1;
4375 break;
4376 case 2:
4377 mask = RADEON_PCIE_LC_LINK_WIDTH_X2;
4378 break;
4379 case 4:
4380 mask = RADEON_PCIE_LC_LINK_WIDTH_X4;
4381 break;
4382 case 8:
4383 mask = RADEON_PCIE_LC_LINK_WIDTH_X8;
4384 break;
4385 case 12:
4386 /* not actually supported */
4387 mask = RADEON_PCIE_LC_LINK_WIDTH_X12;
4388 break;
4389 case 16:
4390 mask = RADEON_PCIE_LC_LINK_WIDTH_X16;
4391 break;
4392 default:
4393 DRM_ERROR("invalid pcie lane request: %d\n", lanes);
4394 return;
4395 }
4397 link_width_cntl = RREG32_PCIE_PORT(RADEON_PCIE_LC_LINK_WIDTH_CNTL);
4398 link_width_cntl &= ~RADEON_PCIE_LC_LINK_WIDTH_MASK;
4399 link_width_cntl |= mask << RADEON_PCIE_LC_LINK_WIDTH_SHIFT;
4400 link_width_cntl |= (RADEON_PCIE_LC_RECONFIG_NOW |
4401 R600_PCIE_LC_RECONFIG_ARC_MISSING_ESCAPE);
4403 WREG32_PCIE_PORT(RADEON_PCIE_LC_LINK_WIDTH_CNTL, link_width_cntl);
4404 }
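/* r600_get_pcie_lanes() reports the lane width currently negotiated by
 * the link, read back from RADEON_PCIE_LC_LINK_WIDTH_CNTL; IGP, non-PCIE
 * and X2 boards report 0.
 */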
4406 int r600_get_pcie_lanes(struct radeon_device *rdev)
4407 {
4408 u32 link_width_cntl;
4410 if (rdev->flags & RADEON_IS_IGP)
4411 return 0;
4413 if (!(rdev->flags & RADEON_IS_PCIE))
4414 return 0;
4416 /* x2 cards have a special sequence */
4417 if (ASIC_IS_X2(rdev))
4418 return 0;
4420 radeon_gui_idle(rdev);
4422 link_width_cntl = RREG32_PCIE_PORT(RADEON_PCIE_LC_LINK_WIDTH_CNTL);
4424 switch ((link_width_cntl & RADEON_PCIE_LC_LINK_WIDTH_RD_MASK) >> RADEON_PCIE_LC_LINK_WIDTH_RD_SHIFT) {
4425 case RADEON_PCIE_LC_LINK_WIDTH_X1:
4426 return 1;
4427 case RADEON_PCIE_LC_LINK_WIDTH_X2:
4428 return 2;
4429 case RADEON_PCIE_LC_LINK_WIDTH_X4:
4430 return 4;
4431 case RADEON_PCIE_LC_LINK_WIDTH_X8:
4432 return 8;
4433 case RADEON_PCIE_LC_LINK_WIDTH_X12:
4434 /* not actually supported */
4435 return 12;
4436 case RADEON_PCIE_LC_LINK_WIDTH_X0:
4437 case RADEON_PCIE_LC_LINK_WIDTH_X16:
4438 default:
4439 return 16;
4440 }
4441 }
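/* r600_pcie_gen2_enable() switches the PCIE link to gen2 speeds when
 * both the asic (RV6xx and newer) and the upstream bus support it; it
 * can be disabled with the radeon.pcie_gen2=0 module parameter.
 */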
4443 static void r600_pcie_gen2_enable(struct radeon_device *rdev)
4444 {
4445 u32 link_width_cntl, lanes, speed_cntl, training_cntl, tmp;
4446 u16 link_cntl2;
4448 if (radeon_pcie_gen2 == 0)
4449 return;
4451 if (rdev->flags & RADEON_IS_IGP)
4452 return;
4454 if (!(rdev->flags & RADEON_IS_PCIE))
4455 return;
4457 /* x2 cards have a special sequence */
4458 if (ASIC_IS_X2(rdev))
4459 return;
4461 /* only RV6xx+ chips are supported */
4462 if (rdev->family <= CHIP_R600)
4463 return;
4465 if ((rdev->pdev->bus->max_bus_speed != PCIE_SPEED_5_0GT) &&
4466 (rdev->pdev->bus->max_bus_speed != PCIE_SPEED_8_0GT))
4467 return;
4469 speed_cntl = RREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL);
4470 if (speed_cntl & LC_CURRENT_DATA_RATE) {
4471 DRM_INFO("PCIE gen 2 link speeds already enabled\n");
4472 return;
4473 }
4475 DRM_INFO("enabling PCIE gen 2 link speeds, disable with radeon.pcie_gen2=0\n");
4477 /* 55 nm r6xx asics */
4478 if ((rdev->family == CHIP_RV670) ||
4479 (rdev->family == CHIP_RV620) ||
4480 (rdev->family == CHIP_RV635)) {
4481 /* advertise upconfig capability */
4482 link_width_cntl = RREG32_PCIE_PORT(PCIE_LC_LINK_WIDTH_CNTL);
4483 link_width_cntl &= ~LC_UPCONFIGURE_DIS;
4484 WREG32_PCIE_PORT(PCIE_LC_LINK_WIDTH_CNTL, link_width_cntl);
4485 link_width_cntl = RREG32_PCIE_PORT(PCIE_LC_LINK_WIDTH_CNTL);
4486 if (link_width_cntl & LC_RENEGOTIATION_SUPPORT) {
4487 lanes = (link_width_cntl & LC_LINK_WIDTH_RD_MASK) >> LC_LINK_WIDTH_RD_SHIFT;
4488 link_width_cntl &= ~(LC_LINK_WIDTH_MASK |
4489 LC_RECONFIG_ARC_MISSING_ESCAPE);
4490 link_width_cntl |= lanes | LC_RECONFIG_NOW | LC_RENEGOTIATE_EN;
4491 WREG32_PCIE_PORT(PCIE_LC_LINK_WIDTH_CNTL, link_width_cntl);
4492 } else {
4493 link_width_cntl |= LC_UPCONFIGURE_DIS;
4494 WREG32_PCIE_PORT(PCIE_LC_LINK_WIDTH_CNTL, link_width_cntl);
4495 }
4496 }
4498 speed_cntl = RREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL);
4499 if ((speed_cntl & LC_OTHER_SIDE_EVER_SENT_GEN2) &&
4500 (speed_cntl & LC_OTHER_SIDE_SUPPORTS_GEN2)) {
4502 /* 55 nm r6xx asics */
4503 if ((rdev->family == CHIP_RV670) ||
4504 (rdev->family == CHIP_RV620) ||
4505 (rdev->family == CHIP_RV635)) {
4506 WREG32(MM_CFGREGS_CNTL, 0x8);
4507 link_cntl2 = RREG32(0x4088);
4508 WREG32(MM_CFGREGS_CNTL, 0);
4509 /* not supported yet */
4510 if (link_cntl2 & SELECTABLE_DEEMPHASIS)
4511 return;
4512 }
4514 speed_cntl &= ~LC_SPEED_CHANGE_ATTEMPTS_ALLOWED_MASK;
4515 speed_cntl |= (0x3 << LC_SPEED_CHANGE_ATTEMPTS_ALLOWED_SHIFT);
4516 speed_cntl &= ~LC_VOLTAGE_TIMER_SEL_MASK;
4517 speed_cntl &= ~LC_FORCE_DIS_HW_SPEED_CHANGE;
4518 speed_cntl |= LC_FORCE_EN_HW_SPEED_CHANGE;
4519 WREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL, speed_cntl);
4521 tmp = RREG32(0x541c);
4522 WREG32(0x541c, tmp | 0x8);
4523 WREG32(MM_CFGREGS_CNTL, MM_WR_TO_CFG_EN);
4524 link_cntl2 = RREG16(0x4088);
4525 link_cntl2 &= ~TARGET_LINK_SPEED_MASK;
4526 link_cntl2 |= 3;
4527 WREG16(0x4088, link_cntl2);
4528 WREG32(MM_CFGREGS_CNTL, 0);
4530 if ((rdev->family == CHIP_RV670) ||
4531 (rdev->family == CHIP_RV620) ||
4532 (rdev->family == CHIP_RV635)) {
4533 training_cntl = RREG32_PCIE_PORT(PCIE_LC_TRAINING_CNTL);
4534 training_cntl &= ~LC_POINT_7_PLUS_EN;
4535 WREG32_PCIE_PORT(PCIE_LC_TRAINING_CNTL, training_cntl);
4537 speed_cntl = RREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL);
4538 speed_cntl &= ~LC_TARGET_LINK_SPEED_OVERRIDE_EN;
4539 WREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL, speed_cntl);
4542 speed_cntl = RREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL);
4543 speed_cntl |= LC_GEN2_EN_STRAP;
4544 WREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL, speed_cntl);
4546 } else {
4547 link_width_cntl = RREG32_PCIE_PORT(PCIE_LC_LINK_WIDTH_CNTL);
4548 /* XXX: only disable it if gen1 bridge vendor == 0x111d or 0x1106 */
4549 if (1)
4550 link_width_cntl |= LC_UPCONFIGURE_DIS;
4551 else
4552 link_width_cntl &= ~LC_UPCONFIGURE_DIS;
4553 WREG32_PCIE_PORT(PCIE_LC_LINK_WIDTH_CNTL, link_width_cntl);
4554 }
4555 }
4557 /**
4558 * r600_get_gpu_clock_counter - return GPU clock counter snapshot
4559 *
4560 * @rdev: radeon_device pointer
4561 *
4562 * Fetches a GPU clock counter snapshot (R6xx-cayman).
4563 * Returns the 64 bit clock counter snapshot.
4564 */
4565 uint64_t r600_get_gpu_clock_counter(struct radeon_device *rdev)
4566 {
4567 uint64_t clock;
4569 mutex_lock(&rdev->gpu_clock_mutex);
4570 WREG32(RLC_CAPTURE_GPU_CLOCK_COUNT, 1);
4571 clock = (uint64_t)RREG32(RLC_GPU_CLOCK_COUNT_LSB) |
4572 ((uint64_t)RREG32(RLC_GPU_CLOCK_COUNT_MSB) << 32ULL);
4573 mutex_unlock(&rdev->gpu_clock_mutex);
4574 return clock;
4575 }