/*
 * Copyright 2014 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */
#include "drmP.h"
#include "amdgpu.h"
#include "amdgpu_pm.h"
#include "amdgpu_i2c.h"
#include "cikd.h"
#include "atom.h"
#include "amdgpu_atombios.h"
#include "atombios_crtc.h"
#include "atombios_encoders.h"
#include "amdgpu_pll.h"
#include "amdgpu_connectors.h"

#include "dce/dce_8_0_d.h"
#include "dce/dce_8_0_sh_mask.h"

#include "gca/gfx_7_2_enum.h"

#include "gmc/gmc_7_1_d.h"
#include "gmc/gmc_7_1_sh_mask.h"

#include "oss/oss_2_0_d.h"
#include "oss/oss_2_0_sh_mask.h"

static void dce_v8_0_set_display_funcs(struct amdgpu_device *adev);
static void dce_v8_0_set_irq_funcs(struct amdgpu_device *adev);

static const u32 crtc_offsets[6] =
{
	CRTC0_REGISTER_OFFSET,
	CRTC1_REGISTER_OFFSET,
	CRTC2_REGISTER_OFFSET,
	CRTC3_REGISTER_OFFSET,
	CRTC4_REGISTER_OFFSET,
	CRTC5_REGISTER_OFFSET
};

static const uint32_t dig_offsets[] = {
	CRTC0_REGISTER_OFFSET,
	CRTC1_REGISTER_OFFSET,
	CRTC2_REGISTER_OFFSET,
	CRTC3_REGISTER_OFFSET,
	CRTC4_REGISTER_OFFSET,
	CRTC5_REGISTER_OFFSET,
	(0x13830 - 0x7030) >> 2,
};

static const struct {
	uint32_t reg;
	uint32_t vblank;
	uint32_t vline;
	uint32_t hpd;

} interrupt_status_offsets[6] = { {
	.reg = mmDISP_INTERRUPT_STATUS,
	.vblank = DISP_INTERRUPT_STATUS__LB_D1_VBLANK_INTERRUPT_MASK,
	.vline = DISP_INTERRUPT_STATUS__LB_D1_VLINE_INTERRUPT_MASK,
	.hpd = DISP_INTERRUPT_STATUS__DC_HPD1_INTERRUPT_MASK
}, {
	.reg = mmDISP_INTERRUPT_STATUS_CONTINUE,
	.vblank = DISP_INTERRUPT_STATUS_CONTINUE__LB_D2_VBLANK_INTERRUPT_MASK,
	.vline = DISP_INTERRUPT_STATUS_CONTINUE__LB_D2_VLINE_INTERRUPT_MASK,
	.hpd = DISP_INTERRUPT_STATUS_CONTINUE__DC_HPD2_INTERRUPT_MASK
}, {
	.reg = mmDISP_INTERRUPT_STATUS_CONTINUE2,
	.vblank = DISP_INTERRUPT_STATUS_CONTINUE2__LB_D3_VBLANK_INTERRUPT_MASK,
	.vline = DISP_INTERRUPT_STATUS_CONTINUE2__LB_D3_VLINE_INTERRUPT_MASK,
	.hpd = DISP_INTERRUPT_STATUS_CONTINUE2__DC_HPD3_INTERRUPT_MASK
}, {
	.reg = mmDISP_INTERRUPT_STATUS_CONTINUE3,
	.vblank = DISP_INTERRUPT_STATUS_CONTINUE3__LB_D4_VBLANK_INTERRUPT_MASK,
	.vline = DISP_INTERRUPT_STATUS_CONTINUE3__LB_D4_VLINE_INTERRUPT_MASK,
	.hpd = DISP_INTERRUPT_STATUS_CONTINUE3__DC_HPD4_INTERRUPT_MASK
}, {
	.reg = mmDISP_INTERRUPT_STATUS_CONTINUE4,
	.vblank =
	DISP_INTERRUPT_STATUS_CONTINUE4__LB_D5_VBLANK_INTERRUPT_MASK,
	.vline = DISP_INTERRUPT_STATUS_CONTINUE4__LB_D5_VLINE_INTERRUPT_MASK,
	.hpd = DISP_INTERRUPT_STATUS_CONTINUE4__DC_HPD5_INTERRUPT_MASK
}, {
	.reg = mmDISP_INTERRUPT_STATUS_CONTINUE5,
	.vblank = DISP_INTERRUPT_STATUS_CONTINUE5__LB_D6_VBLANK_INTERRUPT_MASK,
	.vline = DISP_INTERRUPT_STATUS_CONTINUE5__LB_D6_VLINE_INTERRUPT_MASK,
	.hpd = DISP_INTERRUPT_STATUS_CONTINUE5__DC_HPD6_INTERRUPT_MASK
} };

static const uint32_t hpd_int_control_offsets[6] = {
	mmDC_HPD1_INT_CONTROL,
	mmDC_HPD2_INT_CONTROL,
	mmDC_HPD3_INT_CONTROL,
	mmDC_HPD4_INT_CONTROL,
	mmDC_HPD5_INT_CONTROL,
	mmDC_HPD6_INT_CONTROL,
};

static u32 dce_v8_0_audio_endpt_rreg(struct amdgpu_device *adev,
				     u32 block_offset, u32 reg)
{
	unsigned long flags;
	u32 r;

	spin_lock_irqsave(&adev->audio_endpt_idx_lock, flags);
	WREG32(mmAZALIA_F0_CODEC_ENDPOINT_INDEX + block_offset, reg);
	r = RREG32(mmAZALIA_F0_CODEC_ENDPOINT_DATA + block_offset);
	spin_unlock_irqrestore(&adev->audio_endpt_idx_lock, flags);

	return r;
}

static void dce_v8_0_audio_endpt_wreg(struct amdgpu_device *adev,
				      u32 block_offset, u32 reg, u32 v)
{
	unsigned long flags;

	spin_lock_irqsave(&adev->audio_endpt_idx_lock, flags);
	WREG32(mmAZALIA_F0_CODEC_ENDPOINT_INDEX + block_offset, reg);
	WREG32(mmAZALIA_F0_CODEC_ENDPOINT_DATA + block_offset, v);
	spin_unlock_irqrestore(&adev->audio_endpt_idx_lock, flags);
}

static bool dce_v8_0_is_in_vblank(struct amdgpu_device *adev, int crtc)
{
	if (RREG32(mmCRTC_STATUS + crtc_offsets[crtc]) &
	    CRTC_V_BLANK_START_END__CRTC_V_BLANK_START_MASK)
		return true;
	else
		return false;
}

static bool dce_v8_0_is_counter_moving(struct amdgpu_device *adev, int crtc)
{
	u32 pos1, pos2;

	pos1 = RREG32(mmCRTC_STATUS_POSITION + crtc_offsets[crtc]);
	pos2 = RREG32(mmCRTC_STATUS_POSITION + crtc_offsets[crtc]);

	if (pos1 != pos2)
		return true;
	else
		return false;
}

/**
 * dce_v8_0_vblank_wait - vblank wait asic callback.
 *
 * @adev: amdgpu_device pointer
 * @crtc: crtc to wait for vblank on
 *
 * Wait for vblank on the requested crtc (evergreen+).
 */
static void dce_v8_0_vblank_wait(struct amdgpu_device *adev, int crtc)
{
	unsigned i = 0;

	if (crtc >= adev->mode_info.num_crtc)
		return;

	if (!(RREG32(mmCRTC_CONTROL + crtc_offsets[crtc]) & CRTC_CONTROL__CRTC_MASTER_EN_MASK))
		return;

	/* depending on when we hit vblank, we may be close to active; if so,
	 * wait for another frame.
	 */
	while (dce_v8_0_is_in_vblank(adev, crtc)) {
		if (i++ % 100 == 0) {
			if (!dce_v8_0_is_counter_moving(adev, crtc))
				break;
		}
	}

	while (!dce_v8_0_is_in_vblank(adev, crtc)) {
		if (i++ % 100 == 0) {
			if (!dce_v8_0_is_counter_moving(adev, crtc))
				break;
		}
	}
}

static u32 dce_v8_0_vblank_get_counter(struct amdgpu_device *adev, int crtc)
{
	if (crtc >= adev->mode_info.num_crtc)
		return 0;
	else
		return RREG32(mmCRTC_STATUS_FRAME_COUNT + crtc_offsets[crtc]);
}

static void dce_v8_0_pageflip_interrupt_init(struct amdgpu_device *adev)
{
	unsigned i;

	/* Enable pflip interrupts */
	for (i = 0; i < adev->mode_info.num_crtc; i++)
		amdgpu_irq_get(adev, &adev->pageflip_irq, i);
}

static void dce_v8_0_pageflip_interrupt_fini(struct amdgpu_device *adev)
{
	unsigned i;

	/* Disable pflip interrupts */
	for (i = 0; i < adev->mode_info.num_crtc; i++)
		amdgpu_irq_put(adev, &adev->pageflip_irq, i);
}

/**
 * dce_v8_0_page_flip - pageflip callback.
 *
 * @adev: amdgpu_device pointer
 * @crtc_id: crtc to cleanup pageflip on
 * @crtc_base: new address of the crtc (GPU MC address)
 *
 * Does the actual pageflip (evergreen+).
 * During vblank we take the crtc lock and wait for the update_pending
 * bit to go high, when it does, we release the lock, and allow the
 * double buffered update to take place.
 * Returns the current update pending status.
 */
static void dce_v8_0_page_flip(struct amdgpu_device *adev,
			       int crtc_id, u64 crtc_base)
{
	struct amdgpu_crtc *amdgpu_crtc = adev->mode_info.crtcs[crtc_id];
	u32 tmp = RREG32(mmGRPH_UPDATE + amdgpu_crtc->crtc_offset);
	int i;

	/* Lock the graphics update lock */
	tmp |= GRPH_UPDATE__GRPH_UPDATE_LOCK_MASK;
	WREG32(mmGRPH_UPDATE + amdgpu_crtc->crtc_offset, tmp);

	/* update the scanout addresses */
	WREG32(mmGRPH_SECONDARY_SURFACE_ADDRESS_HIGH + amdgpu_crtc->crtc_offset,
	       upper_32_bits(crtc_base));
	WREG32(mmGRPH_SECONDARY_SURFACE_ADDRESS + amdgpu_crtc->crtc_offset,
	       (u32)crtc_base);

	WREG32(mmGRPH_PRIMARY_SURFACE_ADDRESS_HIGH + amdgpu_crtc->crtc_offset,
	       upper_32_bits(crtc_base));
	WREG32(mmGRPH_PRIMARY_SURFACE_ADDRESS + amdgpu_crtc->crtc_offset,
	       (u32)crtc_base);

	/* Wait for update_pending to go high. */
	for (i = 0; i < adev->usec_timeout; i++) {
		if (RREG32(mmGRPH_UPDATE + amdgpu_crtc->crtc_offset) &
		    GRPH_UPDATE__GRPH_SURFACE_UPDATE_PENDING_MASK)
			break;
		udelay(1);
	}
	DRM_DEBUG("Update pending now high. Unlocking vupdate_lock.\n");

	/* Unlock the lock, so double-buffering can take place inside vblank */
	tmp &= ~GRPH_UPDATE__GRPH_UPDATE_LOCK_MASK;
	WREG32(mmGRPH_UPDATE + amdgpu_crtc->crtc_offset, tmp);
}

static int dce_v8_0_crtc_get_scanoutpos(struct amdgpu_device *adev, int crtc,
					u32 *vbl, u32 *position)
{
	if ((crtc < 0) || (crtc >= adev->mode_info.num_crtc))
		return -EINVAL;

	*vbl = RREG32(mmCRTC_V_BLANK_START_END + crtc_offsets[crtc]);
	*position = RREG32(mmCRTC_STATUS_POSITION + crtc_offsets[crtc]);

	return 0;
}

/**
 * dce_v8_0_hpd_sense - hpd sense callback.
 *
 * @adev: amdgpu_device pointer
 * @hpd: hpd (hotplug detect) pin
 *
 * Checks if a digital monitor is connected (evergreen+).
 * Returns true if connected, false if not connected.
 */
static bool dce_v8_0_hpd_sense(struct amdgpu_device *adev,
			       enum amdgpu_hpd_id hpd)
{
	bool connected = false;

	switch (hpd) {
	case AMDGPU_HPD_1:
		if (RREG32(mmDC_HPD1_INT_STATUS) & DC_HPD1_INT_STATUS__DC_HPD1_SENSE_MASK)
			connected = true;
		break;
	case AMDGPU_HPD_2:
		if (RREG32(mmDC_HPD2_INT_STATUS) & DC_HPD2_INT_STATUS__DC_HPD2_SENSE_MASK)
			connected = true;
		break;
	case AMDGPU_HPD_3:
		if (RREG32(mmDC_HPD3_INT_STATUS) & DC_HPD3_INT_STATUS__DC_HPD3_SENSE_MASK)
			connected = true;
		break;
	case AMDGPU_HPD_4:
		if (RREG32(mmDC_HPD4_INT_STATUS) & DC_HPD4_INT_STATUS__DC_HPD4_SENSE_MASK)
			connected = true;
		break;
	case AMDGPU_HPD_5:
		if (RREG32(mmDC_HPD5_INT_STATUS) & DC_HPD5_INT_STATUS__DC_HPD5_SENSE_MASK)
			connected = true;
		break;
	case AMDGPU_HPD_6:
		if (RREG32(mmDC_HPD6_INT_STATUS) & DC_HPD6_INT_STATUS__DC_HPD6_SENSE_MASK)
			connected = true;
		break;
	default:
		break;
	}

	return connected;
}

/**
 * dce_v8_0_hpd_set_polarity - hpd set polarity callback.
 *
 * @adev: amdgpu_device pointer
 * @hpd: hpd (hotplug detect) pin
 *
 * Set the polarity of the hpd pin (evergreen+).
 */
static void dce_v8_0_hpd_set_polarity(struct amdgpu_device *adev,
				      enum amdgpu_hpd_id hpd)
{
	u32 tmp;
	bool connected = dce_v8_0_hpd_sense(adev, hpd);

	switch (hpd) {
	case AMDGPU_HPD_1:
		tmp = RREG32(mmDC_HPD1_INT_CONTROL);
		if (connected)
			tmp &= ~DC_HPD1_INT_CONTROL__DC_HPD1_INT_POLARITY_MASK;
		else
			tmp |= DC_HPD1_INT_CONTROL__DC_HPD1_INT_POLARITY_MASK;
		WREG32(mmDC_HPD1_INT_CONTROL, tmp);
		break;
	case AMDGPU_HPD_2:
		tmp = RREG32(mmDC_HPD2_INT_CONTROL);
		if (connected)
			tmp &= ~DC_HPD2_INT_CONTROL__DC_HPD2_INT_POLARITY_MASK;
		else
			tmp |= DC_HPD2_INT_CONTROL__DC_HPD2_INT_POLARITY_MASK;
		WREG32(mmDC_HPD2_INT_CONTROL, tmp);
		break;
	case AMDGPU_HPD_3:
		tmp = RREG32(mmDC_HPD3_INT_CONTROL);
		if (connected)
			tmp &= ~DC_HPD3_INT_CONTROL__DC_HPD3_INT_POLARITY_MASK;
		else
			tmp |= DC_HPD3_INT_CONTROL__DC_HPD3_INT_POLARITY_MASK;
		WREG32(mmDC_HPD3_INT_CONTROL, tmp);
		break;
	case AMDGPU_HPD_4:
		tmp = RREG32(mmDC_HPD4_INT_CONTROL);
		if (connected)
			tmp &= ~DC_HPD4_INT_CONTROL__DC_HPD4_INT_POLARITY_MASK;
		else
			tmp |= DC_HPD4_INT_CONTROL__DC_HPD4_INT_POLARITY_MASK;
		WREG32(mmDC_HPD4_INT_CONTROL, tmp);
		break;
	case AMDGPU_HPD_5:
		tmp = RREG32(mmDC_HPD5_INT_CONTROL);
		if (connected)
			tmp &= ~DC_HPD5_INT_CONTROL__DC_HPD5_INT_POLARITY_MASK;
		else
			tmp |= DC_HPD5_INT_CONTROL__DC_HPD5_INT_POLARITY_MASK;
		WREG32(mmDC_HPD5_INT_CONTROL, tmp);
		break;
	case AMDGPU_HPD_6:
		tmp = RREG32(mmDC_HPD6_INT_CONTROL);
		if (connected)
			tmp &= ~DC_HPD6_INT_CONTROL__DC_HPD6_INT_POLARITY_MASK;
		else
			tmp |= DC_HPD6_INT_CONTROL__DC_HPD6_INT_POLARITY_MASK;
		WREG32(mmDC_HPD6_INT_CONTROL, tmp);
		break;
	default:
		break;
	}
}

/**
 * dce_v8_0_hpd_init - hpd setup callback.
 *
 * @adev: amdgpu_device pointer
 *
 * Set up the hpd pins used by the card (evergreen+).
 * Enable the pin, set the polarity, and enable the hpd interrupts.
 */
static void dce_v8_0_hpd_init(struct amdgpu_device *adev)
{
	struct drm_device *dev = adev->ddev;
	struct drm_connector *connector;
	u32 tmp = (0x9c4 << DC_HPD1_CONTROL__DC_HPD1_CONNECTION_TIMER__SHIFT) |
		(0xfa << DC_HPD1_CONTROL__DC_HPD1_RX_INT_TIMER__SHIFT) |
		DC_HPD1_CONTROL__DC_HPD1_EN_MASK;

	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
		struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector);

		if (connector->connector_type == DRM_MODE_CONNECTOR_eDP ||
		    connector->connector_type == DRM_MODE_CONNECTOR_LVDS) {
			/* don't try to enable hpd on eDP or LVDS to avoid
			 * breaking the aux dp channel on imacs; this helps
			 * (but does not completely fix)
			 * https://bugzilla.redhat.com/show_bug.cgi?id=726143
			 * and also avoids interrupt storms during dpms.
			 */
			continue;
		}
		switch (amdgpu_connector->hpd.hpd) {
		case AMDGPU_HPD_1:
			WREG32(mmDC_HPD1_CONTROL, tmp);
			break;
		case AMDGPU_HPD_2:
			WREG32(mmDC_HPD2_CONTROL, tmp);
			break;
		case AMDGPU_HPD_3:
			WREG32(mmDC_HPD3_CONTROL, tmp);
			break;
		case AMDGPU_HPD_4:
			WREG32(mmDC_HPD4_CONTROL, tmp);
			break;
		case AMDGPU_HPD_5:
			WREG32(mmDC_HPD5_CONTROL, tmp);
			break;
		case AMDGPU_HPD_6:
			WREG32(mmDC_HPD6_CONTROL, tmp);
			break;
		default:
			break;
		}
		dce_v8_0_hpd_set_polarity(adev, amdgpu_connector->hpd.hpd);
		amdgpu_irq_get(adev, &adev->hpd_irq, amdgpu_connector->hpd.hpd);
	}
}

/**
 * dce_v8_0_hpd_fini - hpd tear down callback.
 *
 * @adev: amdgpu_device pointer
 *
 * Tear down the hpd pins used by the card (evergreen+).
 * Disable the hpd interrupts.
 */
static void dce_v8_0_hpd_fini(struct amdgpu_device *adev)
{
	struct drm_device *dev = adev->ddev;
	struct drm_connector *connector;

	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
		struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector);

		switch (amdgpu_connector->hpd.hpd) {
		case AMDGPU_HPD_1:
			WREG32(mmDC_HPD1_CONTROL, 0);
			break;
		case AMDGPU_HPD_2:
			WREG32(mmDC_HPD2_CONTROL, 0);
			break;
		case AMDGPU_HPD_3:
			WREG32(mmDC_HPD3_CONTROL, 0);
			break;
		case AMDGPU_HPD_4:
			WREG32(mmDC_HPD4_CONTROL, 0);
			break;
		case AMDGPU_HPD_5:
			WREG32(mmDC_HPD5_CONTROL, 0);
			break;
		case AMDGPU_HPD_6:
			WREG32(mmDC_HPD6_CONTROL, 0);
			break;
		default:
			break;
		}
		amdgpu_irq_put(adev, &adev->hpd_irq, amdgpu_connector->hpd.hpd);
	}
}

static u32 dce_v8_0_hpd_get_gpio_reg(struct amdgpu_device *adev)
{
	return mmDC_GPIO_HPD_A;
}

static bool dce_v8_0_is_display_hung(struct amdgpu_device *adev)
{
	u32 crtc_hung = 0;
	u32 crtc_status[6];
	u32 i, j, tmp;

	for (i = 0; i < adev->mode_info.num_crtc; i++) {
		if (RREG32(mmCRTC_CONTROL + crtc_offsets[i]) & CRTC_CONTROL__CRTC_MASTER_EN_MASK) {
			crtc_status[i] = RREG32(mmCRTC_STATUS_HV_COUNT + crtc_offsets[i]);
			crtc_hung |= (1 << i);
		}
	}

	for (j = 0; j < 10; j++) {
		for (i = 0; i < adev->mode_info.num_crtc; i++) {
			if (crtc_hung & (1 << i)) {
				tmp = RREG32(mmCRTC_STATUS_HV_COUNT + crtc_offsets[i]);
				if (tmp != crtc_status[i])
					crtc_hung &= ~(1 << i);
			}
		}
		if (crtc_hung == 0)
			return false;
		udelay(100);
	}

	return true;
}

static void dce_v8_0_stop_mc_access(struct amdgpu_device *adev,
				    struct amdgpu_mode_mc_save
				    *save)
{
	u32 crtc_enabled, tmp;
	int i;

	save->vga_render_control = RREG32(mmVGA_RENDER_CONTROL);
	save->vga_hdp_control = RREG32(mmVGA_HDP_CONTROL);

	/* disable VGA render */
	tmp = RREG32(mmVGA_RENDER_CONTROL);
	tmp = REG_SET_FIELD(tmp, VGA_RENDER_CONTROL, VGA_VSTATUS_CNTL, 0);
	WREG32(mmVGA_RENDER_CONTROL, tmp);

	/* blank the display controllers */
	for (i = 0; i < adev->mode_info.num_crtc; i++) {
		crtc_enabled = REG_GET_FIELD(RREG32(mmCRTC_CONTROL + crtc_offsets[i]),
					     CRTC_CONTROL, CRTC_MASTER_EN);
		if (crtc_enabled) {
#if 0
			u32 frame_count;
			int j;

			save->crtc_enabled[i] = true;
			tmp = RREG32(mmCRTC_BLANK_CONTROL + crtc_offsets[i]);
			if (REG_GET_FIELD(tmp, CRTC_BLANK_CONTROL, CRTC_BLANK_DATA_EN) == 0) {
				amdgpu_display_vblank_wait(adev, i);
				WREG32(mmCRTC_UPDATE_LOCK + crtc_offsets[i], 1);
				tmp = REG_SET_FIELD(tmp, CRTC_BLANK_CONTROL, CRTC_BLANK_DATA_EN, 1);
				WREG32(mmCRTC_BLANK_CONTROL + crtc_offsets[i], tmp);
				WREG32(mmCRTC_UPDATE_LOCK + crtc_offsets[i], 0);
			}
			/* wait for the next frame */
			frame_count = amdgpu_display_vblank_get_counter(adev, i);
			for (j = 0; j < adev->usec_timeout; j++) {
				if (amdgpu_display_vblank_get_counter(adev, i) != frame_count)
					break;
				udelay(1);
			}
			tmp = RREG32(mmGRPH_UPDATE + crtc_offsets[i]);
			if (REG_GET_FIELD(tmp, GRPH_UPDATE, GRPH_UPDATE_LOCK) == 0) {
				tmp = REG_SET_FIELD(tmp, GRPH_UPDATE, GRPH_UPDATE_LOCK, 1);
				WREG32(mmGRPH_UPDATE + crtc_offsets[i], tmp);
			}
			tmp = RREG32(mmMASTER_UPDATE_LOCK + crtc_offsets[i]);
			if (REG_GET_FIELD(tmp, MASTER_UPDATE_LOCK, MASTER_UPDATE_LOCK) == 0) {
				tmp = REG_SET_FIELD(tmp, MASTER_UPDATE_LOCK, MASTER_UPDATE_LOCK, 1);
				WREG32(mmMASTER_UPDATE_LOCK + crtc_offsets[i], tmp);
			}
#else
			/* XXX this is a hack to avoid strange behavior with EFI on certain systems */
			WREG32(mmCRTC_UPDATE_LOCK + crtc_offsets[i], 1);
			tmp = RREG32(mmCRTC_CONTROL + crtc_offsets[i]);
			tmp = REG_SET_FIELD(tmp, CRTC_CONTROL, CRTC_MASTER_EN, 0);
			WREG32(mmCRTC_CONTROL + crtc_offsets[i], tmp);
			WREG32(mmCRTC_UPDATE_LOCK + crtc_offsets[i], 0);
			save->crtc_enabled[i] = false;
			/* ***** */
#endif
		} else {
			save->crtc_enabled[i] = false;
		}
	}
}

static void dce_v8_0_resume_mc_access(struct amdgpu_device *adev,
				      struct amdgpu_mode_mc_save *save)
{
	u32 tmp, frame_count;
	int i, j;

	/* update crtc base addresses */
	for (i = 0; i < adev->mode_info.num_crtc; i++) {
		WREG32(mmGRPH_PRIMARY_SURFACE_ADDRESS_HIGH + crtc_offsets[i],
		       upper_32_bits(adev->mc.vram_start));
		WREG32(mmGRPH_SECONDARY_SURFACE_ADDRESS_HIGH + crtc_offsets[i],
		       upper_32_bits(adev->mc.vram_start));
		WREG32(mmGRPH_PRIMARY_SURFACE_ADDRESS + crtc_offsets[i],
		       (u32)adev->mc.vram_start);
		WREG32(mmGRPH_SECONDARY_SURFACE_ADDRESS + crtc_offsets[i],
		       (u32)adev->mc.vram_start);

		if (save->crtc_enabled[i]) {
			tmp = RREG32(mmMASTER_UPDATE_MODE + crtc_offsets[i]);
			if (REG_GET_FIELD(tmp, MASTER_UPDATE_MODE, MASTER_UPDATE_MODE) != 3) {
				tmp = REG_SET_FIELD(tmp, MASTER_UPDATE_MODE, MASTER_UPDATE_MODE, 3);
				WREG32(mmMASTER_UPDATE_MODE + crtc_offsets[i], tmp);
			}
			tmp = RREG32(mmGRPH_UPDATE + crtc_offsets[i]);
			if (REG_GET_FIELD(tmp, GRPH_UPDATE, GRPH_UPDATE_LOCK)) {
				tmp = REG_SET_FIELD(tmp, GRPH_UPDATE, GRPH_UPDATE_LOCK, 0);
				WREG32(mmGRPH_UPDATE + crtc_offsets[i], tmp);
			}
			tmp = RREG32(mmMASTER_UPDATE_LOCK +
				     crtc_offsets[i]);
			if (REG_GET_FIELD(tmp, MASTER_UPDATE_LOCK, MASTER_UPDATE_LOCK)) {
				tmp = REG_SET_FIELD(tmp, MASTER_UPDATE_LOCK, MASTER_UPDATE_LOCK, 0);
				WREG32(mmMASTER_UPDATE_LOCK + crtc_offsets[i], tmp);
			}
			for (j = 0; j < adev->usec_timeout; j++) {
				tmp = RREG32(mmGRPH_UPDATE + crtc_offsets[i]);
				if (REG_GET_FIELD(tmp, GRPH_UPDATE, GRPH_SURFACE_UPDATE_PENDING) == 0)
					break;
				udelay(1);
			}
			tmp = RREG32(mmCRTC_BLANK_CONTROL + crtc_offsets[i]);
			tmp = REG_SET_FIELD(tmp, CRTC_BLANK_CONTROL, CRTC_BLANK_DATA_EN, 0);
			WREG32(mmCRTC_UPDATE_LOCK + crtc_offsets[i], 1);
			WREG32(mmCRTC_BLANK_CONTROL + crtc_offsets[i], tmp);
			WREG32(mmCRTC_UPDATE_LOCK + crtc_offsets[i], 0);
			/* wait for the next frame */
			frame_count = amdgpu_display_vblank_get_counter(adev, i);
			for (j = 0; j < adev->usec_timeout; j++) {
				if (amdgpu_display_vblank_get_counter(adev, i) != frame_count)
					break;
				udelay(1);
			}
		}
	}

	WREG32(mmVGA_MEMORY_BASE_ADDRESS_HIGH, upper_32_bits(adev->mc.vram_start));
	WREG32(mmVGA_MEMORY_BASE_ADDRESS, lower_32_bits(adev->mc.vram_start));

	/* Unlock vga access */
	WREG32(mmVGA_HDP_CONTROL, save->vga_hdp_control);
	mdelay(1);
	WREG32(mmVGA_RENDER_CONTROL, save->vga_render_control);
}

static void dce_v8_0_set_vga_render_state(struct amdgpu_device *adev,
					  bool render)
{
	u32 tmp;

	/* Lockout access through VGA aperture */
	tmp = RREG32(mmVGA_HDP_CONTROL);
	if (render)
		tmp = REG_SET_FIELD(tmp, VGA_HDP_CONTROL, VGA_MEMORY_DISABLE, 0);
	else
		tmp = REG_SET_FIELD(tmp, VGA_HDP_CONTROL, VGA_MEMORY_DISABLE, 1);
	WREG32(mmVGA_HDP_CONTROL, tmp);

	/* disable VGA render */
	tmp = RREG32(mmVGA_RENDER_CONTROL);
	if (render)
		tmp = REG_SET_FIELD(tmp, VGA_RENDER_CONTROL, VGA_VSTATUS_CNTL, 1);
	else
		tmp = REG_SET_FIELD(tmp, VGA_RENDER_CONTROL, VGA_VSTATUS_CNTL, 0);
	WREG32(mmVGA_RENDER_CONTROL, tmp);
}

static void dce_v8_0_program_fmt(struct drm_encoder *encoder)
{
	struct drm_device *dev = encoder->dev;
	struct amdgpu_device *adev = dev->dev_private;
	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(encoder->crtc);
	struct drm_connector *connector = amdgpu_get_connector_for_encoder(encoder);
	int bpc = 0;
	u32 tmp = 0;
	enum amdgpu_connector_dither dither = AMDGPU_FMT_DITHER_DISABLE;

	if (connector) {
		struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector);
		bpc = amdgpu_connector_get_monitor_bpc(connector);
		dither = amdgpu_connector->dither;
	}

	/* LVDS/eDP FMT is set up by atom */
	if (amdgpu_encoder->devices & ATOM_DEVICE_LCD_SUPPORT)
		return;

	/* not needed for analog */
	if ((amdgpu_encoder->encoder_id == ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC1) ||
	    (amdgpu_encoder->encoder_id == ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC2))
		return;

	if (bpc == 0)
		return;

	switch (bpc) {
	case 6:
		if (dither == AMDGPU_FMT_DITHER_ENABLE)
			/* XXX sort out optimal dither settings */
			tmp |= (FMT_BIT_DEPTH_CONTROL__FMT_FRAME_RANDOM_ENABLE_MASK |
				FMT_BIT_DEPTH_CONTROL__FMT_HIGHPASS_RANDOM_ENABLE_MASK |
				FMT_BIT_DEPTH_CONTROL__FMT_SPATIAL_DITHER_EN_MASK |
				(0 << FMT_BIT_DEPTH_CONTROL__FMT_SPATIAL_DITHER_DEPTH__SHIFT));
		else
			tmp |= (FMT_BIT_DEPTH_CONTROL__FMT_TRUNCATE_EN_MASK |
				(0 << FMT_BIT_DEPTH_CONTROL__FMT_TRUNCATE_DEPTH__SHIFT));
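		/* Note: the SPATIAL_DITHER_DEPTH/TRUNCATE_DEPTH fields encode
		 * the target depth as 0/1/2 for 6/8/10 bpc; this 6 bpc case
		 * uses 0, and the 8 and 10 bpc cases below use 1 and 2.
		 */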
		break;
	case 8:
		if (dither == AMDGPU_FMT_DITHER_ENABLE)
			/* XXX sort out optimal dither settings */
			tmp |= (FMT_BIT_DEPTH_CONTROL__FMT_FRAME_RANDOM_ENABLE_MASK |
				FMT_BIT_DEPTH_CONTROL__FMT_HIGHPASS_RANDOM_ENABLE_MASK |
				FMT_BIT_DEPTH_CONTROL__FMT_RGB_RANDOM_ENABLE_MASK |
				FMT_BIT_DEPTH_CONTROL__FMT_SPATIAL_DITHER_EN_MASK |
				(1 << FMT_BIT_DEPTH_CONTROL__FMT_SPATIAL_DITHER_DEPTH__SHIFT));
		else
			tmp |= (FMT_BIT_DEPTH_CONTROL__FMT_TRUNCATE_EN_MASK |
				(1 << FMT_BIT_DEPTH_CONTROL__FMT_TRUNCATE_DEPTH__SHIFT));
		break;
	case 10:
		if (dither == AMDGPU_FMT_DITHER_ENABLE)
			/* XXX sort out optimal dither settings */
			tmp |= (FMT_BIT_DEPTH_CONTROL__FMT_FRAME_RANDOM_ENABLE_MASK |
				FMT_BIT_DEPTH_CONTROL__FMT_HIGHPASS_RANDOM_ENABLE_MASK |
				FMT_BIT_DEPTH_CONTROL__FMT_RGB_RANDOM_ENABLE_MASK |
				FMT_BIT_DEPTH_CONTROL__FMT_SPATIAL_DITHER_EN_MASK |
				(2 << FMT_BIT_DEPTH_CONTROL__FMT_SPATIAL_DITHER_DEPTH__SHIFT));
		else
			tmp |= (FMT_BIT_DEPTH_CONTROL__FMT_TRUNCATE_EN_MASK |
				(2 << FMT_BIT_DEPTH_CONTROL__FMT_TRUNCATE_DEPTH__SHIFT));
		break;
	default:
		/* not needed */
		break;
	}

	WREG32(mmFMT_BIT_DEPTH_CONTROL + amdgpu_crtc->crtc_offset, tmp);
}


/* display watermark setup */
/**
 * dce_v8_0_line_buffer_adjust - Set up the line buffer
 *
 * @adev: amdgpu_device pointer
 * @amdgpu_crtc: the selected display controller
 * @mode: the current display mode on the selected display
 * controller
 *
 * Set up the line buffer allocation for
 * the selected display controller (CIK).
 * Returns the line buffer size in pixels.
 */
static u32 dce_v8_0_line_buffer_adjust(struct amdgpu_device *adev,
				       struct amdgpu_crtc *amdgpu_crtc,
				       struct drm_display_mode *mode)
{
	u32 tmp, buffer_alloc, i;
	u32 pipe_offset = amdgpu_crtc->crtc_id * 0x8;
	/*
	 * Line Buffer Setup
	 * There are 6 line buffers, one for each display controller.
	 * There are 3 partitions per LB. Select the number of partitions
	 * to enable based on the display width. For display widths larger
	 * than 4096, you need to use 2 display controllers and combine
	 * them using the stereo blender.
	 */
	if (amdgpu_crtc->base.enabled && mode) {
		if (mode->crtc_hdisplay < 1920) {
			tmp = 1;
			buffer_alloc = 2;
		} else if (mode->crtc_hdisplay < 2560) {
			tmp = 2;
			buffer_alloc = 2;
		} else if (mode->crtc_hdisplay < 4096) {
			tmp = 0;
			buffer_alloc = (adev->flags & AMD_IS_APU) ? 2 : 4;
		} else {
			DRM_DEBUG_KMS("Mode too big for LB!\n");
			tmp = 0;
			buffer_alloc = (adev->flags & AMD_IS_APU) ?
				2 : 4;
		}
	} else {
		tmp = 1;
		buffer_alloc = 0;
	}

	WREG32(mmLB_MEMORY_CTRL + amdgpu_crtc->crtc_offset,
	       (tmp << LB_MEMORY_CTRL__LB_MEMORY_CONFIG__SHIFT) |
	       (0x6B0 << LB_MEMORY_CTRL__LB_MEMORY_SIZE__SHIFT));

	WREG32(mmPIPE0_DMIF_BUFFER_CONTROL + pipe_offset,
	       (buffer_alloc << PIPE0_DMIF_BUFFER_CONTROL__DMIF_BUFFERS_ALLOCATED__SHIFT));
	for (i = 0; i < adev->usec_timeout; i++) {
		if (RREG32(mmPIPE0_DMIF_BUFFER_CONTROL + pipe_offset) &
		    PIPE0_DMIF_BUFFER_CONTROL__DMIF_BUFFERS_ALLOCATION_COMPLETED_MASK)
			break;
		udelay(1);
	}

	if (amdgpu_crtc->base.enabled && mode) {
		switch (tmp) {
		case 0:
		default:
			return 4096 * 2;
		case 1:
			return 1920 * 2;
		case 2:
			return 2560 * 2;
		}
	}

	/* controller not enabled, so no lb used */
	return 0;
}

/**
 * cik_get_number_of_dram_channels - get the number of dram channels
 *
 * @adev: amdgpu_device pointer
 *
 * Look up the number of video ram channels (CIK).
 * Used for display watermark bandwidth calculations
 * Returns the number of dram channels
 */
static u32 cik_get_number_of_dram_channels(struct amdgpu_device *adev)
{
	u32 tmp = RREG32(mmMC_SHARED_CHMAP);

	switch ((tmp & MC_SHARED_CHMAP__NOOFCHAN_MASK) >> MC_SHARED_CHMAP__NOOFCHAN__SHIFT) {
	case 0:
	default:
		return 1;
	case 1:
		return 2;
	case 2:
		return 4;
	case 3:
		return 8;
	case 4:
		return 3;
	case 5:
		return 6;
	case 6:
		return 10;
	case 7:
		return 12;
	case 8:
		return 16;
	}
}

struct dce8_wm_params {
	u32 dram_channels; /* number of dram channels */
	u32 yclk; /* bandwidth per dram data pin in kHz */
	u32 sclk; /* engine clock in kHz */
	u32 disp_clk; /* display clock in kHz */
	u32 src_width; /* viewport width */
	u32 active_time; /* active display time in ns */
	u32 blank_time; /* blank time in ns */
	bool interlaced; /* mode is interlaced */
	fixed20_12 vsc; /* vertical scale ratio */
	u32 num_heads; /* number of active crtcs */
	u32 bytes_per_pixel; /* bytes per pixel display + overlay */
	u32 lb_size; /* line buffer allocated to pipe */
	u32 vtaps; /* vertical scaler taps */
};

/**
 * dce_v8_0_dram_bandwidth - get the dram bandwidth
 *
 * @wm: watermark calculation data
 *
 * Calculate the raw dram bandwidth (CIK).
 * Used for display watermark bandwidth calculations
 * Returns the dram bandwidth in MBytes/s
 */
static u32 dce_v8_0_dram_bandwidth(struct dce8_wm_params *wm)
{
	/* Calculate raw DRAM Bandwidth */
	fixed20_12 dram_efficiency; /* 0.7 */
	fixed20_12 yclk, dram_channels, bandwidth;
	fixed20_12 a;

	a.full = dfixed_const(1000);
	yclk.full = dfixed_const(wm->yclk);
	yclk.full = dfixed_div(yclk, a);
	dram_channels.full = dfixed_const(wm->dram_channels * 4);
	a.full = dfixed_const(10);
	dram_efficiency.full = dfixed_const(7);
	dram_efficiency.full = dfixed_div(dram_efficiency, a);
	bandwidth.full = dfixed_mul(dram_channels, yclk);
	bandwidth.full = dfixed_mul(bandwidth, dram_efficiency);

	return dfixed_trunc(bandwidth);
}

/**
 * dce_v8_0_dram_bandwidth_for_display - get the dram bandwidth for display
 *
 * @wm: watermark calculation data
 *
 * Calculate the dram bandwidth used for display (CIK).
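 * (roughly (yclk / 1000) * dram channels * 4 * a worst-case 0.3 display
 * allocation, mirroring the fixed-point math below)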
 * Used for display watermark bandwidth calculations
 * Returns the dram bandwidth for display in MBytes/s
 */
static u32 dce_v8_0_dram_bandwidth_for_display(struct dce8_wm_params *wm)
{
	/* Calculate DRAM Bandwidth and the part allocated to display. */
	fixed20_12 disp_dram_allocation; /* 0.3 to 0.7 */
	fixed20_12 yclk, dram_channels, bandwidth;
	fixed20_12 a;

	a.full = dfixed_const(1000);
	yclk.full = dfixed_const(wm->yclk);
	yclk.full = dfixed_div(yclk, a);
	dram_channels.full = dfixed_const(wm->dram_channels * 4);
	a.full = dfixed_const(10);
	disp_dram_allocation.full = dfixed_const(3); /* XXX worst case value 0.3 */
	disp_dram_allocation.full = dfixed_div(disp_dram_allocation, a);
	bandwidth.full = dfixed_mul(dram_channels, yclk);
	bandwidth.full = dfixed_mul(bandwidth, disp_dram_allocation);

	return dfixed_trunc(bandwidth);
}

/**
 * dce_v8_0_data_return_bandwidth - get the data return bandwidth
 *
 * @wm: watermark calculation data
 *
 * Calculate the data return bandwidth used for display (CIK).
 * Used for display watermark bandwidth calculations
 * Returns the data return bandwidth in MBytes/s
 */
static u32 dce_v8_0_data_return_bandwidth(struct dce8_wm_params *wm)
{
	/* Calculate the display Data return Bandwidth */
	fixed20_12 return_efficiency; /* 0.8 */
	fixed20_12 sclk, bandwidth;
	fixed20_12 a;

	a.full = dfixed_const(1000);
	sclk.full = dfixed_const(wm->sclk);
	sclk.full = dfixed_div(sclk, a);
	a.full = dfixed_const(10);
	return_efficiency.full = dfixed_const(8);
	return_efficiency.full = dfixed_div(return_efficiency, a);
	a.full = dfixed_const(32);
	bandwidth.full = dfixed_mul(a, sclk);
	bandwidth.full = dfixed_mul(bandwidth, return_efficiency);

	return dfixed_trunc(bandwidth);
}

/**
 * dce_v8_0_dmif_request_bandwidth - get the dmif bandwidth
 *
 * @wm: watermark calculation data
 *
 * Calculate the dmif bandwidth used for display (CIK).
 * Used for display watermark bandwidth calculations
 * Returns the dmif bandwidth in MBytes/s
 */
static u32 dce_v8_0_dmif_request_bandwidth(struct dce8_wm_params *wm)
{
	/* Calculate the DMIF Request Bandwidth */
	fixed20_12 disp_clk_request_efficiency; /* 0.8 */
	fixed20_12 disp_clk, bandwidth;
	fixed20_12 a, b;

	a.full = dfixed_const(1000);
	disp_clk.full = dfixed_const(wm->disp_clk);
	disp_clk.full = dfixed_div(disp_clk, a);
	a.full = dfixed_const(32);
	b.full = dfixed_mul(a, disp_clk);

	a.full = dfixed_const(10);
	disp_clk_request_efficiency.full = dfixed_const(8);
	disp_clk_request_efficiency.full = dfixed_div(disp_clk_request_efficiency, a);

	bandwidth.full = dfixed_mul(b, disp_clk_request_efficiency);

	return dfixed_trunc(bandwidth);
}

/**
 * dce_v8_0_available_bandwidth - get the min available bandwidth
 *
 * @wm: watermark calculation data
 *
 * Calculate the min available bandwidth used for display (CIK).
 * Used for display watermark bandwidth calculations
 * Returns the min available bandwidth in MBytes/s
 */
static u32 dce_v8_0_available_bandwidth(struct dce8_wm_params *wm)
{
	/* Calculate the Available bandwidth. Display can use this temporarily but not in average.
	 */
	u32 dram_bandwidth = dce_v8_0_dram_bandwidth(wm);
	u32 data_return_bandwidth = dce_v8_0_data_return_bandwidth(wm);
	u32 dmif_req_bandwidth = dce_v8_0_dmif_request_bandwidth(wm);

	return min(dram_bandwidth, min(data_return_bandwidth, dmif_req_bandwidth));
}

/**
 * dce_v8_0_average_bandwidth - get the average available bandwidth
 *
 * @wm: watermark calculation data
 *
 * Calculate the average available bandwidth used for display (CIK).
 * Used for display watermark bandwidth calculations
 * Returns the average available bandwidth in MBytes/s
 */
static u32 dce_v8_0_average_bandwidth(struct dce8_wm_params *wm)
{
	/* Calculate the display mode Average Bandwidth
	 * DisplayMode should contain the source and destination dimensions,
	 * timing, etc.
	 */
	fixed20_12 bpp;
	fixed20_12 line_time;
	fixed20_12 src_width;
	fixed20_12 bandwidth;
	fixed20_12 a;

	a.full = dfixed_const(1000);
	line_time.full = dfixed_const(wm->active_time + wm->blank_time);
	line_time.full = dfixed_div(line_time, a);
	bpp.full = dfixed_const(wm->bytes_per_pixel);
	src_width.full = dfixed_const(wm->src_width);
	bandwidth.full = dfixed_mul(src_width, bpp);
	bandwidth.full = dfixed_mul(bandwidth, wm->vsc);
	bandwidth.full = dfixed_div(bandwidth, line_time);

	return dfixed_trunc(bandwidth);
}

/**
 * dce_v8_0_latency_watermark - get the latency watermark
 *
 * @wm: watermark calculation data
 *
 * Calculate the latency watermark (CIK).
 * Used for display watermark bandwidth calculations
 * Returns the latency watermark in ns
 */
static u32 dce_v8_0_latency_watermark(struct dce8_wm_params *wm)
{
	/* First calculate the latency in ns */
	u32 mc_latency = 2000; /* 2000 ns.
			       */
	u32 available_bandwidth = dce_v8_0_available_bandwidth(wm);
	u32 worst_chunk_return_time = (512 * 8 * 1000) / available_bandwidth;
	u32 cursor_line_pair_return_time = (128 * 4 * 1000) / available_bandwidth;
	u32 dc_latency = 40000000 / wm->disp_clk; /* dc pipe latency */
	u32 other_heads_data_return_time = ((wm->num_heads + 1) * worst_chunk_return_time) +
		(wm->num_heads * cursor_line_pair_return_time);
	u32 latency = mc_latency + other_heads_data_return_time + dc_latency;
	u32 max_src_lines_per_dst_line, lb_fill_bw, line_fill_time;
	u32 tmp, dmif_size = 12288;
	fixed20_12 a, b, c;

	if (wm->num_heads == 0)
		return 0;

	a.full = dfixed_const(2);
	b.full = dfixed_const(1);
	if ((wm->vsc.full > a.full) ||
	    ((wm->vsc.full > b.full) && (wm->vtaps >= 3)) ||
	    (wm->vtaps >= 5) ||
	    ((wm->vsc.full >= a.full) && wm->interlaced))
		max_src_lines_per_dst_line = 4;
	else
		max_src_lines_per_dst_line = 2;

	a.full = dfixed_const(available_bandwidth);
	b.full = dfixed_const(wm->num_heads);
	a.full = dfixed_div(a, b);

	b.full = dfixed_const(mc_latency + 512);
	c.full = dfixed_const(wm->disp_clk);
	b.full = dfixed_div(b, c);

	c.full = dfixed_const(dmif_size);
	b.full = dfixed_div(c, b);

	tmp = min(dfixed_trunc(a), dfixed_trunc(b));

	b.full = dfixed_const(1000);
	c.full = dfixed_const(wm->disp_clk);
	b.full = dfixed_div(c, b);
	c.full = dfixed_const(wm->bytes_per_pixel);
	b.full = dfixed_mul(b, c);

	lb_fill_bw = min(tmp, dfixed_trunc(b));

	a.full = dfixed_const(max_src_lines_per_dst_line * wm->src_width * wm->bytes_per_pixel);
	b.full = dfixed_const(1000);
	c.full = dfixed_const(lb_fill_bw);
	b.full = dfixed_div(c, b);
	a.full = dfixed_div(a, b);
	line_fill_time = dfixed_trunc(a);

	if (line_fill_time < wm->active_time)
		return latency;
	else
		return latency + (line_fill_time - wm->active_time);

}

/**
 * dce_v8_0_average_bandwidth_vs_dram_bandwidth_for_display - check
 * average and available dram bandwidth
 *
 * @wm: watermark calculation data
 *
 * Check if the display average bandwidth fits in the display
 * dram bandwidth (CIK).
 * Used for display watermark bandwidth calculations
 * Returns true if the display fits, false if not.
 */
static bool dce_v8_0_average_bandwidth_vs_dram_bandwidth_for_display(struct dce8_wm_params *wm)
{
	if (dce_v8_0_average_bandwidth(wm) <=
	    (dce_v8_0_dram_bandwidth_for_display(wm) / wm->num_heads))
		return true;
	else
		return false;
}

/**
 * dce_v8_0_average_bandwidth_vs_available_bandwidth - check
 * average and available bandwidth
 *
 * @wm: watermark calculation data
 *
 * Check if the display average bandwidth fits in the display
 * available bandwidth (CIK).
 * Used for display watermark bandwidth calculations
 * Returns true if the display fits, false if not.
 */
static bool dce_v8_0_average_bandwidth_vs_available_bandwidth(struct dce8_wm_params *wm)
{
	if (dce_v8_0_average_bandwidth(wm) <=
	    (dce_v8_0_available_bandwidth(wm) / wm->num_heads))
		return true;
	else
		return false;
}

/**
 * dce_v8_0_check_latency_hiding - check latency hiding
 *
 * @wm: watermark calculation data
 *
 * Check latency hiding (CIK).
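 * (i.e. whether enough lines fit in the line buffer to cover the latency
 * watermark computed above)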
 * Used for display watermark bandwidth calculations
 * Returns true if the display fits, false if not.
 */
static bool dce_v8_0_check_latency_hiding(struct dce8_wm_params *wm)
{
	u32 lb_partitions = wm->lb_size / wm->src_width;
	u32 line_time = wm->active_time + wm->blank_time;
	u32 latency_tolerant_lines;
	u32 latency_hiding;
	fixed20_12 a;

	a.full = dfixed_const(1);
	if (wm->vsc.full > a.full)
		latency_tolerant_lines = 1;
	else {
		if (lb_partitions <= (wm->vtaps + 1))
			latency_tolerant_lines = 1;
		else
			latency_tolerant_lines = 2;
	}

	latency_hiding = (latency_tolerant_lines * line_time + wm->blank_time);

	if (dce_v8_0_latency_watermark(wm) <= latency_hiding)
		return true;
	else
		return false;
}

/**
 * dce_v8_0_program_watermarks - program display watermarks
 *
 * @adev: amdgpu_device pointer
 * @amdgpu_crtc: the selected display controller
 * @lb_size: line buffer size
 * @num_heads: number of display controllers in use
 *
 * Calculate and program the display watermarks for the
 * selected display controller (CIK).
 */
static void dce_v8_0_program_watermarks(struct amdgpu_device *adev,
					struct amdgpu_crtc *amdgpu_crtc,
					u32 lb_size, u32 num_heads)
{
	struct drm_display_mode *mode = &amdgpu_crtc->base.mode;
	struct dce8_wm_params wm_low, wm_high;
	u32 pixel_period;
	u32 line_time = 0;
	u32 latency_watermark_a = 0, latency_watermark_b = 0;
	u32 tmp, wm_mask;

	if (amdgpu_crtc->base.enabled && num_heads && mode) {
		pixel_period = 1000000 / (u32)mode->clock;
		line_time = min((u32)mode->crtc_htotal * pixel_period, (u32)65535);

		/* watermark for high clocks */
		if (adev->pm.dpm_enabled) {
			wm_high.yclk =
				amdgpu_dpm_get_mclk(adev, false) * 10;
			wm_high.sclk =
				amdgpu_dpm_get_sclk(adev, false) * 10;
		} else {
			wm_high.yclk = adev->pm.current_mclk * 10;
			wm_high.sclk = adev->pm.current_sclk * 10;
		}

		wm_high.disp_clk = mode->clock;
		wm_high.src_width = mode->crtc_hdisplay;
		wm_high.active_time = mode->crtc_hdisplay * pixel_period;
		wm_high.blank_time = line_time - wm_high.active_time;
		wm_high.interlaced = false;
		if (mode->flags & DRM_MODE_FLAG_INTERLACE)
			wm_high.interlaced = true;
		wm_high.vsc = amdgpu_crtc->vsc;
		wm_high.vtaps = 1;
		if (amdgpu_crtc->rmx_type != RMX_OFF)
			wm_high.vtaps = 2;
		wm_high.bytes_per_pixel = 4; /* XXX: get this from fb config */
		wm_high.lb_size = lb_size;
		wm_high.dram_channels = cik_get_number_of_dram_channels(adev);
		wm_high.num_heads = num_heads;

		/* set for high clocks */
		latency_watermark_a = min(dce_v8_0_latency_watermark(&wm_high), (u32)65535);

		/* possibly force display priority to high */
		/* should really do this at mode validation time...
		 */
		if (!dce_v8_0_average_bandwidth_vs_dram_bandwidth_for_display(&wm_high) ||
		    !dce_v8_0_average_bandwidth_vs_available_bandwidth(&wm_high) ||
		    !dce_v8_0_check_latency_hiding(&wm_high) ||
		    (adev->mode_info.disp_priority == 2)) {
			DRM_DEBUG_KMS("force priority to high\n");
		}

		/* watermark for low clocks */
		if (adev->pm.dpm_enabled) {
			wm_low.yclk =
				amdgpu_dpm_get_mclk(adev, true) * 10;
			wm_low.sclk =
				amdgpu_dpm_get_sclk(adev, true) * 10;
		} else {
			wm_low.yclk = adev->pm.current_mclk * 10;
			wm_low.sclk = adev->pm.current_sclk * 10;
		}

		wm_low.disp_clk = mode->clock;
		wm_low.src_width = mode->crtc_hdisplay;
		wm_low.active_time = mode->crtc_hdisplay * pixel_period;
		wm_low.blank_time = line_time - wm_low.active_time;
		wm_low.interlaced = false;
		if (mode->flags & DRM_MODE_FLAG_INTERLACE)
			wm_low.interlaced = true;
		wm_low.vsc = amdgpu_crtc->vsc;
		wm_low.vtaps = 1;
		if (amdgpu_crtc->rmx_type != RMX_OFF)
			wm_low.vtaps = 2;
		wm_low.bytes_per_pixel = 4; /* XXX: get this from fb config */
		wm_low.lb_size = lb_size;
		wm_low.dram_channels = cik_get_number_of_dram_channels(adev);
		wm_low.num_heads = num_heads;

		/* set for low clocks */
		latency_watermark_b = min(dce_v8_0_latency_watermark(&wm_low), (u32)65535);

		/* possibly force display priority to high */
		/* should really do this at mode validation time... */
		if (!dce_v8_0_average_bandwidth_vs_dram_bandwidth_for_display(&wm_low) ||
		    !dce_v8_0_average_bandwidth_vs_available_bandwidth(&wm_low) ||
		    !dce_v8_0_check_latency_hiding(&wm_low) ||
		    (adev->mode_info.disp_priority == 2)) {
			DRM_DEBUG_KMS("force priority to high\n");
		}
	}

	/* select wm A */
	wm_mask = RREG32(mmDPG_WATERMARK_MASK_CONTROL + amdgpu_crtc->crtc_offset);
	tmp = wm_mask;
	tmp &= ~(3 << DPG_WATERMARK_MASK_CONTROL__URGENCY_WATERMARK_MASK__SHIFT);
	tmp |= (1 << DPG_WATERMARK_MASK_CONTROL__URGENCY_WATERMARK_MASK__SHIFT);
	WREG32(mmDPG_WATERMARK_MASK_CONTROL + amdgpu_crtc->crtc_offset, tmp);
	WREG32(mmDPG_PIPE_URGENCY_CONTROL + amdgpu_crtc->crtc_offset,
	       ((latency_watermark_a << DPG_PIPE_URGENCY_CONTROL__URGENCY_LOW_WATERMARK__SHIFT) |
		(line_time << DPG_PIPE_URGENCY_CONTROL__URGENCY_HIGH_WATERMARK__SHIFT)));
	/* select wm B */
	tmp = RREG32(mmDPG_WATERMARK_MASK_CONTROL + amdgpu_crtc->crtc_offset);
	tmp &= ~(3 << DPG_WATERMARK_MASK_CONTROL__URGENCY_WATERMARK_MASK__SHIFT);
	tmp |= (2 << DPG_WATERMARK_MASK_CONTROL__URGENCY_WATERMARK_MASK__SHIFT);
	WREG32(mmDPG_WATERMARK_MASK_CONTROL + amdgpu_crtc->crtc_offset, tmp);
	WREG32(mmDPG_PIPE_URGENCY_CONTROL + amdgpu_crtc->crtc_offset,
	       ((latency_watermark_b << DPG_PIPE_URGENCY_CONTROL__URGENCY_LOW_WATERMARK__SHIFT) |
		(line_time << DPG_PIPE_URGENCY_CONTROL__URGENCY_HIGH_WATERMARK__SHIFT)));
	/* restore original selection */
	WREG32(mmDPG_WATERMARK_MASK_CONTROL + amdgpu_crtc->crtc_offset, wm_mask);

	/* save values for DPM */
	amdgpu_crtc->line_time = line_time;
	amdgpu_crtc->wm_high = latency_watermark_a;
	amdgpu_crtc->wm_low = latency_watermark_b;
}

/**
 * dce_v8_0_bandwidth_update - program display watermarks
 *
 * @adev: amdgpu_device pointer
 *
 * Calculate and program the display watermarks and line
 * buffer allocation (CIK).
 */
static void dce_v8_0_bandwidth_update(struct amdgpu_device *adev)
{
	struct drm_display_mode *mode = NULL;
	u32 num_heads = 0, lb_size;
	int i;

	amdgpu_update_display_priority(adev);

	for (i = 0; i < adev->mode_info.num_crtc; i++) {
		if (adev->mode_info.crtcs[i]->base.enabled)
			num_heads++;
	}
	for (i = 0; i < adev->mode_info.num_crtc; i++) {
		mode = &adev->mode_info.crtcs[i]->base.mode;
		lb_size = dce_v8_0_line_buffer_adjust(adev, adev->mode_info.crtcs[i], mode);
		dce_v8_0_program_watermarks(adev, adev->mode_info.crtcs[i],
					    lb_size, num_heads);
	}
}

static void dce_v8_0_audio_get_connected_pins(struct amdgpu_device *adev)
{
	int i;
	u32 offset, tmp;

	for (i = 0; i < adev->mode_info.audio.num_pins; i++) {
		offset = adev->mode_info.audio.pin[i].offset;
		tmp = RREG32_AUDIO_ENDPT(offset,
					 ixAZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT);
		if (((tmp &
		      AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__PORT_CONNECTIVITY_MASK) >>
		     AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__PORT_CONNECTIVITY__SHIFT) == 1)
			adev->mode_info.audio.pin[i].connected = false;
		else
			adev->mode_info.audio.pin[i].connected = true;
	}
}

static struct amdgpu_audio_pin *dce_v8_0_audio_get_pin(struct amdgpu_device *adev)
{
	int i;

	dce_v8_0_audio_get_connected_pins(adev);

	for (i = 0; i < adev->mode_info.audio.num_pins; i++) {
		if (adev->mode_info.audio.pin[i].connected)
			return &adev->mode_info.audio.pin[i];
	}
	DRM_ERROR("No connected audio pins found!\n");
	return NULL;
}

static void dce_v8_0_afmt_audio_select_pin(struct drm_encoder *encoder)
{
	struct amdgpu_device *adev = encoder->dev->dev_private;
	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
	struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
	u32 offset;

	if (!dig || !dig->afmt || !dig->afmt->pin)
		return;

	offset = dig->afmt->offset;

	WREG32(mmAFMT_AUDIO_SRC_CONTROL + offset,
	       (dig->afmt->pin->id << AFMT_AUDIO_SRC_CONTROL__AFMT_AUDIO_SRC_SELECT__SHIFT));
}

static void dce_v8_0_audio_write_latency_fields(struct drm_encoder *encoder,
						struct drm_display_mode *mode)
{
	struct amdgpu_device *adev = encoder->dev->dev_private;
	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
	struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
	struct drm_connector *connector;
	struct amdgpu_connector *amdgpu_connector = NULL;
	u32 tmp = 0, offset;

	if (!dig || !dig->afmt || !dig->afmt->pin)
		return;

	offset = dig->afmt->pin->offset;

	list_for_each_entry(connector, &encoder->dev->mode_config.connector_list, head) {
		if (connector->encoder == encoder) {
			amdgpu_connector = to_amdgpu_connector(connector);
			break;
		}
	}

	if (!amdgpu_connector) {
		DRM_ERROR("Couldn't find encoder's connector\n");
		return;
	}

	if (mode->flags & DRM_MODE_FLAG_INTERLACE) {
		if (connector->latency_present[1])
			tmp =
			(connector->video_latency[1] <<
			 AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC__VIDEO_LIPSYNC__SHIFT) |
			(connector->audio_latency[1] <<
			 AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC__AUDIO_LIPSYNC__SHIFT);
		else
			tmp =
			(0 <<
			 AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC__VIDEO_LIPSYNC__SHIFT) |
			(0 <<
			 AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC__AUDIO_LIPSYNC__SHIFT);
	} else {
		if (connector->latency_present[0])
			tmp =
			(connector->video_latency[0] <<
			 AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC__VIDEO_LIPSYNC__SHIFT) |
			(connector->audio_latency[0] <<
			 AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC__AUDIO_LIPSYNC__SHIFT);
		else
			tmp =
			(0 <<
			 AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC__VIDEO_LIPSYNC__SHIFT) |
			(0 <<
			 AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC__AUDIO_LIPSYNC__SHIFT);

	}
	WREG32_AUDIO_ENDPT(offset, ixAZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC, tmp);
}

static void dce_v8_0_audio_write_speaker_allocation(struct drm_encoder *encoder)
{
	struct amdgpu_device *adev = encoder->dev->dev_private;
	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
	struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
	struct drm_connector *connector;
	struct amdgpu_connector *amdgpu_connector = NULL;
	u32 offset, tmp;
	u8 *sadb = NULL;
	int sad_count;

	if (!dig || !dig->afmt || !dig->afmt->pin)
		return;

	offset = dig->afmt->pin->offset;

	list_for_each_entry(connector, &encoder->dev->mode_config.connector_list, head) {
		if (connector->encoder == encoder) {
			amdgpu_connector = to_amdgpu_connector(connector);
			break;
		}
	}

	if (!amdgpu_connector) {
		DRM_ERROR("Couldn't find encoder's connector\n");
		return;
	}

	sad_count = drm_edid_to_speaker_allocation(amdgpu_connector_edid(connector), &sadb);
	if (sad_count < 0) {
		DRM_ERROR("Couldn't read Speaker Allocation Data Block: %d\n", sad_count);
		sad_count = 0;
	}

	/* program the speaker allocation */
	tmp = RREG32_AUDIO_ENDPT(offset, ixAZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER);
	tmp &= ~(AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__DP_CONNECTION_MASK |
		 AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__SPEAKER_ALLOCATION_MASK);
	/* set HDMI mode */
	tmp |= AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__HDMI_CONNECTION_MASK;
	if (sad_count)
		tmp |= (sadb[0] << AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__SPEAKER_ALLOCATION__SHIFT);
	else
		tmp |= (5 << AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__SPEAKER_ALLOCATION__SHIFT); /* stereo */
	WREG32_AUDIO_ENDPT(offset, ixAZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER, tmp);

	kfree(sadb);
}

static void dce_v8_0_audio_write_sad_regs(struct drm_encoder *encoder)
{
	struct amdgpu_device *adev = encoder->dev->dev_private;
	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
	struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
	u32 offset;
	struct drm_connector *connector;
	struct amdgpu_connector *amdgpu_connector = NULL;
	struct cea_sad *sads;
	int i, sad_count;

	static const u16 eld_reg_to_type[][2] = {
		{ ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0, HDMI_AUDIO_CODING_TYPE_PCM },
		{ ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR1, HDMI_AUDIO_CODING_TYPE_AC3 },
		{ ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR2, HDMI_AUDIO_CODING_TYPE_MPEG1 },
		{ ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR3, HDMI_AUDIO_CODING_TYPE_MP3 },
		{ ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR4, HDMI_AUDIO_CODING_TYPE_MPEG2 },
		{
		  ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR5, HDMI_AUDIO_CODING_TYPE_AAC_LC },
		{ ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR6, HDMI_AUDIO_CODING_TYPE_DTS },
		{ ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR7, HDMI_AUDIO_CODING_TYPE_ATRAC },
		{ ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR9, HDMI_AUDIO_CODING_TYPE_EAC3 },
		{ ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR10, HDMI_AUDIO_CODING_TYPE_DTS_HD },
		{ ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR11, HDMI_AUDIO_CODING_TYPE_MLP },
		{ ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR13, HDMI_AUDIO_CODING_TYPE_WMA_PRO },
	};

	if (!dig || !dig->afmt || !dig->afmt->pin)
		return;

	offset = dig->afmt->pin->offset;

	list_for_each_entry(connector, &encoder->dev->mode_config.connector_list, head) {
		if (connector->encoder == encoder) {
			amdgpu_connector = to_amdgpu_connector(connector);
			break;
		}
	}

	if (!amdgpu_connector) {
		DRM_ERROR("Couldn't find encoder's connector\n");
		return;
	}

	sad_count = drm_edid_to_sad(amdgpu_connector_edid(connector), &sads);
	if (sad_count <= 0) {
		DRM_ERROR("Couldn't read SADs: %d\n", sad_count);
		return;
	}
	BUG_ON(!sads);

	for (i = 0; i < ARRAY_SIZE(eld_reg_to_type); i++) {
		u32 value = 0;
		u8 stereo_freqs = 0;
		int max_channels = -1;
		int j;

		for (j = 0; j < sad_count; j++) {
			struct cea_sad *sad = &sads[j];

			if (sad->format == eld_reg_to_type[i][1]) {
				if (sad->channels > max_channels) {
					value = (sad->channels <<
						 AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0__MAX_CHANNELS__SHIFT) |
						(sad->byte2 <<
						 AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0__DESCRIPTOR_BYTE_2__SHIFT) |
						(sad->freq <<
						 AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0__SUPPORTED_FREQUENCIES__SHIFT);
					max_channels = sad->channels;
				}

				if (sad->format == HDMI_AUDIO_CODING_TYPE_PCM)
					stereo_freqs |= sad->freq;
				else
					break;
			}
		}

		value |= (stereo_freqs <<
			  AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0__SUPPORTED_FREQUENCIES_STEREO__SHIFT);

		WREG32_AUDIO_ENDPT(offset, eld_reg_to_type[i][0], value);
	}

	kfree(sads);
}

static void dce_v8_0_audio_enable(struct amdgpu_device *adev,
				  struct amdgpu_audio_pin *pin,
				  bool enable)
{
	if (!pin)
		return;

	WREG32_AUDIO_ENDPT(pin->offset, ixAZALIA_F0_CODEC_PIN_CONTROL_HOT_PLUG_CONTROL,
			   enable ?
			   AZALIA_F0_CODEC_PIN_CONTROL_HOT_PLUG_CONTROL__AUDIO_ENABLED_MASK : 0);
}

static const u32 pin_offsets[7] =
{
	(0x1780 - 0x1780),
	(0x1786 - 0x1780),
	(0x178c - 0x1780),
	(0x1792 - 0x1780),
	(0x1798 - 0x1780),
	(0x179d - 0x1780),
	(0x17a4 - 0x1780),
};

static int dce_v8_0_audio_init(struct amdgpu_device *adev)
{
	int i;

	if (!amdgpu_audio)
		return 0;

	adev->mode_info.audio.enabled = true;

	if (adev->asic_type == CHIP_KAVERI) /* KV: 4 streams, 7 endpoints */
		adev->mode_info.audio.num_pins = 7;
	else if ((adev->asic_type == CHIP_KABINI) ||
		 (adev->asic_type == CHIP_MULLINS)) /* KB/ML: 2 streams, 3 endpoints */
		adev->mode_info.audio.num_pins = 3;
	else if ((adev->asic_type == CHIP_BONAIRE) ||
		 (adev->asic_type == CHIP_HAWAII)) /* BN/HW: 6 streams, 7 endpoints */
		adev->mode_info.audio.num_pins = 7;
	else
		adev->mode_info.audio.num_pins = 3;

	for (i = 0; i < adev->mode_info.audio.num_pins; i++) {
		adev->mode_info.audio.pin[i].channels = -1;
		adev->mode_info.audio.pin[i].rate = -1;
		adev->mode_info.audio.pin[i].bits_per_sample = -1;
		adev->mode_info.audio.pin[i].status_bits = 0;
		adev->mode_info.audio.pin[i].category_code = 0;
		adev->mode_info.audio.pin[i].connected = false;
		adev->mode_info.audio.pin[i].offset = pin_offsets[i];
		adev->mode_info.audio.pin[i].id = i;
		/* disable audio. it will be set up later */
		/* XXX remove once we switch to ip funcs */
		dce_v8_0_audio_enable(adev, &adev->mode_info.audio.pin[i], false);
	}

	return 0;
}

static void dce_v8_0_audio_fini(struct amdgpu_device *adev)
{
	int i;

	if (!adev->mode_info.audio.enabled)
		return;

	for (i = 0; i < adev->mode_info.audio.num_pins; i++)
		dce_v8_0_audio_enable(adev, &adev->mode_info.audio.pin[i], false);

	adev->mode_info.audio.enabled = false;
}

/*
 * update the N and CTS parameters for a given pixel clock rate
 */
static void dce_v8_0_afmt_update_ACR(struct drm_encoder *encoder, uint32_t clock)
{
	struct drm_device *dev = encoder->dev;
	struct amdgpu_device *adev = dev->dev_private;
	struct amdgpu_afmt_acr acr = amdgpu_afmt_acr(clock);
	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
	struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
	uint32_t offset = dig->afmt->offset;

	WREG32(mmHDMI_ACR_32_0 + offset, (acr.cts_32khz << HDMI_ACR_44_0__HDMI_ACR_CTS_44__SHIFT));
	WREG32(mmHDMI_ACR_32_1 + offset, acr.n_32khz);

	WREG32(mmHDMI_ACR_44_0 + offset, (acr.cts_44_1khz << HDMI_ACR_44_0__HDMI_ACR_CTS_44__SHIFT));
	WREG32(mmHDMI_ACR_44_1 + offset, acr.n_44_1khz);

	WREG32(mmHDMI_ACR_48_0 + offset, (acr.cts_48khz << HDMI_ACR_48_0__HDMI_ACR_CTS_48__SHIFT));
	WREG32(mmHDMI_ACR_48_1 + offset, acr.n_48khz);
}

/*
 * build a HDMI Video Info Frame
 */
static void dce_v8_0_afmt_update_avi_infoframe(struct drm_encoder *encoder,
					       void *buffer, size_t size)
{
	struct drm_device *dev = encoder->dev;
	struct amdgpu_device *adev = dev->dev_private;
	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
	struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
	uint32_t offset = dig->afmt->offset;
	uint8_t *frame = buffer + 3;
	uint8_t *header = buffer;

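	/* The packed infoframe is copied into the AFMT_AVI_INFO registers
	 * four bytes at a time, low byte first; AVI_INFO3 additionally
	 * carries the infoframe version byte (header[1]) in its top byte.
	 */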
	WREG32(mmAFMT_AVI_INFO0 + offset,
		frame[0x0] | (frame[0x1] << 8) | (frame[0x2] << 16) | (frame[0x3] << 24));
	WREG32(mmAFMT_AVI_INFO1 + offset,
		frame[0x4] | (frame[0x5] << 8) | (frame[0x6] << 16) | (frame[0x7] << 24));
	WREG32(mmAFMT_AVI_INFO2 + offset,
		frame[0x8] | (frame[0x9] << 8) | (frame[0xA] << 16) | (frame[0xB] << 24));
	WREG32(mmAFMT_AVI_INFO3 + offset,
		frame[0xC] | (frame[0xD] << 8) | (header[1] << 24));
}

static void dce_v8_0_audio_set_dto(struct drm_encoder *encoder, u32 clock)
{
	struct drm_device *dev = encoder->dev;
	struct amdgpu_device *adev = dev->dev_private;
	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
	struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(encoder->crtc);
	u32 dto_phase = 24 * 1000;
	u32 dto_modulo = clock;

	if (!dig || !dig->afmt)
		return;

	/* XXX two dtos; generally use dto0 for hdmi */
	/* Express [24MHz / target pixel clock] as an exact rational
	 * number (ratio of two integers): DCCG_AUDIO_DTOx_PHASE is the
	 * numerator, DCCG_AUDIO_DTOx_MODULE is the denominator.
	 */
	WREG32(mmDCCG_AUDIO_DTO_SOURCE, (amdgpu_crtc->crtc_id << DCCG_AUDIO_DTO_SOURCE__DCCG_AUDIO_DTO0_SOURCE_SEL__SHIFT));
	WREG32(mmDCCG_AUDIO_DTO0_PHASE, dto_phase);
	WREG32(mmDCCG_AUDIO_DTO0_MODULE, dto_modulo);
}

/*
 * update the info frames with the data from the current display mode
 */
static void dce_v8_0_afmt_setmode(struct drm_encoder *encoder,
				  struct drm_display_mode *mode)
{
	struct drm_device *dev = encoder->dev;
	struct amdgpu_device *adev = dev->dev_private;
	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
	struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
	struct drm_connector *connector = amdgpu_get_connector_for_encoder(encoder);
	u8 buffer[HDMI_INFOFRAME_HEADER_SIZE + HDMI_AVI_INFOFRAME_SIZE];
	struct hdmi_avi_infoframe frame;
	uint32_t offset, val;
	ssize_t err;
	int bpc = 8;

	if (!dig || !dig->afmt)
		return;

	/* Silent, r600_hdmi_enable will raise WARN for us */
	if (!dig->afmt->enabled)
		return;
	offset = dig->afmt->offset;

	/* hdmi deep color mode general control packets setup, if bpc > 8 */
	if (encoder->crtc) {
		struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(encoder->crtc);
		bpc = amdgpu_crtc->bpc;
	}

	/* disable audio prior to setting up hw */
	dig->afmt->pin = dce_v8_0_audio_get_pin(adev);
	dce_v8_0_audio_enable(adev, dig->afmt->pin, false);

	dce_v8_0_audio_set_dto(encoder, mode->clock);

	WREG32(mmHDMI_VBI_PACKET_CONTROL + offset,
	       HDMI_VBI_PACKET_CONTROL__HDMI_NULL_SEND_MASK); /* send null packets when required */

	WREG32(mmAFMT_AUDIO_CRC_CONTROL + offset, 0x1000);

	val = RREG32(mmHDMI_CONTROL + offset);
	val &= ~HDMI_CONTROL__HDMI_DEEP_COLOR_ENABLE_MASK;
	val &= ~HDMI_CONTROL__HDMI_DEEP_COLOR_DEPTH_MASK;

	switch (bpc) {
	case 0:
	case 6:
	case 8:
	case 16:
	default:
		DRM_DEBUG("%s: Disabling hdmi deep color for %d bpc.\n",
			  connector->name, bpc);
		break;
	case 10:
		val |= HDMI_CONTROL__HDMI_DEEP_COLOR_ENABLE_MASK;
		val |= 1 << HDMI_CONTROL__HDMI_DEEP_COLOR_DEPTH__SHIFT;
		DRM_DEBUG("%s: Enabling hdmi deep color 30 for 10 bpc.\n",
			  connector->name);
		break;
	case 12:
		val |= HDMI_CONTROL__HDMI_DEEP_COLOR_ENABLE_MASK;
		val |= 2 << HDMI_CONTROL__HDMI_DEEP_COLOR_DEPTH__SHIFT;
		DRM_DEBUG("%s: Enabling hdmi deep color 36 for 12 bpc.\n",
			  connector->name);
		break;
	}

	WREG32(mmHDMI_CONTROL + offset, val);

	WREG32(mmHDMI_VBI_PACKET_CONTROL + offset,
	       HDMI_VBI_PACKET_CONTROL__HDMI_NULL_SEND_MASK | /* send null packets when required */
	       HDMI_VBI_PACKET_CONTROL__HDMI_GC_SEND_MASK | /* send general control packets */
	       HDMI_VBI_PACKET_CONTROL__HDMI_GC_CONT_MASK); /* send general control packets every frame */

	WREG32(mmHDMI_INFOFRAME_CONTROL0 + offset,
	       HDMI_INFOFRAME_CONTROL0__HDMI_AUDIO_INFO_SEND_MASK | /* enable audio info frames (frames won't be set until audio is enabled) */
	       HDMI_INFOFRAME_CONTROL0__HDMI_AUDIO_INFO_CONT_MASK); /* required for audio info values to be updated */

	WREG32(mmAFMT_INFOFRAME_CONTROL0 + offset,
	       AFMT_INFOFRAME_CONTROL0__AFMT_AUDIO_INFO_UPDATE_MASK); /* required for audio info values to be updated */

	WREG32(mmHDMI_INFOFRAME_CONTROL1 + offset,
	       (2 << HDMI_INFOFRAME_CONTROL1__HDMI_AUDIO_INFO_LINE__SHIFT)); /* anything other than 0 */

	WREG32(mmHDMI_GC + offset, 0); /* unset HDMI_GC_AVMUTE */

	WREG32(mmHDMI_AUDIO_PACKET_CONTROL + offset,
	       (1 << HDMI_AUDIO_PACKET_CONTROL__HDMI_AUDIO_DELAY_EN__SHIFT) | /* set the default audio delay */
	       (3 << HDMI_AUDIO_PACKET_CONTROL__HDMI_AUDIO_PACKETS_PER_LINE__SHIFT)); /* should be sufficient for all audio modes and small enough for all hblanks */

	WREG32(mmAFMT_AUDIO_PACKET_CONTROL + offset,
	       AFMT_AUDIO_PACKET_CONTROL__AFMT_60958_CS_UPDATE_MASK); /* allow 60958 channel status fields to be updated */

	/* fglrx clears sth in AFMT_AUDIO_PACKET_CONTROL2 here */

	if (bpc > 8)
		WREG32(mmHDMI_ACR_PACKET_CONTROL + offset,
		       HDMI_ACR_PACKET_CONTROL__HDMI_ACR_AUTO_SEND_MASK); /* allow hw to send ACR packets when required */
	else
		WREG32(mmHDMI_ACR_PACKET_CONTROL + offset,
		       HDMI_ACR_PACKET_CONTROL__HDMI_ACR_SOURCE_MASK | /* select SW CTS value */
		       HDMI_ACR_PACKET_CONTROL__HDMI_ACR_AUTO_SEND_MASK); /* allow hw to send ACR packets when required */

	dce_v8_0_afmt_update_ACR(encoder, mode->clock);

	WREG32(mmAFMT_60958_0 + offset,
	       (1 << AFMT_60958_0__AFMT_60958_CS_CHANNEL_NUMBER_L__SHIFT));

	WREG32(mmAFMT_60958_1 + offset,
	       (2 << AFMT_60958_1__AFMT_60958_CS_CHANNEL_NUMBER_R__SHIFT));

	WREG32(mmAFMT_60958_2 + offset,
	       (3 << AFMT_60958_2__AFMT_60958_CS_CHANNEL_NUMBER_2__SHIFT) |
	       (4 << AFMT_60958_2__AFMT_60958_CS_CHANNEL_NUMBER_3__SHIFT) |
	       (5 << AFMT_60958_2__AFMT_60958_CS_CHANNEL_NUMBER_4__SHIFT) |
	       (6 << AFMT_60958_2__AFMT_60958_CS_CHANNEL_NUMBER_5__SHIFT) |
	       (7 << AFMT_60958_2__AFMT_60958_CS_CHANNEL_NUMBER_6__SHIFT) |
	       (8 << AFMT_60958_2__AFMT_60958_CS_CHANNEL_NUMBER_7__SHIFT));

	dce_v8_0_audio_write_speaker_allocation(encoder);

	WREG32(mmAFMT_AUDIO_PACKET_CONTROL2 + offset,
	       (0xff << AFMT_AUDIO_PACKET_CONTROL2__AFMT_AUDIO_CHANNEL_ENABLE__SHIFT));

	dce_v8_0_afmt_audio_select_pin(encoder);
	dce_v8_0_audio_write_sad_regs(encoder);
	dce_v8_0_audio_write_latency_fields(encoder, mode);

	err = drm_hdmi_avi_infoframe_from_display_mode(&frame, mode);
	if (err < 0) {
		DRM_ERROR("failed to setup AVI infoframe: %zd\n", err);
		return;
	}

	err = hdmi_avi_infoframe_pack(&frame, buffer, sizeof(buffer));
	if (err < 0) {
		DRM_ERROR("failed to pack AVI infoframe: %zd\n", err);
		return;
	}

	dce_v8_0_afmt_update_avi_infoframe(encoder, buffer, sizeof(buffer));

	WREG32_OR(mmHDMI_INFOFRAME_CONTROL0 + offset,
		  HDMI_INFOFRAME_CONTROL0__HDMI_AVI_INFO_SEND_MASK | /* enable AVI info frames */
		  HDMI_INFOFRAME_CONTROL0__HDMI_AVI_INFO_CONT_MASK); /* send AVI info frames every frame/field */

	WREG32_P(mmHDMI_INFOFRAME_CONTROL1 + offset,
		 (2 << HDMI_INFOFRAME_CONTROL1__HDMI_AVI_INFO_LINE__SHIFT), /* anything other than 0 */
		 ~HDMI_INFOFRAME_CONTROL1__HDMI_AVI_INFO_LINE_MASK);

	WREG32_OR(mmAFMT_AUDIO_PACKET_CONTROL + offset,
		  AFMT_AUDIO_PACKET_CONTROL__AFMT_AUDIO_SAMPLE_SEND_MASK); /* send audio packets */

	/* it's unknown what these bits do exactly, but it's indeed quite useful for debugging */
	WREG32(mmAFMT_RAMP_CONTROL0 + offset, 0x00FFFFFF);
	WREG32(mmAFMT_RAMP_CONTROL1 + offset, 0x007FFFFF);
	WREG32(mmAFMT_RAMP_CONTROL2 + offset, 0x00000001);
	WREG32(mmAFMT_RAMP_CONTROL3 + offset, 0x00000001);

	/* enable audio after setting up hw */
	dce_v8_0_audio_enable(adev, dig->afmt->pin, true);
}

static void dce_v8_0_afmt_enable(struct drm_encoder *encoder, bool enable)
{
	struct drm_device *dev = encoder->dev;
	struct amdgpu_device *adev = dev->dev_private;
	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
	struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;

	if (!dig || !dig->afmt)
		return;

	/* Silent, r600_hdmi_enable will raise WARN for us */
	if (enable && dig->afmt->enabled)
		return;
	if (!enable && !dig->afmt->enabled)
		return;

	if (!enable && dig->afmt->pin) {
		dce_v8_0_audio_enable(adev, dig->afmt->pin, false);
		dig->afmt->pin = NULL;
	}

	dig->afmt->enabled = enable;

	DRM_DEBUG("%sabling AFMT interface @ 0x%04X for encoder 0x%x\n",
		  enable ?
"En" : "Dis", dig->afmt->offset, amdgpu_encoder->encoder_id); 1932 } 1933 1934 static void dce_v8_0_afmt_init(struct amdgpu_device *adev) 1935 { 1936 int i; 1937 1938 for (i = 0; i < adev->mode_info.num_dig; i++) 1939 adev->mode_info.afmt[i] = NULL; 1940 1941 /* DCE8 has audio blocks tied to DIG encoders */ 1942 for (i = 0; i < adev->mode_info.num_dig; i++) { 1943 adev->mode_info.afmt[i] = kzalloc(sizeof(struct amdgpu_afmt), GFP_KERNEL); 1944 if (adev->mode_info.afmt[i]) { 1945 adev->mode_info.afmt[i]->offset = dig_offsets[i]; 1946 adev->mode_info.afmt[i]->id = i; 1947 } 1948 } 1949 } 1950 1951 static void dce_v8_0_afmt_fini(struct amdgpu_device *adev) 1952 { 1953 int i; 1954 1955 for (i = 0; i < adev->mode_info.num_dig; i++) { 1956 kfree(adev->mode_info.afmt[i]); 1957 adev->mode_info.afmt[i] = NULL; 1958 } 1959 } 1960 1961 static const u32 vga_control_regs[6] = 1962 { 1963 mmD1VGA_CONTROL, 1964 mmD2VGA_CONTROL, 1965 mmD3VGA_CONTROL, 1966 mmD4VGA_CONTROL, 1967 mmD5VGA_CONTROL, 1968 mmD6VGA_CONTROL, 1969 }; 1970 1971 static void dce_v8_0_vga_enable(struct drm_crtc *crtc, bool enable) 1972 { 1973 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc); 1974 struct drm_device *dev = crtc->dev; 1975 struct amdgpu_device *adev = dev->dev_private; 1976 u32 vga_control; 1977 1978 vga_control = RREG32(vga_control_regs[amdgpu_crtc->crtc_id]) & ~1; 1979 if (enable) 1980 WREG32(vga_control_regs[amdgpu_crtc->crtc_id], vga_control | 1); 1981 else 1982 WREG32(vga_control_regs[amdgpu_crtc->crtc_id], vga_control); 1983 } 1984 1985 static void dce_v8_0_grph_enable(struct drm_crtc *crtc, bool enable) 1986 { 1987 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc); 1988 struct drm_device *dev = crtc->dev; 1989 struct amdgpu_device *adev = dev->dev_private; 1990 1991 if (enable) 1992 WREG32(mmGRPH_ENABLE + amdgpu_crtc->crtc_offset, 1); 1993 else 1994 WREG32(mmGRPH_ENABLE + amdgpu_crtc->crtc_offset, 0); 1995 } 1996 1997 static int dce_v8_0_crtc_do_set_base(struct drm_crtc *crtc, 1998 struct drm_framebuffer *fb, 1999 int x, int y, int atomic) 2000 { 2001 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc); 2002 struct drm_device *dev = crtc->dev; 2003 struct amdgpu_device *adev = dev->dev_private; 2004 struct amdgpu_framebuffer *amdgpu_fb; 2005 struct drm_framebuffer *target_fb; 2006 struct drm_gem_object *obj; 2007 struct amdgpu_bo *rbo; 2008 uint64_t fb_location, tiling_flags; 2009 uint32_t fb_format, fb_pitch_pixels; 2010 u32 fb_swap = (GRPH_ENDIAN_NONE << GRPH_SWAP_CNTL__GRPH_ENDIAN_SWAP__SHIFT); 2011 u32 pipe_config; 2012 u32 tmp, viewport_w, viewport_h; 2013 int r; 2014 bool bypass_lut = false; 2015 2016 /* no fb bound */ 2017 if (!atomic && !crtc->primary->fb) { 2018 DRM_DEBUG_KMS("No FB bound\n"); 2019 return 0; 2020 } 2021 2022 if (atomic) { 2023 amdgpu_fb = to_amdgpu_framebuffer(fb); 2024 target_fb = fb; 2025 } 2026 else { 2027 amdgpu_fb = to_amdgpu_framebuffer(crtc->primary->fb); 2028 target_fb = crtc->primary->fb; 2029 } 2030 2031 /* If atomic, assume fb object is pinned & idle & fenced and 2032 * just update base pointers 2033 */ 2034 obj = amdgpu_fb->obj; 2035 rbo = gem_to_amdgpu_bo(obj); 2036 r = amdgpu_bo_reserve(rbo, false); 2037 if (unlikely(r != 0)) 2038 return r; 2039 2040 if (atomic) 2041 fb_location = amdgpu_bo_gpu_offset(rbo); 2042 else { 2043 r = amdgpu_bo_pin(rbo, AMDGPU_GEM_DOMAIN_VRAM, &fb_location); 2044 if (unlikely(r != 0)) { 2045 amdgpu_bo_unreserve(rbo); 2046 return -EINVAL; 2047 } 2048 } 2049 2050 amdgpu_bo_get_tiling_flags(rbo, &tiling_flags); 2051 
	amdgpu_bo_unreserve(rbo);

	pipe_config = AMDGPU_TILING_GET(tiling_flags, PIPE_CONFIG);

	switch (target_fb->pixel_format) {
	case DRM_FORMAT_C8:
		fb_format = ((GRPH_DEPTH_8BPP << GRPH_CONTROL__GRPH_DEPTH__SHIFT) |
			     (GRPH_FORMAT_INDEXED << GRPH_CONTROL__GRPH_FORMAT__SHIFT));
		break;
	case DRM_FORMAT_XRGB4444:
	case DRM_FORMAT_ARGB4444:
		fb_format = ((GRPH_DEPTH_16BPP << GRPH_CONTROL__GRPH_DEPTH__SHIFT) |
			     (GRPH_FORMAT_ARGB4444 << GRPH_CONTROL__GRPH_FORMAT__SHIFT));
#ifdef __BIG_ENDIAN
		fb_swap = (GRPH_ENDIAN_8IN16 << GRPH_SWAP_CNTL__GRPH_ENDIAN_SWAP__SHIFT);
#endif
		break;
	case DRM_FORMAT_XRGB1555:
	case DRM_FORMAT_ARGB1555:
		fb_format = ((GRPH_DEPTH_16BPP << GRPH_CONTROL__GRPH_DEPTH__SHIFT) |
			     (GRPH_FORMAT_ARGB1555 << GRPH_CONTROL__GRPH_FORMAT__SHIFT));
#ifdef __BIG_ENDIAN
		fb_swap = (GRPH_ENDIAN_8IN16 << GRPH_SWAP_CNTL__GRPH_ENDIAN_SWAP__SHIFT);
#endif
		break;
	case DRM_FORMAT_BGRX5551:
	case DRM_FORMAT_BGRA5551:
		fb_format = ((GRPH_DEPTH_16BPP << GRPH_CONTROL__GRPH_DEPTH__SHIFT) |
			     (GRPH_FORMAT_BGRA5551 << GRPH_CONTROL__GRPH_FORMAT__SHIFT));
#ifdef __BIG_ENDIAN
		fb_swap = (GRPH_ENDIAN_8IN16 << GRPH_SWAP_CNTL__GRPH_ENDIAN_SWAP__SHIFT);
#endif
		break;
	case DRM_FORMAT_RGB565:
		fb_format = ((GRPH_DEPTH_16BPP << GRPH_CONTROL__GRPH_DEPTH__SHIFT) |
			     (GRPH_FORMAT_ARGB565 << GRPH_CONTROL__GRPH_FORMAT__SHIFT));
#ifdef __BIG_ENDIAN
		fb_swap = (GRPH_ENDIAN_8IN16 << GRPH_SWAP_CNTL__GRPH_ENDIAN_SWAP__SHIFT);
#endif
		break;
	case DRM_FORMAT_XRGB8888:
	case DRM_FORMAT_ARGB8888:
		fb_format = ((GRPH_DEPTH_32BPP << GRPH_CONTROL__GRPH_DEPTH__SHIFT) |
			     (GRPH_FORMAT_ARGB8888 << GRPH_CONTROL__GRPH_FORMAT__SHIFT));
#ifdef __BIG_ENDIAN
		fb_swap = (GRPH_ENDIAN_8IN32 << GRPH_SWAP_CNTL__GRPH_ENDIAN_SWAP__SHIFT);
#endif
		break;
	case DRM_FORMAT_XRGB2101010:
	case DRM_FORMAT_ARGB2101010:
		fb_format = ((GRPH_DEPTH_32BPP << GRPH_CONTROL__GRPH_DEPTH__SHIFT) |
			     (GRPH_FORMAT_ARGB2101010 << GRPH_CONTROL__GRPH_FORMAT__SHIFT));
#ifdef __BIG_ENDIAN
		fb_swap = (GRPH_ENDIAN_8IN32 << GRPH_SWAP_CNTL__GRPH_ENDIAN_SWAP__SHIFT);
#endif
		/* Greater 8 bpc fb needs to bypass hw-lut to retain precision */
		bypass_lut = true;
		break;
	case DRM_FORMAT_BGRX1010102:
	case DRM_FORMAT_BGRA1010102:
		fb_format = ((GRPH_DEPTH_32BPP << GRPH_CONTROL__GRPH_DEPTH__SHIFT) |
			     (GRPH_FORMAT_BGRA1010102 << GRPH_CONTROL__GRPH_FORMAT__SHIFT));
#ifdef __BIG_ENDIAN
		fb_swap = (GRPH_ENDIAN_8IN32 << GRPH_SWAP_CNTL__GRPH_ENDIAN_SWAP__SHIFT);
#endif
		/* Greater 8 bpc fb needs to bypass hw-lut to retain precision */
		bypass_lut = true;
		break;
	default:
		DRM_ERROR("Unsupported screen format %s\n",
			  drm_get_format_name(target_fb->pixel_format));
		return -EINVAL;
	}

	if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE) == ARRAY_2D_TILED_THIN1) {
		unsigned bankw, bankh, mtaspect, tile_split, num_banks;

		bankw = AMDGPU_TILING_GET(tiling_flags, BANK_WIDTH);
		bankh = AMDGPU_TILING_GET(tiling_flags, BANK_HEIGHT);
		mtaspect = AMDGPU_TILING_GET(tiling_flags, MACRO_TILE_ASPECT);
		tile_split = AMDGPU_TILING_GET(tiling_flags, TILE_SPLIT);
		num_banks = AMDGPU_TILING_GET(tiling_flags, NUM_BANKS);

		fb_format |= (num_banks << GRPH_CONTROL__GRPH_NUM_BANKS__SHIFT);
		fb_format |= (GRPH_ARRAY_2D_TILED_THIN1 <<
GRPH_CONTROL__GRPH_ARRAY_MODE__SHIFT); 2136 fb_format |= (tile_split << GRPH_CONTROL__GRPH_TILE_SPLIT__SHIFT); 2137 fb_format |= (bankw << GRPH_CONTROL__GRPH_BANK_WIDTH__SHIFT); 2138 fb_format |= (bankh << GRPH_CONTROL__GRPH_BANK_HEIGHT__SHIFT); 2139 fb_format |= (mtaspect << GRPH_CONTROL__GRPH_MACRO_TILE_ASPECT__SHIFT); 2140 fb_format |= (DISPLAY_MICRO_TILING << GRPH_CONTROL__GRPH_MICRO_TILE_MODE__SHIFT); 2141 } else if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE) == ARRAY_1D_TILED_THIN1) { 2142 fb_format |= (GRPH_ARRAY_1D_TILED_THIN1 << GRPH_CONTROL__GRPH_ARRAY_MODE__SHIFT); 2143 } 2144 2145 fb_format |= (pipe_config << GRPH_CONTROL__GRPH_PIPE_CONFIG__SHIFT); 2146 2147 dce_v8_0_vga_enable(crtc, false); 2148 2149 WREG32(mmGRPH_PRIMARY_SURFACE_ADDRESS_HIGH + amdgpu_crtc->crtc_offset, 2150 upper_32_bits(fb_location)); 2151 WREG32(mmGRPH_SECONDARY_SURFACE_ADDRESS_HIGH + amdgpu_crtc->crtc_offset, 2152 upper_32_bits(fb_location)); 2153 WREG32(mmGRPH_PRIMARY_SURFACE_ADDRESS + amdgpu_crtc->crtc_offset, 2154 (u32)fb_location & GRPH_PRIMARY_SURFACE_ADDRESS__GRPH_PRIMARY_SURFACE_ADDRESS_MASK); 2155 WREG32(mmGRPH_SECONDARY_SURFACE_ADDRESS + amdgpu_crtc->crtc_offset, 2156 (u32) fb_location & GRPH_SECONDARY_SURFACE_ADDRESS__GRPH_SECONDARY_SURFACE_ADDRESS_MASK); 2157 WREG32(mmGRPH_CONTROL + amdgpu_crtc->crtc_offset, fb_format); 2158 WREG32(mmGRPH_SWAP_CNTL + amdgpu_crtc->crtc_offset, fb_swap); 2159 2160 /* 2161 * The LUT only has 256 slots for indexing by a 8 bpc fb. Bypass the LUT 2162 * for > 8 bpc scanout to avoid truncation of fb indices to 8 msb's, to 2163 * retain the full precision throughout the pipeline. 2164 */ 2165 WREG32_P(mmGRPH_LUT_10BIT_BYPASS_CONTROL + amdgpu_crtc->crtc_offset, 2166 (bypass_lut ? LUT_10BIT_BYPASS_EN : 0), 2167 ~LUT_10BIT_BYPASS_EN); 2168 2169 if (bypass_lut) 2170 DRM_DEBUG_KMS("Bypassing hardware LUT due to 10 bit fb scanout.\n"); 2171 2172 WREG32(mmGRPH_SURFACE_OFFSET_X + amdgpu_crtc->crtc_offset, 0); 2173 WREG32(mmGRPH_SURFACE_OFFSET_Y + amdgpu_crtc->crtc_offset, 0); 2174 WREG32(mmGRPH_X_START + amdgpu_crtc->crtc_offset, 0); 2175 WREG32(mmGRPH_Y_START + amdgpu_crtc->crtc_offset, 0); 2176 WREG32(mmGRPH_X_END + amdgpu_crtc->crtc_offset, target_fb->width); 2177 WREG32(mmGRPH_Y_END + amdgpu_crtc->crtc_offset, target_fb->height); 2178 2179 fb_pitch_pixels = target_fb->pitches[0] / (target_fb->bits_per_pixel / 8); 2180 WREG32(mmGRPH_PITCH + amdgpu_crtc->crtc_offset, fb_pitch_pixels); 2181 2182 dce_v8_0_grph_enable(crtc, true); 2183 2184 WREG32(mmLB_DESKTOP_HEIGHT + amdgpu_crtc->crtc_offset, 2185 target_fb->height); 2186 2187 x &= ~3; 2188 y &= ~1; 2189 WREG32(mmVIEWPORT_START + amdgpu_crtc->crtc_offset, 2190 (x << 16) | y); 2191 viewport_w = crtc->mode.hdisplay; 2192 viewport_h = (crtc->mode.vdisplay + 1) & ~1; 2193 WREG32(mmVIEWPORT_SIZE + amdgpu_crtc->crtc_offset, 2194 (viewport_w << 16) | viewport_h); 2195 2196 /* pageflip setup */ 2197 /* make sure flip is at vb rather than hb */ 2198 tmp = RREG32(mmGRPH_FLIP_CONTROL + amdgpu_crtc->crtc_offset); 2199 tmp &= ~GRPH_FLIP_CONTROL__GRPH_SURFACE_UPDATE_H_RETRACE_EN_MASK; 2200 WREG32(mmGRPH_FLIP_CONTROL + amdgpu_crtc->crtc_offset, tmp); 2201 2202 /* set pageflip to happen only at start of vblank interval (front porch) */ 2203 WREG32(mmMASTER_UPDATE_MODE + amdgpu_crtc->crtc_offset, 3); 2204 2205 if (!atomic && fb && fb != crtc->primary->fb) { 2206 amdgpu_fb = to_amdgpu_framebuffer(fb); 2207 rbo = gem_to_amdgpu_bo(amdgpu_fb->obj); 2208 r = amdgpu_bo_reserve(rbo, false); 2209 if (unlikely(r != 0)) 2210 return r; 2211 
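	/* the previously displayed framebuffer is no longer scanned out; unpin it */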
amdgpu_bo_unpin(rbo); 2212 amdgpu_bo_unreserve(rbo); 2213 } 2214 2215 /* Bytes per pixel may have changed */ 2216 dce_v8_0_bandwidth_update(adev); 2217 2218 return 0; 2219 } 2220 2221 static void dce_v8_0_set_interleave(struct drm_crtc *crtc, 2222 struct drm_display_mode *mode) 2223 { 2224 struct drm_device *dev = crtc->dev; 2225 struct amdgpu_device *adev = dev->dev_private; 2226 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc); 2227 2228 if (mode->flags & DRM_MODE_FLAG_INTERLACE) 2229 WREG32(mmLB_DATA_FORMAT + amdgpu_crtc->crtc_offset, 2230 LB_DATA_FORMAT__INTERLEAVE_EN__SHIFT); 2231 else 2232 WREG32(mmLB_DATA_FORMAT + amdgpu_crtc->crtc_offset, 0); 2233 } 2234 2235 static void dce_v8_0_crtc_load_lut(struct drm_crtc *crtc) 2236 { 2237 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc); 2238 struct drm_device *dev = crtc->dev; 2239 struct amdgpu_device *adev = dev->dev_private; 2240 int i; 2241 2242 DRM_DEBUG_KMS("%d\n", amdgpu_crtc->crtc_id); 2243 2244 WREG32(mmINPUT_CSC_CONTROL + amdgpu_crtc->crtc_offset, 2245 ((INPUT_CSC_BYPASS << INPUT_CSC_CONTROL__INPUT_CSC_GRPH_MODE__SHIFT) | 2246 (INPUT_CSC_BYPASS << INPUT_CSC_CONTROL__INPUT_CSC_OVL_MODE__SHIFT))); 2247 WREG32(mmPRESCALE_GRPH_CONTROL + amdgpu_crtc->crtc_offset, 2248 PRESCALE_GRPH_CONTROL__GRPH_PRESCALE_BYPASS_MASK); 2249 WREG32(mmPRESCALE_OVL_CONTROL + amdgpu_crtc->crtc_offset, 2250 PRESCALE_OVL_CONTROL__OVL_PRESCALE_BYPASS_MASK); 2251 WREG32(mmINPUT_GAMMA_CONTROL + amdgpu_crtc->crtc_offset, 2252 ((INPUT_GAMMA_USE_LUT << INPUT_GAMMA_CONTROL__GRPH_INPUT_GAMMA_MODE__SHIFT) | 2253 (INPUT_GAMMA_USE_LUT << INPUT_GAMMA_CONTROL__OVL_INPUT_GAMMA_MODE__SHIFT))); 2254 2255 WREG32(mmDC_LUT_CONTROL + amdgpu_crtc->crtc_offset, 0); 2256 2257 WREG32(mmDC_LUT_BLACK_OFFSET_BLUE + amdgpu_crtc->crtc_offset, 0); 2258 WREG32(mmDC_LUT_BLACK_OFFSET_GREEN + amdgpu_crtc->crtc_offset, 0); 2259 WREG32(mmDC_LUT_BLACK_OFFSET_RED + amdgpu_crtc->crtc_offset, 0); 2260 2261 WREG32(mmDC_LUT_WHITE_OFFSET_BLUE + amdgpu_crtc->crtc_offset, 0xffff); 2262 WREG32(mmDC_LUT_WHITE_OFFSET_GREEN + amdgpu_crtc->crtc_offset, 0xffff); 2263 WREG32(mmDC_LUT_WHITE_OFFSET_RED + amdgpu_crtc->crtc_offset, 0xffff); 2264 2265 WREG32(mmDC_LUT_RW_MODE + amdgpu_crtc->crtc_offset, 0); 2266 WREG32(mmDC_LUT_WRITE_EN_MASK + amdgpu_crtc->crtc_offset, 0x00000007); 2267 2268 WREG32(mmDC_LUT_RW_INDEX + amdgpu_crtc->crtc_offset, 0); 2269 for (i = 0; i < 256; i++) { 2270 WREG32(mmDC_LUT_30_COLOR + amdgpu_crtc->crtc_offset, 2271 (amdgpu_crtc->lut_r[i] << 20) | 2272 (amdgpu_crtc->lut_g[i] << 10) | 2273 (amdgpu_crtc->lut_b[i] << 0)); 2274 } 2275 2276 WREG32(mmDEGAMMA_CONTROL + amdgpu_crtc->crtc_offset, 2277 ((DEGAMMA_BYPASS << DEGAMMA_CONTROL__GRPH_DEGAMMA_MODE__SHIFT) | 2278 (DEGAMMA_BYPASS << DEGAMMA_CONTROL__OVL_DEGAMMA_MODE__SHIFT) | 2279 (DEGAMMA_BYPASS << DEGAMMA_CONTROL__CURSOR_DEGAMMA_MODE__SHIFT))); 2280 WREG32(mmGAMUT_REMAP_CONTROL + amdgpu_crtc->crtc_offset, 2281 ((GAMUT_REMAP_BYPASS << GAMUT_REMAP_CONTROL__GRPH_GAMUT_REMAP_MODE__SHIFT) | 2282 (GAMUT_REMAP_BYPASS << GAMUT_REMAP_CONTROL__OVL_GAMUT_REMAP_MODE__SHIFT))); 2283 WREG32(mmREGAMMA_CONTROL + amdgpu_crtc->crtc_offset, 2284 ((REGAMMA_BYPASS << REGAMMA_CONTROL__GRPH_REGAMMA_MODE__SHIFT) | 2285 (REGAMMA_BYPASS << REGAMMA_CONTROL__OVL_REGAMMA_MODE__SHIFT))); 2286 WREG32(mmOUTPUT_CSC_CONTROL + amdgpu_crtc->crtc_offset, 2287 ((OUTPUT_CSC_BYPASS << OUTPUT_CSC_CONTROL__OUTPUT_CSC_GRPH_MODE__SHIFT) | 2288 (OUTPUT_CSC_BYPASS << OUTPUT_CSC_CONTROL__OUTPUT_CSC_OVL_MODE__SHIFT))); 2289 /* XXX match this to the depth of the crtc fmt block, 
move to modeset? */ 2290 WREG32(0x1a50 + amdgpu_crtc->crtc_offset, 0); 2291 /* XXX this only needs to be programmed once per crtc at startup, 2292 * not sure where the best place for it is 2293 */ 2294 WREG32(mmALPHA_CONTROL + amdgpu_crtc->crtc_offset, 2295 ALPHA_CONTROL__CURSOR_ALPHA_BLND_ENA_MASK); 2296 } 2297 2298 static int dce_v8_0_pick_dig_encoder(struct drm_encoder *encoder) 2299 { 2300 struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder); 2301 struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv; 2302 2303 switch (amdgpu_encoder->encoder_id) { 2304 case ENCODER_OBJECT_ID_INTERNAL_UNIPHY: 2305 if (dig->linkb) 2306 return 1; 2307 else 2308 return 0; 2309 break; 2310 case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1: 2311 if (dig->linkb) 2312 return 3; 2313 else 2314 return 2; 2315 break; 2316 case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2: 2317 if (dig->linkb) 2318 return 5; 2319 else 2320 return 4; 2321 break; 2322 case ENCODER_OBJECT_ID_INTERNAL_UNIPHY3: 2323 return 6; 2324 break; 2325 default: 2326 DRM_ERROR("invalid encoder_id: 0x%x\n", amdgpu_encoder->encoder_id); 2327 return 0; 2328 } 2329 } 2330 2331 /** 2332 * dce_v8_0_pick_pll - Allocate a PPLL for use by the crtc. 2333 * 2334 * @crtc: drm crtc 2335 * 2336 * Returns the PPLL (Pixel PLL) to be used by the crtc. For DP monitors 2337 * a single PPLL can be used for all DP crtcs/encoders. For non-DP 2338 * monitors a dedicated PPLL must be used. If a particular board has 2339 * an external DP PLL, return ATOM_PPLL_INVALID to skip PLL programming 2340 * as there is no need to program the PLL itself. If we are not able to 2341 * allocate a PLL, return ATOM_PPLL_INVALID to skip PLL programming to 2342 * avoid messing up an existing monitor. 2343 * 2344 * Asic specific PLL information 2345 * 2346 * DCE 8.x 2347 * KB/KV 2348 * - PPLL1, PPLL2 are available for all UNIPHY (both DP and non-DP) 2349 * CI 2350 * - PPLL0, PPLL1, PPLL2 are available for all UNIPHY (both DP and non-DP) and DAC 2351 * 2352 */ 2353 static u32 dce_v8_0_pick_pll(struct drm_crtc *crtc) 2354 { 2355 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc); 2356 struct drm_device *dev = crtc->dev; 2357 struct amdgpu_device *adev = dev->dev_private; 2358 u32 pll_in_use; 2359 int pll; 2360 2361 if (ENCODER_MODE_IS_DP(amdgpu_atombios_encoder_get_encoder_mode(amdgpu_crtc->encoder))) { 2362 if (adev->clock.dp_extclk) 2363 /* skip PPLL programming if using ext clock */ 2364 return ATOM_PPLL_INVALID; 2365 else { 2366 /* use the same PPLL for all DP monitors */ 2367 pll = amdgpu_pll_get_shared_dp_ppll(crtc); 2368 if (pll != ATOM_PPLL_INVALID) 2369 return pll; 2370 } 2371 } else { 2372 /* use the same PPLL for all monitors with the same clock */ 2373 pll = amdgpu_pll_get_shared_nondp_ppll(crtc); 2374 if (pll != ATOM_PPLL_INVALID) 2375 return pll; 2376 } 2377 /* otherwise, pick one of the plls */ 2378 if ((adev->asic_type == CHIP_KABINI) || 2379 (adev->asic_type == CHIP_MULLINS)) { 2380 /* KB/ML has PPLL1 and PPLL2 */ 2381 pll_in_use = amdgpu_pll_get_use_mask(crtc); 2382 if (!(pll_in_use & (1 << ATOM_PPLL2))) 2383 return ATOM_PPLL2; 2384 if (!(pll_in_use & (1 << ATOM_PPLL1))) 2385 return ATOM_PPLL1; 2386 DRM_ERROR("unable to allocate a PPLL\n"); 2387 return ATOM_PPLL_INVALID; 2388 } else { 2389 /* CI/KV has PPLL0, PPLL1, and PPLL2 */ 2390 pll_in_use = amdgpu_pll_get_use_mask(crtc); 2391 if (!(pll_in_use & (1 << ATOM_PPLL2))) 2392 return ATOM_PPLL2; 2393 if (!(pll_in_use & (1 << ATOM_PPLL1))) 2394 return ATOM_PPLL1; 2395 if (!(pll_in_use & (1 << ATOM_PPLL0))) 2396 return 
ATOM_PPLL0; 2397 DRM_ERROR("unable to allocate a PPLL\n"); 2398 return ATOM_PPLL_INVALID; 2399 } 2400 return ATOM_PPLL_INVALID; 2401 } 2402 2403 static void dce_v8_0_lock_cursor(struct drm_crtc *crtc, bool lock) 2404 { 2405 struct amdgpu_device *adev = crtc->dev->dev_private; 2406 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc); 2407 uint32_t cur_lock; 2408 2409 cur_lock = RREG32(mmCUR_UPDATE + amdgpu_crtc->crtc_offset); 2410 if (lock) 2411 cur_lock |= CUR_UPDATE__CURSOR_UPDATE_LOCK_MASK; 2412 else 2413 cur_lock &= ~CUR_UPDATE__CURSOR_UPDATE_LOCK_MASK; 2414 WREG32(mmCUR_UPDATE + amdgpu_crtc->crtc_offset, cur_lock); 2415 } 2416 2417 static void dce_v8_0_hide_cursor(struct drm_crtc *crtc) 2418 { 2419 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc); 2420 struct amdgpu_device *adev = crtc->dev->dev_private; 2421 2422 WREG32_IDX(mmCUR_CONTROL + amdgpu_crtc->crtc_offset, 2423 (CURSOR_24_8_PRE_MULT << CUR_CONTROL__CURSOR_MODE__SHIFT) | 2424 (CURSOR_URGENT_1_2 << CUR_CONTROL__CURSOR_URGENT_CONTROL__SHIFT)); 2425 } 2426 2427 static void dce_v8_0_show_cursor(struct drm_crtc *crtc) 2428 { 2429 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc); 2430 struct amdgpu_device *adev = crtc->dev->dev_private; 2431 2432 WREG32_IDX(mmCUR_CONTROL + amdgpu_crtc->crtc_offset, 2433 CUR_CONTROL__CURSOR_EN_MASK | 2434 (CURSOR_24_8_PRE_MULT << CUR_CONTROL__CURSOR_MODE__SHIFT) | 2435 (CURSOR_URGENT_1_2 << CUR_CONTROL__CURSOR_URGENT_CONTROL__SHIFT)); 2436 } 2437 2438 static void dce_v8_0_set_cursor(struct drm_crtc *crtc, struct drm_gem_object *obj, 2439 uint64_t gpu_addr) 2440 { 2441 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc); 2442 struct amdgpu_device *adev = crtc->dev->dev_private; 2443 2444 WREG32(mmCUR_SURFACE_ADDRESS_HIGH + amdgpu_crtc->crtc_offset, 2445 upper_32_bits(gpu_addr)); 2446 WREG32(mmCUR_SURFACE_ADDRESS + amdgpu_crtc->crtc_offset, 2447 gpu_addr & 0xffffffff); 2448 } 2449 2450 static int dce_v8_0_crtc_cursor_move(struct drm_crtc *crtc, 2451 int x, int y) 2452 { 2453 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc); 2454 struct amdgpu_device *adev = crtc->dev->dev_private; 2455 int xorigin = 0, yorigin = 0; 2456 2457 /* avivo cursor are offset into the total surface */ 2458 x += crtc->x; 2459 y += crtc->y; 2460 DRM_DEBUG("x %d y %d c->x %d c->y %d\n", x, y, crtc->x, crtc->y); 2461 2462 if (x < 0) { 2463 xorigin = min(-x, amdgpu_crtc->max_cursor_width - 1); 2464 x = 0; 2465 } 2466 if (y < 0) { 2467 yorigin = min(-y, amdgpu_crtc->max_cursor_height - 1); 2468 y = 0; 2469 } 2470 2471 dce_v8_0_lock_cursor(crtc, true); 2472 WREG32(mmCUR_POSITION + amdgpu_crtc->crtc_offset, (x << 16) | y); 2473 WREG32(mmCUR_HOT_SPOT + amdgpu_crtc->crtc_offset, (xorigin << 16) | yorigin); 2474 WREG32(mmCUR_SIZE + amdgpu_crtc->crtc_offset, 2475 ((amdgpu_crtc->cursor_width - 1) << 16) | (amdgpu_crtc->cursor_height - 1)); 2476 dce_v8_0_lock_cursor(crtc, false); 2477 2478 return 0; 2479 } 2480 2481 static int dce_v8_0_crtc_cursor_set(struct drm_crtc *crtc, 2482 struct drm_file *file_priv, 2483 uint32_t handle, 2484 uint32_t width, 2485 uint32_t height) 2486 { 2487 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc); 2488 struct drm_gem_object *obj; 2489 struct amdgpu_bo *robj; 2490 uint64_t gpu_addr; 2491 int ret; 2492 2493 if (!handle) { 2494 /* turn off cursor */ 2495 dce_v8_0_hide_cursor(crtc); 2496 obj = NULL; 2497 goto unpin; 2498 } 2499 2500 if ((width > amdgpu_crtc->max_cursor_width) || 2501 (height > amdgpu_crtc->max_cursor_height)) { 2502 DRM_ERROR("bad cursor width or height %d x %d\n", 
width, height); 2503 return -EINVAL; 2504 } 2505 2506 obj = drm_gem_object_lookup(crtc->dev, file_priv, handle); 2507 if (!obj) { 2508 DRM_ERROR("Cannot find cursor object %x for crtc %d\n", handle, amdgpu_crtc->crtc_id); 2509 return -ENOENT; 2510 } 2511 2512 robj = gem_to_amdgpu_bo(obj); 2513 ret = amdgpu_bo_reserve(robj, false); 2514 if (unlikely(ret != 0)) 2515 goto fail; 2516 ret = amdgpu_bo_pin_restricted(robj, AMDGPU_GEM_DOMAIN_VRAM, 2517 0, 0, &gpu_addr); 2518 amdgpu_bo_unreserve(robj); 2519 if (ret) 2520 goto fail; 2521 2522 amdgpu_crtc->cursor_width = width; 2523 amdgpu_crtc->cursor_height = height; 2524 2525 dce_v8_0_lock_cursor(crtc, true); 2526 dce_v8_0_set_cursor(crtc, obj, gpu_addr); 2527 dce_v8_0_show_cursor(crtc); 2528 dce_v8_0_lock_cursor(crtc, false); 2529 2530 unpin: 2531 if (amdgpu_crtc->cursor_bo) { 2532 robj = gem_to_amdgpu_bo(amdgpu_crtc->cursor_bo); 2533 ret = amdgpu_bo_reserve(robj, false); 2534 if (likely(ret == 0)) { 2535 amdgpu_bo_unpin(robj); 2536 amdgpu_bo_unreserve(robj); 2537 } 2538 drm_gem_object_unreference_unlocked(amdgpu_crtc->cursor_bo); 2539 } 2540 2541 amdgpu_crtc->cursor_bo = obj; 2542 return 0; 2543 fail: 2544 drm_gem_object_unreference_unlocked(obj); 2545 2546 return ret; 2547 } 2548 2549 static void dce_v8_0_crtc_gamma_set(struct drm_crtc *crtc, u16 *red, u16 *green, 2550 u16 *blue, uint32_t start, uint32_t size) 2551 { 2552 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc); 2553 int end = (start + size > 256) ? 256 : start + size, i; 2554 2555 /* userspace palettes are always correct as is */ 2556 for (i = start; i < end; i++) { 2557 amdgpu_crtc->lut_r[i] = red[i] >> 6; 2558 amdgpu_crtc->lut_g[i] = green[i] >> 6; 2559 amdgpu_crtc->lut_b[i] = blue[i] >> 6; 2560 } 2561 dce_v8_0_crtc_load_lut(crtc); 2562 } 2563 2564 static void dce_v8_0_crtc_destroy(struct drm_crtc *crtc) 2565 { 2566 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc); 2567 2568 drm_crtc_cleanup(crtc); 2569 destroy_workqueue(amdgpu_crtc->pflip_queue); 2570 kfree(amdgpu_crtc); 2571 } 2572 2573 static const struct drm_crtc_funcs dce_v8_0_crtc_funcs = { 2574 .cursor_set = dce_v8_0_crtc_cursor_set, 2575 .cursor_move = dce_v8_0_crtc_cursor_move, 2576 .gamma_set = dce_v8_0_crtc_gamma_set, 2577 .set_config = amdgpu_crtc_set_config, 2578 .destroy = dce_v8_0_crtc_destroy, 2579 .page_flip = amdgpu_crtc_page_flip, 2580 }; 2581 2582 static void dce_v8_0_crtc_dpms(struct drm_crtc *crtc, int mode) 2583 { 2584 struct drm_device *dev = crtc->dev; 2585 struct amdgpu_device *adev = dev->dev_private; 2586 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc); 2587 unsigned type; 2588 2589 switch (mode) { 2590 case DRM_MODE_DPMS_ON: 2591 amdgpu_crtc->enabled = true; 2592 amdgpu_atombios_crtc_enable(crtc, ATOM_ENABLE); 2593 dce_v8_0_vga_enable(crtc, true); 2594 amdgpu_atombios_crtc_blank(crtc, ATOM_DISABLE); 2595 dce_v8_0_vga_enable(crtc, false); 2596 /* Make sure VBLANK and PFLIP interrupts are still enabled */ 2597 type = amdgpu_crtc_idx_to_irq_type(adev, amdgpu_crtc->crtc_id); 2598 amdgpu_irq_update(adev, &adev->crtc_irq, type); 2599 amdgpu_irq_update(adev, &adev->pageflip_irq, type); 2600 drm_vblank_post_modeset(dev, amdgpu_crtc->crtc_id); 2601 dce_v8_0_crtc_load_lut(crtc); 2602 break; 2603 case DRM_MODE_DPMS_STANDBY: 2604 case DRM_MODE_DPMS_SUSPEND: 2605 case DRM_MODE_DPMS_OFF: 2606 drm_vblank_pre_modeset(dev, amdgpu_crtc->crtc_id); 2607 if (amdgpu_crtc->enabled) { 2608 dce_v8_0_vga_enable(crtc, true); 2609 amdgpu_atombios_crtc_blank(crtc, ATOM_ENABLE); 2610 dce_v8_0_vga_enable(crtc, 
				false);
		}
		amdgpu_atombios_crtc_enable(crtc, ATOM_DISABLE);
		amdgpu_crtc->enabled = false;
		break;
	}
	/* adjust pm to dpms */
	amdgpu_pm_compute_clocks(adev);
}

static void dce_v8_0_crtc_prepare(struct drm_crtc *crtc)
{
	/* disable crtc pair power gating before programming */
	amdgpu_atombios_crtc_powergate(crtc, ATOM_DISABLE);
	amdgpu_atombios_crtc_lock(crtc, ATOM_ENABLE);
	dce_v8_0_crtc_dpms(crtc, DRM_MODE_DPMS_OFF);
}

static void dce_v8_0_crtc_commit(struct drm_crtc *crtc)
{
	dce_v8_0_crtc_dpms(crtc, DRM_MODE_DPMS_ON);
	amdgpu_atombios_crtc_lock(crtc, ATOM_DISABLE);
}

static void dce_v8_0_crtc_disable(struct drm_crtc *crtc)
{
	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
	struct drm_device *dev = crtc->dev;
	struct amdgpu_device *adev = dev->dev_private;
	struct amdgpu_atom_ss ss;
	int i;

	dce_v8_0_crtc_dpms(crtc, DRM_MODE_DPMS_OFF);
	if (crtc->primary->fb) {
		int r;
		struct amdgpu_framebuffer *amdgpu_fb;
		struct amdgpu_bo *rbo;

		amdgpu_fb = to_amdgpu_framebuffer(crtc->primary->fb);
		rbo = gem_to_amdgpu_bo(amdgpu_fb->obj);
		r = amdgpu_bo_reserve(rbo, false);
		if (unlikely(r))
			DRM_ERROR("failed to reserve rbo before unpin\n");
		else {
			amdgpu_bo_unpin(rbo);
			amdgpu_bo_unreserve(rbo);
		}
	}
	/* disable the GRPH */
	dce_v8_0_grph_enable(crtc, false);

	amdgpu_atombios_crtc_powergate(crtc, ATOM_ENABLE);

	for (i = 0; i < adev->mode_info.num_crtc; i++) {
		if (adev->mode_info.crtcs[i] &&
		    adev->mode_info.crtcs[i]->enabled &&
		    i != amdgpu_crtc->crtc_id &&
		    amdgpu_crtc->pll_id == adev->mode_info.crtcs[i]->pll_id) {
			/* one other crtc is using this pll, don't turn
			 * off the pll
			 */
			goto done;
		}
	}

	switch (amdgpu_crtc->pll_id) {
	case ATOM_PPLL1:
	case ATOM_PPLL2:
		/* disable the ppll */
		amdgpu_atombios_crtc_program_pll(crtc, amdgpu_crtc->crtc_id, amdgpu_crtc->pll_id,
						 0, 0, ATOM_DISABLE, 0, 0, 0, 0, 0, false, &ss);
		break;
	case ATOM_PPLL0:
		/* disable the ppll */
		if ((adev->asic_type == CHIP_KAVERI) ||
		    (adev->asic_type == CHIP_BONAIRE) ||
		    (adev->asic_type == CHIP_HAWAII))
			amdgpu_atombios_crtc_program_pll(crtc, amdgpu_crtc->crtc_id, amdgpu_crtc->pll_id,
							 0, 0, ATOM_DISABLE, 0, 0, 0, 0, 0, false, &ss);
		break;
	default:
		break;
	}
done:
	amdgpu_crtc->pll_id = ATOM_PPLL_INVALID;
	amdgpu_crtc->adjusted_clock = 0;
	amdgpu_crtc->encoder = NULL;
	amdgpu_crtc->connector = NULL;
}

static int dce_v8_0_crtc_mode_set(struct drm_crtc *crtc,
				  struct drm_display_mode *mode,
				  struct drm_display_mode *adjusted_mode,
				  int x, int y, struct drm_framebuffer *old_fb)
{
	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);

	if (!amdgpu_crtc->adjusted_clock)
		return -EINVAL;

	amdgpu_atombios_crtc_set_pll(crtc, adjusted_mode);
	amdgpu_atombios_crtc_set_dtd_timing(crtc, adjusted_mode);
	dce_v8_0_crtc_do_set_base(crtc, old_fb, x, y, 0);
	amdgpu_atombios_crtc_overscan_setup(crtc, mode, adjusted_mode);
	amdgpu_atombios_crtc_scaler_setup(crtc);
	/* update the hw version for dpm */
	amdgpu_crtc->hw_mode = *adjusted_mode;

	return 0;
}

static bool dce_v8_0_crtc_mode_fixup(struct drm_crtc *crtc,
				     const struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode) 2724 { 2725 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc); 2726 struct drm_device *dev = crtc->dev; 2727 struct drm_encoder *encoder; 2728 2729 /* assign the encoder to the amdgpu crtc to avoid repeated lookups later */ 2730 list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) { 2731 if (encoder->crtc == crtc) { 2732 amdgpu_crtc->encoder = encoder; 2733 amdgpu_crtc->connector = amdgpu_get_connector_for_encoder(encoder); 2734 break; 2735 } 2736 } 2737 if ((amdgpu_crtc->encoder == NULL) || (amdgpu_crtc->connector == NULL)) { 2738 amdgpu_crtc->encoder = NULL; 2739 amdgpu_crtc->connector = NULL; 2740 return false; 2741 } 2742 if (!amdgpu_crtc_scaling_mode_fixup(crtc, mode, adjusted_mode)) 2743 return false; 2744 if (amdgpu_atombios_crtc_prepare_pll(crtc, adjusted_mode)) 2745 return false; 2746 /* pick pll */ 2747 amdgpu_crtc->pll_id = dce_v8_0_pick_pll(crtc); 2748 /* if we can't get a PPLL for a non-DP encoder, fail */ 2749 if ((amdgpu_crtc->pll_id == ATOM_PPLL_INVALID) && 2750 !ENCODER_MODE_IS_DP(amdgpu_atombios_encoder_get_encoder_mode(amdgpu_crtc->encoder))) 2751 return false; 2752 2753 return true; 2754 } 2755 2756 static int dce_v8_0_crtc_set_base(struct drm_crtc *crtc, int x, int y, 2757 struct drm_framebuffer *old_fb) 2758 { 2759 return dce_v8_0_crtc_do_set_base(crtc, old_fb, x, y, 0); 2760 } 2761 2762 static int dce_v8_0_crtc_set_base_atomic(struct drm_crtc *crtc, 2763 struct drm_framebuffer *fb, 2764 int x, int y, enum mode_set_atomic state) 2765 { 2766 return dce_v8_0_crtc_do_set_base(crtc, fb, x, y, 1); 2767 } 2768 2769 static const struct drm_crtc_helper_funcs dce_v8_0_crtc_helper_funcs = { 2770 .dpms = dce_v8_0_crtc_dpms, 2771 .mode_fixup = dce_v8_0_crtc_mode_fixup, 2772 .mode_set = dce_v8_0_crtc_mode_set, 2773 .mode_set_base = dce_v8_0_crtc_set_base, 2774 .mode_set_base_atomic = dce_v8_0_crtc_set_base_atomic, 2775 .prepare = dce_v8_0_crtc_prepare, 2776 .commit = dce_v8_0_crtc_commit, 2777 .load_lut = dce_v8_0_crtc_load_lut, 2778 .disable = dce_v8_0_crtc_disable, 2779 }; 2780 2781 static int dce_v8_0_crtc_init(struct amdgpu_device *adev, int index) 2782 { 2783 struct amdgpu_crtc *amdgpu_crtc; 2784 int i; 2785 2786 amdgpu_crtc = kzalloc(sizeof(struct amdgpu_crtc) + 2787 (AMDGPUFB_CONN_LIMIT * sizeof(struct drm_connector *)), GFP_KERNEL); 2788 if (amdgpu_crtc == NULL) 2789 return -ENOMEM; 2790 2791 drm_crtc_init(adev->ddev, &amdgpu_crtc->base, &dce_v8_0_crtc_funcs); 2792 2793 drm_mode_crtc_set_gamma_size(&amdgpu_crtc->base, 256); 2794 amdgpu_crtc->crtc_id = index; 2795 amdgpu_crtc->pflip_queue = create_singlethread_workqueue("amdgpu-pageflip-queue"); 2796 adev->mode_info.crtcs[index] = amdgpu_crtc; 2797 2798 amdgpu_crtc->max_cursor_width = CIK_CURSOR_WIDTH; 2799 amdgpu_crtc->max_cursor_height = CIK_CURSOR_HEIGHT; 2800 adev->ddev->mode_config.cursor_width = amdgpu_crtc->max_cursor_width; 2801 adev->ddev->mode_config.cursor_height = amdgpu_crtc->max_cursor_height; 2802 2803 for (i = 0; i < 256; i++) { 2804 amdgpu_crtc->lut_r[i] = i << 2; 2805 amdgpu_crtc->lut_g[i] = i << 2; 2806 amdgpu_crtc->lut_b[i] = i << 2; 2807 } 2808 2809 amdgpu_crtc->crtc_offset = crtc_offsets[amdgpu_crtc->crtc_id]; 2810 2811 amdgpu_crtc->pll_id = ATOM_PPLL_INVALID; 2812 amdgpu_crtc->adjusted_clock = 0; 2813 amdgpu_crtc->encoder = NULL; 2814 amdgpu_crtc->connector = NULL; 2815 drm_crtc_helper_add(&amdgpu_crtc->base, &dce_v8_0_crtc_helper_funcs); 2816 2817 return 0; 2818 } 2819 2820 static int dce_v8_0_early_init(void *handle) 2821 { 2822 struct 
amdgpu_device *adev = (struct amdgpu_device *)handle; 2823 2824 adev->audio_endpt_rreg = &dce_v8_0_audio_endpt_rreg; 2825 adev->audio_endpt_wreg = &dce_v8_0_audio_endpt_wreg; 2826 2827 dce_v8_0_set_display_funcs(adev); 2828 dce_v8_0_set_irq_funcs(adev); 2829 2830 switch (adev->asic_type) { 2831 case CHIP_BONAIRE: 2832 case CHIP_HAWAII: 2833 adev->mode_info.num_crtc = 6; 2834 adev->mode_info.num_hpd = 6; 2835 adev->mode_info.num_dig = 6; 2836 break; 2837 case CHIP_KAVERI: 2838 adev->mode_info.num_crtc = 4; 2839 adev->mode_info.num_hpd = 6; 2840 adev->mode_info.num_dig = 7; 2841 break; 2842 case CHIP_KABINI: 2843 case CHIP_MULLINS: 2844 adev->mode_info.num_crtc = 2; 2845 adev->mode_info.num_hpd = 6; 2846 adev->mode_info.num_dig = 6; /* ? */ 2847 break; 2848 default: 2849 /* FIXME: not supported yet */ 2850 return -EINVAL; 2851 } 2852 2853 return 0; 2854 } 2855 2856 static int dce_v8_0_sw_init(void *handle) 2857 { 2858 int r, i; 2859 struct amdgpu_device *adev = (struct amdgpu_device *)handle; 2860 2861 for (i = 0; i < adev->mode_info.num_crtc; i++) { 2862 r = amdgpu_irq_add_id(adev, i + 1, &adev->crtc_irq); 2863 if (r) 2864 return r; 2865 } 2866 2867 for (i = 8; i < 20; i += 2) { 2868 r = amdgpu_irq_add_id(adev, i, &adev->pageflip_irq); 2869 if (r) 2870 return r; 2871 } 2872 2873 /* HPD hotplug */ 2874 r = amdgpu_irq_add_id(adev, 42, &adev->hpd_irq); 2875 if (r) 2876 return r; 2877 2878 adev->mode_info.mode_config_initialized = true; 2879 2880 adev->ddev->mode_config.funcs = &amdgpu_mode_funcs; 2881 2882 adev->ddev->mode_config.max_width = 16384; 2883 adev->ddev->mode_config.max_height = 16384; 2884 2885 adev->ddev->mode_config.preferred_depth = 24; 2886 adev->ddev->mode_config.prefer_shadow = 1; 2887 2888 adev->ddev->mode_config.fb_base = adev->mc.aper_base; 2889 2890 r = amdgpu_modeset_create_props(adev); 2891 if (r) 2892 return r; 2893 2894 adev->ddev->mode_config.max_width = 16384; 2895 adev->ddev->mode_config.max_height = 16384; 2896 2897 /* allocate crtcs */ 2898 for (i = 0; i < adev->mode_info.num_crtc; i++) { 2899 r = dce_v8_0_crtc_init(adev, i); 2900 if (r) 2901 return r; 2902 } 2903 2904 if (amdgpu_atombios_get_connector_info_from_object_table(adev)) 2905 amdgpu_print_display_setup(adev->ddev); 2906 else 2907 return -EINVAL; 2908 2909 /* setup afmt */ 2910 dce_v8_0_afmt_init(adev); 2911 2912 r = dce_v8_0_audio_init(adev); 2913 if (r) 2914 return r; 2915 2916 drm_kms_helper_poll_init(adev->ddev); 2917 2918 return r; 2919 } 2920 2921 static int dce_v8_0_sw_fini(void *handle) 2922 { 2923 struct amdgpu_device *adev = (struct amdgpu_device *)handle; 2924 2925 kfree(adev->mode_info.bios_hardcoded_edid); 2926 2927 drm_kms_helper_poll_fini(adev->ddev); 2928 2929 dce_v8_0_audio_fini(adev); 2930 2931 dce_v8_0_afmt_fini(adev); 2932 2933 drm_mode_config_cleanup(adev->ddev); 2934 adev->mode_info.mode_config_initialized = false; 2935 2936 return 0; 2937 } 2938 2939 static int dce_v8_0_hw_init(void *handle) 2940 { 2941 int i; 2942 struct amdgpu_device *adev = (struct amdgpu_device *)handle; 2943 2944 /* init dig PHYs, disp eng pll */ 2945 amdgpu_atombios_encoder_init_dig(adev); 2946 amdgpu_atombios_crtc_set_disp_eng_pll(adev, adev->clock.default_dispclk); 2947 2948 /* initialize hpd */ 2949 dce_v8_0_hpd_init(adev); 2950 2951 for (i = 0; i < adev->mode_info.audio.num_pins; i++) { 2952 dce_v8_0_audio_enable(adev, &adev->mode_info.audio.pin[i], false); 2953 } 2954 2955 dce_v8_0_pageflip_interrupt_init(adev); 2956 2957 return 0; 2958 } 2959 2960 static int dce_v8_0_hw_fini(void *handle) 2961 { 2962 
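	/* disable hpd, mute all audio endpoints and turn off pageflip interrupts */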
int i; 2963 struct amdgpu_device *adev = (struct amdgpu_device *)handle; 2964 2965 dce_v8_0_hpd_fini(adev); 2966 2967 for (i = 0; i < adev->mode_info.audio.num_pins; i++) { 2968 dce_v8_0_audio_enable(adev, &adev->mode_info.audio.pin[i], false); 2969 } 2970 2971 dce_v8_0_pageflip_interrupt_fini(adev); 2972 2973 return 0; 2974 } 2975 2976 static int dce_v8_0_suspend(void *handle) 2977 { 2978 struct amdgpu_device *adev = (struct amdgpu_device *)handle; 2979 2980 amdgpu_atombios_scratch_regs_save(adev); 2981 2982 dce_v8_0_hpd_fini(adev); 2983 2984 dce_v8_0_pageflip_interrupt_fini(adev); 2985 2986 return 0; 2987 } 2988 2989 static int dce_v8_0_resume(void *handle) 2990 { 2991 struct amdgpu_device *adev = (struct amdgpu_device *)handle; 2992 2993 amdgpu_atombios_scratch_regs_restore(adev); 2994 2995 /* init dig PHYs, disp eng pll */ 2996 amdgpu_atombios_encoder_init_dig(adev); 2997 amdgpu_atombios_crtc_set_disp_eng_pll(adev, adev->clock.default_dispclk); 2998 /* turn on the BL */ 2999 if (adev->mode_info.bl_encoder) { 3000 u8 bl_level = amdgpu_display_backlight_get_level(adev, 3001 adev->mode_info.bl_encoder); 3002 amdgpu_display_backlight_set_level(adev, adev->mode_info.bl_encoder, 3003 bl_level); 3004 } 3005 3006 /* initialize hpd */ 3007 dce_v8_0_hpd_init(adev); 3008 3009 dce_v8_0_pageflip_interrupt_init(adev); 3010 3011 return 0; 3012 } 3013 3014 static bool dce_v8_0_is_idle(void *handle) 3015 { 3016 return true; 3017 } 3018 3019 static int dce_v8_0_wait_for_idle(void *handle) 3020 { 3021 return 0; 3022 } 3023 3024 static void dce_v8_0_print_status(void *handle) 3025 { 3026 struct amdgpu_device *adev = (struct amdgpu_device *)handle; 3027 3028 dev_info(adev->dev, "DCE 8.x registers\n"); 3029 /* XXX todo */ 3030 } 3031 3032 static int dce_v8_0_soft_reset(void *handle) 3033 { 3034 u32 srbm_soft_reset = 0, tmp; 3035 struct amdgpu_device *adev = (struct amdgpu_device *)handle; 3036 3037 if (dce_v8_0_is_display_hung(adev)) 3038 srbm_soft_reset |= SRBM_SOFT_RESET__SOFT_RESET_DC_MASK; 3039 3040 if (srbm_soft_reset) { 3041 dce_v8_0_print_status((void *)adev); 3042 3043 tmp = RREG32(mmSRBM_SOFT_RESET); 3044 tmp |= srbm_soft_reset; 3045 dev_info(adev->dev, "SRBM_SOFT_RESET=0x%08X\n", tmp); 3046 WREG32(mmSRBM_SOFT_RESET, tmp); 3047 tmp = RREG32(mmSRBM_SOFT_RESET); 3048 3049 udelay(50); 3050 3051 tmp &= ~srbm_soft_reset; 3052 WREG32(mmSRBM_SOFT_RESET, tmp); 3053 tmp = RREG32(mmSRBM_SOFT_RESET); 3054 3055 /* Wait a little for things to settle down */ 3056 udelay(50); 3057 dce_v8_0_print_status((void *)adev); 3058 } 3059 return 0; 3060 } 3061 3062 static void dce_v8_0_set_crtc_vblank_interrupt_state(struct amdgpu_device *adev, 3063 int crtc, 3064 enum amdgpu_interrupt_state state) 3065 { 3066 u32 reg_block, lb_interrupt_mask; 3067 3068 if (crtc >= adev->mode_info.num_crtc) { 3069 DRM_DEBUG("invalid crtc %d\n", crtc); 3070 return; 3071 } 3072 3073 switch (crtc) { 3074 case 0: 3075 reg_block = CRTC0_REGISTER_OFFSET; 3076 break; 3077 case 1: 3078 reg_block = CRTC1_REGISTER_OFFSET; 3079 break; 3080 case 2: 3081 reg_block = CRTC2_REGISTER_OFFSET; 3082 break; 3083 case 3: 3084 reg_block = CRTC3_REGISTER_OFFSET; 3085 break; 3086 case 4: 3087 reg_block = CRTC4_REGISTER_OFFSET; 3088 break; 3089 case 5: 3090 reg_block = CRTC5_REGISTER_OFFSET; 3091 break; 3092 default: 3093 DRM_DEBUG("invalid crtc %d\n", crtc); 3094 return; 3095 } 3096 3097 switch (state) { 3098 case AMDGPU_IRQ_STATE_DISABLE: 3099 lb_interrupt_mask = RREG32(mmLB_INTERRUPT_MASK + reg_block); 3100 lb_interrupt_mask &= 
		    ~LB_INTERRUPT_MASK__VBLANK_INTERRUPT_MASK_MASK;
		WREG32(mmLB_INTERRUPT_MASK + reg_block, lb_interrupt_mask);
		break;
	case AMDGPU_IRQ_STATE_ENABLE:
		lb_interrupt_mask = RREG32(mmLB_INTERRUPT_MASK + reg_block);
		lb_interrupt_mask |= LB_INTERRUPT_MASK__VBLANK_INTERRUPT_MASK_MASK;
		WREG32(mmLB_INTERRUPT_MASK + reg_block, lb_interrupt_mask);
		break;
	default:
		break;
	}
}

static void dce_v8_0_set_crtc_vline_interrupt_state(struct amdgpu_device *adev,
						    int crtc,
						    enum amdgpu_interrupt_state state)
{
	u32 reg_block, lb_interrupt_mask;

	if (crtc >= adev->mode_info.num_crtc) {
		DRM_DEBUG("invalid crtc %d\n", crtc);
		return;
	}

	switch (crtc) {
	case 0:
		reg_block = CRTC0_REGISTER_OFFSET;
		break;
	case 1:
		reg_block = CRTC1_REGISTER_OFFSET;
		break;
	case 2:
		reg_block = CRTC2_REGISTER_OFFSET;
		break;
	case 3:
		reg_block = CRTC3_REGISTER_OFFSET;
		break;
	case 4:
		reg_block = CRTC4_REGISTER_OFFSET;
		break;
	case 5:
		reg_block = CRTC5_REGISTER_OFFSET;
		break;
	default:
		DRM_DEBUG("invalid crtc %d\n", crtc);
		return;
	}

	switch (state) {
	case AMDGPU_IRQ_STATE_DISABLE:
		lb_interrupt_mask = RREG32(mmLB_INTERRUPT_MASK + reg_block);
		lb_interrupt_mask &= ~LB_INTERRUPT_MASK__VLINE_INTERRUPT_MASK_MASK;
		WREG32(mmLB_INTERRUPT_MASK + reg_block, lb_interrupt_mask);
		break;
	case AMDGPU_IRQ_STATE_ENABLE:
		lb_interrupt_mask = RREG32(mmLB_INTERRUPT_MASK + reg_block);
		lb_interrupt_mask |= LB_INTERRUPT_MASK__VLINE_INTERRUPT_MASK_MASK;
		WREG32(mmLB_INTERRUPT_MASK + reg_block, lb_interrupt_mask);
		break;
	default:
		break;
	}
}

static int dce_v8_0_set_hpd_interrupt_state(struct amdgpu_device *adev,
					    struct amdgpu_irq_src *src,
					    unsigned type,
					    enum amdgpu_interrupt_state state)
{
	u32 dc_hpd_int_cntl_reg, dc_hpd_int_cntl;

	switch (type) {
	case AMDGPU_HPD_1:
		dc_hpd_int_cntl_reg = mmDC_HPD1_INT_CONTROL;
		break;
	case AMDGPU_HPD_2:
		dc_hpd_int_cntl_reg = mmDC_HPD2_INT_CONTROL;
		break;
	case AMDGPU_HPD_3:
		dc_hpd_int_cntl_reg = mmDC_HPD3_INT_CONTROL;
		break;
	case AMDGPU_HPD_4:
		dc_hpd_int_cntl_reg = mmDC_HPD4_INT_CONTROL;
		break;
	case AMDGPU_HPD_5:
		dc_hpd_int_cntl_reg = mmDC_HPD5_INT_CONTROL;
		break;
	case AMDGPU_HPD_6:
		dc_hpd_int_cntl_reg = mmDC_HPD6_INT_CONTROL;
		break;
	default:
		DRM_DEBUG("invalid hpd %d\n", type);
		return 0;
	}

	switch (state) {
	case AMDGPU_IRQ_STATE_DISABLE:
		dc_hpd_int_cntl = RREG32(dc_hpd_int_cntl_reg);
		dc_hpd_int_cntl &= ~DC_HPD1_INT_CONTROL__DC_HPD1_INT_EN_MASK;
		WREG32(dc_hpd_int_cntl_reg, dc_hpd_int_cntl);
		break;
	case AMDGPU_IRQ_STATE_ENABLE:
		dc_hpd_int_cntl = RREG32(dc_hpd_int_cntl_reg);
		dc_hpd_int_cntl |= DC_HPD1_INT_CONTROL__DC_HPD1_INT_EN_MASK;
		WREG32(dc_hpd_int_cntl_reg, dc_hpd_int_cntl);
		break;
	default:
		break;
	}

	return 0;
}

static int dce_v8_0_set_crtc_interrupt_state(struct amdgpu_device *adev,
					     struct amdgpu_irq_src *src,
					     unsigned type,
					     enum amdgpu_interrupt_state state)
{
	switch (type) {
	case AMDGPU_CRTC_IRQ_VBLANK1:
		dce_v8_0_set_crtc_vblank_interrupt_state(adev, 0, state);
		break;
	case AMDGPU_CRTC_IRQ_VBLANK2:
		dce_v8_0_set_crtc_vblank_interrupt_state(adev,
1, state); 3224 break; 3225 case AMDGPU_CRTC_IRQ_VBLANK3: 3226 dce_v8_0_set_crtc_vblank_interrupt_state(adev, 2, state); 3227 break; 3228 case AMDGPU_CRTC_IRQ_VBLANK4: 3229 dce_v8_0_set_crtc_vblank_interrupt_state(adev, 3, state); 3230 break; 3231 case AMDGPU_CRTC_IRQ_VBLANK5: 3232 dce_v8_0_set_crtc_vblank_interrupt_state(adev, 4, state); 3233 break; 3234 case AMDGPU_CRTC_IRQ_VBLANK6: 3235 dce_v8_0_set_crtc_vblank_interrupt_state(adev, 5, state); 3236 break; 3237 case AMDGPU_CRTC_IRQ_VLINE1: 3238 dce_v8_0_set_crtc_vline_interrupt_state(adev, 0, state); 3239 break; 3240 case AMDGPU_CRTC_IRQ_VLINE2: 3241 dce_v8_0_set_crtc_vline_interrupt_state(adev, 1, state); 3242 break; 3243 case AMDGPU_CRTC_IRQ_VLINE3: 3244 dce_v8_0_set_crtc_vline_interrupt_state(adev, 2, state); 3245 break; 3246 case AMDGPU_CRTC_IRQ_VLINE4: 3247 dce_v8_0_set_crtc_vline_interrupt_state(adev, 3, state); 3248 break; 3249 case AMDGPU_CRTC_IRQ_VLINE5: 3250 dce_v8_0_set_crtc_vline_interrupt_state(adev, 4, state); 3251 break; 3252 case AMDGPU_CRTC_IRQ_VLINE6: 3253 dce_v8_0_set_crtc_vline_interrupt_state(adev, 5, state); 3254 break; 3255 default: 3256 break; 3257 } 3258 return 0; 3259 } 3260 3261 static int dce_v8_0_crtc_irq(struct amdgpu_device *adev, 3262 struct amdgpu_irq_src *source, 3263 struct amdgpu_iv_entry *entry) 3264 { 3265 unsigned crtc = entry->src_id - 1; 3266 uint32_t disp_int = RREG32(interrupt_status_offsets[crtc].reg); 3267 unsigned irq_type = amdgpu_crtc_idx_to_irq_type(adev, crtc); 3268 3269 switch (entry->src_data) { 3270 case 0: /* vblank */ 3271 if (disp_int & interrupt_status_offsets[crtc].vblank) 3272 WREG32(mmLB_VBLANK_STATUS + crtc_offsets[crtc], LB_VBLANK_STATUS__VBLANK_ACK_MASK); 3273 else 3274 DRM_DEBUG("IH: IH event w/o asserted irq bit?\n"); 3275 3276 if (amdgpu_irq_enabled(adev, source, irq_type)) { 3277 drm_handle_vblank(adev->ddev, crtc); 3278 } 3279 DRM_DEBUG("IH: D%d vblank\n", crtc + 1); 3280 3281 break; 3282 case 1: /* vline */ 3283 if (disp_int & interrupt_status_offsets[crtc].vline) 3284 WREG32(mmLB_VLINE_STATUS + crtc_offsets[crtc], LB_VLINE_STATUS__VLINE_ACK_MASK); 3285 else 3286 DRM_DEBUG("IH: IH event w/o asserted irq bit?\n"); 3287 3288 DRM_DEBUG("IH: D%d vline\n", crtc + 1); 3289 3290 break; 3291 default: 3292 DRM_DEBUG("Unhandled interrupt: %d %d\n", entry->src_id, entry->src_data); 3293 break; 3294 } 3295 3296 return 0; 3297 } 3298 3299 static int dce_v8_0_set_pageflip_interrupt_state(struct amdgpu_device *adev, 3300 struct amdgpu_irq_src *src, 3301 unsigned type, 3302 enum amdgpu_interrupt_state state) 3303 { 3304 u32 reg, reg_block; 3305 /* now deal with page flip IRQ */ 3306 switch (type) { 3307 case AMDGPU_PAGEFLIP_IRQ_D1: 3308 reg_block = CRTC0_REGISTER_OFFSET; 3309 break; 3310 case AMDGPU_PAGEFLIP_IRQ_D2: 3311 reg_block = CRTC1_REGISTER_OFFSET; 3312 break; 3313 case AMDGPU_PAGEFLIP_IRQ_D3: 3314 reg_block = CRTC2_REGISTER_OFFSET; 3315 break; 3316 case AMDGPU_PAGEFLIP_IRQ_D4: 3317 reg_block = CRTC3_REGISTER_OFFSET; 3318 break; 3319 case AMDGPU_PAGEFLIP_IRQ_D5: 3320 reg_block = CRTC4_REGISTER_OFFSET; 3321 break; 3322 case AMDGPU_PAGEFLIP_IRQ_D6: 3323 reg_block = CRTC5_REGISTER_OFFSET; 3324 break; 3325 default: 3326 DRM_ERROR("invalid pageflip crtc %d\n", type); 3327 return -EINVAL; 3328 } 3329 3330 reg = RREG32(mmGRPH_INTERRUPT_CONTROL + reg_block); 3331 if (state == AMDGPU_IRQ_STATE_DISABLE) 3332 WREG32(mmGRPH_INTERRUPT_CONTROL + reg_block, reg & ~GRPH_INTERRUPT_CONTROL__GRPH_PFLIP_INT_MASK_MASK); 3333 else 3334 WREG32(mmGRPH_INTERRUPT_CONTROL + reg_block, reg | 
GRPH_INTERRUPT_CONTROL__GRPH_PFLIP_INT_MASK_MASK); 3335 3336 return 0; 3337 } 3338 3339 static int dce_v8_0_pageflip_irq(struct amdgpu_device *adev, 3340 struct amdgpu_irq_src *source, 3341 struct amdgpu_iv_entry *entry) 3342 { 3343 int reg_block; 3344 unsigned long flags; 3345 unsigned crtc_id; 3346 struct amdgpu_crtc *amdgpu_crtc; 3347 struct amdgpu_flip_work *works; 3348 3349 crtc_id = (entry->src_id - 8) >> 1; 3350 amdgpu_crtc = adev->mode_info.crtcs[crtc_id]; 3351 3352 /* ack the interrupt */ 3353 switch(crtc_id){ 3354 case AMDGPU_PAGEFLIP_IRQ_D1: 3355 reg_block = CRTC0_REGISTER_OFFSET; 3356 break; 3357 case AMDGPU_PAGEFLIP_IRQ_D2: 3358 reg_block = CRTC1_REGISTER_OFFSET; 3359 break; 3360 case AMDGPU_PAGEFLIP_IRQ_D3: 3361 reg_block = CRTC2_REGISTER_OFFSET; 3362 break; 3363 case AMDGPU_PAGEFLIP_IRQ_D4: 3364 reg_block = CRTC3_REGISTER_OFFSET; 3365 break; 3366 case AMDGPU_PAGEFLIP_IRQ_D5: 3367 reg_block = CRTC4_REGISTER_OFFSET; 3368 break; 3369 case AMDGPU_PAGEFLIP_IRQ_D6: 3370 reg_block = CRTC5_REGISTER_OFFSET; 3371 break; 3372 default: 3373 DRM_ERROR("invalid pageflip crtc %d\n", crtc_id); 3374 return -EINVAL; 3375 } 3376 3377 if (RREG32(mmGRPH_INTERRUPT_STATUS + reg_block) & GRPH_INTERRUPT_STATUS__GRPH_PFLIP_INT_OCCURRED_MASK) 3378 WREG32(mmGRPH_INTERRUPT_STATUS + reg_block, GRPH_INTERRUPT_STATUS__GRPH_PFLIP_INT_CLEAR_MASK); 3379 3380 /* IRQ could occur when in initial stage */ 3381 if (amdgpu_crtc == NULL) 3382 return 0; 3383 3384 spin_lock_irqsave(&adev->ddev->event_lock, flags); 3385 works = amdgpu_crtc->pflip_works; 3386 if (amdgpu_crtc->pflip_status != AMDGPU_FLIP_SUBMITTED){ 3387 DRM_DEBUG_DRIVER("amdgpu_crtc->pflip_status = %d != " 3388 "AMDGPU_FLIP_SUBMITTED(%d)\n", 3389 amdgpu_crtc->pflip_status, 3390 AMDGPU_FLIP_SUBMITTED); 3391 spin_unlock_irqrestore(&adev->ddev->event_lock, flags); 3392 return 0; 3393 } 3394 3395 /* page flip completed. 
	amdgpu_crtc->pflip_status = AMDGPU_FLIP_NONE;
	amdgpu_crtc->pflip_works = NULL;

	/* wakeup userspace */
	if (works->event)
		drm_send_vblank_event(adev->ddev, crtc_id, works->event);

	spin_unlock_irqrestore(&adev->ddev->event_lock, flags);

	drm_vblank_put(adev->ddev, amdgpu_crtc->crtc_id);
	queue_work(amdgpu_crtc->pflip_queue, &works->unpin_work);

	return 0;
}

static int dce_v8_0_hpd_irq(struct amdgpu_device *adev,
			    struct amdgpu_irq_src *source,
			    struct amdgpu_iv_entry *entry)
{
	uint32_t disp_int, mask, int_control, tmp;
	unsigned hpd;

	if (entry->src_data >= adev->mode_info.num_hpd) {
		DRM_DEBUG("Unhandled interrupt: %d %d\n", entry->src_id, entry->src_data);
		return 0;
	}

	hpd = entry->src_data;
	disp_int = RREG32(interrupt_status_offsets[hpd].reg);
	mask = interrupt_status_offsets[hpd].hpd;
	int_control = hpd_int_control_offsets[hpd];

	if (disp_int & mask) {
		tmp = RREG32(int_control);
		tmp |= DC_HPD1_INT_CONTROL__DC_HPD1_INT_ACK_MASK;
		WREG32(int_control, tmp);
		schedule_work(&adev->hotplug_work);
		DRM_DEBUG("IH: HPD%d\n", hpd + 1);
	}

	return 0;
}

static int dce_v8_0_set_clockgating_state(void *handle,
					  enum amd_clockgating_state state)
{
	return 0;
}

static int dce_v8_0_set_powergating_state(void *handle,
					  enum amd_powergating_state state)
{
	return 0;
}

const struct amd_ip_funcs dce_v8_0_ip_funcs = {
	.early_init = dce_v8_0_early_init,
	.late_init = NULL,
	.sw_init = dce_v8_0_sw_init,
	.sw_fini = dce_v8_0_sw_fini,
	.hw_init = dce_v8_0_hw_init,
	.hw_fini = dce_v8_0_hw_fini,
	.suspend = dce_v8_0_suspend,
	.resume = dce_v8_0_resume,
	.is_idle = dce_v8_0_is_idle,
	.wait_for_idle = dce_v8_0_wait_for_idle,
	.soft_reset = dce_v8_0_soft_reset,
	.print_status = dce_v8_0_print_status,
	.set_clockgating_state = dce_v8_0_set_clockgating_state,
	.set_powergating_state = dce_v8_0_set_powergating_state,
};

static void
dce_v8_0_encoder_mode_set(struct drm_encoder *encoder,
			  struct drm_display_mode *mode,
			  struct drm_display_mode *adjusted_mode)
{
	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);

	amdgpu_encoder->pixel_clock = adjusted_mode->clock;

	/* need to call this here rather than in prepare() since we need some crtc info */
	amdgpu_atombios_encoder_dpms(encoder, DRM_MODE_DPMS_OFF);

	/* set scaler clears this on some chips */
	dce_v8_0_set_interleave(encoder->crtc, mode);

	if (amdgpu_atombios_encoder_get_encoder_mode(encoder) == ATOM_ENCODER_MODE_HDMI) {
		dce_v8_0_afmt_enable(encoder, true);
		dce_v8_0_afmt_setmode(encoder, adjusted_mode);
	}
}

static void dce_v8_0_encoder_prepare(struct drm_encoder *encoder)
{
	struct amdgpu_device *adev = encoder->dev->dev_private;
	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
	struct drm_connector *connector = amdgpu_get_connector_for_encoder(encoder);

	if ((amdgpu_encoder->active_device &
	     (ATOM_DEVICE_DFP_SUPPORT | ATOM_DEVICE_LCD_SUPPORT)) ||
	    (amdgpu_encoder_get_dp_bridge_encoder_id(encoder) !=
	     ENCODER_OBJECT_ID_NONE)) {
		struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
		if (dig) {
			dig->dig_encoder = dce_v8_0_pick_dig_encoder(encoder);
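			/* digital (DFP) outputs also pick up the AFMT block
			 * that matches the chosen DIG encoder
			 */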
			if (amdgpu_encoder->active_device & ATOM_DEVICE_DFP_SUPPORT)
				dig->afmt = adev->mode_info.afmt[dig->dig_encoder];
		}
	}

	amdgpu_atombios_scratch_regs_lock(adev, true);

	if (connector) {
		struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector);

		/* select the clock/data port if it uses a router */
		if (amdgpu_connector->router.cd_valid)
			amdgpu_i2c_router_select_cd_port(amdgpu_connector);

		/* turn eDP panel on for mode set */
		if (connector->connector_type == DRM_MODE_CONNECTOR_eDP)
			amdgpu_atombios_encoder_set_edp_panel_power(connector,
								    ATOM_TRANSMITTER_ACTION_POWER_ON);
	}

	/* this is needed for the pll/ss setup to work correctly in some cases */
	amdgpu_atombios_encoder_set_crtc_source(encoder);
	/* set up the FMT blocks */
	dce_v8_0_program_fmt(encoder);
}

static void dce_v8_0_encoder_commit(struct drm_encoder *encoder)
{
	struct drm_device *dev = encoder->dev;
	struct amdgpu_device *adev = dev->dev_private;

	/* need to call this here as we need the crtc set up */
	amdgpu_atombios_encoder_dpms(encoder, DRM_MODE_DPMS_ON);
	amdgpu_atombios_scratch_regs_lock(adev, false);
}

static void dce_v8_0_encoder_disable(struct drm_encoder *encoder)
{
	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
	struct amdgpu_encoder_atom_dig *dig;

	amdgpu_atombios_encoder_dpms(encoder, DRM_MODE_DPMS_OFF);

	if (amdgpu_atombios_encoder_is_digital(encoder)) {
		if (amdgpu_atombios_encoder_get_encoder_mode(encoder) == ATOM_ENCODER_MODE_HDMI)
			dce_v8_0_afmt_enable(encoder, false);
		dig = amdgpu_encoder->enc_priv;
		dig->dig_encoder = -1;
	}
	amdgpu_encoder->active_device = 0;
}

/* these are handled by the primary encoders */
static void dce_v8_0_ext_prepare(struct drm_encoder *encoder)
{

}

static void dce_v8_0_ext_commit(struct drm_encoder *encoder)
{

}

static void
dce_v8_0_ext_mode_set(struct drm_encoder *encoder,
		      struct drm_display_mode *mode,
		      struct drm_display_mode *adjusted_mode)
{

}

static void dce_v8_0_ext_disable(struct drm_encoder *encoder)
{

}

static void
dce_v8_0_ext_dpms(struct drm_encoder *encoder, int mode)
{

}

static bool dce_v8_0_ext_mode_fixup(struct drm_encoder *encoder,
				    const struct drm_display_mode *mode,
				    struct drm_display_mode *adjusted_mode)
{
	return true;
}

static const struct drm_encoder_helper_funcs dce_v8_0_ext_helper_funcs = {
	.dpms = dce_v8_0_ext_dpms,
	.mode_fixup = dce_v8_0_ext_mode_fixup,
	.prepare = dce_v8_0_ext_prepare,
	.mode_set = dce_v8_0_ext_mode_set,
	.commit = dce_v8_0_ext_commit,
	.disable = dce_v8_0_ext_disable,
	/* no detect for TMDS/LVDS yet */
};

static const struct drm_encoder_helper_funcs dce_v8_0_dig_helper_funcs = {
	.dpms = amdgpu_atombios_encoder_dpms,
	.mode_fixup = amdgpu_atombios_encoder_mode_fixup,
	.prepare = dce_v8_0_encoder_prepare,
	.mode_set = dce_v8_0_encoder_mode_set,
	.commit = dce_v8_0_encoder_commit,
	.disable = dce_v8_0_encoder_disable,
	.detect = amdgpu_atombios_encoder_dig_detect,
};

static const struct drm_encoder_helper_funcs dce_v8_0_dac_helper_funcs = {
	.dpms = amdgpu_atombios_encoder_dpms,
	.mode_fixup = amdgpu_atombios_encoder_mode_fixup,
	.prepare = dce_v8_0_encoder_prepare,
	.mode_set = dce_v8_0_encoder_mode_set,
	.commit = dce_v8_0_encoder_commit,
	.detect = amdgpu_atombios_encoder_dac_detect,
};

static void dce_v8_0_encoder_destroy(struct drm_encoder *encoder)
{
	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
	if (amdgpu_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT))
		amdgpu_atombios_encoder_fini_backlight(amdgpu_encoder);
	kfree(amdgpu_encoder->enc_priv);
	drm_encoder_cleanup(encoder);
	kfree(amdgpu_encoder);
}

static const struct drm_encoder_funcs dce_v8_0_encoder_funcs = {
	.destroy = dce_v8_0_encoder_destroy,
};

static void dce_v8_0_encoder_add(struct amdgpu_device *adev,
				 uint32_t encoder_enum,
				 uint32_t supported_device,
				 u16 caps)
{
	struct drm_device *dev = adev->ddev;
	struct drm_encoder *encoder;
	struct amdgpu_encoder *amdgpu_encoder;

	/* see if we already added it */
	list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
		amdgpu_encoder = to_amdgpu_encoder(encoder);
		if (amdgpu_encoder->encoder_enum == encoder_enum) {
			amdgpu_encoder->devices |= supported_device;
			return;
		}
	}

	/* add a new one */
	amdgpu_encoder = kzalloc(sizeof(struct amdgpu_encoder), GFP_KERNEL);
	if (!amdgpu_encoder)
		return;

	encoder = &amdgpu_encoder->base;
	switch (adev->mode_info.num_crtc) {
	case 1:
		encoder->possible_crtcs = 0x1;
		break;
	case 2:
	default:
		encoder->possible_crtcs = 0x3;
		break;
	case 4:
		encoder->possible_crtcs = 0xf;
		break;
	case 6:
		encoder->possible_crtcs = 0x3f;
		break;
	}

	amdgpu_encoder->enc_priv = NULL;

	amdgpu_encoder->encoder_enum = encoder_enum;
	amdgpu_encoder->encoder_id = (encoder_enum & OBJECT_ID_MASK) >> OBJECT_ID_SHIFT;
	amdgpu_encoder->devices = supported_device;
	amdgpu_encoder->rmx_type = RMX_OFF;
	amdgpu_encoder->underscan_type = UNDERSCAN_OFF;
	amdgpu_encoder->is_ext_encoder = false;
	amdgpu_encoder->caps = caps;

	switch (amdgpu_encoder->encoder_id) {
	case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC1:
	case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC2:
		drm_encoder_init(dev, encoder, &dce_v8_0_encoder_funcs,
				 DRM_MODE_ENCODER_DAC);
		drm_encoder_helper_add(encoder, &dce_v8_0_dac_helper_funcs);
		break;
	case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1:
	case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
	case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1:
	case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2:
	case ENCODER_OBJECT_ID_INTERNAL_UNIPHY3:
		if (amdgpu_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) {
			amdgpu_encoder->rmx_type = RMX_FULL;
			drm_encoder_init(dev, encoder, &dce_v8_0_encoder_funcs,
					 DRM_MODE_ENCODER_LVDS);
			amdgpu_encoder->enc_priv = amdgpu_atombios_encoder_get_lcd_info(amdgpu_encoder);
		} else if (amdgpu_encoder->devices & (ATOM_DEVICE_CRT_SUPPORT)) {
			drm_encoder_init(dev, encoder, &dce_v8_0_encoder_funcs,
					 DRM_MODE_ENCODER_DAC);
			amdgpu_encoder->enc_priv = amdgpu_atombios_encoder_get_dig_info(amdgpu_encoder);
		} else {
			drm_encoder_init(dev, encoder, &dce_v8_0_encoder_funcs,
					 DRM_MODE_ENCODER_TMDS);
			amdgpu_encoder->enc_priv = amdgpu_atombios_encoder_get_dig_info(amdgpu_encoder);
		}
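		/* LVDS, DAC and TMDS flavours above all share the DIG helpers */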
		drm_encoder_helper_add(encoder, &dce_v8_0_dig_helper_funcs);
		break;
	case ENCODER_OBJECT_ID_SI170B:
	case ENCODER_OBJECT_ID_CH7303:
	case ENCODER_OBJECT_ID_EXTERNAL_SDVOA:
	case ENCODER_OBJECT_ID_EXTERNAL_SDVOB:
	case ENCODER_OBJECT_ID_TITFP513:
	case ENCODER_OBJECT_ID_VT1623:
	case ENCODER_OBJECT_ID_HDMI_SI1930:
	case ENCODER_OBJECT_ID_TRAVIS:
	case ENCODER_OBJECT_ID_NUTMEG:
		/* these are handled by the primary encoders */
		amdgpu_encoder->is_ext_encoder = true;
		if (amdgpu_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT))
			drm_encoder_init(dev, encoder, &dce_v8_0_encoder_funcs,
					 DRM_MODE_ENCODER_LVDS);
		else if (amdgpu_encoder->devices & (ATOM_DEVICE_CRT_SUPPORT))
			drm_encoder_init(dev, encoder, &dce_v8_0_encoder_funcs,
					 DRM_MODE_ENCODER_DAC);
		else
			drm_encoder_init(dev, encoder, &dce_v8_0_encoder_funcs,
					 DRM_MODE_ENCODER_TMDS);
		drm_encoder_helper_add(encoder, &dce_v8_0_ext_helper_funcs);
		break;
	}
}

static const struct amdgpu_display_funcs dce_v8_0_display_funcs = {
	.set_vga_render_state = &dce_v8_0_set_vga_render_state,
	.bandwidth_update = &dce_v8_0_bandwidth_update,
	.vblank_get_counter = &dce_v8_0_vblank_get_counter,
	.vblank_wait = &dce_v8_0_vblank_wait,
	.is_display_hung = &dce_v8_0_is_display_hung,
	.backlight_set_level = &amdgpu_atombios_encoder_set_backlight_level,
	.backlight_get_level = &amdgpu_atombios_encoder_get_backlight_level,
	.hpd_sense = &dce_v8_0_hpd_sense,
	.hpd_set_polarity = &dce_v8_0_hpd_set_polarity,
	.hpd_get_gpio_reg = &dce_v8_0_hpd_get_gpio_reg,
	.page_flip = &dce_v8_0_page_flip,
	.page_flip_get_scanoutpos = &dce_v8_0_crtc_get_scanoutpos,
	.add_encoder = &dce_v8_0_encoder_add,
	.add_connector = &amdgpu_connector_add,
	.stop_mc_access = &dce_v8_0_stop_mc_access,
	.resume_mc_access = &dce_v8_0_resume_mc_access,
};

static void dce_v8_0_set_display_funcs(struct amdgpu_device *adev)
{
	if (adev->mode_info.funcs == NULL)
		adev->mode_info.funcs = &dce_v8_0_display_funcs;
}

static const struct amdgpu_irq_src_funcs dce_v8_0_crtc_irq_funcs = {
	.set = dce_v8_0_set_crtc_interrupt_state,
	.process = dce_v8_0_crtc_irq,
};

static const struct amdgpu_irq_src_funcs dce_v8_0_pageflip_irq_funcs = {
	.set = dce_v8_0_set_pageflip_interrupt_state,
	.process = dce_v8_0_pageflip_irq,
};

static const struct amdgpu_irq_src_funcs dce_v8_0_hpd_irq_funcs = {
	.set = dce_v8_0_set_hpd_interrupt_state,
	.process = dce_v8_0_hpd_irq,
};

static void dce_v8_0_set_irq_funcs(struct amdgpu_device *adev)
{
	adev->crtc_irq.num_types = AMDGPU_CRTC_IRQ_LAST;
	adev->crtc_irq.funcs = &dce_v8_0_crtc_irq_funcs;

	adev->pageflip_irq.num_types = AMDGPU_PAGEFLIP_IRQ_LAST;
	adev->pageflip_irq.funcs = &dce_v8_0_pageflip_irq_funcs;

	adev->hpd_irq.num_types = AMDGPU_HPD_LAST;
	adev->hpd_irq.funcs = &dce_v8_0_hpd_irq_funcs;
}