xref: /linux/drivers/gpu/drm/i915/display/intel_dpll_mgr.c (revision 58f6259b7a08f8d47d4629609703d358b042f0fd)
1 /*
2  * Copyright © 2006-2016 Intel Corporation
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice (including the next
12  * paragraph) shall be included in all copies or substantial portions of the
13  * Software.
14  *
15  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
18  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20  * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
21  * DEALINGS IN THE SOFTWARE.
22  */
23 
24 #include <linux/string_helpers.h>
25 
26 #include "i915_reg.h"
27 #include "intel_de.h"
28 #include "intel_display_types.h"
29 #include "intel_dkl_phy.h"
30 #include "intel_dkl_phy_regs.h"
31 #include "intel_dpio_phy.h"
32 #include "intel_dpll.h"
33 #include "intel_dpll_mgr.h"
34 #include "intel_hti.h"
35 #include "intel_mg_phy_regs.h"
36 #include "intel_pch_refclk.h"
37 #include "intel_tc.h"
38 
39 /**
40  * DOC: Display PLLs
41  *
42  * Display PLLs used for driving outputs vary by platform. While some have
43  * per-pipe or per-encoder dedicated PLLs, others allow the use of any PLL
44  * from a pool. In the latter scenario, it is possible that multiple pipes
45  * share a PLL if their configurations match.
46  *
47  * This file provides an abstraction over display PLLs. The function
48  * intel_shared_dpll_init() initializes the PLLs for the given platform.  The
49  * users of a PLL are tracked and that tracking is integrated with the atomic
 * modeset interface. During an atomic operation, required PLLs can be reserved
51  * for a given CRTC and encoder configuration by calling
52  * intel_reserve_shared_dplls() and previously reserved PLLs can be released
53  * with intel_release_shared_dplls().
54  * Changes to the users are first staged in the atomic state, and then made
55  * effective by calling intel_shared_dpll_swap_state() during the atomic
56  * commit phase.
57  */
58 
/* platform specific hooks for managing DPLLs */
struct intel_shared_dpll_funcs {
	/*
	 * Hook for enabling the pll, called from intel_enable_shared_dpll() if
	 * the pll is not already enabled.
	 */
	void (*enable)(struct drm_i915_private *i915,
		       struct intel_shared_dpll *pll);

	/*
	 * Hook for disabling the pll, called from intel_disable_shared_dpll()
	 * only when it is safe to disable the pll, i.e., there are no more
	 * tracked users for it.
	 */
	void (*disable)(struct drm_i915_private *i915,
			struct intel_shared_dpll *pll);

	/*
	 * Hook for reading the values currently programmed to the DPLL
	 * registers. This is used for initial hw state readout and state
	 * verification after a mode set. Returns whether the pll is enabled,
	 * filling @hw_state with the register values read.
	 */
	bool (*get_hw_state)(struct drm_i915_private *i915,
			     struct intel_shared_dpll *pll,
			     struct intel_dpll_hw_state *hw_state);

	/*
	 * Hook for calculating the pll's output frequency based on its passed
	 * in state. NOTE(review): units appear to be kHz judging by
	 * hsw_ddi_wrpll_get_freq() — confirm for other implementations.
	 */
	int (*get_freq)(struct drm_i915_private *i915,
			const struct intel_shared_dpll *pll,
			const struct intel_dpll_hw_state *pll_state);
};
93 
/*
 * Platform specific manager for the pool of shared DPLLs. One instance per
 * platform family (e.g. pch_pll_mgr) bundles the PLL table with the hooks
 * used by the generic shared-DPLL code.
 */
struct intel_dpll_mgr {
	/* Table of PLLs for this platform, terminated by an empty entry. */
	const struct dpll_info *dpll_info;

	/* Compute the DPLL state for @crtc/@encoder; 0 on success. */
	int (*compute_dplls)(struct intel_atomic_state *state,
			     struct intel_crtc *crtc,
			     struct intel_encoder *encoder);
	/* Reserve the DPLL(s) for @crtc in the atomic state; 0 on success. */
	int (*get_dplls)(struct intel_atomic_state *state,
			 struct intel_crtc *crtc,
			 struct intel_encoder *encoder);
	/* Release the DPLL(s) previously reserved for @crtc. */
	void (*put_dplls)(struct intel_atomic_state *state,
			  struct intel_crtc *crtc);
	/* Optional: switch the active DPLL tracking (not used by all mgrs). */
	void (*update_active_dpll)(struct intel_atomic_state *state,
				   struct intel_crtc *crtc,
				   struct intel_encoder *encoder);
	/* Optional: refresh the cached reference clock frequencies. */
	void (*update_ref_clks)(struct drm_i915_private *i915);
	/* Dump @hw_state to the kms debug log. */
	void (*dump_hw_state)(struct drm_i915_private *dev_priv,
			      const struct intel_dpll_hw_state *hw_state);
};
112 
113 static void
114 intel_atomic_duplicate_dpll_state(struct drm_i915_private *dev_priv,
115 				  struct intel_shared_dpll_state *shared_dpll)
116 {
117 	enum intel_dpll_id i;
118 
119 	/* Copy shared dpll state */
120 	for (i = 0; i < dev_priv->display.dpll.num_shared_dpll; i++) {
121 		struct intel_shared_dpll *pll = &dev_priv->display.dpll.shared_dplls[i];
122 
123 		shared_dpll[i] = pll->state;
124 	}
125 }
126 
/*
 * Return the atomic state's copy of the shared DPLL state, lazily
 * duplicating the committed state into @s on first access.
 */
static struct intel_shared_dpll_state *
intel_atomic_get_shared_dpll_state(struct drm_atomic_state *s)
{
	struct intel_atomic_state *state = to_intel_atomic_state(s);

	/* Global PLL state is protected by connection_mutex. */
	drm_WARN_ON(s->dev, !drm_modeset_is_locked(&s->dev->mode_config.connection_mutex));

	/* Duplicate the committed PLL state into @state on first use. */
	if (!state->dpll_set) {
		state->dpll_set = true;

		intel_atomic_duplicate_dpll_state(to_i915(s->dev),
						  state->shared_dpll);
	}

	return state->shared_dpll;
}
143 
/**
 * intel_get_shared_dpll_by_id - get a DPLL given its id
 * @dev_priv: i915 device instance
 * @id: pll id
 *
 * Returns:
 * A pointer to the DPLL with @id
 */
struct intel_shared_dpll *
intel_get_shared_dpll_by_id(struct drm_i915_private *dev_priv,
			    enum intel_dpll_id id)
{
	/* No bounds checking: @id must be valid for this platform. */
	return &dev_priv->display.dpll.shared_dplls[id];
}
158 
/* For ILK+ */
/*
 * State checker: warn if the hw enable state of @pll does not match the
 * expected @state (true = enabled).
 */
void assert_shared_dpll(struct drm_i915_private *dev_priv,
			struct intel_shared_dpll *pll,
			bool state)
{
	bool cur_state;
	struct intel_dpll_hw_state hw_state;

	/* Bail out (with a warning) if the caller passed no PLL at all. */
	if (drm_WARN(&dev_priv->drm, !pll,
		     "asserting DPLL %s with no DPLL\n", str_on_off(state)))
		return;

	/* Compare the actual hw enable state against the expectation. */
	cur_state = intel_dpll_get_hw_state(dev_priv, pll, &hw_state);
	I915_STATE_WARN(dev_priv, cur_state != state,
			"%s assertion failure (expected %s, current %s)\n",
			pll->info->name, str_on_off(state),
			str_on_off(cur_state));
}
177 
178 static enum tc_port icl_pll_id_to_tc_port(enum intel_dpll_id id)
179 {
180 	return TC_PORT_1 + id - DPLL_ID_ICL_MGPLL1;
181 }
182 
183 enum intel_dpll_id icl_tc_port_to_pll_id(enum tc_port tc_port)
184 {
185 	return tc_port - TC_PORT_1 + DPLL_ID_ICL_MGPLL1;
186 }
187 
188 static i915_reg_t
189 intel_combo_pll_enable_reg(struct drm_i915_private *i915,
190 			   struct intel_shared_dpll *pll)
191 {
192 	if (IS_DG1(i915))
193 		return DG1_DPLL_ENABLE(pll->info->id);
194 	else if (IS_JSL_EHL(i915) && (pll->info->id == DPLL_ID_EHL_DPLL4))
195 		return MG_PLL_ENABLE(0);
196 
197 	return ICL_DPLL_ENABLE(pll->info->id);
198 }
199 
200 static i915_reg_t
201 intel_tc_pll_enable_reg(struct drm_i915_private *i915,
202 			struct intel_shared_dpll *pll)
203 {
204 	const enum intel_dpll_id id = pll->info->id;
205 	enum tc_port tc_port = icl_pll_id_to_tc_port(id);
206 
207 	if (IS_ALDERLAKE_P(i915))
208 		return ADLP_PORTTC_PLL_ENABLE(tc_port);
209 
210 	return MG_PLL_ENABLE(tc_port);
211 }
212 
/**
 * intel_enable_shared_dpll - enable a CRTC's shared DPLL
 * @crtc_state: CRTC, and its state, which has a shared DPLL
 *
 * Enable the shared DPLL used by @crtc.
 */
void intel_enable_shared_dpll(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	struct intel_shared_dpll *pll = crtc_state->shared_dpll;
	unsigned int pipe_mask = BIT(crtc->pipe);
	unsigned int old_mask;

	if (drm_WARN_ON(&dev_priv->drm, pll == NULL))
		return;

	mutex_lock(&dev_priv->display.dpll.lock);
	old_mask = pll->active_mask;

	/* The pipe must hold a reference and must not be active already. */
	if (drm_WARN_ON(&dev_priv->drm, !(pll->state.pipe_mask & pipe_mask)) ||
	    drm_WARN_ON(&dev_priv->drm, pll->active_mask & pipe_mask))
		goto out;

	pll->active_mask |= pipe_mask;

	drm_dbg_kms(&dev_priv->drm,
		    "enable %s (active 0x%x, on? %d) for [CRTC:%d:%s]\n",
		    pll->info->name, pll->active_mask, pll->on,
		    crtc->base.base.id, crtc->base.name);

	/* Another pipe already enabled the hw; only sanity check its state. */
	if (old_mask) {
		drm_WARN_ON(&dev_priv->drm, !pll->on);
		assert_shared_dpll_enabled(dev_priv, pll);
		goto out;
	}
	drm_WARN_ON(&dev_priv->drm, pll->on);

	/* First user: actually enable the hardware. */
	drm_dbg_kms(&dev_priv->drm, "enabling %s\n", pll->info->name);
	pll->info->funcs->enable(dev_priv, pll);
	pll->on = true;

out:
	mutex_unlock(&dev_priv->display.dpll.lock);
}
258 
/**
 * intel_disable_shared_dpll - disable a CRTC's shared DPLL
 * @crtc_state: CRTC, and its state, which has a shared DPLL
 *
 * Disable the shared DPLL used by @crtc.
 */
void intel_disable_shared_dpll(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	struct intel_shared_dpll *pll = crtc_state->shared_dpll;
	unsigned int pipe_mask = BIT(crtc->pipe);

	/* PCH only available on ILK+ */
	if (DISPLAY_VER(dev_priv) < 5)
		return;

	if (pll == NULL)
		return;

	mutex_lock(&dev_priv->display.dpll.lock);
	/* Disabling a PLL this pipe never activated is a bug. */
	if (drm_WARN(&dev_priv->drm, !(pll->active_mask & pipe_mask),
		     "%s not used by [CRTC:%d:%s]\n", pll->info->name,
		     crtc->base.base.id, crtc->base.name))
		goto out;

	drm_dbg_kms(&dev_priv->drm,
		    "disable %s (active 0x%x, on? %d) for [CRTC:%d:%s]\n",
		    pll->info->name, pll->active_mask, pll->on,
		    crtc->base.base.id, crtc->base.name);

	assert_shared_dpll_enabled(dev_priv, pll);
	drm_WARN_ON(&dev_priv->drm, !pll->on);

	/* Keep the hw enabled while other pipes still use the PLL. */
	pll->active_mask &= ~pipe_mask;
	if (pll->active_mask)
		goto out;

	/* Last user gone: actually disable the hardware. */
	drm_dbg_kms(&dev_priv->drm, "disabling %s\n", pll->info->name);
	pll->info->funcs->disable(dev_priv, pll);
	pll->on = false;

out:
	mutex_unlock(&dev_priv->display.dpll.lock);
}
304 
/*
 * Find a PLL among the ids in @dpll_mask for @crtc: prefer an in-use PLL
 * whose staged hw state matches @pll_state exactly (so it can be shared),
 * otherwise fall back to the first unused one. Returns NULL if neither
 * exists.
 */
static struct intel_shared_dpll *
intel_find_shared_dpll(struct intel_atomic_state *state,
		       const struct intel_crtc *crtc,
		       const struct intel_dpll_hw_state *pll_state,
		       unsigned long dpll_mask)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	struct intel_shared_dpll *pll, *unused_pll = NULL;
	struct intel_shared_dpll_state *shared_dpll;
	enum intel_dpll_id i;

	shared_dpll = intel_atomic_get_shared_dpll_state(&state->base);

	drm_WARN_ON(&dev_priv->drm, dpll_mask & ~(BIT(I915_NUM_PLLS) - 1));

	for_each_set_bit(i, &dpll_mask, I915_NUM_PLLS) {
		pll = &dev_priv->display.dpll.shared_dplls[i];

		/* Only want to check enabled timings first */
		if (shared_dpll[i].pipe_mask == 0) {
			/* Remember the first free PLL as a fallback. */
			if (!unused_pll)
				unused_pll = pll;
			continue;
		}

		/* An in-use PLL can be shared iff its state matches exactly. */
		if (memcmp(pll_state,
			   &shared_dpll[i].hw_state,
			   sizeof(*pll_state)) == 0) {
			drm_dbg_kms(&dev_priv->drm,
				    "[CRTC:%d:%s] sharing existing %s (pipe mask 0x%x, active 0x%x)\n",
				    crtc->base.base.id, crtc->base.name,
				    pll->info->name,
				    shared_dpll[i].pipe_mask,
				    pll->active_mask);
			return pll;
		}
	}

	/* Ok no matching timings, maybe there's a free one? */
	if (unused_pll) {
		drm_dbg_kms(&dev_priv->drm, "[CRTC:%d:%s] allocated %s\n",
			    crtc->base.base.id, crtc->base.name,
			    unused_pll->info->name);
		return unused_pll;
	}

	return NULL;
}
353 
354 /**
355  * intel_reference_shared_dpll_crtc - Get a DPLL reference for a CRTC
356  * @crtc: CRTC on which behalf the reference is taken
357  * @pll: DPLL for which the reference is taken
358  * @shared_dpll_state: the DPLL atomic state in which the reference is tracked
359  *
360  * Take a reference for @pll tracking the use of it by @crtc.
361  */
362 static void
363 intel_reference_shared_dpll_crtc(const struct intel_crtc *crtc,
364 				 const struct intel_shared_dpll *pll,
365 				 struct intel_shared_dpll_state *shared_dpll_state)
366 {
367 	struct drm_i915_private *i915 = to_i915(crtc->base.dev);
368 
369 	drm_WARN_ON(&i915->drm, (shared_dpll_state->pipe_mask & BIT(crtc->pipe)) != 0);
370 
371 	shared_dpll_state->pipe_mask |= BIT(crtc->pipe);
372 
373 	drm_dbg_kms(&i915->drm, "[CRTC:%d:%s] reserving %s\n",
374 		    crtc->base.base.id, crtc->base.name, pll->info->name);
375 }
376 
377 static void
378 intel_reference_shared_dpll(struct intel_atomic_state *state,
379 			    const struct intel_crtc *crtc,
380 			    const struct intel_shared_dpll *pll,
381 			    const struct intel_dpll_hw_state *pll_state)
382 {
383 	struct intel_shared_dpll_state *shared_dpll;
384 	const enum intel_dpll_id id = pll->info->id;
385 
386 	shared_dpll = intel_atomic_get_shared_dpll_state(&state->base);
387 
388 	if (shared_dpll[id].pipe_mask == 0)
389 		shared_dpll[id].hw_state = *pll_state;
390 
391 	intel_reference_shared_dpll_crtc(crtc, pll, &shared_dpll[id]);
392 }
393 
394 /**
395  * intel_unreference_shared_dpll_crtc - Drop a DPLL reference for a CRTC
396  * @crtc: CRTC on which behalf the reference is dropped
397  * @pll: DPLL for which the reference is dropped
398  * @shared_dpll_state: the DPLL atomic state in which the reference is tracked
399  *
400  * Drop a reference for @pll tracking the end of use of it by @crtc.
401  */
402 void
403 intel_unreference_shared_dpll_crtc(const struct intel_crtc *crtc,
404 				   const struct intel_shared_dpll *pll,
405 				   struct intel_shared_dpll_state *shared_dpll_state)
406 {
407 	struct drm_i915_private *i915 = to_i915(crtc->base.dev);
408 
409 	drm_WARN_ON(&i915->drm, (shared_dpll_state->pipe_mask & BIT(crtc->pipe)) == 0);
410 
411 	shared_dpll_state->pipe_mask &= ~BIT(crtc->pipe);
412 
413 	drm_dbg_kms(&i915->drm, "[CRTC:%d:%s] releasing %s\n",
414 		    crtc->base.base.id, crtc->base.name, pll->info->name);
415 }
416 
417 static void intel_unreference_shared_dpll(struct intel_atomic_state *state,
418 					  const struct intel_crtc *crtc,
419 					  const struct intel_shared_dpll *pll)
420 {
421 	struct intel_shared_dpll_state *shared_dpll;
422 	const enum intel_dpll_id id = pll->info->id;
423 
424 	shared_dpll = intel_atomic_get_shared_dpll_state(&state->base);
425 
426 	intel_unreference_shared_dpll_crtc(crtc, pll, &shared_dpll[id]);
427 }
428 
429 static void intel_put_dpll(struct intel_atomic_state *state,
430 			   struct intel_crtc *crtc)
431 {
432 	const struct intel_crtc_state *old_crtc_state =
433 		intel_atomic_get_old_crtc_state(state, crtc);
434 	struct intel_crtc_state *new_crtc_state =
435 		intel_atomic_get_new_crtc_state(state, crtc);
436 
437 	new_crtc_state->shared_dpll = NULL;
438 
439 	if (!old_crtc_state->shared_dpll)
440 		return;
441 
442 	intel_unreference_shared_dpll(state, crtc, old_crtc_state->shared_dpll);
443 }
444 
/**
 * intel_shared_dpll_swap_state - make atomic DPLL configuration effective
 * @state: atomic state
 *
 * This is the dpll version of drm_atomic_helper_swap_state() since the
 * helper does not handle driver-specific global state.
 *
 * For consistency with atomic helpers this function does a complete swap,
 * i.e. it also puts the current state into @state, even though there is no
 * need for that at this moment.
 */
void intel_shared_dpll_swap_state(struct intel_atomic_state *state)
{
	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
	struct intel_shared_dpll_state *shared_dpll = state->shared_dpll;
	enum intel_dpll_id i;

	/* Nothing was duplicated into @state, so there is nothing to swap. */
	if (!state->dpll_set)
		return;

	for (i = 0; i < dev_priv->display.dpll.num_shared_dpll; i++) {
		struct intel_shared_dpll *pll =
			&dev_priv->display.dpll.shared_dplls[i];

		/* Full swap: the old state ends up in @state (see above). */
		swap(pll->state, shared_dpll[i]);
	}
}
472 
/*
 * Read the PCH DPLL and FP divider registers into @hw_state.
 * Returns whether the PLL is enabled (VCO running).
 */
static bool ibx_pch_dpll_get_hw_state(struct drm_i915_private *dev_priv,
				      struct intel_shared_dpll *pll,
				      struct intel_dpll_hw_state *hw_state)
{
	const enum intel_dpll_id id = pll->info->id;
	intel_wakeref_t wakeref;
	u32 val;

	/* Skip the readout entirely if the display power domain is off. */
	wakeref = intel_display_power_get_if_enabled(dev_priv,
						     POWER_DOMAIN_DISPLAY_CORE);
	if (!wakeref)
		return false;

	val = intel_de_read(dev_priv, PCH_DPLL(id));
	hw_state->dpll = val;
	hw_state->fp0 = intel_de_read(dev_priv, PCH_FP0(id));
	hw_state->fp1 = intel_de_read(dev_priv, PCH_FP1(id));

	intel_display_power_put(dev_priv, POWER_DOMAIN_DISPLAY_CORE, wakeref);

	return val & DPLL_VCO_ENABLE;
}
495 
496 static void ibx_assert_pch_refclk_enabled(struct drm_i915_private *dev_priv)
497 {
498 	u32 val;
499 	bool enabled;
500 
501 	val = intel_de_read(dev_priv, PCH_DREF_CONTROL);
502 	enabled = !!(val & (DREF_SSC_SOURCE_MASK | DREF_NONSPREAD_SOURCE_MASK |
503 			    DREF_SUPERSPREAD_SOURCE_MASK));
504 	I915_STATE_WARN(dev_priv, !enabled,
505 			"PCH refclk assertion failure, should be active but is disabled\n");
506 }
507 
/* Enable a PCH DPLL: program the FP dividers, then the DPLL itself. */
static void ibx_pch_dpll_enable(struct drm_i915_private *dev_priv,
				struct intel_shared_dpll *pll)
{
	const enum intel_dpll_id id = pll->info->id;

	/* PCH refclock must be enabled first */
	ibx_assert_pch_refclk_enabled(dev_priv);

	/* Program the dividers before writing the DPLL control register. */
	intel_de_write(dev_priv, PCH_FP0(id), pll->state.hw_state.fp0);
	intel_de_write(dev_priv, PCH_FP1(id), pll->state.hw_state.fp1);

	intel_de_write(dev_priv, PCH_DPLL(id), pll->state.hw_state.dpll);

	/* Wait for the clocks to stabilize. */
	intel_de_posting_read(dev_priv, PCH_DPLL(id));
	udelay(150);

	/* The pixel multiplier can only be updated once the
	 * DPLL is enabled and the clocks are stable.
	 *
	 * So write it again.
	 */
	intel_de_write(dev_priv, PCH_DPLL(id), pll->state.hw_state.dpll);
	intel_de_posting_read(dev_priv, PCH_DPLL(id));
	udelay(200);
}
534 
/* Disable a PCH DPLL by clearing its whole control register. */
static void ibx_pch_dpll_disable(struct drm_i915_private *dev_priv,
				 struct intel_shared_dpll *pll)
{
	const enum intel_dpll_id id = pll->info->id;

	/* Writing 0 clears DPLL_VCO_ENABLE along with the rest of the config. */
	intel_de_write(dev_priv, PCH_DPLL(id), 0);
	intel_de_posting_read(dev_priv, PCH_DPLL(id));
	udelay(200);
}
544 
/*
 * No-op .compute_dplls() hook for PCH: the dpll_hw_state is presumably
 * filled in elsewhere before ibx_get_dpll() runs — NOTE(review): confirm
 * against the generic crtc state computation.
 */
static int ibx_compute_dpll(struct intel_atomic_state *state,
			    struct intel_crtc *crtc,
			    struct intel_encoder *encoder)
{
	return 0;
}
551 
/*
 * Reserve a PCH DPLL for @crtc. On IBX the mapping is fixed per pipe; on
 * later PCHs any free/matching PLL from the pool may be used.
 * Returns 0 on success, -EINVAL if no PLL is available.
 */
static int ibx_get_dpll(struct intel_atomic_state *state,
			struct intel_crtc *crtc,
			struct intel_encoder *encoder)
{
	struct intel_crtc_state *crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	struct intel_shared_dpll *pll;
	enum intel_dpll_id i;

	if (HAS_PCH_IBX(dev_priv)) {
		/* Ironlake PCH has a fixed PLL->PCH pipe mapping. */
		i = (enum intel_dpll_id) crtc->pipe;
		pll = &dev_priv->display.dpll.shared_dplls[i];

		drm_dbg_kms(&dev_priv->drm,
			    "[CRTC:%d:%s] using pre-allocated %s\n",
			    crtc->base.base.id, crtc->base.name,
			    pll->info->name);
	} else {
		pll = intel_find_shared_dpll(state, crtc,
					     &crtc_state->dpll_hw_state,
					     BIT(DPLL_ID_PCH_PLL_B) |
					     BIT(DPLL_ID_PCH_PLL_A));
	}

	if (!pll)
		return -EINVAL;

	/* reference the pll */
	intel_reference_shared_dpll(state, crtc,
				    pll, &crtc_state->dpll_hw_state);

	crtc_state->shared_dpll = pll;

	return 0;
}
589 
590 static void ibx_dump_hw_state(struct drm_i915_private *dev_priv,
591 			      const struct intel_dpll_hw_state *hw_state)
592 {
593 	drm_dbg_kms(&dev_priv->drm,
594 		    "dpll_hw_state: dpll: 0x%x, dpll_md: 0x%x, "
595 		    "fp0: 0x%x, fp1: 0x%x\n",
596 		    hw_state->dpll,
597 		    hw_state->dpll_md,
598 		    hw_state->fp0,
599 		    hw_state->fp1);
600 }
601 
/* PCH DPLL hooks; no .get_freq readback is provided for these PLLs. */
static const struct intel_shared_dpll_funcs ibx_pch_dpll_funcs = {
	.enable = ibx_pch_dpll_enable,
	.disable = ibx_pch_dpll_disable,
	.get_hw_state = ibx_pch_dpll_get_hw_state,
};
607 
/* PCH DPLL pool; the empty initializer terminates the table. */
static const struct dpll_info pch_plls[] = {
	{ "PCH DPLL A", &ibx_pch_dpll_funcs, DPLL_ID_PCH_PLL_A, 0 },
	{ "PCH DPLL B", &ibx_pch_dpll_funcs, DPLL_ID_PCH_PLL_B, 0 },
	{ },
};
613 
/* DPLL manager for PCH platforms; optional hooks are left unset. */
static const struct intel_dpll_mgr pch_pll_mgr = {
	.dpll_info = pch_plls,
	.compute_dplls = ibx_compute_dpll,
	.get_dplls = ibx_get_dpll,
	.put_dplls = intel_put_dpll,
	.dump_hw_state = ibx_dump_hw_state,
};
621 
/*
 * Enable a HSW WRPLL by writing the staged config (which includes the
 * enable bit, see hsw_ddi_wrpll_compute_dpll()).
 */
static void hsw_ddi_wrpll_enable(struct drm_i915_private *dev_priv,
				 struct intel_shared_dpll *pll)
{
	const enum intel_dpll_id id = pll->info->id;

	intel_de_write(dev_priv, WRPLL_CTL(id), pll->state.hw_state.wrpll);
	intel_de_posting_read(dev_priv, WRPLL_CTL(id));
	/* Give the PLL time to settle after enabling. */
	udelay(20);
}
631 
/* Enable the HSW SPLL by writing the staged config (includes enable bit). */
static void hsw_ddi_spll_enable(struct drm_i915_private *dev_priv,
				struct intel_shared_dpll *pll)
{
	intel_de_write(dev_priv, SPLL_CTL, pll->state.hw_state.spll);
	intel_de_posting_read(dev_priv, SPLL_CTL);
	/* Give the PLL time to settle after enabling. */
	udelay(20);
}
639 
static void hsw_ddi_wrpll_disable(struct drm_i915_private *dev_priv,
				  struct intel_shared_dpll *pll)
{
	const enum intel_dpll_id id = pll->info->id;

	/* Clear only the enable bit, leaving the divider config intact. */
	intel_de_rmw(dev_priv, WRPLL_CTL(id), WRPLL_PLL_ENABLE, 0);
	intel_de_posting_read(dev_priv, WRPLL_CTL(id));

	/*
	 * Try to set up the PCH reference clock once all DPLLs
	 * that depend on it have been shut down.
	 */
	if (dev_priv->display.dpll.pch_ssc_use & BIT(id))
		intel_init_pch_refclk(dev_priv);
}
655 
656 static void hsw_ddi_spll_disable(struct drm_i915_private *dev_priv,
657 				 struct intel_shared_dpll *pll)
658 {
659 	enum intel_dpll_id id = pll->info->id;
660 
661 	intel_de_rmw(dev_priv, SPLL_CTL, SPLL_PLL_ENABLE, 0);
662 	intel_de_posting_read(dev_priv, SPLL_CTL);
663 
664 	/*
665 	 * Try to set up the PCH reference clock once all DPLLs
666 	 * that depend on it have been shut down.
667 	 */
668 	if (dev_priv->display.dpll.pch_ssc_use & BIT(id))
669 		intel_init_pch_refclk(dev_priv);
670 }
671 
/*
 * Read WRPLL_CTL into @hw_state. Returns whether the WRPLL is enabled.
 */
static bool hsw_ddi_wrpll_get_hw_state(struct drm_i915_private *dev_priv,
				       struct intel_shared_dpll *pll,
				       struct intel_dpll_hw_state *hw_state)
{
	const enum intel_dpll_id id = pll->info->id;
	intel_wakeref_t wakeref;
	u32 val;

	/* Skip the readout entirely if the display power domain is off. */
	wakeref = intel_display_power_get_if_enabled(dev_priv,
						     POWER_DOMAIN_DISPLAY_CORE);
	if (!wakeref)
		return false;

	val = intel_de_read(dev_priv, WRPLL_CTL(id));
	hw_state->wrpll = val;

	intel_display_power_put(dev_priv, POWER_DOMAIN_DISPLAY_CORE, wakeref);

	return val & WRPLL_PLL_ENABLE;
}
692 
/*
 * Read SPLL_CTL into @hw_state. Returns whether the SPLL is enabled.
 */
static bool hsw_ddi_spll_get_hw_state(struct drm_i915_private *dev_priv,
				      struct intel_shared_dpll *pll,
				      struct intel_dpll_hw_state *hw_state)
{
	intel_wakeref_t wakeref;
	u32 val;

	/* Skip the readout entirely if the display power domain is off. */
	wakeref = intel_display_power_get_if_enabled(dev_priv,
						     POWER_DOMAIN_DISPLAY_CORE);
	if (!wakeref)
		return false;

	val = intel_de_read(dev_priv, SPLL_CTL);
	hw_state->spll = val;

	intel_display_power_put(dev_priv, POWER_DOMAIN_DISPLAY_CORE, wakeref);

	return val & SPLL_PLL_ENABLE;
}
712 
/* LCPLL reference frequency (MHz), and the same value in 100 Hz units. */
#define LC_FREQ 2700
#define LC_FREQ_2K U64_C(LC_FREQ * 2000)

/* Post divider range: even values only. */
#define P_MIN 2
#define P_MAX 64
#define P_INC 2

/* Constraints for PLL good behavior */
#define REF_MIN 48
#define REF_MAX 400
#define VCO_MIN 2400
#define VCO_MAX 4800

/* A WRPLL divider candidate: post (p), feedback (n2) and ref (r2) values. */
struct hsw_wrpll_rnp {
	unsigned p, n2, r2;
};
729 
/*
 * Per-frequency PPM budget used by hsw_wrpll_update_rnp() when choosing
 * WRPLL dividers for @clock (in Hz). Clocks not listed in the table get
 * the default 1000 ppm budget.
 */
static unsigned hsw_wrpll_get_budget_for_freq(int clock)
{
	static const struct {
		int freq;
		unsigned budget;
	} budgets[] = {
		{ 25175000, 0 },     { 25200000, 0 },     { 27000000, 0 },
		{ 27027000, 0 },     { 37762500, 0 },     { 37800000, 0 },
		{ 40500000, 0 },     { 40541000, 0 },     { 54000000, 0 },
		{ 54054000, 0 },     { 59341000, 0 },     { 59400000, 0 },
		{ 72000000, 0 },     { 74176000, 0 },     { 74250000, 0 },
		{ 81000000, 0 },     { 81081000, 0 },     { 89012000, 0 },
		{ 89100000, 0 },     { 108000000, 0 },    { 108108000, 0 },
		{ 111264000, 0 },    { 111375000, 0 },    { 148352000, 0 },
		{ 148500000, 0 },    { 162000000, 0 },    { 162162000, 0 },
		{ 222525000, 0 },    { 222750000, 0 },    { 296703000, 0 },
		{ 297000000, 0 },
		{ 233500000, 1500 }, { 245250000, 1500 }, { 247750000, 1500 },
		{ 253250000, 1500 }, { 298000000, 1500 },
		{ 169128000, 2000 }, { 169500000, 2000 }, { 179500000, 2000 },
		{ 202000000, 2000 },
		{ 256250000, 4000 }, { 262500000, 4000 }, { 270000000, 4000 },
		{ 272500000, 4000 }, { 273750000, 4000 }, { 280750000, 4000 },
		{ 281250000, 4000 }, { 286000000, 4000 }, { 291750000, 4000 },
		{ 267250000, 5000 }, { 268500000, 5000 },
	};
	size_t i;

	for (i = 0; i < sizeof(budgets) / sizeof(budgets[0]); i++) {
		if (budgets[i].freq == clock)
			return budgets[i].budget;
	}

	return 1000;
}
793 
/*
 * Consider (r2, n2, p) as a WRPLL divider candidate for @freq2k (target
 * clock in 100 Hz units, see hsw_ddi_calculate_wrpll()) and keep it in
 * @best if it beats the current best according to the rules below. All
 * comparisons are done in fixed point to avoid divisions.
 */
static void hsw_wrpll_update_rnp(u64 freq2k, unsigned int budget,
				 unsigned int r2, unsigned int n2,
				 unsigned int p,
				 struct hsw_wrpll_rnp *best)
{
	u64 a, b, c, d, diff, diff_best;

	/* No best (r,n,p) yet */
	if (best->p == 0) {
		best->p = p;
		best->n2 = n2;
		best->r2 = r2;
		return;
	}

	/*
	 * Output clock is (LC_FREQ_2K / 2000) * N / (P * R), which compares to
	 * freq2k.
	 *
	 * delta = 1e6 *
	 *	   abs(freq2k - (LC_FREQ_2K * n2/(p * r2))) /
	 *	   freq2k;
	 *
	 * and we would like delta <= budget.
	 *
	 * If the discrepancy is above the PPM-based budget, always prefer to
	 * improve upon the previous solution.  However, if you're within the
	 * budget, try to maximize Ref * VCO, that is N / (P * R^2).
	 */
	a = freq2k * budget * p * r2;
	b = freq2k * budget * best->p * best->r2;
	diff = abs_diff(freq2k * p * r2, LC_FREQ_2K * n2);
	diff_best = abs_diff(freq2k * best->p * best->r2,
			     LC_FREQ_2K * best->n2);
	c = 1000000 * diff;
	d = 1000000 * diff_best;

	if (a < c && b < d) {
		/* If both are above the budget, pick the closer */
		if (best->p * best->r2 * diff < p * r2 * diff_best) {
			best->p = p;
			best->n2 = n2;
			best->r2 = r2;
		}
	} else if (a >= c && b < d) {
		/* If A is below the threshold but B is above it?  Update. */
		best->p = p;
		best->n2 = n2;
		best->r2 = r2;
	} else if (a >= c && b >= d) {
		/* Both are below the limit, so pick the higher n2/(r2*r2) */
		if (n2 * best->r2 * best->r2 > best->n2 * r2 * r2) {
			best->p = p;
			best->n2 = n2;
			best->r2 = r2;
		}
	}
	/* Otherwise a < c && b >= d, do nothing */
}
853 
/*
 * Exhaustively search for the (r2, n2, p) divider triple that best hits
 * @clock within the frequency's PPM budget, writing the winners to the
 * *_out parameters.
 */
static void
hsw_ddi_calculate_wrpll(int clock /* in Hz */,
			unsigned *r2_out, unsigned *n2_out, unsigned *p_out)
{
	u64 freq2k;
	unsigned p, n2, r2;
	struct hsw_wrpll_rnp best = {};
	unsigned budget;

	/* Work in units of 100 Hz from here on. */
	freq2k = clock / 100;

	budget = hsw_wrpll_get_budget_for_freq(clock);

	/* Special case handling for 540 pixel clock: bypass WR PLL entirely
	 * and directly pass the LC PLL to it. */
	if (freq2k == 5400000) {
		*n2_out = 2;
		*p_out = 1;
		*r2_out = 2;
		return;
	}

	/*
	 * Ref = LC_FREQ / R, where Ref is the actual reference input seen by
	 * the WR PLL.
	 *
	 * We want R so that REF_MIN <= Ref <= REF_MAX.
	 * Injecting R2 = 2 * R gives:
	 *   REF_MAX * r2 > LC_FREQ * 2 and
	 *   REF_MIN * r2 < LC_FREQ * 2
	 *
	 * Which means the desired boundaries for r2 are:
	 *  LC_FREQ * 2 / REF_MAX < r2 < LC_FREQ * 2 / REF_MIN
	 *
	 */
	for (r2 = LC_FREQ * 2 / REF_MAX + 1;
	     r2 <= LC_FREQ * 2 / REF_MIN;
	     r2++) {

		/*
		 * VCO = N * Ref, that is: VCO = N * LC_FREQ / R
		 *
		 * Once again we want VCO_MIN <= VCO <= VCO_MAX.
		 * Injecting R2 = 2 * R and N2 = 2 * N, we get:
		 *   VCO_MAX * r2 > n2 * LC_FREQ and
		 *   VCO_MIN * r2 < n2 * LC_FREQ)
		 *
		 * Which means the desired boundaries for n2 are:
		 * VCO_MIN * r2 / LC_FREQ < n2 < VCO_MAX * r2 / LC_FREQ
		 */
		for (n2 = VCO_MIN * r2 / LC_FREQ + 1;
		     n2 <= VCO_MAX * r2 / LC_FREQ;
		     n2++) {

			/* Try every (even) post divider for this (r2, n2). */
			for (p = P_MIN; p <= P_MAX; p += P_INC)
				hsw_wrpll_update_rnp(freq2k, budget,
						     r2, n2, p, &best);
		}
	}

	*n2_out = best.n2;
	*p_out = best.p;
	*r2_out = best.r2;
}
918 
/*
 * Compute the WRPLL output frequency (in kHz) from the programmed
 * reference selection and dividers in @pll_state->wrpll.
 */
static int hsw_ddi_wrpll_get_freq(struct drm_i915_private *dev_priv,
				  const struct intel_shared_dpll *pll,
				  const struct intel_dpll_hw_state *pll_state)
{
	int refclk;
	int n, p, r;
	u32 wrpll = pll_state->wrpll;

	switch (wrpll & WRPLL_REF_MASK) {
	case WRPLL_REF_SPECIAL_HSW:
		/* Muxed-SSC for BDW, non-SSC for non-ULT HSW. */
		if (IS_HASWELL(dev_priv) && !IS_HSW_ULT(dev_priv)) {
			refclk = dev_priv->display.dpll.ref_clks.nssc;
			break;
		}
		fallthrough;
	case WRPLL_REF_PCH_SSC:
		/*
		 * We could calculate spread here, but our checking
		 * code only cares about 5% accuracy, and spread is a max of
		 * 0.5% downspread.
		 */
		refclk = dev_priv->display.dpll.ref_clks.ssc;
		break;
	case WRPLL_REF_LCPLL:
		refclk = 2700000;
		break;
	default:
		/* Unknown reference select: report 0 kHz. */
		MISSING_CASE(wrpll);
		return 0;
	}

	r = wrpll & WRPLL_DIVIDER_REF_MASK;
	p = (wrpll & WRPLL_DIVIDER_POST_MASK) >> WRPLL_DIVIDER_POST_SHIFT;
	n = (wrpll & WRPLL_DIVIDER_FB_MASK) >> WRPLL_DIVIDER_FB_SHIFT;

	/* Convert to KHz, p & r have a fixed point portion */
	return (refclk * n / 10) / (p * r) * 2;
}
958 
/*
 * Compute the WRPLL register value for @crtc's target clock and stage it
 * in the crtc state. Always succeeds (returns 0).
 */
static int
hsw_ddi_wrpll_compute_dpll(struct intel_atomic_state *state,
			   struct intel_crtc *crtc)
{
	struct drm_i915_private *i915 = to_i915(state->base.dev);
	struct intel_crtc_state *crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);
	unsigned int p, n2, r2;

	/* Pick dividers for the target clock (port_clock is in kHz). */
	hsw_ddi_calculate_wrpll(crtc_state->port_clock * 1000, &r2, &n2, &p);

	crtc_state->dpll_hw_state.wrpll =
		WRPLL_PLL_ENABLE | WRPLL_REF_LCPLL |
		WRPLL_DIVIDER_REFERENCE(r2) | WRPLL_DIVIDER_FEEDBACK(n2) |
		WRPLL_DIVIDER_POST(p);

	/* Store the rounded frequency the chosen dividers actually produce. */
	crtc_state->port_clock = hsw_ddi_wrpll_get_freq(i915, NULL,
							&crtc_state->dpll_hw_state);

	return 0;
}
980 
981 static struct intel_shared_dpll *
982 hsw_ddi_wrpll_get_dpll(struct intel_atomic_state *state,
983 		       struct intel_crtc *crtc)
984 {
985 	struct intel_crtc_state *crtc_state =
986 		intel_atomic_get_new_crtc_state(state, crtc);
987 
988 	return intel_find_shared_dpll(state, crtc,
989 				      &crtc_state->dpll_hw_state,
990 				      BIT(DPLL_ID_WRPLL2) |
991 				      BIT(DPLL_ID_WRPLL1));
992 }
993 
994 static int
995 hsw_ddi_lcpll_compute_dpll(struct intel_crtc_state *crtc_state)
996 {
997 	struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
998 	int clock = crtc_state->port_clock;
999 
1000 	switch (clock / 2) {
1001 	case 81000:
1002 	case 135000:
1003 	case 270000:
1004 		return 0;
1005 	default:
1006 		drm_dbg_kms(&dev_priv->drm, "Invalid clock for DP: %d\n",
1007 			    clock);
1008 		return -EINVAL;
1009 	}
1010 }
1011 
1012 static struct intel_shared_dpll *
1013 hsw_ddi_lcpll_get_dpll(struct intel_crtc_state *crtc_state)
1014 {
1015 	struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
1016 	struct intel_shared_dpll *pll;
1017 	enum intel_dpll_id pll_id;
1018 	int clock = crtc_state->port_clock;
1019 
1020 	switch (clock / 2) {
1021 	case 81000:
1022 		pll_id = DPLL_ID_LCPLL_810;
1023 		break;
1024 	case 135000:
1025 		pll_id = DPLL_ID_LCPLL_1350;
1026 		break;
1027 	case 270000:
1028 		pll_id = DPLL_ID_LCPLL_2700;
1029 		break;
1030 	default:
1031 		MISSING_CASE(clock / 2);
1032 		return NULL;
1033 	}
1034 
1035 	pll = intel_get_shared_dpll_by_id(dev_priv, pll_id);
1036 
1037 	if (!pll)
1038 		return NULL;
1039 
1040 	return pll;
1041 }
1042 
1043 static int hsw_ddi_lcpll_get_freq(struct drm_i915_private *i915,
1044 				  const struct intel_shared_dpll *pll,
1045 				  const struct intel_dpll_hw_state *pll_state)
1046 {
1047 	int link_clock = 0;
1048 
1049 	switch (pll->info->id) {
1050 	case DPLL_ID_LCPLL_810:
1051 		link_clock = 81000;
1052 		break;
1053 	case DPLL_ID_LCPLL_1350:
1054 		link_clock = 135000;
1055 		break;
1056 	case DPLL_ID_LCPLL_2700:
1057 		link_clock = 270000;
1058 		break;
1059 	default:
1060 		drm_WARN(&i915->drm, 1, "bad port clock sel\n");
1061 		break;
1062 	}
1063 
1064 	return link_clock * 2;
1065 }
1066 
1067 static int
1068 hsw_ddi_spll_compute_dpll(struct intel_atomic_state *state,
1069 			  struct intel_crtc *crtc)
1070 {
1071 	struct intel_crtc_state *crtc_state =
1072 		intel_atomic_get_new_crtc_state(state, crtc);
1073 
1074 	if (drm_WARN_ON(crtc->base.dev, crtc_state->port_clock / 2 != 135000))
1075 		return -EINVAL;
1076 
1077 	crtc_state->dpll_hw_state.spll =
1078 		SPLL_PLL_ENABLE | SPLL_FREQ_1350MHz | SPLL_REF_MUXED_SSC;
1079 
1080 	return 0;
1081 }
1082 
1083 static struct intel_shared_dpll *
1084 hsw_ddi_spll_get_dpll(struct intel_atomic_state *state,
1085 		      struct intel_crtc *crtc)
1086 {
1087 	struct intel_crtc_state *crtc_state =
1088 		intel_atomic_get_new_crtc_state(state, crtc);
1089 
1090 	return intel_find_shared_dpll(state, crtc, &crtc_state->dpll_hw_state,
1091 				      BIT(DPLL_ID_SPLL));
1092 }
1093 
1094 static int hsw_ddi_spll_get_freq(struct drm_i915_private *i915,
1095 				 const struct intel_shared_dpll *pll,
1096 				 const struct intel_dpll_hw_state *pll_state)
1097 {
1098 	int link_clock = 0;
1099 
1100 	switch (pll_state->spll & SPLL_FREQ_MASK) {
1101 	case SPLL_FREQ_810MHz:
1102 		link_clock = 81000;
1103 		break;
1104 	case SPLL_FREQ_1350MHz:
1105 		link_clock = 135000;
1106 		break;
1107 	case SPLL_FREQ_2700MHz:
1108 		link_clock = 270000;
1109 		break;
1110 	default:
1111 		drm_WARN(&i915->drm, 1, "bad spll freq\n");
1112 		break;
1113 	}
1114 
1115 	return link_clock * 2;
1116 }
1117 
1118 static int hsw_compute_dpll(struct intel_atomic_state *state,
1119 			    struct intel_crtc *crtc,
1120 			    struct intel_encoder *encoder)
1121 {
1122 	struct intel_crtc_state *crtc_state =
1123 		intel_atomic_get_new_crtc_state(state, crtc);
1124 
1125 	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI))
1126 		return hsw_ddi_wrpll_compute_dpll(state, crtc);
1127 	else if (intel_crtc_has_dp_encoder(crtc_state))
1128 		return hsw_ddi_lcpll_compute_dpll(crtc_state);
1129 	else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_ANALOG))
1130 		return hsw_ddi_spll_compute_dpll(state, crtc);
1131 	else
1132 		return -EINVAL;
1133 }
1134 
1135 static int hsw_get_dpll(struct intel_atomic_state *state,
1136 			struct intel_crtc *crtc,
1137 			struct intel_encoder *encoder)
1138 {
1139 	struct intel_crtc_state *crtc_state =
1140 		intel_atomic_get_new_crtc_state(state, crtc);
1141 	struct intel_shared_dpll *pll = NULL;
1142 
1143 	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI))
1144 		pll = hsw_ddi_wrpll_get_dpll(state, crtc);
1145 	else if (intel_crtc_has_dp_encoder(crtc_state))
1146 		pll = hsw_ddi_lcpll_get_dpll(crtc_state);
1147 	else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_ANALOG))
1148 		pll = hsw_ddi_spll_get_dpll(state, crtc);
1149 
1150 	if (!pll)
1151 		return -EINVAL;
1152 
1153 	intel_reference_shared_dpll(state, crtc,
1154 				    pll, &crtc_state->dpll_hw_state);
1155 
1156 	crtc_state->shared_dpll = pll;
1157 
1158 	return 0;
1159 }
1160 
1161 static void hsw_update_dpll_ref_clks(struct drm_i915_private *i915)
1162 {
1163 	i915->display.dpll.ref_clks.ssc = 135000;
1164 	/* Non-SSC is only used on non-ULT HSW. */
1165 	if (intel_de_read(i915, FUSE_STRAP3) & HSW_REF_CLK_SELECT)
1166 		i915->display.dpll.ref_clks.nssc = 24000;
1167 	else
1168 		i915->display.dpll.ref_clks.nssc = 135000;
1169 }
1170 
/* Log the HSW PLL state (WRPLL and SPLL control words) for debugging. */
static void hsw_dump_hw_state(struct drm_i915_private *dev_priv,
			      const struct intel_dpll_hw_state *hw_state)
{
	drm_dbg_kms(&dev_priv->drm, "dpll_hw_state: wrpll: 0x%x spll: 0x%x\n",
		    hw_state->wrpll, hw_state->spll);
}
1177 
/* WRPLL1/2 vtable: these PLLs are fully enabled/disabled at runtime. */
static const struct intel_shared_dpll_funcs hsw_ddi_wrpll_funcs = {
	.enable = hsw_ddi_wrpll_enable,
	.disable = hsw_ddi_wrpll_disable,
	.get_hw_state = hsw_ddi_wrpll_get_hw_state,
	.get_freq = hsw_ddi_wrpll_get_freq,
};
1184 
/* SPLL vtable: single fixed-purpose PLL (see hsw_ddi_spll_compute_dpll). */
static const struct intel_shared_dpll_funcs hsw_ddi_spll_funcs = {
	.enable = hsw_ddi_spll_enable,
	.disable = hsw_ddi_spll_disable,
	.get_hw_state = hsw_ddi_spll_get_hw_state,
	.get_freq = hsw_ddi_spll_get_freq,
};
1191 
/*
 * HSW LCPLLs are flagged INTEL_DPLL_ALWAYS_ON (see hsw_plls), so there
 * is nothing to do on enable.
 */
static void hsw_ddi_lcpll_enable(struct drm_i915_private *dev_priv,
				 struct intel_shared_dpll *pll)
{
}
1196 
/* Always-on LCPLLs are never turned off; disable is a no-op. */
static void hsw_ddi_lcpll_disable(struct drm_i915_private *dev_priv,
				  struct intel_shared_dpll *pll)
{
}
1201 
/*
 * Always-on LCPLLs carry no readback state: report them as enabled and
 * leave *hw_state untouched.
 */
static bool hsw_ddi_lcpll_get_hw_state(struct drm_i915_private *dev_priv,
				       struct intel_shared_dpll *pll,
				       struct intel_dpll_hw_state *hw_state)
{
	return true;
}
1208 
/* LCPLL vtable: enable/disable are no-ops since these PLLs are always on. */
static const struct intel_shared_dpll_funcs hsw_ddi_lcpll_funcs = {
	.enable = hsw_ddi_lcpll_enable,
	.disable = hsw_ddi_lcpll_disable,
	.get_hw_state = hsw_ddi_lcpll_get_hw_state,
	.get_freq = hsw_ddi_lcpll_get_freq,
};
1215 
/*
 * HSW/BDW shared DPLL table: the two WRPLLs and the SPLL are managed at
 * runtime, while the three fixed-rate LCPLLs are flagged always-on.
 */
static const struct dpll_info hsw_plls[] = {
	{ "WRPLL 1",    &hsw_ddi_wrpll_funcs, DPLL_ID_WRPLL1,     0 },
	{ "WRPLL 2",    &hsw_ddi_wrpll_funcs, DPLL_ID_WRPLL2,     0 },
	{ "SPLL",       &hsw_ddi_spll_funcs,  DPLL_ID_SPLL,       0 },
	{ "LCPLL 810",  &hsw_ddi_lcpll_funcs, DPLL_ID_LCPLL_810,  INTEL_DPLL_ALWAYS_ON },
	{ "LCPLL 1350", &hsw_ddi_lcpll_funcs, DPLL_ID_LCPLL_1350, INTEL_DPLL_ALWAYS_ON },
	{ "LCPLL 2700", &hsw_ddi_lcpll_funcs, DPLL_ID_LCPLL_2700, INTEL_DPLL_ALWAYS_ON },
	{ },
};
1225 
/* Top-level DPLL manager hooks for HSW/BDW. */
static const struct intel_dpll_mgr hsw_pll_mgr = {
	.dpll_info = hsw_plls,
	.compute_dplls = hsw_compute_dpll,
	.get_dplls = hsw_get_dpll,
	.put_dplls = intel_put_dpll,
	.update_ref_clks = hsw_update_dpll_ref_clks,
	.dump_hw_state = hsw_dump_hw_state,
};
1234 
/* Per-DPLL register set on SKL: enable/control plus the two HDMI config regs */
struct skl_dpll_regs {
	i915_reg_t ctl, cfgcr1, cfgcr2;
};
1238 
/* this array is indexed by the *shared* pll id */
static const struct skl_dpll_regs skl_dpll_regs[4] = {
	{
		/* DPLL 0 */
		.ctl = LCPLL1_CTL,
		/* DPLL 0 doesn't support HDMI mode, so cfgcr1/2 stay 0 */
	},
	{
		/* DPLL 1 */
		.ctl = LCPLL2_CTL,
		.cfgcr1 = DPLL_CFGCR1(SKL_DPLL1),
		.cfgcr2 = DPLL_CFGCR2(SKL_DPLL1),
	},
	{
		/* DPLL 2 */
		.ctl = WRPLL_CTL(0),
		.cfgcr1 = DPLL_CFGCR1(SKL_DPLL2),
		.cfgcr2 = DPLL_CFGCR2(SKL_DPLL2),
	},
	{
		/* DPLL 3 */
		.ctl = WRPLL_CTL(1),
		.cfgcr1 = DPLL_CFGCR1(SKL_DPLL3),
		.cfgcr2 = DPLL_CFGCR2(SKL_DPLL3),
	},
};
1265 
1266 static void skl_ddi_pll_write_ctrl1(struct drm_i915_private *dev_priv,
1267 				    struct intel_shared_dpll *pll)
1268 {
1269 	const enum intel_dpll_id id = pll->info->id;
1270 
1271 	intel_de_rmw(dev_priv, DPLL_CTRL1,
1272 		     DPLL_CTRL1_HDMI_MODE(id) | DPLL_CTRL1_SSC(id) | DPLL_CTRL1_LINK_RATE_MASK(id),
1273 		     pll->state.hw_state.ctrl1 << (id * 6));
1274 	intel_de_posting_read(dev_priv, DPLL_CTRL1);
1275 }
1276 
/*
 * Enable SKL DPLL1-3: program the shared DPLL_CTRL1 field, then the
 * per-PLL CFGCR1/2 config, set the enable bit and wait for lock.
 * The write order is part of the programming sequence; keep it intact.
 */
static void skl_ddi_pll_enable(struct drm_i915_private *dev_priv,
			       struct intel_shared_dpll *pll)
{
	const struct skl_dpll_regs *regs = skl_dpll_regs;
	const enum intel_dpll_id id = pll->info->id;

	skl_ddi_pll_write_ctrl1(dev_priv, pll);

	intel_de_write(dev_priv, regs[id].cfgcr1, pll->state.hw_state.cfgcr1);
	intel_de_write(dev_priv, regs[id].cfgcr2, pll->state.hw_state.cfgcr2);
	/* posting reads flush the config before the PLL is enabled */
	intel_de_posting_read(dev_priv, regs[id].cfgcr1);
	intel_de_posting_read(dev_priv, regs[id].cfgcr2);

	/* the enable bit is always bit 31 */
	intel_de_rmw(dev_priv, regs[id].ctl, 0, LCPLL_PLL_ENABLE);

	if (intel_de_wait_for_set(dev_priv, DPLL_STATUS, DPLL_LOCK(id), 5))
		drm_err(&dev_priv->drm, "DPLL %d not locked\n", id);
}
1296 
/*
 * DPLL0 also drives CDCLK and so is brought up elsewhere; here we only
 * program its link-rate selection in DPLL_CTRL1.
 */
static void skl_ddi_dpll0_enable(struct drm_i915_private *dev_priv,
				 struct intel_shared_dpll *pll)
{
	skl_ddi_pll_write_ctrl1(dev_priv, pll);
}
1302 
/* Disable SKL DPLL1-3 by clearing the enable bit in its control register. */
static void skl_ddi_pll_disable(struct drm_i915_private *dev_priv,
				struct intel_shared_dpll *pll)
{
	const struct skl_dpll_regs *regs = skl_dpll_regs;
	const enum intel_dpll_id id = pll->info->id;

	/* the enable bit is always bit 31 */
	intel_de_rmw(dev_priv, regs[id].ctl, LCPLL_PLL_ENABLE, 0);
	intel_de_posting_read(dev_priv, regs[id].ctl);
}
1313 
/* DPLL0 stays enabled (it drives CDCLK); nothing to do here. */
static void skl_ddi_dpll0_disable(struct drm_i915_private *dev_priv,
				  struct intel_shared_dpll *pll)
{
}
1318 
/*
 * Read back the enable state and configuration of SKL DPLL1-3.
 * Returns false (PLL off) when the display power well is down or the
 * enable bit is clear; otherwise fills *hw_state and returns true.
 */
static bool skl_ddi_pll_get_hw_state(struct drm_i915_private *dev_priv,
				     struct intel_shared_dpll *pll,
				     struct intel_dpll_hw_state *hw_state)
{
	u32 val;
	const struct skl_dpll_regs *regs = skl_dpll_regs;
	const enum intel_dpll_id id = pll->info->id;
	intel_wakeref_t wakeref;
	bool ret;

	wakeref = intel_display_power_get_if_enabled(dev_priv,
						     POWER_DOMAIN_DISPLAY_CORE);
	if (!wakeref)
		return false;

	ret = false;

	val = intel_de_read(dev_priv, regs[id].ctl);
	if (!(val & LCPLL_PLL_ENABLE))
		goto out;

	/* each DPLL owns a 6-bit field in DPLL_CTRL1 */
	val = intel_de_read(dev_priv, DPLL_CTRL1);
	hw_state->ctrl1 = (val >> (id * 6)) & 0x3f;

	/* avoid reading back stale values if HDMI mode is not enabled */
	if (val & DPLL_CTRL1_HDMI_MODE(id)) {
		hw_state->cfgcr1 = intel_de_read(dev_priv, regs[id].cfgcr1);
		hw_state->cfgcr2 = intel_de_read(dev_priv, regs[id].cfgcr2);
	}
	ret = true;

out:
	intel_display_power_put(dev_priv, POWER_DOMAIN_DISPLAY_CORE, wakeref);

	return ret;
}
1355 
/*
 * Read back DPLL0 state. Only ctrl1 is captured: DPLL0 has no HDMI
 * cfgcr registers (see skl_dpll_regs). Warns if the PLL is unexpectedly
 * disabled, since it should always be running.
 */
static bool skl_ddi_dpll0_get_hw_state(struct drm_i915_private *dev_priv,
				       struct intel_shared_dpll *pll,
				       struct intel_dpll_hw_state *hw_state)
{
	const struct skl_dpll_regs *regs = skl_dpll_regs;
	const enum intel_dpll_id id = pll->info->id;
	intel_wakeref_t wakeref;
	u32 val;
	bool ret;

	wakeref = intel_display_power_get_if_enabled(dev_priv,
						     POWER_DOMAIN_DISPLAY_CORE);
	if (!wakeref)
		return false;

	ret = false;

	/* DPLL0 is always enabled since it drives CDCLK */
	val = intel_de_read(dev_priv, regs[id].ctl);
	if (drm_WARN_ON(&dev_priv->drm, !(val & LCPLL_PLL_ENABLE)))
		goto out;

	/* each DPLL owns a 6-bit field in DPLL_CTRL1 */
	val = intel_de_read(dev_priv, DPLL_CTRL1);
	hw_state->ctrl1 = (val >> (id * 6)) & 0x3f;

	ret = true;

out:
	intel_display_power_put(dev_priv, POWER_DOMAIN_DISPLAY_CORE, wakeref);

	return ret;
}
1388 
/*
 * Best-candidate state for the divider search in skl_ddi_calculate_wrpll():
 * tracks the combination with the smallest DCO deviation seen so far.
 */
struct skl_wrpll_context {
	u64 min_deviation;		/* current minimal deviation */
	u64 central_freq;		/* chosen central freq */
	u64 dco_freq;			/* chosen dco freq */
	unsigned int p;			/* chosen divider */
};
1395 
1396 /* DCO freq must be within +1%/-6%  of the DCO central freq */
1397 #define SKL_DCO_MAX_PDEVIATION	100
1398 #define SKL_DCO_MAX_NDEVIATION	600
1399 
1400 static void skl_wrpll_try_divider(struct skl_wrpll_context *ctx,
1401 				  u64 central_freq,
1402 				  u64 dco_freq,
1403 				  unsigned int divider)
1404 {
1405 	u64 deviation;
1406 
1407 	deviation = div64_u64(10000 * abs_diff(dco_freq, central_freq),
1408 			      central_freq);
1409 
1410 	/* positive deviation */
1411 	if (dco_freq >= central_freq) {
1412 		if (deviation < SKL_DCO_MAX_PDEVIATION &&
1413 		    deviation < ctx->min_deviation) {
1414 			ctx->min_deviation = deviation;
1415 			ctx->central_freq = central_freq;
1416 			ctx->dco_freq = dco_freq;
1417 			ctx->p = divider;
1418 		}
1419 	/* negative deviation */
1420 	} else if (deviation < SKL_DCO_MAX_NDEVIATION &&
1421 		   deviation < ctx->min_deviation) {
1422 		ctx->min_deviation = deviation;
1423 		ctx->central_freq = central_freq;
1424 		ctx->dco_freq = dco_freq;
1425 		ctx->p = divider;
1426 	}
1427 }
1428 
/*
 * Decompose the overall divider @p into the P0 * P1 * P2 factors the
 * hardware wants. Only the dividers listed in skl_ddi_calculate_wrpll()
 * are handled; for any other value the outputs are left untouched.
 */
static void skl_wrpll_get_multipliers(unsigned int p,
				      unsigned int *p0 /* out */,
				      unsigned int *p1 /* out */,
				      unsigned int *p2 /* out */)
{
	if (p % 2 == 0) {
		/* even dividers: factor so that P2 stays even */
		unsigned int half = p / 2;

		if (half == 1 || half == 2 || half == 3 || half == 5) {
			*p0 = 2;
			*p1 = 1;
			*p2 = half;
		} else if (half % 2 == 0) {
			*p0 = 2;
			*p1 = half / 2;
			*p2 = 2;
		} else if (half % 3 == 0) {
			*p0 = 3;
			*p1 = half / 3;
			*p2 = 2;
		} else if (half % 7 == 0) {
			*p0 = 7;
			*p1 = half / 7;
			*p2 = 2;
		}
		return;
	}

	/* odd dividers: 3, 5, 7, 9, 15, 21, 35 */
	switch (p) {
	case 3:
	case 9:
		*p0 = 3;
		*p1 = 1;
		*p2 = p / 3;
		break;
	case 5:
	case 7:
		*p0 = p;
		*p1 = 1;
		*p2 = 1;
		break;
	case 15:
		*p0 = 3;
		*p1 = 1;
		*p2 = 5;
		break;
	case 21:
		*p0 = 7;
		*p1 = 1;
		*p2 = 3;
		break;
	case 35:
		*p0 = 7;
		*p1 = 1;
		*p2 = 5;
		break;
	}
}
1477 
/*
 * Computed WRPLL settings; the fields map directly onto the
 * DPLL_CFGCR1/CFGCR2 register fields programmed in
 * skl_ddi_hdmi_pll_dividers().
 */
struct skl_wrpll_params {
	u32 dco_fraction;
	u32 dco_integer;
	u32 qdiv_ratio;
	u32 qdiv_mode;
	u32 kdiv;
	u32 pdiv;
	u32 central_freq;
};
1487 
/*
 * Translate the chosen central frequency and P0/P1/P2 dividers into the
 * encoded register field values in @params, and derive the DCO
 * integer/fraction from @afe_clock and @ref_clock.
 */
static void skl_wrpll_params_populate(struct skl_wrpll_params *params,
				      u64 afe_clock,
				      int ref_clock,
				      u64 central_freq,
				      u32 p0, u32 p1, u32 p2)
{
	u64 dco_freq;

	/*
	 * Callers only pass one of the three dco_central_freq[] values;
	 * any other value would leave params->central_freq unchanged
	 * (presumably zero-initialized by the caller -- verify).
	 */
	switch (central_freq) {
	case 9600000000ULL:
		params->central_freq = 0;
		break;
	case 9000000000ULL:
		params->central_freq = 1;
		break;
	case 8400000000ULL:
		params->central_freq = 3;
	}

	switch (p0) {
	case 1:
		params->pdiv = 0;
		break;
	case 2:
		params->pdiv = 1;
		break;
	case 3:
		params->pdiv = 2;
		break;
	case 7:
		params->pdiv = 4;
		break;
	default:
		WARN(1, "Incorrect PDiv\n");
	}

	switch (p2) {
	case 5:
		params->kdiv = 0;
		break;
	case 2:
		params->kdiv = 1;
		break;
	case 3:
		params->kdiv = 2;
		break;
	case 1:
		params->kdiv = 3;
		break;
	default:
		WARN(1, "Incorrect KDiv\n");
	}

	params->qdiv_ratio = p1;
	params->qdiv_mode = (params->qdiv_ratio == 1) ? 0 : 1;

	dco_freq = p0 * p1 * p2 * afe_clock;

	/*
	 * Intermediate values are in Hz.
	 * Divide by MHz to match bspec
	 */
	params->dco_integer = div_u64(dco_freq, ref_clock * KHz(1));
	/* fraction is the remainder scaled to a 15-bit (0x8000) fixed point */
	params->dco_fraction =
		div_u64((div_u64(dco_freq, ref_clock / KHz(1)) -
			 params->dco_integer * MHz(1)) * 0x8000, MHz(1));
}
1555 
/*
 * Find WRPLL settings for @clock (in Hz): search every supported
 * divider against the three DCO central frequencies for the candidate
 * with the smallest deviation, then decompose it into register fields
 * via skl_wrpll_get_multipliers()/skl_wrpll_params_populate().
 * Returns 0 on success, -EINVAL if no divider satisfies the deviation
 * limits.
 */
static int
skl_ddi_calculate_wrpll(int clock /* in Hz */,
			int ref_clock,
			struct skl_wrpll_params *wrpll_params)
{
	static const u64 dco_central_freq[3] = { 8400000000ULL,
						 9000000000ULL,
						 9600000000ULL };
	static const u8 even_dividers[] = {  4,  6,  8, 10, 12, 14, 16, 18, 20,
					    24, 28, 30, 32, 36, 40, 42, 44,
					    48, 52, 54, 56, 60, 64, 66, 68,
					    70, 72, 76, 78, 80, 84, 88, 90,
					    92, 96, 98 };
	static const u8 odd_dividers[] = { 3, 5, 7, 9, 15, 21, 35 };
	static const struct {
		const u8 *list;
		int n_dividers;
	} dividers[] = {
		{ even_dividers, ARRAY_SIZE(even_dividers) },
		{ odd_dividers, ARRAY_SIZE(odd_dividers) },
	};
	struct skl_wrpll_context ctx = {
		.min_deviation = U64_MAX,
	};
	unsigned int dco, d, i;
	unsigned int p0, p1, p2;
	u64 afe_clock = clock * 5; /* AFE Clock is 5x Pixel clock */

	for (d = 0; d < ARRAY_SIZE(dividers); d++) {
		for (dco = 0; dco < ARRAY_SIZE(dco_central_freq); dco++) {
			for (i = 0; i < dividers[d].n_dividers; i++) {
				unsigned int p = dividers[d].list[i];
				u64 dco_freq = p * afe_clock;

				skl_wrpll_try_divider(&ctx,
						      dco_central_freq[dco],
						      dco_freq,
						      p);
				/*
				 * Skip the remaining dividers if we're sure to
				 * have found the definitive divider, we can't
				 * improve a 0 deviation.
				 */
				if (ctx.min_deviation == 0)
					goto skip_remaining_dividers;
			}
		}

		/* label sits inside the outer loop: odd dividers still run */
skip_remaining_dividers:
		/*
		 * If a solution is found with an even divider, prefer
		 * this one.
		 */
		if (d == 0 && ctx.p)
			break;
	}

	if (!ctx.p)
		return -EINVAL;

	/*
	 * gcc incorrectly analyses that these can be used without being
	 * initialized. To be fair, it's hard to guess.
	 */
	p0 = p1 = p2 = 0;
	skl_wrpll_get_multipliers(ctx.p, &p0, &p1, &p2);
	skl_wrpll_params_populate(wrpll_params, afe_clock, ref_clock,
				  ctx.central_freq, p0, p1, p2);

	return 0;
}
1627 
/*
 * Decode the programmed CFGCR1/2 values back into a port clock (kHz):
 * recover P0/P1/P2 and the DCO frequency, then invert the 5x AFE
 * relationship used in skl_ddi_calculate_wrpll().
 */
static int skl_ddi_wrpll_get_freq(struct drm_i915_private *i915,
				  const struct intel_shared_dpll *pll,
				  const struct intel_dpll_hw_state *pll_state)
{
	int ref_clock = i915->display.dpll.ref_clks.nssc;
	u32 p0, p1, p2, dco_freq;

	p0 = pll_state->cfgcr2 & DPLL_CFGCR2_PDIV_MASK;
	p2 = pll_state->cfgcr2 & DPLL_CFGCR2_KDIV_MASK;

	/* Qdiv ratio only applies when Qdiv mode is enabled */
	if (pll_state->cfgcr2 &  DPLL_CFGCR2_QDIV_MODE(1))
		p1 = (pll_state->cfgcr2 & DPLL_CFGCR2_QDIV_RATIO_MASK) >> 8;
	else
		p1 = 1;


	switch (p0) {
	case DPLL_CFGCR2_PDIV_1:
		p0 = 1;
		break;
	case DPLL_CFGCR2_PDIV_2:
		p0 = 2;
		break;
	case DPLL_CFGCR2_PDIV_3:
		p0 = 3;
		break;
	case DPLL_CFGCR2_PDIV_7_INVALID:
		/*
		 * Incorrect ASUS-Z170M BIOS setting, the HW seems to ignore bit#0,
		 * handling it the same way as PDIV_7.
		 */
		drm_dbg_kms(&i915->drm, "Invalid WRPLL PDIV divider value, fixing it.\n");
		fallthrough;
	case DPLL_CFGCR2_PDIV_7:
		p0 = 7;
		break;
	default:
		MISSING_CASE(p0);
		return 0;
	}

	switch (p2) {
	case DPLL_CFGCR2_KDIV_5:
		p2 = 5;
		break;
	case DPLL_CFGCR2_KDIV_2:
		p2 = 2;
		break;
	case DPLL_CFGCR2_KDIV_3:
		p2 = 3;
		break;
	case DPLL_CFGCR2_KDIV_1:
		p2 = 1;
		break;
	default:
		MISSING_CASE(p2);
		return 0;
	}

	/* DCO = integer part * ref + 15-bit fraction (0x8000 scale) * ref */
	dco_freq = (pll_state->cfgcr1 & DPLL_CFGCR1_DCO_INTEGER_MASK) *
		   ref_clock;

	dco_freq += ((pll_state->cfgcr1 & DPLL_CFGCR1_DCO_FRACTION_MASK) >> 9) *
		    ref_clock / 0x8000;

	if (drm_WARN_ON(&i915->drm, p0 == 0 || p1 == 0 || p2 == 0))
		return 0;

	/* port clock = DCO / (P0*P1*P2 * 5), undoing the 5x AFE clock */
	return dco_freq / (p0 * p1 * p2 * 5);
}
1698 
/*
 * Compute the HDMI WRPLL state for @crtc_state: calculate dividers for
 * the target port clock, encode them into ctrl1/cfgcr1/cfgcr2 and store
 * back the exact clock the hardware will produce.
 * Returns 0 on success or the error from skl_ddi_calculate_wrpll().
 */
static int skl_ddi_hdmi_pll_dividers(struct intel_crtc_state *crtc_state)
{
	struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
	struct skl_wrpll_params wrpll_params = {};
	u32 ctrl1, cfgcr1, cfgcr2;
	int ret;

	/*
	 * See comment in intel_dpll_hw_state to understand why we always use 0
	 * as the DPLL id in this function.
	 */
	ctrl1 = DPLL_CTRL1_OVERRIDE(0);

	ctrl1 |= DPLL_CTRL1_HDMI_MODE(0);

	/* port_clock is in kHz, the calculator wants Hz */
	ret = skl_ddi_calculate_wrpll(crtc_state->port_clock * 1000,
				      i915->display.dpll.ref_clks.nssc, &wrpll_params);
	if (ret)
		return ret;

	cfgcr1 = DPLL_CFGCR1_FREQ_ENABLE |
		DPLL_CFGCR1_DCO_FRACTION(wrpll_params.dco_fraction) |
		wrpll_params.dco_integer;

	cfgcr2 = DPLL_CFGCR2_QDIV_RATIO(wrpll_params.qdiv_ratio) |
		DPLL_CFGCR2_QDIV_MODE(wrpll_params.qdiv_mode) |
		DPLL_CFGCR2_KDIV(wrpll_params.kdiv) |
		DPLL_CFGCR2_PDIV(wrpll_params.pdiv) |
		wrpll_params.central_freq;

	crtc_state->dpll_hw_state.ctrl1 = ctrl1;
	crtc_state->dpll_hw_state.cfgcr1 = cfgcr1;
	crtc_state->dpll_hw_state.cfgcr2 = cfgcr2;

	/* refresh port_clock with the rate the dividers actually give */
	crtc_state->port_clock = skl_ddi_wrpll_get_freq(i915, NULL,
							&crtc_state->dpll_hw_state);

	return 0;
}
1738 
/*
 * Encode the DP/eDP link rate into the ctrl1 DPLL state. A link rate
 * not listed below leaves ctrl1 with only the override bit set (there
 * is deliberately no default case). Always returns 0.
 */
static int
skl_ddi_dp_set_dpll_hw_state(struct intel_crtc_state *crtc_state)
{
	u32 ctrl1;

	/*
	 * See comment in intel_dpll_hw_state to understand why we always use 0
	 * as the DPLL id in this function.
	 */
	ctrl1 = DPLL_CTRL1_OVERRIDE(0);
	/* port_clock is 2x the link clock */
	switch (crtc_state->port_clock / 2) {
	case 81000:
		ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_810, 0);
		break;
	case 135000:
		ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_1350, 0);
		break;
	case 270000:
		ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_2700, 0);
		break;
		/* eDP 1.4 rates */
	case 162000:
		ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_1620, 0);
		break;
	case 108000:
		ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_1080, 0);
		break;
	case 216000:
		ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_2160, 0);
		break;
	}

	crtc_state->dpll_hw_state.ctrl1 = ctrl1;

	return 0;
}
1775 
1776 static int skl_ddi_lcpll_get_freq(struct drm_i915_private *i915,
1777 				  const struct intel_shared_dpll *pll,
1778 				  const struct intel_dpll_hw_state *pll_state)
1779 {
1780 	int link_clock = 0;
1781 
1782 	switch ((pll_state->ctrl1 & DPLL_CTRL1_LINK_RATE_MASK(0)) >>
1783 		DPLL_CTRL1_LINK_RATE_SHIFT(0)) {
1784 	case DPLL_CTRL1_LINK_RATE_810:
1785 		link_clock = 81000;
1786 		break;
1787 	case DPLL_CTRL1_LINK_RATE_1080:
1788 		link_clock = 108000;
1789 		break;
1790 	case DPLL_CTRL1_LINK_RATE_1350:
1791 		link_clock = 135000;
1792 		break;
1793 	case DPLL_CTRL1_LINK_RATE_1620:
1794 		link_clock = 162000;
1795 		break;
1796 	case DPLL_CTRL1_LINK_RATE_2160:
1797 		link_clock = 216000;
1798 		break;
1799 	case DPLL_CTRL1_LINK_RATE_2700:
1800 		link_clock = 270000;
1801 		break;
1802 	default:
1803 		drm_WARN(&i915->drm, 1, "Unsupported link rate\n");
1804 		break;
1805 	}
1806 
1807 	return link_clock * 2;
1808 }
1809 
1810 static int skl_compute_dpll(struct intel_atomic_state *state,
1811 			    struct intel_crtc *crtc,
1812 			    struct intel_encoder *encoder)
1813 {
1814 	struct intel_crtc_state *crtc_state =
1815 		intel_atomic_get_new_crtc_state(state, crtc);
1816 
1817 	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI))
1818 		return skl_ddi_hdmi_pll_dividers(crtc_state);
1819 	else if (intel_crtc_has_dp_encoder(crtc_state))
1820 		return skl_ddi_dp_set_dpll_hw_state(crtc_state);
1821 	else
1822 		return -EINVAL;
1823 }
1824 
1825 static int skl_get_dpll(struct intel_atomic_state *state,
1826 			struct intel_crtc *crtc,
1827 			struct intel_encoder *encoder)
1828 {
1829 	struct intel_crtc_state *crtc_state =
1830 		intel_atomic_get_new_crtc_state(state, crtc);
1831 	struct intel_shared_dpll *pll;
1832 
1833 	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_EDP))
1834 		pll = intel_find_shared_dpll(state, crtc,
1835 					     &crtc_state->dpll_hw_state,
1836 					     BIT(DPLL_ID_SKL_DPLL0));
1837 	else
1838 		pll = intel_find_shared_dpll(state, crtc,
1839 					     &crtc_state->dpll_hw_state,
1840 					     BIT(DPLL_ID_SKL_DPLL3) |
1841 					     BIT(DPLL_ID_SKL_DPLL2) |
1842 					     BIT(DPLL_ID_SKL_DPLL1));
1843 	if (!pll)
1844 		return -EINVAL;
1845 
1846 	intel_reference_shared_dpll(state, crtc,
1847 				    pll, &crtc_state->dpll_hw_state);
1848 
1849 	crtc_state->shared_dpll = pll;
1850 
1851 	return 0;
1852 }
1853 
1854 static int skl_ddi_pll_get_freq(struct drm_i915_private *i915,
1855 				const struct intel_shared_dpll *pll,
1856 				const struct intel_dpll_hw_state *pll_state)
1857 {
1858 	/*
1859 	 * ctrl1 register is already shifted for each pll, just use 0 to get
1860 	 * the internal shift for each field
1861 	 */
1862 	if (pll_state->ctrl1 & DPLL_CTRL1_HDMI_MODE(0))
1863 		return skl_ddi_wrpll_get_freq(i915, pll, pll_state);
1864 	else
1865 		return skl_ddi_lcpll_get_freq(i915, pll, pll_state);
1866 }
1867 
/* SKL DPLLs run off the CDCLK reference; there is no SSC reference. */
static void skl_update_dpll_ref_clks(struct drm_i915_private *i915)
{
	/* No SSC ref */
	i915->display.dpll.ref_clks.nssc = i915->display.cdclk.hw.ref;
}
1873 
/* Log the SKL PLL state (ctrl1 plus the HDMI cfgcr regs) for debugging. */
static void skl_dump_hw_state(struct drm_i915_private *dev_priv,
			      const struct intel_dpll_hw_state *hw_state)
{
	drm_dbg_kms(&dev_priv->drm, "dpll_hw_state: "
		      "ctrl1: 0x%x, cfgcr1: 0x%x, cfgcr2: 0x%x\n",
		      hw_state->ctrl1,
		      hw_state->cfgcr1,
		      hw_state->cfgcr2);
}
1883 
/* DPLL1-3 vtable: fully managed (enabled/disabled) at runtime. */
static const struct intel_shared_dpll_funcs skl_ddi_pll_funcs = {
	.enable = skl_ddi_pll_enable,
	.disable = skl_ddi_pll_disable,
	.get_hw_state = skl_ddi_pll_get_hw_state,
	.get_freq = skl_ddi_pll_get_freq,
};
1890 
/* DPLL0 vtable: only ctrl1 is programmed; power is handled by CDCLK code. */
static const struct intel_shared_dpll_funcs skl_ddi_dpll0_funcs = {
	.enable = skl_ddi_dpll0_enable,
	.disable = skl_ddi_dpll0_disable,
	.get_hw_state = skl_ddi_dpll0_get_hw_state,
	.get_freq = skl_ddi_pll_get_freq,
};
1897 
/* SKL shared DPLL table; DPLL0 is always-on since it drives CDCLK. */
static const struct dpll_info skl_plls[] = {
	{ "DPLL 0", &skl_ddi_dpll0_funcs, DPLL_ID_SKL_DPLL0, INTEL_DPLL_ALWAYS_ON },
	{ "DPLL 1", &skl_ddi_pll_funcs,   DPLL_ID_SKL_DPLL1, 0 },
	{ "DPLL 2", &skl_ddi_pll_funcs,   DPLL_ID_SKL_DPLL2, 0 },
	{ "DPLL 3", &skl_ddi_pll_funcs,   DPLL_ID_SKL_DPLL3, 0 },
	{ },
};
1905 
/* Top-level DPLL manager hooks for SKL. */
static const struct intel_dpll_mgr skl_pll_mgr = {
	.dpll_info = skl_plls,
	.compute_dplls = skl_compute_dpll,
	.get_dplls = skl_get_dpll,
	.put_dplls = intel_put_dpll,
	.update_ref_clks = skl_update_dpll_ref_clks,
	.dump_hw_state = skl_dump_hw_state,
};
1914 
/*
 * Enable a BXT/GLK port PLL: program the precomputed dividers and
 * loop-filter coefficients from pll->state.hw_state into the PHY
 * registers, trigger a recalibration, enable the PLL and poll for lock.
 * The register writes follow a specific programming sequence; preserve
 * their order.
 */
static void bxt_ddi_pll_enable(struct drm_i915_private *dev_priv,
				struct intel_shared_dpll *pll)
{
	u32 temp;
	enum port port = (enum port)pll->info->id; /* 1:1 port->PLL mapping */
	enum dpio_phy phy;
	enum dpio_channel ch;

	bxt_port_to_phy_channel(dev_priv, port, &phy, &ch);

	/* Non-SSC reference */
	intel_de_rmw(dev_priv, BXT_PORT_PLL_ENABLE(port), 0, PORT_PLL_REF_SEL);

	/* GLK additionally requires explicit PLL power-well enabling */
	if (IS_GEMINILAKE(dev_priv)) {
		intel_de_rmw(dev_priv, BXT_PORT_PLL_ENABLE(port),
			     0, PORT_PLL_POWER_ENABLE);

		if (wait_for_us((intel_de_read(dev_priv, BXT_PORT_PLL_ENABLE(port)) &
				 PORT_PLL_POWER_STATE), 200))
			drm_err(&dev_priv->drm,
				"Power state not set for PLL:%d\n", port);
	}

	/* Disable 10 bit clock */
	intel_de_rmw(dev_priv, BXT_PORT_PLL_EBB_4(phy, ch),
		     PORT_PLL_10BIT_CLK_ENABLE, 0);

	/* Write P1 & P2 */
	intel_de_rmw(dev_priv, BXT_PORT_PLL_EBB_0(phy, ch),
		     PORT_PLL_P1_MASK | PORT_PLL_P2_MASK, pll->state.hw_state.ebb0);

	/* Write M2 integer */
	intel_de_rmw(dev_priv, BXT_PORT_PLL(phy, ch, 0),
		     PORT_PLL_M2_INT_MASK, pll->state.hw_state.pll0);

	/* Write N */
	intel_de_rmw(dev_priv, BXT_PORT_PLL(phy, ch, 1),
		     PORT_PLL_N_MASK, pll->state.hw_state.pll1);

	/* Write M2 fraction */
	intel_de_rmw(dev_priv, BXT_PORT_PLL(phy, ch, 2),
		     PORT_PLL_M2_FRAC_MASK, pll->state.hw_state.pll2);

	/* Write M2 fraction enable */
	intel_de_rmw(dev_priv, BXT_PORT_PLL(phy, ch, 3),
		     PORT_PLL_M2_FRAC_ENABLE, pll->state.hw_state.pll3);

	/* Write coeff */
	temp = intel_de_read(dev_priv, BXT_PORT_PLL(phy, ch, 6));
	temp &= ~PORT_PLL_PROP_COEFF_MASK;
	temp &= ~PORT_PLL_INT_COEFF_MASK;
	temp &= ~PORT_PLL_GAIN_CTL_MASK;
	temp |= pll->state.hw_state.pll6;
	intel_de_write(dev_priv, BXT_PORT_PLL(phy, ch, 6), temp);

	/* Write calibration val */
	intel_de_rmw(dev_priv, BXT_PORT_PLL(phy, ch, 8),
		     PORT_PLL_TARGET_CNT_MASK, pll->state.hw_state.pll8);

	intel_de_rmw(dev_priv, BXT_PORT_PLL(phy, ch, 9),
		     PORT_PLL_LOCK_THRESHOLD_MASK, pll->state.hw_state.pll9);

	temp = intel_de_read(dev_priv, BXT_PORT_PLL(phy, ch, 10));
	temp &= ~PORT_PLL_DCO_AMP_OVR_EN_H;
	temp &= ~PORT_PLL_DCO_AMP_MASK;
	temp |= pll->state.hw_state.pll10;
	intel_de_write(dev_priv, BXT_PORT_PLL(phy, ch, 10), temp);

	/* Recalibrate with new settings */
	temp = intel_de_read(dev_priv, BXT_PORT_PLL_EBB_4(phy, ch));
	temp |= PORT_PLL_RECALIBRATE;
	intel_de_write(dev_priv, BXT_PORT_PLL_EBB_4(phy, ch), temp);
	temp &= ~PORT_PLL_10BIT_CLK_ENABLE;
	temp |= pll->state.hw_state.ebb4;
	intel_de_write(dev_priv, BXT_PORT_PLL_EBB_4(phy, ch), temp);

	/* Enable PLL */
	intel_de_rmw(dev_priv, BXT_PORT_PLL_ENABLE(port), 0, PORT_PLL_ENABLE);
	intel_de_posting_read(dev_priv, BXT_PORT_PLL_ENABLE(port));

	if (wait_for_us((intel_de_read(dev_priv, BXT_PORT_PLL_ENABLE(port)) & PORT_PLL_LOCK),
			200))
		drm_err(&dev_priv->drm, "PLL %d not locked\n", port);

	if (IS_GEMINILAKE(dev_priv)) {
		temp = intel_de_read(dev_priv, BXT_PORT_TX_DW5_LN0(phy, ch));
		temp |= DCC_DELAY_RANGE_2;
		intel_de_write(dev_priv, BXT_PORT_TX_DW5_GRP(phy, ch), temp);
	}

	/*
	 * While we write to the group register to program all lanes at once we
	 * can read only lane registers and we pick lanes 0/1 for that.
	 */
	temp = intel_de_read(dev_priv, BXT_PORT_PCS_DW12_LN01(phy, ch));
	temp &= ~LANE_STAGGER_MASK;
	temp &= ~LANESTAGGER_STRAP_OVRD;
	temp |= pll->state.hw_state.pcsdw12;
	intel_de_write(dev_priv, BXT_PORT_PCS_DW12_GRP(phy, ch), temp);
}
2015 
/*
 * Disable a BXT/GLK port PLL; on GLK also power down the PLL and wait
 * for the power state to clear.
 */
static void bxt_ddi_pll_disable(struct drm_i915_private *dev_priv,
					struct intel_shared_dpll *pll)
{
	enum port port = (enum port)pll->info->id; /* 1:1 port->PLL mapping */

	intel_de_rmw(dev_priv, BXT_PORT_PLL_ENABLE(port), PORT_PLL_ENABLE, 0);
	intel_de_posting_read(dev_priv, BXT_PORT_PLL_ENABLE(port));

	if (IS_GEMINILAKE(dev_priv)) {
		intel_de_rmw(dev_priv, BXT_PORT_PLL_ENABLE(port),
			     PORT_PLL_POWER_ENABLE, 0);

		if (wait_for_us(!(intel_de_read(dev_priv, BXT_PORT_PLL_ENABLE(port)) &
				  PORT_PLL_POWER_STATE), 200))
			drm_err(&dev_priv->drm,
				"Power state not reset for PLL:%d\n", port);
	}
}
2034 
/*
 * Read the current PLL configuration back from the hardware into @hw_state.
 * Each register value is masked down to the fields this driver programs so
 * the result can be compared against a computed state. Returns true if the
 * PLL is enabled; false if it is disabled or the display core power well
 * could not be acquired.
 */
static bool bxt_ddi_pll_get_hw_state(struct drm_i915_private *dev_priv,
					struct intel_shared_dpll *pll,
					struct intel_dpll_hw_state *hw_state)
{
	enum port port = (enum port)pll->info->id; /* 1:1 port->PLL mapping */
	intel_wakeref_t wakeref;
	enum dpio_phy phy;
	enum dpio_channel ch;
	u32 val;
	bool ret;

	bxt_port_to_phy_channel(dev_priv, port, &phy, &ch);

	wakeref = intel_display_power_get_if_enabled(dev_priv,
						     POWER_DOMAIN_DISPLAY_CORE);
	if (!wakeref)
		return false;

	ret = false;

	val = intel_de_read(dev_priv, BXT_PORT_PLL_ENABLE(port));
	if (!(val & PORT_PLL_ENABLE))
		goto out;

	hw_state->ebb0 = intel_de_read(dev_priv, BXT_PORT_PLL_EBB_0(phy, ch));
	hw_state->ebb0 &= PORT_PLL_P1_MASK | PORT_PLL_P2_MASK;

	hw_state->ebb4 = intel_de_read(dev_priv, BXT_PORT_PLL_EBB_4(phy, ch));
	hw_state->ebb4 &= PORT_PLL_10BIT_CLK_ENABLE;

	hw_state->pll0 = intel_de_read(dev_priv, BXT_PORT_PLL(phy, ch, 0));
	hw_state->pll0 &= PORT_PLL_M2_INT_MASK;

	hw_state->pll1 = intel_de_read(dev_priv, BXT_PORT_PLL(phy, ch, 1));
	hw_state->pll1 &= PORT_PLL_N_MASK;

	hw_state->pll2 = intel_de_read(dev_priv, BXT_PORT_PLL(phy, ch, 2));
	hw_state->pll2 &= PORT_PLL_M2_FRAC_MASK;

	hw_state->pll3 = intel_de_read(dev_priv, BXT_PORT_PLL(phy, ch, 3));
	hw_state->pll3 &= PORT_PLL_M2_FRAC_ENABLE;

	hw_state->pll6 = intel_de_read(dev_priv, BXT_PORT_PLL(phy, ch, 6));
	hw_state->pll6 &= PORT_PLL_PROP_COEFF_MASK |
			  PORT_PLL_INT_COEFF_MASK |
			  PORT_PLL_GAIN_CTL_MASK;

	hw_state->pll8 = intel_de_read(dev_priv, BXT_PORT_PLL(phy, ch, 8));
	hw_state->pll8 &= PORT_PLL_TARGET_CNT_MASK;

	hw_state->pll9 = intel_de_read(dev_priv, BXT_PORT_PLL(phy, ch, 9));
	hw_state->pll9 &= PORT_PLL_LOCK_THRESHOLD_MASK;

	hw_state->pll10 = intel_de_read(dev_priv, BXT_PORT_PLL(phy, ch, 10));
	hw_state->pll10 &= PORT_PLL_DCO_AMP_OVR_EN_H |
			   PORT_PLL_DCO_AMP_MASK;

	/*
	 * While we write to the group register to program all lanes at once we
	 * can read only lane registers. We configure all lanes the same way, so
	 * here just read out lanes 0/1 and output a note if lanes 2/3 differ.
	 */
	hw_state->pcsdw12 = intel_de_read(dev_priv,
					  BXT_PORT_PCS_DW12_LN01(phy, ch));
	if (intel_de_read(dev_priv, BXT_PORT_PCS_DW12_LN23(phy, ch)) != hw_state->pcsdw12)
		drm_dbg(&dev_priv->drm,
			"lane stagger config different for lane 01 (%08x) and 23 (%08x)\n",
			hw_state->pcsdw12,
			intel_de_read(dev_priv,
				      BXT_PORT_PCS_DW12_LN23(phy, ch)));
	hw_state->pcsdw12 &= LANE_STAGGER_MASK | LANESTAGGER_STRAP_OVRD;

	ret = true;

out:
	intel_display_power_put(dev_priv, POWER_DOMAIN_DISPLAY_CORE, wakeref);

	return ret;
}
2114 
/*
 * Pre-calculated divider values for the standard DP link rates.
 * bxt_ddi_dp_pll_dividers() matches crtc_state->port_clock against .dot.
 */
static const struct dpll bxt_dp_clk_val[] = {
	/* m2 is .22 binary fixed point */
	{ .dot = 162000, .p1 = 4, .p2 = 2, .n = 1, .m1 = 2, .m2 = 0x819999a /* 32.4 */ },
	{ .dot = 270000, .p1 = 4, .p2 = 1, .n = 1, .m1 = 2, .m2 = 0x6c00000 /* 27.0 */ },
	{ .dot = 540000, .p1 = 2, .p2 = 1, .n = 1, .m1 = 2, .m2 = 0x6c00000 /* 27.0 */ },
	{ .dot = 216000, .p1 = 3, .p2 = 2, .n = 1, .m1 = 2, .m2 = 0x819999a /* 32.4 */ },
	{ .dot = 243000, .p1 = 4, .p2 = 1, .n = 1, .m1 = 2, .m2 = 0x6133333 /* 24.3 */ },
	{ .dot = 324000, .p1 = 4, .p2 = 1, .n = 1, .m1 = 2, .m2 = 0x819999a /* 32.4 */ },
	{ .dot = 432000, .p1 = 3, .p2 = 1, .n = 1, .m1 = 2, .m2 = 0x819999a /* 32.4 */ },
};
2126 
2127 static int
2128 bxt_ddi_hdmi_pll_dividers(struct intel_crtc_state *crtc_state,
2129 			  struct dpll *clk_div)
2130 {
2131 	struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
2132 
2133 	/* Calculate HDMI div */
2134 	/*
2135 	 * FIXME: tie the following calculation into
2136 	 * i9xx_crtc_compute_clock
2137 	 */
2138 	if (!bxt_find_best_dpll(crtc_state, clk_div))
2139 		return -EINVAL;
2140 
2141 	drm_WARN_ON(&i915->drm, clk_div->m1 != 2);
2142 
2143 	return 0;
2144 }
2145 
2146 static void bxt_ddi_dp_pll_dividers(struct intel_crtc_state *crtc_state,
2147 				    struct dpll *clk_div)
2148 {
2149 	struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
2150 	int i;
2151 
2152 	*clk_div = bxt_dp_clk_val[0];
2153 	for (i = 0; i < ARRAY_SIZE(bxt_dp_clk_val); ++i) {
2154 		if (crtc_state->port_clock == bxt_dp_clk_val[i].dot) {
2155 			*clk_div = bxt_dp_clk_val[i];
2156 			break;
2157 		}
2158 	}
2159 
2160 	chv_calc_dpll_params(i915->display.dpll.ref_clks.nssc, clk_div);
2161 
2162 	drm_WARN_ON(&i915->drm, clk_div->vco == 0 ||
2163 		    clk_div->dot != crtc_state->port_clock);
2164 }
2165 
/*
 * Translate the computed dividers into the register values cached in
 * crtc_state->dpll_hw_state. The loop-filter coefficients and target count
 * are chosen from the VCO frequency, the lane stagger value from the port
 * clock. Returns 0 on success, -EINVAL for a VCO outside the valid ranges.
 */
static int bxt_ddi_set_dpll_hw_state(struct intel_crtc_state *crtc_state,
				     const struct dpll *clk_div)
{
	struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
	struct intel_dpll_hw_state *dpll_hw_state = &crtc_state->dpll_hw_state;
	int clock = crtc_state->port_clock;
	int vco = clk_div->vco;
	u32 prop_coef, int_coef, gain_ctl, targ_cnt;
	u32 lanestagger;

	/* Loop filter tuning is banded by VCO frequency (kHz) */
	if (vco >= 6200000 && vco <= 6700000) {
		prop_coef = 4;
		int_coef = 9;
		gain_ctl = 3;
		targ_cnt = 8;
	} else if ((vco > 5400000 && vco < 6200000) ||
			(vco >= 4800000 && vco < 5400000)) {
		prop_coef = 5;
		int_coef = 11;
		gain_ctl = 3;
		targ_cnt = 9;
	} else if (vco == 5400000) {
		prop_coef = 3;
		int_coef = 8;
		gain_ctl = 1;
		targ_cnt = 9;
	} else {
		drm_err(&i915->drm, "Invalid VCO\n");
		return -EINVAL;
	}

	/* Lane staggering delay depends on the port clock (kHz) */
	if (clock > 270000)
		lanestagger = 0x18;
	else if (clock > 135000)
		lanestagger = 0x0d;
	else if (clock > 67000)
		lanestagger = 0x07;
	else if (clock > 33000)
		lanestagger = 0x04;
	else
		lanestagger = 0x02;

	/* m2 is .22 fixed point: integer part in pll0, fraction in pll2 */
	dpll_hw_state->ebb0 = PORT_PLL_P1(clk_div->p1) | PORT_PLL_P2(clk_div->p2);
	dpll_hw_state->pll0 = PORT_PLL_M2_INT(clk_div->m2 >> 22);
	dpll_hw_state->pll1 = PORT_PLL_N(clk_div->n);
	dpll_hw_state->pll2 = PORT_PLL_M2_FRAC(clk_div->m2 & 0x3fffff);

	if (clk_div->m2 & 0x3fffff)
		dpll_hw_state->pll3 = PORT_PLL_M2_FRAC_ENABLE;

	dpll_hw_state->pll6 = PORT_PLL_PROP_COEFF(prop_coef) |
		PORT_PLL_INT_COEFF(int_coef) |
		PORT_PLL_GAIN_CTL(gain_ctl);

	dpll_hw_state->pll8 = PORT_PLL_TARGET_CNT(targ_cnt);

	dpll_hw_state->pll9 = PORT_PLL_LOCK_THRESHOLD(5);

	dpll_hw_state->pll10 = PORT_PLL_DCO_AMP(15) |
		PORT_PLL_DCO_AMP_OVR_EN_H;

	dpll_hw_state->ebb4 = PORT_PLL_10BIT_CLK_ENABLE;

	dpll_hw_state->pcsdw12 = LANESTAGGER_STRAP_OVRD | lanestagger;

	return 0;
}
2233 
/*
 * Decode the cached PLL register state back into divider values and
 * compute the resulting port clock (kHz) via chv_calc_dpll_params().
 */
static int bxt_ddi_pll_get_freq(struct drm_i915_private *i915,
				const struct intel_shared_dpll *pll,
				const struct intel_dpll_hw_state *pll_state)
{
	struct dpll clock;

	/* m2 is .22 fixed point; the fraction is only valid when enabled */
	clock.m1 = 2;
	clock.m2 = REG_FIELD_GET(PORT_PLL_M2_INT_MASK, pll_state->pll0) << 22;
	if (pll_state->pll3 & PORT_PLL_M2_FRAC_ENABLE)
		clock.m2 |= REG_FIELD_GET(PORT_PLL_M2_FRAC_MASK, pll_state->pll2);
	clock.n = REG_FIELD_GET(PORT_PLL_N_MASK, pll_state->pll1);
	clock.p1 = REG_FIELD_GET(PORT_PLL_P1_MASK, pll_state->ebb0);
	clock.p2 = REG_FIELD_GET(PORT_PLL_P2_MASK, pll_state->ebb0);

	return chv_calc_dpll_params(i915->display.dpll.ref_clks.nssc, &clock);
}
2250 
2251 static int
2252 bxt_ddi_dp_set_dpll_hw_state(struct intel_crtc_state *crtc_state)
2253 {
2254 	struct dpll clk_div = {};
2255 
2256 	bxt_ddi_dp_pll_dividers(crtc_state, &clk_div);
2257 
2258 	return bxt_ddi_set_dpll_hw_state(crtc_state, &clk_div);
2259 }
2260 
2261 static int
2262 bxt_ddi_hdmi_set_dpll_hw_state(struct intel_crtc_state *crtc_state)
2263 {
2264 	struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
2265 	struct dpll clk_div = {};
2266 	int ret;
2267 
2268 	bxt_ddi_hdmi_pll_dividers(crtc_state, &clk_div);
2269 
2270 	ret = bxt_ddi_set_dpll_hw_state(crtc_state, &clk_div);
2271 	if (ret)
2272 		return ret;
2273 
2274 	crtc_state->port_clock = bxt_ddi_pll_get_freq(i915, NULL,
2275 						      &crtc_state->dpll_hw_state);
2276 
2277 	return 0;
2278 }
2279 
2280 static int bxt_compute_dpll(struct intel_atomic_state *state,
2281 			    struct intel_crtc *crtc,
2282 			    struct intel_encoder *encoder)
2283 {
2284 	struct intel_crtc_state *crtc_state =
2285 		intel_atomic_get_new_crtc_state(state, crtc);
2286 
2287 	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI))
2288 		return bxt_ddi_hdmi_set_dpll_hw_state(crtc_state);
2289 	else if (intel_crtc_has_dp_encoder(crtc_state))
2290 		return bxt_ddi_dp_set_dpll_hw_state(crtc_state);
2291 	else
2292 		return -EINVAL;
2293 }
2294 
2295 static int bxt_get_dpll(struct intel_atomic_state *state,
2296 			struct intel_crtc *crtc,
2297 			struct intel_encoder *encoder)
2298 {
2299 	struct intel_crtc_state *crtc_state =
2300 		intel_atomic_get_new_crtc_state(state, crtc);
2301 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
2302 	struct intel_shared_dpll *pll;
2303 	enum intel_dpll_id id;
2304 
2305 	/* 1:1 mapping between ports and PLLs */
2306 	id = (enum intel_dpll_id) encoder->port;
2307 	pll = intel_get_shared_dpll_by_id(dev_priv, id);
2308 
2309 	drm_dbg_kms(&dev_priv->drm, "[CRTC:%d:%s] using pre-allocated %s\n",
2310 		    crtc->base.base.id, crtc->base.name, pll->info->name);
2311 
2312 	intel_reference_shared_dpll(state, crtc,
2313 				    pll, &crtc_state->dpll_hw_state);
2314 
2315 	crtc_state->shared_dpll = pll;
2316 
2317 	return 0;
2318 }
2319 
/* BXT/GLK use a fixed 100 MHz reference for both SSC and non-SSC PLLs. */
static void bxt_update_dpll_ref_clks(struct drm_i915_private *i915)
{
	i915->display.dpll.ref_clks.ssc = 100000;
	i915->display.dpll.ref_clks.nssc = 100000;
	/* DSI non-SSC ref 19.2MHz */
}
2326 
2327 static void bxt_dump_hw_state(struct drm_i915_private *dev_priv,
2328 			      const struct intel_dpll_hw_state *hw_state)
2329 {
2330 	drm_dbg_kms(&dev_priv->drm, "dpll_hw_state: ebb0: 0x%x, ebb4: 0x%x,"
2331 		    "pll0: 0x%x, pll1: 0x%x, pll2: 0x%x, pll3: 0x%x, "
2332 		    "pll6: 0x%x, pll8: 0x%x, pll9: 0x%x, pll10: 0x%x, pcsdw12: 0x%x\n",
2333 		    hw_state->ebb0,
2334 		    hw_state->ebb4,
2335 		    hw_state->pll0,
2336 		    hw_state->pll1,
2337 		    hw_state->pll2,
2338 		    hw_state->pll3,
2339 		    hw_state->pll6,
2340 		    hw_state->pll8,
2341 		    hw_state->pll9,
2342 		    hw_state->pll10,
2343 		    hw_state->pcsdw12);
2344 }
2345 
/* Hardware access vtable shared by all three BXT port PLLs. */
static const struct intel_shared_dpll_funcs bxt_ddi_pll_funcs = {
	.enable = bxt_ddi_pll_enable,
	.disable = bxt_ddi_pll_disable,
	.get_hw_state = bxt_ddi_pll_get_hw_state,
	.get_freq = bxt_ddi_pll_get_freq,
};
2352 
/* The three BXT port PLLs; terminated by an empty sentinel entry. */
static const struct dpll_info bxt_plls[] = {
	{ "PORT PLL A", &bxt_ddi_pll_funcs, DPLL_ID_SKL_DPLL0, 0 },
	{ "PORT PLL B", &bxt_ddi_pll_funcs, DPLL_ID_SKL_DPLL1, 0 },
	{ "PORT PLL C", &bxt_ddi_pll_funcs, DPLL_ID_SKL_DPLL2, 0 },
	{ },
};
2359 
/* Platform PLL manager hooks for BXT/GLK. */
static const struct intel_dpll_mgr bxt_pll_mgr = {
	.dpll_info = bxt_plls,
	.compute_dplls = bxt_compute_dpll,
	.get_dplls = bxt_get_dpll,
	.put_dplls = intel_put_dpll,
	.update_ref_clks = bxt_update_dpll_ref_clks,
	.dump_hw_state = bxt_dump_hw_state,
};
2368 
/*
 * Decompose the overall AFE clock divider @bestdiv into the P/Q/K factors
 * of the WRPLL (bestdiv == pdiv * qdiv * kdiv). The order of the even-case
 * tests matters: e.g. 20 is divisible by both 4 and 5 and must take the
 * pdiv == 2 path. Dividers outside the supported set leave the outputs
 * untouched.
 */
static void icl_wrpll_get_multipliers(int bestdiv, int *pdiv,
				      int *qdiv, int *kdiv)
{
	if (bestdiv & 1) {
		/* odd dividers: 3, 5, 7 directly; 9, 15, 21 = 3 * {3, 5, 7} */
		if (bestdiv == 3 || bestdiv == 5 || bestdiv == 7) {
			*pdiv = bestdiv;
			*qdiv = 1;
			*kdiv = 1;
		} else {
			*pdiv = bestdiv / 3;
			*qdiv = 1;
			*kdiv = 3;
		}
		return;
	}

	/* even dividers */
	if (bestdiv == 2) {
		*pdiv = 2;
		*qdiv = 1;
		*kdiv = 1;
	} else if (!(bestdiv % 4)) {
		*pdiv = 2;
		*qdiv = bestdiv / 4;
		*kdiv = 2;
	} else if (!(bestdiv % 6)) {
		*pdiv = 3;
		*qdiv = bestdiv / 6;
		*kdiv = 2;
	} else if (!(bestdiv % 5)) {
		*pdiv = 5;
		*qdiv = bestdiv / 10;
		*kdiv = 2;
	} else if (!(bestdiv % 14)) {
		*pdiv = 7;
		*qdiv = bestdiv / 14;
		*kdiv = 2;
	}
}
2407 
/*
 * Encode the logical K/P/Q dividers and DCO frequency into the register
 * field values stored in @params. The DCO ratio is expressed as a 15-bit
 * binary fixed-point fraction of @ref_freq. Unsupported kdiv/pdiv values
 * only trigger a WARN and leave the corresponding field unwritten.
 */
static void icl_wrpll_params_populate(struct skl_wrpll_params *params,
				      u32 dco_freq, u32 ref_freq,
				      int pdiv, int qdiv, int kdiv)
{
	u32 dco;

	/* register encoding of kdiv is not the divider value itself */
	switch (kdiv) {
	case 1:
		params->kdiv = 1;
		break;
	case 2:
		params->kdiv = 2;
		break;
	case 3:
		params->kdiv = 4;
		break;
	default:
		WARN(1, "Incorrect KDiv\n");
	}

	/* register encoding of pdiv is not the divider value itself */
	switch (pdiv) {
	case 2:
		params->pdiv = 1;
		break;
	case 3:
		params->pdiv = 2;
		break;
	case 5:
		params->pdiv = 4;
		break;
	case 7:
		params->pdiv = 8;
		break;
	default:
		WARN(1, "Incorrect PDiv\n");
	}

	/* qdiv != 1 is only legal in combination with kdiv == 2 */
	WARN_ON(kdiv != 2 && qdiv != 1);

	params->qdiv_ratio = qdiv;
	params->qdiv_mode = (qdiv == 1) ? 0 : 1;

	/* DCO ratio in .15 binary fixed point */
	dco = div_u64((u64)dco_freq << 15, ref_freq);

	params->dco_integer = dco >> 15;
	params->dco_fraction = dco & 0x7fff;
}
2455 
2456 /*
2457  * Display WA #22010492432: ehl, tgl, adl-s, adl-p
2458  * Program half of the nominal DCO divider fraction value.
2459  */
2460 static bool
2461 ehl_combo_pll_div_frac_wa_needed(struct drm_i915_private *i915)
2462 {
2463 	return ((IS_PLATFORM(i915, INTEL_ELKHARTLAKE) &&
2464 		 IS_JSL_EHL_DISPLAY_STEP(i915, STEP_B0, STEP_FOREVER)) ||
2465 		 IS_TIGERLAKE(i915) || IS_ALDERLAKE_S(i915) || IS_ALDERLAKE_P(i915)) &&
2466 		 i915->display.dpll.ref_clks.nssc == 38400;
2467 }
2468 
/* Maps a DP link rate (kHz) to its pre-computed combo PLL parameters. */
struct icl_combo_pll_params {
	int clock;
	struct skl_wrpll_params wrpll;
};
2473 
2474 /*
2475  * These values alrea already adjusted: they're the bits we write to the
2476  * registers, not the logical values.
2477  */
2478 static const struct icl_combo_pll_params icl_dp_combo_pll_24MHz_values[] = {
2479 	{ 540000,
2480 	  { .dco_integer = 0x151, .dco_fraction = 0x4000,		/* [0]: 5.4 */
2481 	    .pdiv = 0x2 /* 3 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
2482 	{ 270000,
2483 	  { .dco_integer = 0x151, .dco_fraction = 0x4000,		/* [1]: 2.7 */
2484 	    .pdiv = 0x2 /* 3 */, .kdiv = 2, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
2485 	{ 162000,
2486 	  { .dco_integer = 0x151, .dco_fraction = 0x4000,		/* [2]: 1.62 */
2487 	    .pdiv = 0x4 /* 5 */, .kdiv = 2, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
2488 	{ 324000,
2489 	  { .dco_integer = 0x151, .dco_fraction = 0x4000,		/* [3]: 3.24 */
2490 	    .pdiv = 0x4 /* 5 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
2491 	{ 216000,
2492 	  { .dco_integer = 0x168, .dco_fraction = 0x0000,		/* [4]: 2.16 */
2493 	    .pdiv = 0x1 /* 2 */, .kdiv = 2, .qdiv_mode = 1, .qdiv_ratio = 2, }, },
2494 	{ 432000,
2495 	  { .dco_integer = 0x168, .dco_fraction = 0x0000,		/* [5]: 4.32 */
2496 	    .pdiv = 0x1 /* 2 */, .kdiv = 2, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
2497 	{ 648000,
2498 	  { .dco_integer = 0x195, .dco_fraction = 0x0000,		/* [6]: 6.48 */
2499 	    .pdiv = 0x2 /* 3 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
2500 	{ 810000,
2501 	  { .dco_integer = 0x151, .dco_fraction = 0x4000,		/* [7]: 8.1 */
2502 	    .pdiv = 0x1 /* 2 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
2503 };
2504 
2505 
/*
 * Also used for 38.4 MHz values (the PLL divides a 38.4 MHz reference by
 * two internally). Must stay the same length as the 24 MHz table.
 */
static const struct icl_combo_pll_params icl_dp_combo_pll_19_2MHz_values[] = {
	{ 540000,
	  { .dco_integer = 0x1A5, .dco_fraction = 0x7000,		/* [0]: 5.4 */
	    .pdiv = 0x2 /* 3 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
	{ 270000,
	  { .dco_integer = 0x1A5, .dco_fraction = 0x7000,		/* [1]: 2.7 */
	    .pdiv = 0x2 /* 3 */, .kdiv = 2, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
	{ 162000,
	  { .dco_integer = 0x1A5, .dco_fraction = 0x7000,		/* [2]: 1.62 */
	    .pdiv = 0x4 /* 5 */, .kdiv = 2, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
	{ 324000,
	  { .dco_integer = 0x1A5, .dco_fraction = 0x7000,		/* [3]: 3.24 */
	    .pdiv = 0x4 /* 5 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
	{ 216000,
	  { .dco_integer = 0x1C2, .dco_fraction = 0x0000,		/* [4]: 2.16 */
	    .pdiv = 0x1 /* 2 */, .kdiv = 2, .qdiv_mode = 1, .qdiv_ratio = 2, }, },
	{ 432000,
	  { .dco_integer = 0x1C2, .dco_fraction = 0x0000,		/* [5]: 4.32 */
	    .pdiv = 0x1 /* 2 */, .kdiv = 2, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
	{ 648000,
	  { .dco_integer = 0x1FA, .dco_fraction = 0x2000,		/* [6]: 6.48 */
	    .pdiv = 0x2 /* 3 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
	{ 810000,
	  { .dco_integer = 0x1A5, .dco_fraction = 0x7000,		/* [7]: 8.1 */
	    .pdiv = 0x1 /* 2 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
};
2533 
/* ICL Thunderbolt PLL parameters, per reference clock. */
static const struct skl_wrpll_params icl_tbt_pll_24MHz_values = {
	.dco_integer = 0x151, .dco_fraction = 0x4000,
	.pdiv = 0x4 /* 5 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0,
};

/* also used for 38.4 MHz (see icl_calc_tbt_pll()) */
static const struct skl_wrpll_params icl_tbt_pll_19_2MHz_values = {
	.dco_integer = 0x1A5, .dco_fraction = 0x7000,
	.pdiv = 0x4 /* 5 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0,
};
2543 
/* TGL+ Thunderbolt PLL parameters, per reference clock. */
static const struct skl_wrpll_params tgl_tbt_pll_19_2MHz_values = {
	.dco_integer = 0x54, .dco_fraction = 0x3000,
	/* the following params are unused */
	.pdiv = 0, .kdiv = 0, .qdiv_mode = 0, .qdiv_ratio = 0,
};

static const struct skl_wrpll_params tgl_tbt_pll_24MHz_values = {
	.dco_integer = 0x43, .dco_fraction = 0x4000,
	/* the following params are unused */
};
2554 
2555 static int icl_calc_dp_combo_pll(struct intel_crtc_state *crtc_state,
2556 				 struct skl_wrpll_params *pll_params)
2557 {
2558 	struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
2559 	const struct icl_combo_pll_params *params =
2560 		dev_priv->display.dpll.ref_clks.nssc == 24000 ?
2561 		icl_dp_combo_pll_24MHz_values :
2562 		icl_dp_combo_pll_19_2MHz_values;
2563 	int clock = crtc_state->port_clock;
2564 	int i;
2565 
2566 	for (i = 0; i < ARRAY_SIZE(icl_dp_combo_pll_24MHz_values); i++) {
2567 		if (clock == params[i].clock) {
2568 			*pll_params = params[i].wrpll;
2569 			return 0;
2570 		}
2571 	}
2572 
2573 	MISSING_CASE(clock);
2574 	return -EINVAL;
2575 }
2576 
2577 static int icl_calc_tbt_pll(struct intel_crtc_state *crtc_state,
2578 			    struct skl_wrpll_params *pll_params)
2579 {
2580 	struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
2581 
2582 	if (DISPLAY_VER(dev_priv) >= 12) {
2583 		switch (dev_priv->display.dpll.ref_clks.nssc) {
2584 		default:
2585 			MISSING_CASE(dev_priv->display.dpll.ref_clks.nssc);
2586 			fallthrough;
2587 		case 19200:
2588 		case 38400:
2589 			*pll_params = tgl_tbt_pll_19_2MHz_values;
2590 			break;
2591 		case 24000:
2592 			*pll_params = tgl_tbt_pll_24MHz_values;
2593 			break;
2594 		}
2595 	} else {
2596 		switch (dev_priv->display.dpll.ref_clks.nssc) {
2597 		default:
2598 			MISSING_CASE(dev_priv->display.dpll.ref_clks.nssc);
2599 			fallthrough;
2600 		case 19200:
2601 		case 38400:
2602 			*pll_params = icl_tbt_pll_19_2MHz_values;
2603 			break;
2604 		case 24000:
2605 			*pll_params = icl_tbt_pll_24MHz_values;
2606 			break;
2607 		}
2608 	}
2609 
2610 	return 0;
2611 }
2612 
/*
 * A single link frequency cannot be derived from the TBT PLL state, so
 * this hook always warns and reports 0.
 */
static int icl_ddi_tbt_pll_get_freq(struct drm_i915_private *i915,
				    const struct intel_shared_dpll *pll,
				    const struct intel_dpll_hw_state *pll_state)
{
	/*
	 * The PLL outputs multiple frequencies at the same time, selection is
	 * made at DDI clock mux level.
	 */
	drm_WARN_ON(&i915->drm, 1);

	return 0;
}
2625 
2626 static int icl_wrpll_ref_clock(struct drm_i915_private *i915)
2627 {
2628 	int ref_clock = i915->display.dpll.ref_clks.nssc;
2629 
2630 	/*
2631 	 * For ICL+, the spec states: if reference frequency is 38.4,
2632 	 * use 19.2 because the DPLL automatically divides that by 2.
2633 	 */
2634 	if (ref_clock == 38400)
2635 		ref_clock = 19200;
2636 
2637 	return ref_clock;
2638 }
2639 
2640 static int
2641 icl_calc_wrpll(struct intel_crtc_state *crtc_state,
2642 	       struct skl_wrpll_params *wrpll_params)
2643 {
2644 	struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
2645 	int ref_clock = icl_wrpll_ref_clock(i915);
2646 	u32 afe_clock = crtc_state->port_clock * 5;
2647 	u32 dco_min = 7998000;
2648 	u32 dco_max = 10000000;
2649 	u32 dco_mid = (dco_min + dco_max) / 2;
2650 	static const int dividers[] = {  2,  4,  6,  8, 10, 12,  14,  16,
2651 					 18, 20, 24, 28, 30, 32,  36,  40,
2652 					 42, 44, 48, 50, 52, 54,  56,  60,
2653 					 64, 66, 68, 70, 72, 76,  78,  80,
2654 					 84, 88, 90, 92, 96, 98, 100, 102,
2655 					  3,  5,  7,  9, 15, 21 };
2656 	u32 dco, best_dco = 0, dco_centrality = 0;
2657 	u32 best_dco_centrality = U32_MAX; /* Spec meaning of 999999 MHz */
2658 	int d, best_div = 0, pdiv = 0, qdiv = 0, kdiv = 0;
2659 
2660 	for (d = 0; d < ARRAY_SIZE(dividers); d++) {
2661 		dco = afe_clock * dividers[d];
2662 
2663 		if (dco <= dco_max && dco >= dco_min) {
2664 			dco_centrality = abs(dco - dco_mid);
2665 
2666 			if (dco_centrality < best_dco_centrality) {
2667 				best_dco_centrality = dco_centrality;
2668 				best_div = dividers[d];
2669 				best_dco = dco;
2670 			}
2671 		}
2672 	}
2673 
2674 	if (best_div == 0)
2675 		return -EINVAL;
2676 
2677 	icl_wrpll_get_multipliers(best_div, &pdiv, &qdiv, &kdiv);
2678 	icl_wrpll_params_populate(wrpll_params, best_dco, ref_clock,
2679 				  pdiv, qdiv, kdiv);
2680 
2681 	return 0;
2682 }
2683 
/*
 * Decode the combo PLL register state and return the resulting port clock
 * in kHz (the AFE clock divided by 5). Returns 0 (with a WARN) if any
 * decoded divider is zero.
 */
static int icl_ddi_combo_pll_get_freq(struct drm_i915_private *i915,
				      const struct intel_shared_dpll *pll,
				      const struct intel_dpll_hw_state *pll_state)
{
	int ref_clock = icl_wrpll_ref_clock(i915);
	u32 dco_fraction;
	u32 p0, p1, p2, dco_freq;

	p0 = pll_state->cfgcr1 & DPLL_CFGCR1_PDIV_MASK;
	p2 = pll_state->cfgcr1 & DPLL_CFGCR1_KDIV_MASK;

	/* qdiv ratio is only meaningful when qdiv mode is enabled */
	if (pll_state->cfgcr1 & DPLL_CFGCR1_QDIV_MODE(1))
		p1 = (pll_state->cfgcr1 & DPLL_CFGCR1_QDIV_RATIO_MASK) >>
			DPLL_CFGCR1_QDIV_RATIO_SHIFT;
	else
		p1 = 1;

	/* map the register encoding back to the logical pdiv value */
	switch (p0) {
	case DPLL_CFGCR1_PDIV_2:
		p0 = 2;
		break;
	case DPLL_CFGCR1_PDIV_3:
		p0 = 3;
		break;
	case DPLL_CFGCR1_PDIV_5:
		p0 = 5;
		break;
	case DPLL_CFGCR1_PDIV_7:
		p0 = 7;
		break;
	}

	/* map the register encoding back to the logical kdiv value */
	switch (p2) {
	case DPLL_CFGCR1_KDIV_1:
		p2 = 1;
		break;
	case DPLL_CFGCR1_KDIV_2:
		p2 = 2;
		break;
	case DPLL_CFGCR1_KDIV_3:
		p2 = 3;
		break;
	}

	dco_freq = (pll_state->cfgcr0 & DPLL_CFGCR0_DCO_INTEGER_MASK) *
		   ref_clock;

	dco_fraction = (pll_state->cfgcr0 & DPLL_CFGCR0_DCO_FRACTION_MASK) >>
		       DPLL_CFGCR0_DCO_FRACTION_SHIFT;

	/* undo WA #22010492432: the register holds half the nominal fraction */
	if (ehl_combo_pll_div_frac_wa_needed(i915))
		dco_fraction *= 2;

	/* fraction is .15 binary fixed point */
	dco_freq += (dco_fraction * ref_clock) / 0x8000;

	if (drm_WARN_ON(&i915->drm, p0 == 0 || p1 == 0 || p2 == 0))
		return 0;

	return dco_freq / (p0 * p1 * p2 * 5);
}
2744 
/*
 * Encode the computed WRPLL parameters into the CFGCR0/CFGCR1 (and,
 * when the VBT overrides AFC startup, DIV0) register values in @pll_state.
 */
static void icl_calc_dpll_state(struct drm_i915_private *i915,
				const struct skl_wrpll_params *pll_params,
				struct intel_dpll_hw_state *pll_state)
{
	u32 dco_fraction = pll_params->dco_fraction;

	/* WA #22010492432: program half of the nominal DCO fraction */
	if (ehl_combo_pll_div_frac_wa_needed(i915))
		dco_fraction = DIV_ROUND_CLOSEST(dco_fraction, 2);

	pll_state->cfgcr0 = DPLL_CFGCR0_DCO_FRACTION(dco_fraction) |
			    pll_params->dco_integer;

	pll_state->cfgcr1 = DPLL_CFGCR1_QDIV_RATIO(pll_params->qdiv_ratio) |
			    DPLL_CFGCR1_QDIV_MODE(pll_params->qdiv_mode) |
			    DPLL_CFGCR1_KDIV(pll_params->kdiv) |
			    DPLL_CFGCR1_PDIV(pll_params->pdiv);

	if (DISPLAY_VER(i915) >= 12)
		pll_state->cfgcr1 |= TGL_DPLL_CFGCR1_CFSELOVRD_NORMAL_XTAL;
	else
		pll_state->cfgcr1 |= DPLL_CFGCR1_CENTRAL_FREQ_8400;

	if (i915->display.vbt.override_afc_startup)
		pll_state->div0 = TGL_DPLL0_DIV0_AFC_STARTUP(i915->display.vbt.override_afc_startup_val);
}
2770 
/*
 * Find div1/div2 such that the DCO frequency (clock * 5 * div1 * div2)
 * falls inside the valid range, preferring larger div1 and div2 values.
 * On success *target_dco_khz and the CLKTOP2 fields of @state are filled
 * in and 0 is returned; -EINVAL if no combination fits.
 */
static int icl_mg_pll_find_divisors(int clock_khz, bool is_dp, bool use_ssc,
				    u32 *target_dco_khz,
				    struct intel_dpll_hw_state *state,
				    bool is_dkl)
{
	static const u8 div1_vals[] = { 7, 5, 3, 2 };
	u32 dco_min_freq, dco_max_freq;
	unsigned int i;
	int div2;

	/* DP is pinned to exactly 8.1 GHz; HDMI gets a range */
	dco_min_freq = is_dp ? 8100000 : use_ssc ? 8000000 : 7992000;
	dco_max_freq = is_dp ? 8100000 : 10000000;

	for (i = 0; i < ARRAY_SIZE(div1_vals); i++) {
		int div1 = div1_vals[i];

		for (div2 = 10; div2 > 0; div2--) {
			int dco = div1 * div2 * clock_khz * 5;
			int a_divratio, tlinedrv, inputsel;
			u32 hsdiv;

			if (dco < dco_min_freq || dco > dco_max_freq)
				continue;

			if (div2 >= 2) {
				/*
				 * Note: a_divratio not matching TGL BSpec
				 * algorithm but matching hardcoded values and
				 * working on HW for DP alt-mode at least
				 */
				a_divratio = is_dp ? 10 : 5;
				tlinedrv = is_dkl ? 1 : 2;
			} else {
				a_divratio = 5;
				tlinedrv = 0;
			}
			inputsel = is_dp ? 0 : 1;

			/* translate div1 into its register encoding */
			switch (div1) {
			default:
				MISSING_CASE(div1);
				fallthrough;
			case 2:
				hsdiv = MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_2;
				break;
			case 3:
				hsdiv = MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_3;
				break;
			case 5:
				hsdiv = MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_5;
				break;
			case 7:
				hsdiv = MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_7;
				break;
			}

			*target_dco_khz = dco;

			state->mg_refclkin_ctl = MG_REFCLKIN_CTL_OD_2_MUX(1);

			state->mg_clktop2_coreclkctl1 =
				MG_CLKTOP2_CORECLKCTL1_A_DIVRATIO(a_divratio);

			state->mg_clktop2_hsclkctl =
				MG_CLKTOP2_HSCLKCTL_TLINEDRV_CLKSEL(tlinedrv) |
				MG_CLKTOP2_HSCLKCTL_CORE_INPUTSEL(inputsel) |
				hsdiv |
				MG_CLKTOP2_HSCLKCTL_DSDIV_RATIO(div2);

			return 0;
		}
	}

	return -EINVAL;
}
2846 
2847 /*
2848  * The specification for this function uses real numbers, so the math had to be
2849  * adapted to integer-only calculation, that's why it looks so different.
2850  */
2851 static int icl_calc_mg_pll_state(struct intel_crtc_state *crtc_state,
2852 				 struct intel_dpll_hw_state *pll_state)
2853 {
2854 	struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
2855 	int refclk_khz = dev_priv->display.dpll.ref_clks.nssc;
2856 	int clock = crtc_state->port_clock;
2857 	u32 dco_khz, m1div, m2div_int, m2div_rem, m2div_frac;
2858 	u32 iref_ndiv, iref_trim, iref_pulse_w;
2859 	u32 prop_coeff, int_coeff;
2860 	u32 tdc_targetcnt, feedfwgain;
2861 	u64 ssc_stepsize, ssc_steplen, ssc_steplog;
2862 	u64 tmp;
2863 	bool use_ssc = false;
2864 	bool is_dp = !intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI);
2865 	bool is_dkl = DISPLAY_VER(dev_priv) >= 12;
2866 	int ret;
2867 
2868 	ret = icl_mg_pll_find_divisors(clock, is_dp, use_ssc, &dco_khz,
2869 				       pll_state, is_dkl);
2870 	if (ret)
2871 		return ret;
2872 
2873 	m1div = 2;
2874 	m2div_int = dco_khz / (refclk_khz * m1div);
2875 	if (m2div_int > 255) {
2876 		if (!is_dkl) {
2877 			m1div = 4;
2878 			m2div_int = dco_khz / (refclk_khz * m1div);
2879 		}
2880 
2881 		if (m2div_int > 255)
2882 			return -EINVAL;
2883 	}
2884 	m2div_rem = dco_khz % (refclk_khz * m1div);
2885 
2886 	tmp = (u64)m2div_rem * (1 << 22);
2887 	do_div(tmp, refclk_khz * m1div);
2888 	m2div_frac = tmp;
2889 
2890 	switch (refclk_khz) {
2891 	case 19200:
2892 		iref_ndiv = 1;
2893 		iref_trim = 28;
2894 		iref_pulse_w = 1;
2895 		break;
2896 	case 24000:
2897 		iref_ndiv = 1;
2898 		iref_trim = 25;
2899 		iref_pulse_w = 2;
2900 		break;
2901 	case 38400:
2902 		iref_ndiv = 2;
2903 		iref_trim = 28;
2904 		iref_pulse_w = 1;
2905 		break;
2906 	default:
2907 		MISSING_CASE(refclk_khz);
2908 		return -EINVAL;
2909 	}
2910 
2911 	/*
2912 	 * tdc_res = 0.000003
2913 	 * tdc_targetcnt = int(2 / (tdc_res * 8 * 50 * 1.1) / refclk_mhz + 0.5)
2914 	 *
2915 	 * The multiplication by 1000 is due to refclk MHz to KHz conversion. It
2916 	 * was supposed to be a division, but we rearranged the operations of
2917 	 * the formula to avoid early divisions so we don't multiply the
2918 	 * rounding errors.
2919 	 *
2920 	 * 0.000003 * 8 * 50 * 1.1 = 0.00132, also known as 132 / 100000, which
2921 	 * we also rearrange to work with integers.
2922 	 *
2923 	 * The 0.5 transformed to 5 results in a multiplication by 10 and the
2924 	 * last division by 10.
2925 	 */
2926 	tdc_targetcnt = (2 * 1000 * 100000 * 10 / (132 * refclk_khz) + 5) / 10;
2927 
2928 	/*
2929 	 * Here we divide dco_khz by 10 in order to allow the dividend to fit in
2930 	 * 32 bits. That's not a problem since we round the division down
2931 	 * anyway.
2932 	 */
2933 	feedfwgain = (use_ssc || m2div_rem > 0) ?
2934 		m1div * 1000000 * 100 / (dco_khz * 3 / 10) : 0;
2935 
2936 	if (dco_khz >= 9000000) {
2937 		prop_coeff = 5;
2938 		int_coeff = 10;
2939 	} else {
2940 		prop_coeff = 4;
2941 		int_coeff = 8;
2942 	}
2943 
2944 	if (use_ssc) {
2945 		tmp = mul_u32_u32(dco_khz, 47 * 32);
2946 		do_div(tmp, refclk_khz * m1div * 10000);
2947 		ssc_stepsize = tmp;
2948 
2949 		tmp = mul_u32_u32(dco_khz, 1000);
2950 		ssc_steplen = DIV_ROUND_UP_ULL(tmp, 32 * 2 * 32);
2951 	} else {
2952 		ssc_stepsize = 0;
2953 		ssc_steplen = 0;
2954 	}
2955 	ssc_steplog = 4;
2956 
2957 	/* write pll_state calculations */
2958 	if (is_dkl) {
2959 		pll_state->mg_pll_div0 = DKL_PLL_DIV0_INTEG_COEFF(int_coeff) |
2960 					 DKL_PLL_DIV0_PROP_COEFF(prop_coeff) |
2961 					 DKL_PLL_DIV0_FBPREDIV(m1div) |
2962 					 DKL_PLL_DIV0_FBDIV_INT(m2div_int);
2963 		if (dev_priv->display.vbt.override_afc_startup) {
2964 			u8 val = dev_priv->display.vbt.override_afc_startup_val;
2965 
2966 			pll_state->mg_pll_div0 |= DKL_PLL_DIV0_AFC_STARTUP(val);
2967 		}
2968 
2969 		pll_state->mg_pll_div1 = DKL_PLL_DIV1_IREF_TRIM(iref_trim) |
2970 					 DKL_PLL_DIV1_TDC_TARGET_CNT(tdc_targetcnt);
2971 
2972 		pll_state->mg_pll_ssc = DKL_PLL_SSC_IREF_NDIV_RATIO(iref_ndiv) |
2973 					DKL_PLL_SSC_STEP_LEN(ssc_steplen) |
2974 					DKL_PLL_SSC_STEP_NUM(ssc_steplog) |
2975 					(use_ssc ? DKL_PLL_SSC_EN : 0);
2976 
2977 		pll_state->mg_pll_bias = (m2div_frac ? DKL_PLL_BIAS_FRAC_EN_H : 0) |
2978 					  DKL_PLL_BIAS_FBDIV_FRAC(m2div_frac);
2979 
2980 		pll_state->mg_pll_tdc_coldst_bias =
2981 				DKL_PLL_TDC_SSC_STEP_SIZE(ssc_stepsize) |
2982 				DKL_PLL_TDC_FEED_FWD_GAIN(feedfwgain);
2983 
2984 	} else {
2985 		pll_state->mg_pll_div0 =
2986 			(m2div_rem > 0 ? MG_PLL_DIV0_FRACNEN_H : 0) |
2987 			MG_PLL_DIV0_FBDIV_FRAC(m2div_frac) |
2988 			MG_PLL_DIV0_FBDIV_INT(m2div_int);
2989 
2990 		pll_state->mg_pll_div1 =
2991 			MG_PLL_DIV1_IREF_NDIVRATIO(iref_ndiv) |
2992 			MG_PLL_DIV1_DITHER_DIV_2 |
2993 			MG_PLL_DIV1_NDIVRATIO(1) |
2994 			MG_PLL_DIV1_FBPREDIV(m1div);
2995 
2996 		pll_state->mg_pll_lf =
2997 			MG_PLL_LF_TDCTARGETCNT(tdc_targetcnt) |
2998 			MG_PLL_LF_AFCCNTSEL_512 |
2999 			MG_PLL_LF_GAINCTRL(1) |
3000 			MG_PLL_LF_INT_COEFF(int_coeff) |
3001 			MG_PLL_LF_PROP_COEFF(prop_coeff);
3002 
3003 		pll_state->mg_pll_frac_lock =
3004 			MG_PLL_FRAC_LOCK_TRUELOCK_CRIT_32 |
3005 			MG_PLL_FRAC_LOCK_EARLYLOCK_CRIT_32 |
3006 			MG_PLL_FRAC_LOCK_LOCKTHRESH(10) |
3007 			MG_PLL_FRAC_LOCK_DCODITHEREN |
3008 			MG_PLL_FRAC_LOCK_FEEDFWRDGAIN(feedfwgain);
3009 		if (use_ssc || m2div_rem > 0)
3010 			pll_state->mg_pll_frac_lock |=
3011 				MG_PLL_FRAC_LOCK_FEEDFWRDCAL_EN;
3012 
3013 		pll_state->mg_pll_ssc =
3014 			(use_ssc ? MG_PLL_SSC_EN : 0) |
3015 			MG_PLL_SSC_TYPE(2) |
3016 			MG_PLL_SSC_STEPLENGTH(ssc_steplen) |
3017 			MG_PLL_SSC_STEPNUM(ssc_steplog) |
3018 			MG_PLL_SSC_FLLEN |
3019 			MG_PLL_SSC_STEPSIZE(ssc_stepsize);
3020 
3021 		pll_state->mg_pll_tdc_coldst_bias =
3022 			MG_PLL_TDC_COLDST_COLDSTART |
3023 			MG_PLL_TDC_COLDST_IREFINT_EN |
3024 			MG_PLL_TDC_COLDST_REFBIAS_START_PULSE_W(iref_pulse_w) |
3025 			MG_PLL_TDC_TDCOVCCORR_EN |
3026 			MG_PLL_TDC_TDCSEL(3);
3027 
3028 		pll_state->mg_pll_bias =
3029 			MG_PLL_BIAS_BIAS_GB_SEL(3) |
3030 			MG_PLL_BIAS_INIT_DCOAMP(0x3F) |
3031 			MG_PLL_BIAS_BIAS_BONUS(10) |
3032 			MG_PLL_BIAS_BIASCAL_EN |
3033 			MG_PLL_BIAS_CTRIM(12) |
3034 			MG_PLL_BIAS_VREF_RDAC(4) |
3035 			MG_PLL_BIAS_IREFTRIM(iref_trim);
3036 
3037 		if (refclk_khz == 38400) {
3038 			pll_state->mg_pll_tdc_coldst_bias_mask =
3039 				MG_PLL_TDC_COLDST_COLDSTART;
3040 			pll_state->mg_pll_bias_mask = 0;
3041 		} else {
3042 			pll_state->mg_pll_tdc_coldst_bias_mask = -1U;
3043 			pll_state->mg_pll_bias_mask = -1U;
3044 		}
3045 
3046 		pll_state->mg_pll_tdc_coldst_bias &=
3047 			pll_state->mg_pll_tdc_coldst_bias_mask;
3048 		pll_state->mg_pll_bias &= pll_state->mg_pll_bias_mask;
3049 	}
3050 
3051 	return 0;
3052 }
3053 
static int icl_ddi_mg_pll_get_freq(struct drm_i915_private *dev_priv,
				   const struct intel_shared_dpll *pll,
				   const struct intel_dpll_hw_state *pll_state)
{
	u32 m1, m2_int, m2_frac, div1, div2, ref_clock;
	u64 tmp;

	/* MG/DKL PHY PLL frequencies are derived from the non-SSC refclk. */
	ref_clock = dev_priv->display.dpll.ref_clks.nssc;

	/*
	 * Read back the feedback dividers: m1 (pre-divider), the m2 integer
	 * part and the 22-bit m2 fractional part (valid only when fractional
	 * mode is enabled, 0 otherwise). The register layout differs between
	 * the DKL PHY (display version 12+) and the older ICL MG PHY.
	 */
	if (DISPLAY_VER(dev_priv) >= 12) {
		m1 = pll_state->mg_pll_div0 & DKL_PLL_DIV0_FBPREDIV_MASK;
		m1 = m1 >> DKL_PLL_DIV0_FBPREDIV_SHIFT;
		m2_int = pll_state->mg_pll_div0 & DKL_PLL_DIV0_FBDIV_INT_MASK;

		if (pll_state->mg_pll_bias & DKL_PLL_BIAS_FRAC_EN_H) {
			m2_frac = pll_state->mg_pll_bias &
				  DKL_PLL_BIAS_FBDIV_FRAC_MASK;
			m2_frac = m2_frac >> DKL_PLL_BIAS_FBDIV_SHIFT;
		} else {
			m2_frac = 0;
		}
	} else {
		m1 = pll_state->mg_pll_div1 & MG_PLL_DIV1_FBPREDIV_MASK;
		m2_int = pll_state->mg_pll_div0 & MG_PLL_DIV0_FBDIV_INT_MASK;

		if (pll_state->mg_pll_div0 & MG_PLL_DIV0_FRACNEN_H) {
			m2_frac = pll_state->mg_pll_div0 &
				  MG_PLL_DIV0_FBDIV_FRAC_MASK;
			m2_frac = m2_frac >> MG_PLL_DIV0_FBDIV_FRAC_SHIFT;
		} else {
			m2_frac = 0;
		}
	}

	/* Decode the high-speed divider (div1) from clktop2_hsclkctl. */
	switch (pll_state->mg_clktop2_hsclkctl &
		MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_MASK) {
	case MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_2:
		div1 = 2;
		break;
	case MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_3:
		div1 = 3;
		break;
	case MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_5:
		div1 = 5;
		break;
	case MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_7:
		div1 = 7;
		break;
	default:
		MISSING_CASE(pll_state->mg_clktop2_hsclkctl);
		return 0;
	}

	div2 = (pll_state->mg_clktop2_hsclkctl &
		MG_CLKTOP2_HSCLKCTL_DSDIV_RATIO_MASK) >>
		MG_CLKTOP2_HSCLKCTL_DSDIV_RATIO_SHIFT;

	/* div2 value of 0 is same as 1 means no div */
	if (div2 == 0)
		div2 = 1;

	/*
	 * Adjust the original formula to delay the division by 2^22 in order to
	 * minimize possible rounding errors.
	 */
	tmp = (u64)m1 * m2_int * ref_clock +
	      (((u64)m1 * m2_frac * ref_clock) >> 22);
	tmp = div_u64(tmp, 5 * div1 * div2);

	return tmp;
}
3125 
3126 /**
3127  * icl_set_active_port_dpll - select the active port DPLL for a given CRTC
3128  * @crtc_state: state for the CRTC to select the DPLL for
3129  * @port_dpll_id: the active @port_dpll_id to select
3130  *
3131  * Select the given @port_dpll_id instance from the DPLLs reserved for the
3132  * CRTC.
3133  */
3134 void icl_set_active_port_dpll(struct intel_crtc_state *crtc_state,
3135 			      enum icl_port_dpll_id port_dpll_id)
3136 {
3137 	struct icl_port_dpll *port_dpll =
3138 		&crtc_state->icl_port_dplls[port_dpll_id];
3139 
3140 	crtc_state->shared_dpll = port_dpll->pll;
3141 	crtc_state->dpll_hw_state = port_dpll->hw_state;
3142 }
3143 
3144 static void icl_update_active_dpll(struct intel_atomic_state *state,
3145 				   struct intel_crtc *crtc,
3146 				   struct intel_encoder *encoder)
3147 {
3148 	struct intel_crtc_state *crtc_state =
3149 		intel_atomic_get_new_crtc_state(state, crtc);
3150 	struct intel_digital_port *primary_port;
3151 	enum icl_port_dpll_id port_dpll_id = ICL_PORT_DPLL_DEFAULT;
3152 
3153 	primary_port = encoder->type == INTEL_OUTPUT_DP_MST ?
3154 		enc_to_mst(encoder)->primary :
3155 		enc_to_dig_port(encoder);
3156 
3157 	if (primary_port &&
3158 	    (intel_tc_port_in_dp_alt_mode(primary_port) ||
3159 	     intel_tc_port_in_legacy_mode(primary_port)))
3160 		port_dpll_id = ICL_PORT_DPLL_MG_PHY;
3161 
3162 	icl_set_active_port_dpll(crtc_state, port_dpll_id);
3163 }
3164 
/* Compute the default (combo PHY) port DPLL state for the CRTC. */
static int icl_compute_combo_phy_dpll(struct intel_atomic_state *state,
				      struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	struct intel_crtc_state *crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);
	struct icl_port_dpll *port_dpll =
		&crtc_state->icl_port_dplls[ICL_PORT_DPLL_DEFAULT];
	struct skl_wrpll_params pll_params = {};
	int ret;

	/* HDMI/DSI need WRPLL parameters; DP uses the DP combo PLL params. */
	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI) ||
	    intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DSI))
		ret = icl_calc_wrpll(crtc_state, &pll_params);
	else
		ret = icl_calc_dp_combo_pll(crtc_state, &pll_params);

	if (ret)
		return ret;

	icl_calc_dpll_state(dev_priv, &pll_params, &port_dpll->hw_state);

	/* this is mainly for the fastset check */
	icl_set_active_port_dpll(crtc_state, ICL_PORT_DPLL_DEFAULT);

	/* Derive port_clock back from the just-computed PLL state. */
	crtc_state->port_clock = icl_ddi_combo_pll_get_freq(dev_priv, NULL,
							    &port_dpll->hw_state);

	return 0;
}
3195 
/*
 * Reserve a combo PHY DPLL for the CRTC from the platform-specific pool of
 * eligible PLLs. Returns 0 on success, -EINVAL if no PLL could be found.
 */
static int icl_get_combo_phy_dpll(struct intel_atomic_state *state,
				  struct intel_crtc *crtc,
				  struct intel_encoder *encoder)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	struct intel_crtc_state *crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);
	struct icl_port_dpll *port_dpll =
		&crtc_state->icl_port_dplls[ICL_PORT_DPLL_DEFAULT];
	enum port port = encoder->port;
	unsigned long dpll_mask;

	/* Pick the set of candidate DPLLs for this platform (and port). */
	if (IS_ALDERLAKE_S(dev_priv)) {
		dpll_mask =
			BIT(DPLL_ID_DG1_DPLL3) |
			BIT(DPLL_ID_DG1_DPLL2) |
			BIT(DPLL_ID_ICL_DPLL1) |
			BIT(DPLL_ID_ICL_DPLL0);
	} else if (IS_DG1(dev_priv)) {
		/* On DG1 ports D/E and the other ports use disjoint PLL pairs. */
		if (port == PORT_D || port == PORT_E) {
			dpll_mask =
				BIT(DPLL_ID_DG1_DPLL2) |
				BIT(DPLL_ID_DG1_DPLL3);
		} else {
			dpll_mask =
				BIT(DPLL_ID_DG1_DPLL0) |
				BIT(DPLL_ID_DG1_DPLL1);
		}
	} else if (IS_ROCKETLAKE(dev_priv)) {
		dpll_mask =
			BIT(DPLL_ID_EHL_DPLL4) |
			BIT(DPLL_ID_ICL_DPLL1) |
			BIT(DPLL_ID_ICL_DPLL0);
	} else if (IS_JSL_EHL(dev_priv) && port != PORT_A) {
		/* On JSL/EHL, DPLL4 is available to all ports except A. */
		dpll_mask =
			BIT(DPLL_ID_EHL_DPLL4) |
			BIT(DPLL_ID_ICL_DPLL1) |
			BIT(DPLL_ID_ICL_DPLL0);
	} else {
		dpll_mask = BIT(DPLL_ID_ICL_DPLL1) | BIT(DPLL_ID_ICL_DPLL0);
	}

	/* Eliminate DPLLs from consideration if reserved by HTI */
	dpll_mask &= ~intel_hti_dpll_mask(dev_priv);

	port_dpll->pll = intel_find_shared_dpll(state, crtc,
						&port_dpll->hw_state,
						dpll_mask);
	if (!port_dpll->pll)
		return -EINVAL;

	intel_reference_shared_dpll(state, crtc,
				    port_dpll->pll, &port_dpll->hw_state);

	icl_update_active_dpll(state, crtc, encoder);

	return 0;
}
3254 
3255 static int icl_compute_tc_phy_dplls(struct intel_atomic_state *state,
3256 				    struct intel_crtc *crtc)
3257 {
3258 	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
3259 	struct intel_crtc_state *crtc_state =
3260 		intel_atomic_get_new_crtc_state(state, crtc);
3261 	struct icl_port_dpll *port_dpll =
3262 		&crtc_state->icl_port_dplls[ICL_PORT_DPLL_DEFAULT];
3263 	struct skl_wrpll_params pll_params = {};
3264 	int ret;
3265 
3266 	port_dpll = &crtc_state->icl_port_dplls[ICL_PORT_DPLL_DEFAULT];
3267 	ret = icl_calc_tbt_pll(crtc_state, &pll_params);
3268 	if (ret)
3269 		return ret;
3270 
3271 	icl_calc_dpll_state(dev_priv, &pll_params, &port_dpll->hw_state);
3272 
3273 	port_dpll = &crtc_state->icl_port_dplls[ICL_PORT_DPLL_MG_PHY];
3274 	ret = icl_calc_mg_pll_state(crtc_state, &port_dpll->hw_state);
3275 	if (ret)
3276 		return ret;
3277 
3278 	/* this is mainly for the fastset check */
3279 	icl_set_active_port_dpll(crtc_state, ICL_PORT_DPLL_MG_PHY);
3280 
3281 	crtc_state->port_clock = icl_ddi_mg_pll_get_freq(dev_priv, NULL,
3282 							 &port_dpll->hw_state);
3283 
3284 	return 0;
3285 }
3286 
3287 static int icl_get_tc_phy_dplls(struct intel_atomic_state *state,
3288 				struct intel_crtc *crtc,
3289 				struct intel_encoder *encoder)
3290 {
3291 	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
3292 	struct intel_crtc_state *crtc_state =
3293 		intel_atomic_get_new_crtc_state(state, crtc);
3294 	struct icl_port_dpll *port_dpll =
3295 		&crtc_state->icl_port_dplls[ICL_PORT_DPLL_DEFAULT];
3296 	enum intel_dpll_id dpll_id;
3297 	int ret;
3298 
3299 	port_dpll = &crtc_state->icl_port_dplls[ICL_PORT_DPLL_DEFAULT];
3300 	port_dpll->pll = intel_find_shared_dpll(state, crtc,
3301 						&port_dpll->hw_state,
3302 						BIT(DPLL_ID_ICL_TBTPLL));
3303 	if (!port_dpll->pll)
3304 		return -EINVAL;
3305 	intel_reference_shared_dpll(state, crtc,
3306 				    port_dpll->pll, &port_dpll->hw_state);
3307 
3308 
3309 	port_dpll = &crtc_state->icl_port_dplls[ICL_PORT_DPLL_MG_PHY];
3310 	dpll_id = icl_tc_port_to_pll_id(intel_port_to_tc(dev_priv,
3311 							 encoder->port));
3312 	port_dpll->pll = intel_find_shared_dpll(state, crtc,
3313 						&port_dpll->hw_state,
3314 						BIT(dpll_id));
3315 	if (!port_dpll->pll) {
3316 		ret = -EINVAL;
3317 		goto err_unreference_tbt_pll;
3318 	}
3319 	intel_reference_shared_dpll(state, crtc,
3320 				    port_dpll->pll, &port_dpll->hw_state);
3321 
3322 	icl_update_active_dpll(state, crtc, encoder);
3323 
3324 	return 0;
3325 
3326 err_unreference_tbt_pll:
3327 	port_dpll = &crtc_state->icl_port_dplls[ICL_PORT_DPLL_DEFAULT];
3328 	intel_unreference_shared_dpll(state, crtc, port_dpll->pll);
3329 
3330 	return ret;
3331 }
3332 
3333 static int icl_compute_dplls(struct intel_atomic_state *state,
3334 			     struct intel_crtc *crtc,
3335 			     struct intel_encoder *encoder)
3336 {
3337 	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
3338 	enum phy phy = intel_port_to_phy(dev_priv, encoder->port);
3339 
3340 	if (intel_phy_is_combo(dev_priv, phy))
3341 		return icl_compute_combo_phy_dpll(state, crtc);
3342 	else if (intel_phy_is_tc(dev_priv, phy))
3343 		return icl_compute_tc_phy_dplls(state, crtc);
3344 
3345 	MISSING_CASE(phy);
3346 
3347 	return 0;
3348 }
3349 
3350 static int icl_get_dplls(struct intel_atomic_state *state,
3351 			 struct intel_crtc *crtc,
3352 			 struct intel_encoder *encoder)
3353 {
3354 	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
3355 	enum phy phy = intel_port_to_phy(dev_priv, encoder->port);
3356 
3357 	if (intel_phy_is_combo(dev_priv, phy))
3358 		return icl_get_combo_phy_dpll(state, crtc, encoder);
3359 	else if (intel_phy_is_tc(dev_priv, phy))
3360 		return icl_get_tc_phy_dplls(state, crtc, encoder);
3361 
3362 	MISSING_CASE(phy);
3363 
3364 	return -EINVAL;
3365 }
3366 
3367 static void icl_put_dplls(struct intel_atomic_state *state,
3368 			  struct intel_crtc *crtc)
3369 {
3370 	const struct intel_crtc_state *old_crtc_state =
3371 		intel_atomic_get_old_crtc_state(state, crtc);
3372 	struct intel_crtc_state *new_crtc_state =
3373 		intel_atomic_get_new_crtc_state(state, crtc);
3374 	enum icl_port_dpll_id id;
3375 
3376 	new_crtc_state->shared_dpll = NULL;
3377 
3378 	for (id = ICL_PORT_DPLL_DEFAULT; id < ICL_PORT_DPLL_COUNT; id++) {
3379 		const struct icl_port_dpll *old_port_dpll =
3380 			&old_crtc_state->icl_port_dplls[id];
3381 		struct icl_port_dpll *new_port_dpll =
3382 			&new_crtc_state->icl_port_dplls[id];
3383 
3384 		new_port_dpll->pll = NULL;
3385 
3386 		if (!old_port_dpll->pll)
3387 			continue;
3388 
3389 		intel_unreference_shared_dpll(state, crtc, old_port_dpll->pll);
3390 	}
3391 }
3392 
/*
 * Read out the MG PHY PLL hw state for @pll, masking out reserved fields so
 * the result can be compared against the software-computed state. Returns
 * true if the PLL is enabled, false otherwise (or if display power is off).
 */
static bool mg_pll_get_hw_state(struct drm_i915_private *dev_priv,
				struct intel_shared_dpll *pll,
				struct intel_dpll_hw_state *hw_state)
{
	const enum intel_dpll_id id = pll->info->id;
	enum tc_port tc_port = icl_pll_id_to_tc_port(id);
	intel_wakeref_t wakeref;
	bool ret = false;
	u32 val;

	i915_reg_t enable_reg = intel_tc_pll_enable_reg(dev_priv, pll);

	wakeref = intel_display_power_get_if_enabled(dev_priv,
						     POWER_DOMAIN_DISPLAY_CORE);
	if (!wakeref)
		return false;

	val = intel_de_read(dev_priv, enable_reg);
	if (!(val & PLL_ENABLE))
		goto out;

	/* Keep only the fields the driver programs; the rest are reserved. */
	hw_state->mg_refclkin_ctl = intel_de_read(dev_priv,
						  MG_REFCLKIN_CTL(tc_port));
	hw_state->mg_refclkin_ctl &= MG_REFCLKIN_CTL_OD_2_MUX_MASK;

	hw_state->mg_clktop2_coreclkctl1 =
		intel_de_read(dev_priv, MG_CLKTOP2_CORECLKCTL1(tc_port));
	hw_state->mg_clktop2_coreclkctl1 &=
		MG_CLKTOP2_CORECLKCTL1_A_DIVRATIO_MASK;

	hw_state->mg_clktop2_hsclkctl =
		intel_de_read(dev_priv, MG_CLKTOP2_HSCLKCTL(tc_port));
	hw_state->mg_clktop2_hsclkctl &=
		MG_CLKTOP2_HSCLKCTL_TLINEDRV_CLKSEL_MASK |
		MG_CLKTOP2_HSCLKCTL_CORE_INPUTSEL_MASK |
		MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_MASK |
		MG_CLKTOP2_HSCLKCTL_DSDIV_RATIO_MASK;

	hw_state->mg_pll_div0 = intel_de_read(dev_priv, MG_PLL_DIV0(tc_port));
	hw_state->mg_pll_div1 = intel_de_read(dev_priv, MG_PLL_DIV1(tc_port));
	hw_state->mg_pll_lf = intel_de_read(dev_priv, MG_PLL_LF(tc_port));
	hw_state->mg_pll_frac_lock = intel_de_read(dev_priv,
						   MG_PLL_FRAC_LOCK(tc_port));
	hw_state->mg_pll_ssc = intel_de_read(dev_priv, MG_PLL_SSC(tc_port));

	hw_state->mg_pll_bias = intel_de_read(dev_priv, MG_PLL_BIAS(tc_port));
	hw_state->mg_pll_tdc_coldst_bias =
		intel_de_read(dev_priv, MG_PLL_TDC_COLDST_BIAS(tc_port));

	/*
	 * The compared bias/coldst_bias bits depend on the refclk; this must
	 * mirror the mask selection done in icl_calc_mg_pll_state().
	 */
	if (dev_priv->display.dpll.ref_clks.nssc == 38400) {
		hw_state->mg_pll_tdc_coldst_bias_mask = MG_PLL_TDC_COLDST_COLDSTART;
		hw_state->mg_pll_bias_mask = 0;
	} else {
		hw_state->mg_pll_tdc_coldst_bias_mask = -1U;
		hw_state->mg_pll_bias_mask = -1U;
	}

	hw_state->mg_pll_tdc_coldst_bias &= hw_state->mg_pll_tdc_coldst_bias_mask;
	hw_state->mg_pll_bias &= hw_state->mg_pll_bias_mask;

	ret = true;
out:
	intel_display_power_put(dev_priv, POWER_DOMAIN_DISPLAY_CORE, wakeref);
	return ret;
}
3458 
/*
 * Read out the DKL PHY PLL hw state for @pll, masking out reserved fields
 * so it can be compared with the computed state. Returns true if the PLL is
 * enabled, false otherwise (or if display power is off).
 */
static bool dkl_pll_get_hw_state(struct drm_i915_private *dev_priv,
				 struct intel_shared_dpll *pll,
				 struct intel_dpll_hw_state *hw_state)
{
	const enum intel_dpll_id id = pll->info->id;
	enum tc_port tc_port = icl_pll_id_to_tc_port(id);
	intel_wakeref_t wakeref;
	bool ret = false;
	u32 val;

	wakeref = intel_display_power_get_if_enabled(dev_priv,
						     POWER_DOMAIN_DISPLAY_CORE);
	if (!wakeref)
		return false;

	val = intel_de_read(dev_priv, intel_tc_pll_enable_reg(dev_priv, pll));
	if (!(val & PLL_ENABLE))
		goto out;

	/*
	 * All registers read here have the same HIP_INDEX_REG even though
	 * they are on different building blocks
	 */
	hw_state->mg_refclkin_ctl = intel_dkl_phy_read(dev_priv,
						       DKL_REFCLKIN_CTL(tc_port));
	hw_state->mg_refclkin_ctl &= MG_REFCLKIN_CTL_OD_2_MUX_MASK;

	hw_state->mg_clktop2_hsclkctl =
		intel_dkl_phy_read(dev_priv, DKL_CLKTOP2_HSCLKCTL(tc_port));
	hw_state->mg_clktop2_hsclkctl &=
		MG_CLKTOP2_HSCLKCTL_TLINEDRV_CLKSEL_MASK |
		MG_CLKTOP2_HSCLKCTL_CORE_INPUTSEL_MASK |
		MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_MASK |
		MG_CLKTOP2_HSCLKCTL_DSDIV_RATIO_MASK;

	hw_state->mg_clktop2_coreclkctl1 =
		intel_dkl_phy_read(dev_priv, DKL_CLKTOP2_CORECLKCTL1(tc_port));
	hw_state->mg_clktop2_coreclkctl1 &=
		MG_CLKTOP2_CORECLKCTL1_A_DIVRATIO_MASK;

	/* The AFC startup field only matters when the VBT overrides it. */
	hw_state->mg_pll_div0 = intel_dkl_phy_read(dev_priv, DKL_PLL_DIV0(tc_port));
	val = DKL_PLL_DIV0_MASK;
	if (dev_priv->display.vbt.override_afc_startup)
		val |= DKL_PLL_DIV0_AFC_STARTUP_MASK;
	hw_state->mg_pll_div0 &= val;

	hw_state->mg_pll_div1 = intel_dkl_phy_read(dev_priv, DKL_PLL_DIV1(tc_port));
	hw_state->mg_pll_div1 &= (DKL_PLL_DIV1_IREF_TRIM_MASK |
				  DKL_PLL_DIV1_TDC_TARGET_CNT_MASK);

	hw_state->mg_pll_ssc = intel_dkl_phy_read(dev_priv, DKL_PLL_SSC(tc_port));
	hw_state->mg_pll_ssc &= (DKL_PLL_SSC_IREF_NDIV_RATIO_MASK |
				 DKL_PLL_SSC_STEP_LEN_MASK |
				 DKL_PLL_SSC_STEP_NUM_MASK |
				 DKL_PLL_SSC_EN);

	hw_state->mg_pll_bias = intel_dkl_phy_read(dev_priv, DKL_PLL_BIAS(tc_port));
	hw_state->mg_pll_bias &= (DKL_PLL_BIAS_FRAC_EN_H |
				  DKL_PLL_BIAS_FBDIV_FRAC_MASK);

	hw_state->mg_pll_tdc_coldst_bias =
		intel_dkl_phy_read(dev_priv, DKL_PLL_TDC_COLDST_BIAS(tc_port));
	hw_state->mg_pll_tdc_coldst_bias &= (DKL_PLL_TDC_SSC_STEP_SIZE_MASK |
					     DKL_PLL_TDC_FEED_FWD_GAIN_MASK);

	ret = true;
out:
	intel_display_power_put(dev_priv, POWER_DOMAIN_DISPLAY_CORE, wakeref);
	return ret;
}
3529 
/*
 * Common combo/TBT PLL hw state readout: reads the platform-specific
 * CFGCR0/CFGCR1 (and optionally DIV0) registers. @enable_reg selects which
 * PLL enable register to check. Returns true if the PLL is enabled.
 */
static bool icl_pll_get_hw_state(struct drm_i915_private *dev_priv,
				 struct intel_shared_dpll *pll,
				 struct intel_dpll_hw_state *hw_state,
				 i915_reg_t enable_reg)
{
	const enum intel_dpll_id id = pll->info->id;
	intel_wakeref_t wakeref;
	bool ret = false;
	u32 val;

	wakeref = intel_display_power_get_if_enabled(dev_priv,
						     POWER_DOMAIN_DISPLAY_CORE);
	if (!wakeref)
		return false;

	val = intel_de_read(dev_priv, enable_reg);
	if (!(val & PLL_ENABLE))
		goto out;

	/* CFGCR register locations differ per platform. */
	if (IS_ALDERLAKE_S(dev_priv)) {
		hw_state->cfgcr0 = intel_de_read(dev_priv, ADLS_DPLL_CFGCR0(id));
		hw_state->cfgcr1 = intel_de_read(dev_priv, ADLS_DPLL_CFGCR1(id));
	} else if (IS_DG1(dev_priv)) {
		hw_state->cfgcr0 = intel_de_read(dev_priv, DG1_DPLL_CFGCR0(id));
		hw_state->cfgcr1 = intel_de_read(dev_priv, DG1_DPLL_CFGCR1(id));
	} else if (IS_ROCKETLAKE(dev_priv)) {
		hw_state->cfgcr0 = intel_de_read(dev_priv,
						 RKL_DPLL_CFGCR0(id));
		hw_state->cfgcr1 = intel_de_read(dev_priv,
						 RKL_DPLL_CFGCR1(id));
	} else if (DISPLAY_VER(dev_priv) >= 12) {
		hw_state->cfgcr0 = intel_de_read(dev_priv,
						 TGL_DPLL_CFGCR0(id));
		hw_state->cfgcr1 = intel_de_read(dev_priv,
						 TGL_DPLL_CFGCR1(id));
		/* DIV0 is only compared when the VBT overrides AFC startup. */
		if (dev_priv->display.vbt.override_afc_startup) {
			hw_state->div0 = intel_de_read(dev_priv, TGL_DPLL0_DIV0(id));
			hw_state->div0 &= TGL_DPLL0_DIV0_AFC_STARTUP_MASK;
		}
	} else {
		/* JSL/EHL DPLL4 aliases the index-4 ICL CFGCR registers. */
		if (IS_JSL_EHL(dev_priv) && id == DPLL_ID_EHL_DPLL4) {
			hw_state->cfgcr0 = intel_de_read(dev_priv,
							 ICL_DPLL_CFGCR0(4));
			hw_state->cfgcr1 = intel_de_read(dev_priv,
							 ICL_DPLL_CFGCR1(4));
		} else {
			hw_state->cfgcr0 = intel_de_read(dev_priv,
							 ICL_DPLL_CFGCR0(id));
			hw_state->cfgcr1 = intel_de_read(dev_priv,
							 ICL_DPLL_CFGCR1(id));
		}
	}

	ret = true;
out:
	intel_display_power_put(dev_priv, POWER_DOMAIN_DISPLAY_CORE, wakeref);
	return ret;
}
3588 
3589 static bool combo_pll_get_hw_state(struct drm_i915_private *dev_priv,
3590 				   struct intel_shared_dpll *pll,
3591 				   struct intel_dpll_hw_state *hw_state)
3592 {
3593 	i915_reg_t enable_reg = intel_combo_pll_enable_reg(dev_priv, pll);
3594 
3595 	return icl_pll_get_hw_state(dev_priv, pll, hw_state, enable_reg);
3596 }
3597 
static bool tbt_pll_get_hw_state(struct drm_i915_private *dev_priv,
				 struct intel_shared_dpll *pll,
				 struct intel_dpll_hw_state *hw_state)
{
	/* The TBT PLL has a single fixed enable register. */
	return icl_pll_get_hw_state(dev_priv, pll, hw_state, TBT_PLL_ENABLE);
}
3604 
/*
 * Program the combo/TBT PLL CFGCR0/CFGCR1 (and, where applicable, DIV0)
 * registers from the PLL's software state. Register locations are
 * platform-specific.
 */
static void icl_dpll_write(struct drm_i915_private *dev_priv,
			   struct intel_shared_dpll *pll)
{
	struct intel_dpll_hw_state *hw_state = &pll->state.hw_state;
	const enum intel_dpll_id id = pll->info->id;
	i915_reg_t cfgcr0_reg, cfgcr1_reg, div0_reg = INVALID_MMIO_REG;

	if (IS_ALDERLAKE_S(dev_priv)) {
		cfgcr0_reg = ADLS_DPLL_CFGCR0(id);
		cfgcr1_reg = ADLS_DPLL_CFGCR1(id);
	} else if (IS_DG1(dev_priv)) {
		cfgcr0_reg = DG1_DPLL_CFGCR0(id);
		cfgcr1_reg = DG1_DPLL_CFGCR1(id);
	} else if (IS_ROCKETLAKE(dev_priv)) {
		cfgcr0_reg = RKL_DPLL_CFGCR0(id);
		cfgcr1_reg = RKL_DPLL_CFGCR1(id);
	} else if (DISPLAY_VER(dev_priv) >= 12) {
		cfgcr0_reg = TGL_DPLL_CFGCR0(id);
		cfgcr1_reg = TGL_DPLL_CFGCR1(id);
		/* Only display 12+ has a DIV0 register to carry AFC startup. */
		div0_reg = TGL_DPLL0_DIV0(id);
	} else {
		/* JSL/EHL DPLL4 aliases the index-4 ICL CFGCR registers. */
		if (IS_JSL_EHL(dev_priv) && id == DPLL_ID_EHL_DPLL4) {
			cfgcr0_reg = ICL_DPLL_CFGCR0(4);
			cfgcr1_reg = ICL_DPLL_CFGCR1(4);
		} else {
			cfgcr0_reg = ICL_DPLL_CFGCR0(id);
			cfgcr1_reg = ICL_DPLL_CFGCR1(id);
		}
	}

	intel_de_write(dev_priv, cfgcr0_reg, hw_state->cfgcr0);
	intel_de_write(dev_priv, cfgcr1_reg, hw_state->cfgcr1);
	/* A VBT AFC startup override requires a platform with a DIV0 reg. */
	drm_WARN_ON_ONCE(&dev_priv->drm, dev_priv->display.vbt.override_afc_startup &&
			 !i915_mmio_reg_valid(div0_reg));
	if (dev_priv->display.vbt.override_afc_startup &&
	    i915_mmio_reg_valid(div0_reg))
		intel_de_rmw(dev_priv, div0_reg,
			     TGL_DPLL0_DIV0_AFC_STARTUP_MASK, hw_state->div0);
	intel_de_posting_read(dev_priv, cfgcr1_reg);
}
3645 
/* Program the MG PHY PLL registers from the PLL's software state. */
static void icl_mg_pll_write(struct drm_i915_private *dev_priv,
			     struct intel_shared_dpll *pll)
{
	struct intel_dpll_hw_state *hw_state = &pll->state.hw_state;
	enum tc_port tc_port = icl_pll_id_to_tc_port(pll->info->id);

	/*
	 * Some of the following registers have reserved fields, so program
	 * these with RMW based on a mask. The mask can be fixed or generated
	 * during the calc/readout phase if the mask depends on some other HW
	 * state like refclk, see icl_calc_mg_pll_state().
	 */
	intel_de_rmw(dev_priv, MG_REFCLKIN_CTL(tc_port),
		     MG_REFCLKIN_CTL_OD_2_MUX_MASK, hw_state->mg_refclkin_ctl);

	intel_de_rmw(dev_priv, MG_CLKTOP2_CORECLKCTL1(tc_port),
		     MG_CLKTOP2_CORECLKCTL1_A_DIVRATIO_MASK,
		     hw_state->mg_clktop2_coreclkctl1);

	intel_de_rmw(dev_priv, MG_CLKTOP2_HSCLKCTL(tc_port),
		     MG_CLKTOP2_HSCLKCTL_TLINEDRV_CLKSEL_MASK |
		     MG_CLKTOP2_HSCLKCTL_CORE_INPUTSEL_MASK |
		     MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_MASK |
		     MG_CLKTOP2_HSCLKCTL_DSDIV_RATIO_MASK,
		     hw_state->mg_clktop2_hsclkctl);

	intel_de_write(dev_priv, MG_PLL_DIV0(tc_port), hw_state->mg_pll_div0);
	intel_de_write(dev_priv, MG_PLL_DIV1(tc_port), hw_state->mg_pll_div1);
	intel_de_write(dev_priv, MG_PLL_LF(tc_port), hw_state->mg_pll_lf);
	intel_de_write(dev_priv, MG_PLL_FRAC_LOCK(tc_port),
		       hw_state->mg_pll_frac_lock);
	intel_de_write(dev_priv, MG_PLL_SSC(tc_port), hw_state->mg_pll_ssc);

	/* These masks are refclk-dependent, see icl_calc_mg_pll_state(). */
	intel_de_rmw(dev_priv, MG_PLL_BIAS(tc_port),
		     hw_state->mg_pll_bias_mask, hw_state->mg_pll_bias);

	intel_de_rmw(dev_priv, MG_PLL_TDC_COLDST_BIAS(tc_port),
		     hw_state->mg_pll_tdc_coldst_bias_mask,
		     hw_state->mg_pll_tdc_coldst_bias);

	/* Posting read to flush the programming above. */
	intel_de_posting_read(dev_priv, MG_PLL_TDC_COLDST_BIAS(tc_port));
}
3688 
3689 static void dkl_pll_write(struct drm_i915_private *dev_priv,
3690 			  struct intel_shared_dpll *pll)
3691 {
3692 	struct intel_dpll_hw_state *hw_state = &pll->state.hw_state;
3693 	enum tc_port tc_port = icl_pll_id_to_tc_port(pll->info->id);
3694 	u32 val;
3695 
3696 	/*
3697 	 * All registers programmed here have the same HIP_INDEX_REG even
3698 	 * though on different building block
3699 	 */
3700 	/* All the registers are RMW */
3701 	val = intel_dkl_phy_read(dev_priv, DKL_REFCLKIN_CTL(tc_port));
3702 	val &= ~MG_REFCLKIN_CTL_OD_2_MUX_MASK;
3703 	val |= hw_state->mg_refclkin_ctl;
3704 	intel_dkl_phy_write(dev_priv, DKL_REFCLKIN_CTL(tc_port), val);
3705 
3706 	val = intel_dkl_phy_read(dev_priv, DKL_CLKTOP2_CORECLKCTL1(tc_port));
3707 	val &= ~MG_CLKTOP2_CORECLKCTL1_A_DIVRATIO_MASK;
3708 	val |= hw_state->mg_clktop2_coreclkctl1;
3709 	intel_dkl_phy_write(dev_priv, DKL_CLKTOP2_CORECLKCTL1(tc_port), val);
3710 
3711 	val = intel_dkl_phy_read(dev_priv, DKL_CLKTOP2_HSCLKCTL(tc_port));
3712 	val &= ~(MG_CLKTOP2_HSCLKCTL_TLINEDRV_CLKSEL_MASK |
3713 		 MG_CLKTOP2_HSCLKCTL_CORE_INPUTSEL_MASK |
3714 		 MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_MASK |
3715 		 MG_CLKTOP2_HSCLKCTL_DSDIV_RATIO_MASK);
3716 	val |= hw_state->mg_clktop2_hsclkctl;
3717 	intel_dkl_phy_write(dev_priv, DKL_CLKTOP2_HSCLKCTL(tc_port), val);
3718 
3719 	val = DKL_PLL_DIV0_MASK;
3720 	if (dev_priv->display.vbt.override_afc_startup)
3721 		val |= DKL_PLL_DIV0_AFC_STARTUP_MASK;
3722 	intel_dkl_phy_rmw(dev_priv, DKL_PLL_DIV0(tc_port), val,
3723 			  hw_state->mg_pll_div0);
3724 
3725 	val = intel_dkl_phy_read(dev_priv, DKL_PLL_DIV1(tc_port));
3726 	val &= ~(DKL_PLL_DIV1_IREF_TRIM_MASK |
3727 		 DKL_PLL_DIV1_TDC_TARGET_CNT_MASK);
3728 	val |= hw_state->mg_pll_div1;
3729 	intel_dkl_phy_write(dev_priv, DKL_PLL_DIV1(tc_port), val);
3730 
3731 	val = intel_dkl_phy_read(dev_priv, DKL_PLL_SSC(tc_port));
3732 	val &= ~(DKL_PLL_SSC_IREF_NDIV_RATIO_MASK |
3733 		 DKL_PLL_SSC_STEP_LEN_MASK |
3734 		 DKL_PLL_SSC_STEP_NUM_MASK |
3735 		 DKL_PLL_SSC_EN);
3736 	val |= hw_state->mg_pll_ssc;
3737 	intel_dkl_phy_write(dev_priv, DKL_PLL_SSC(tc_port), val);
3738 
3739 	val = intel_dkl_phy_read(dev_priv, DKL_PLL_BIAS(tc_port));
3740 	val &= ~(DKL_PLL_BIAS_FRAC_EN_H |
3741 		 DKL_PLL_BIAS_FBDIV_FRAC_MASK);
3742 	val |= hw_state->mg_pll_bias;
3743 	intel_dkl_phy_write(dev_priv, DKL_PLL_BIAS(tc_port), val);
3744 
3745 	val = intel_dkl_phy_read(dev_priv, DKL_PLL_TDC_COLDST_BIAS(tc_port));
3746 	val &= ~(DKL_PLL_TDC_SSC_STEP_SIZE_MASK |
3747 		 DKL_PLL_TDC_FEED_FWD_GAIN_MASK);
3748 	val |= hw_state->mg_pll_tdc_coldst_bias;
3749 	intel_dkl_phy_write(dev_priv, DKL_PLL_TDC_COLDST_BIAS(tc_port), val);
3750 
3751 	intel_dkl_phy_posting_read(dev_priv, DKL_PLL_TDC_COLDST_BIAS(tc_port));
3752 }
3753 
/* Enable PLL power via @enable_reg and wait for the power state to stick. */
static void icl_pll_power_enable(struct drm_i915_private *dev_priv,
				 struct intel_shared_dpll *pll,
				 i915_reg_t enable_reg)
{
	intel_de_rmw(dev_priv, enable_reg, 0, PLL_POWER_ENABLE);

	/*
	 * The spec says we need to "wait" but it also says it should be
	 * immediate.
	 */
	if (intel_de_wait_for_set(dev_priv, enable_reg, PLL_POWER_STATE, 1))
		drm_err(&dev_priv->drm, "PLL %d Power not enabled\n",
			pll->info->id);
}
3768 
/* Set the PLL enable bit and wait for the PLL to report lock. */
static void icl_pll_enable(struct drm_i915_private *dev_priv,
			   struct intel_shared_dpll *pll,
			   i915_reg_t enable_reg)
{
	intel_de_rmw(dev_priv, enable_reg, 0, PLL_ENABLE);

	/* Timeout is actually 600us. */
	if (intel_de_wait_for_set(dev_priv, enable_reg, PLL_LOCK, 1))
		drm_err(&dev_priv->drm, "PLL %d not locked\n", pll->info->id);
}
3779 
static void adlp_cmtg_clock_gating_wa(struct drm_i915_private *i915, struct intel_shared_dpll *pll)
{
	u32 val;

	/* Only applies to ADL-P A0 stepping, and only while enabling DPLL0. */
	if (!IS_ADLP_DISPLAY_STEP(i915, STEP_A0, STEP_B0) ||
	    pll->info->id != DPLL_ID_ICL_DPLL0)
		return;
	/*
	 * Wa_16011069516:adl-p[a0]
	 *
	 * All CMTG regs are unreliable until CMTG clock gating is disabled,
	 * so we can only assume the default TRANS_CMTG_CHICKEN reg value and
	 * sanity check this assumption with a double read, which presumably
	 * returns the correct value even with clock gating on.
	 *
	 * Instead of the usual place for workarounds we apply this one here,
	 * since TRANS_CMTG_CHICKEN is only accessible while DPLL0 is enabled.
	 */
	/* First read primes the register; rmw returns the pre-write value. */
	val = intel_de_read(i915, TRANS_CMTG_CHICKEN);
	val = intel_de_rmw(i915, TRANS_CMTG_CHICKEN, ~0, DISABLE_DPT_CLK_GATING);
	if (drm_WARN_ON(&i915->drm, val & ~DISABLE_DPT_CLK_GATING))
		drm_dbg_kms(&i915->drm, "Unexpected flags in TRANS_CMTG_CHICKEN: %08x\n", val);
}
3803 
/* Full enable sequence for a combo PHY PLL: power up, program, enable. */
static void combo_pll_enable(struct drm_i915_private *dev_priv,
			     struct intel_shared_dpll *pll)
{
	i915_reg_t enable_reg = intel_combo_pll_enable_reg(dev_priv, pll);

	if (IS_JSL_EHL(dev_priv) &&
	    pll->info->id == DPLL_ID_EHL_DPLL4) {

		/*
		 * We need to disable DC states when this DPLL is enabled.
		 * This can be done by taking a reference on DPLL4 power
		 * domain. The wakeref is released in combo_pll_disable().
		 */
		pll->wakeref = intel_display_power_get(dev_priv,
						       POWER_DOMAIN_DC_OFF);
	}

	icl_pll_power_enable(dev_priv, pll, enable_reg);

	icl_dpll_write(dev_priv, pll);

	/*
	 * DVFS pre sequence would be here, but in our driver the cdclk code
	 * paths should already be setting the appropriate voltage, hence we do
	 * nothing here.
	 */

	icl_pll_enable(dev_priv, pll, enable_reg);

	adlp_cmtg_clock_gating_wa(dev_priv, pll);

	/* DVFS post sequence would be here. See the comment above. */
}
3837 
/* Full enable sequence for the TBT PLL: power up, program, enable. */
static void tbt_pll_enable(struct drm_i915_private *dev_priv,
			   struct intel_shared_dpll *pll)
{
	icl_pll_power_enable(dev_priv, pll, TBT_PLL_ENABLE);

	icl_dpll_write(dev_priv, pll);

	/*
	 * DVFS pre sequence would be here, but in our driver the cdclk code
	 * paths should already be setting the appropriate voltage, hence we do
	 * nothing here.
	 */

	icl_pll_enable(dev_priv, pll, TBT_PLL_ENABLE);

	/* DVFS post sequence would be here. See the comment above. */
}
3855 
/* Full enable sequence for an MG/DKL PHY PLL: power up, program, enable. */
static void mg_pll_enable(struct drm_i915_private *dev_priv,
			  struct intel_shared_dpll *pll)
{
	i915_reg_t enable_reg = intel_tc_pll_enable_reg(dev_priv, pll);

	icl_pll_power_enable(dev_priv, pll, enable_reg);

	/* Display 12+ uses the DKL PHY, older platforms the MG PHY. */
	if (DISPLAY_VER(dev_priv) >= 12)
		dkl_pll_write(dev_priv, pll);
	else
		icl_mg_pll_write(dev_priv, pll);

	/*
	 * DVFS pre sequence would be here, but in our driver the cdclk code
	 * paths should already be setting the appropriate voltage, hence we do
	 * nothing here.
	 */

	icl_pll_enable(dev_priv, pll, enable_reg);

	/* DVFS post sequence would be here. See the comment above. */
}
3878 
/*
 * Common ICL+ PLL disable sequence: clear the enable bit and wait for
 * the PLL to unlock, then remove power and wait for the power state to
 * clear. @enable_reg selects the combo/TBT/Type-C control register.
 */
static void icl_pll_disable(struct drm_i915_private *dev_priv,
			    struct intel_shared_dpll *pll,
			    i915_reg_t enable_reg)
{
	/* The first steps are done by intel_ddi_post_disable(). */

	/*
	 * DVFS pre sequence would be here, but in our driver the cdclk code
	 * paths should already be setting the appropriate voltage, hence we do
	 * nothing here.
	 */

	intel_de_rmw(dev_priv, enable_reg, PLL_ENABLE, 0);

	/* Timeout is actually 1us. */
	if (intel_de_wait_for_clear(dev_priv, enable_reg, PLL_LOCK, 1))
		drm_err(&dev_priv->drm, "PLL %d locked\n", pll->info->id);

	/* DVFS post sequence would be here. See the comment above. */

	intel_de_rmw(dev_priv, enable_reg, PLL_POWER_ENABLE, 0);

	/*
	 * The spec says we need to "wait" but it also says it should be
	 * immediate.
	 */
	if (intel_de_wait_for_clear(dev_priv, enable_reg, PLL_POWER_STATE, 1))
		drm_err(&dev_priv->drm, "PLL %d Power not disabled\n",
			pll->info->id);
}
3909 
/*
 * Disable a combo PHY PLL. On JSL/EHL, also drop the DC_OFF power
 * reference that was taken for DPLL4 on enable (see combo_pll_enable()).
 */
static void combo_pll_disable(struct drm_i915_private *dev_priv,
			      struct intel_shared_dpll *pll)
{
	i915_reg_t enable_reg = intel_combo_pll_enable_reg(dev_priv, pll);

	icl_pll_disable(dev_priv, pll, enable_reg);

	if (IS_JSL_EHL(dev_priv) &&
	    pll->info->id == DPLL_ID_EHL_DPLL4)
		intel_display_power_put(dev_priv, POWER_DOMAIN_DC_OFF,
					pll->wakeref);
}
3922 
/* Disable the Thunderbolt PLL via the fixed TBT_PLL_ENABLE register. */
static void tbt_pll_disable(struct drm_i915_private *dev_priv,
			    struct intel_shared_dpll *pll)
{
	icl_pll_disable(dev_priv, pll, TBT_PLL_ENABLE);
}
3928 
/* Disable a Type-C PHY PLL via its per-port enable register. */
static void mg_pll_disable(struct drm_i915_private *dev_priv,
			   struct intel_shared_dpll *pll)
{
	icl_pll_disable(dev_priv, pll,
			intel_tc_pll_enable_reg(dev_priv, pll));
}
3936 
/* ICL+ PLLs are driven by the non-SSC cdclk reference; there is no SSC ref. */
static void icl_update_dpll_ref_clks(struct drm_i915_private *i915)
{
	/* No SSC ref */
	i915->display.dpll.ref_clks.nssc = i915->display.cdclk.hw.ref;
}
3942 
3943 static void icl_dump_hw_state(struct drm_i915_private *dev_priv,
3944 			      const struct intel_dpll_hw_state *hw_state)
3945 {
3946 	drm_dbg_kms(&dev_priv->drm,
3947 		    "dpll_hw_state: cfgcr0: 0x%x, cfgcr1: 0x%x, div0: 0x%x, "
3948 		    "mg_refclkin_ctl: 0x%x, hg_clktop2_coreclkctl1: 0x%x, "
3949 		    "mg_clktop2_hsclkctl: 0x%x, mg_pll_div0: 0x%x, "
3950 		    "mg_pll_div2: 0x%x, mg_pll_lf: 0x%x, "
3951 		    "mg_pll_frac_lock: 0x%x, mg_pll_ssc: 0x%x, "
3952 		    "mg_pll_bias: 0x%x, mg_pll_tdc_coldst_bias: 0x%x\n",
3953 		    hw_state->cfgcr0, hw_state->cfgcr1,
3954 		    hw_state->div0,
3955 		    hw_state->mg_refclkin_ctl,
3956 		    hw_state->mg_clktop2_coreclkctl1,
3957 		    hw_state->mg_clktop2_hsclkctl,
3958 		    hw_state->mg_pll_div0,
3959 		    hw_state->mg_pll_div1,
3960 		    hw_state->mg_pll_lf,
3961 		    hw_state->mg_pll_frac_lock,
3962 		    hw_state->mg_pll_ssc,
3963 		    hw_state->mg_pll_bias,
3964 		    hw_state->mg_pll_tdc_coldst_bias);
3965 }
3966 
/* Hooks for the ICL+ combo PHY PLLs. */
static const struct intel_shared_dpll_funcs combo_pll_funcs = {
	.enable = combo_pll_enable,
	.disable = combo_pll_disable,
	.get_hw_state = combo_pll_get_hw_state,
	.get_freq = icl_ddi_combo_pll_get_freq,
};
3973 
/* Hooks for the ICL+ Thunderbolt PLL. */
static const struct intel_shared_dpll_funcs tbt_pll_funcs = {
	.enable = tbt_pll_enable,
	.disable = tbt_pll_disable,
	.get_hw_state = tbt_pll_get_hw_state,
	.get_freq = icl_ddi_tbt_pll_get_freq,
};
3980 
/* Hooks for the ICL MG PHY Type-C PLLs. */
static const struct intel_shared_dpll_funcs mg_pll_funcs = {
	.enable = mg_pll_enable,
	.disable = mg_pll_disable,
	.get_hw_state = mg_pll_get_hw_state,
	.get_freq = icl_ddi_mg_pll_get_freq,
};
3987 
/* ICL: two combo PLLs, one TBT PLL and four MG (Type-C) PHY PLLs. */
static const struct dpll_info icl_plls[] = {
	{ "DPLL 0",   &combo_pll_funcs, DPLL_ID_ICL_DPLL0,  0 },
	{ "DPLL 1",   &combo_pll_funcs, DPLL_ID_ICL_DPLL1,  0 },
	{ "TBT PLL",  &tbt_pll_funcs, DPLL_ID_ICL_TBTPLL, 0 },
	{ "MG PLL 1", &mg_pll_funcs, DPLL_ID_ICL_MGPLL1, 0 },
	{ "MG PLL 2", &mg_pll_funcs, DPLL_ID_ICL_MGPLL2, 0 },
	{ "MG PLL 3", &mg_pll_funcs, DPLL_ID_ICL_MGPLL3, 0 },
	{ "MG PLL 4", &mg_pll_funcs, DPLL_ID_ICL_MGPLL4, 0 },
	{ },
};
3998 
/* ICL DPLL manager; uses the common icl_* compute/get/put hooks. */
static const struct intel_dpll_mgr icl_pll_mgr = {
	.dpll_info = icl_plls,
	.compute_dplls = icl_compute_dplls,
	.get_dplls = icl_get_dplls,
	.put_dplls = icl_put_dplls,
	.update_active_dpll = icl_update_active_dpll,
	.update_ref_clks = icl_update_dpll_ref_clks,
	.dump_hw_state = icl_dump_hw_state,
};
4008 
/* JSL/EHL: combo PLLs only, including the extra DPLL4. */
static const struct dpll_info ehl_plls[] = {
	{ "DPLL 0", &combo_pll_funcs, DPLL_ID_ICL_DPLL0, 0 },
	{ "DPLL 1", &combo_pll_funcs, DPLL_ID_ICL_DPLL1, 0 },
	{ "DPLL 4", &combo_pll_funcs, DPLL_ID_EHL_DPLL4, 0 },
	{ },
};
4015 
/* JSL/EHL DPLL manager; no Type-C ports, so no update_active_dpll hook. */
static const struct intel_dpll_mgr ehl_pll_mgr = {
	.dpll_info = ehl_plls,
	.compute_dplls = icl_compute_dplls,
	.get_dplls = icl_get_dplls,
	.put_dplls = icl_put_dplls,
	.update_ref_clks = icl_update_dpll_ref_clks,
	.dump_hw_state = icl_dump_hw_state,
};
4024 
/*
 * Hooks for TGL+ DKL PHY Type-C PLLs; enable/disable are shared with
 * the ICL MG PHY paths, only the hw state readout differs.
 */
static const struct intel_shared_dpll_funcs dkl_pll_funcs = {
	.enable = mg_pll_enable,
	.disable = mg_pll_disable,
	.get_hw_state = dkl_pll_get_hw_state,
	.get_freq = icl_ddi_mg_pll_get_freq,
};
4031 
/* TGL: two combo PLLs, one TBT PLL and six TC (DKL PHY) PLLs. */
static const struct dpll_info tgl_plls[] = {
	{ "DPLL 0", &combo_pll_funcs, DPLL_ID_ICL_DPLL0,  0 },
	{ "DPLL 1", &combo_pll_funcs, DPLL_ID_ICL_DPLL1,  0 },
	{ "TBT PLL",  &tbt_pll_funcs, DPLL_ID_ICL_TBTPLL, 0 },
	{ "TC PLL 1", &dkl_pll_funcs, DPLL_ID_ICL_MGPLL1, 0 },
	{ "TC PLL 2", &dkl_pll_funcs, DPLL_ID_ICL_MGPLL2, 0 },
	{ "TC PLL 3", &dkl_pll_funcs, DPLL_ID_ICL_MGPLL3, 0 },
	{ "TC PLL 4", &dkl_pll_funcs, DPLL_ID_ICL_MGPLL4, 0 },
	{ "TC PLL 5", &dkl_pll_funcs, DPLL_ID_TGL_MGPLL5, 0 },
	{ "TC PLL 6", &dkl_pll_funcs, DPLL_ID_TGL_MGPLL6, 0 },
	{ },
};
4044 
/* TGL DPLL manager; uses the common icl_* compute/get/put hooks. */
static const struct intel_dpll_mgr tgl_pll_mgr = {
	.dpll_info = tgl_plls,
	.compute_dplls = icl_compute_dplls,
	.get_dplls = icl_get_dplls,
	.put_dplls = icl_put_dplls,
	.update_active_dpll = icl_update_active_dpll,
	.update_ref_clks = icl_update_dpll_ref_clks,
	.dump_hw_state = icl_dump_hw_state,
};
4054 
/* RKL: combo PLLs only, reusing the EHL DPLL4 ID for the third PLL. */
static const struct dpll_info rkl_plls[] = {
	{ "DPLL 0", &combo_pll_funcs, DPLL_ID_ICL_DPLL0, 0 },
	{ "DPLL 1", &combo_pll_funcs, DPLL_ID_ICL_DPLL1, 0 },
	{ "DPLL 4", &combo_pll_funcs, DPLL_ID_EHL_DPLL4, 0 },
	{ },
};
4061 
/* RKL DPLL manager; no Type-C ports, so no update_active_dpll hook. */
static const struct intel_dpll_mgr rkl_pll_mgr = {
	.dpll_info = rkl_plls,
	.compute_dplls = icl_compute_dplls,
	.get_dplls = icl_get_dplls,
	.put_dplls = icl_put_dplls,
	.update_ref_clks = icl_update_dpll_ref_clks,
	.dump_hw_state = icl_dump_hw_state,
};
4070 
/* DG1: four combo PLLs with DG1-specific IDs. */
static const struct dpll_info dg1_plls[] = {
	{ "DPLL 0", &combo_pll_funcs, DPLL_ID_DG1_DPLL0, 0 },
	{ "DPLL 1", &combo_pll_funcs, DPLL_ID_DG1_DPLL1, 0 },
	{ "DPLL 2", &combo_pll_funcs, DPLL_ID_DG1_DPLL2, 0 },
	{ "DPLL 3", &combo_pll_funcs, DPLL_ID_DG1_DPLL3, 0 },
	{ },
};
4078 
/* DG1 DPLL manager; no Type-C ports, so no update_active_dpll hook. */
static const struct intel_dpll_mgr dg1_pll_mgr = {
	.dpll_info = dg1_plls,
	.compute_dplls = icl_compute_dplls,
	.get_dplls = icl_get_dplls,
	.put_dplls = icl_put_dplls,
	.update_ref_clks = icl_update_dpll_ref_clks,
	.dump_hw_state = icl_dump_hw_state,
};
4087 
/* ADL-S: four combo PLLs (ICL IDs for 0/1, DG1 IDs for 2/3). */
static const struct dpll_info adls_plls[] = {
	{ "DPLL 0", &combo_pll_funcs, DPLL_ID_ICL_DPLL0, 0 },
	{ "DPLL 1", &combo_pll_funcs, DPLL_ID_ICL_DPLL1, 0 },
	{ "DPLL 2", &combo_pll_funcs, DPLL_ID_DG1_DPLL2, 0 },
	{ "DPLL 3", &combo_pll_funcs, DPLL_ID_DG1_DPLL3, 0 },
	{ },
};
4095 
/* ADL-S DPLL manager; no Type-C ports, so no update_active_dpll hook. */
static const struct intel_dpll_mgr adls_pll_mgr = {
	.dpll_info = adls_plls,
	.compute_dplls = icl_compute_dplls,
	.get_dplls = icl_get_dplls,
	.put_dplls = icl_put_dplls,
	.update_ref_clks = icl_update_dpll_ref_clks,
	.dump_hw_state = icl_dump_hw_state,
};
4104 
/* ADL-P: two combo PLLs, one TBT PLL and four TC (DKL PHY) PLLs. */
static const struct dpll_info adlp_plls[] = {
	{ "DPLL 0", &combo_pll_funcs, DPLL_ID_ICL_DPLL0,  0 },
	{ "DPLL 1", &combo_pll_funcs, DPLL_ID_ICL_DPLL1,  0 },
	{ "TBT PLL",  &tbt_pll_funcs, DPLL_ID_ICL_TBTPLL, 0 },
	{ "TC PLL 1", &dkl_pll_funcs, DPLL_ID_ICL_MGPLL1, 0 },
	{ "TC PLL 2", &dkl_pll_funcs, DPLL_ID_ICL_MGPLL2, 0 },
	{ "TC PLL 3", &dkl_pll_funcs, DPLL_ID_ICL_MGPLL3, 0 },
	{ "TC PLL 4", &dkl_pll_funcs, DPLL_ID_ICL_MGPLL4, 0 },
	{ },
};
4115 
/* ADL-P DPLL manager; uses the common icl_* compute/get/put hooks. */
static const struct intel_dpll_mgr adlp_pll_mgr = {
	.dpll_info = adlp_plls,
	.compute_dplls = icl_compute_dplls,
	.get_dplls = icl_get_dplls,
	.put_dplls = icl_put_dplls,
	.update_active_dpll = icl_update_active_dpll,
	.update_ref_clks = icl_update_dpll_ref_clks,
	.dump_hw_state = icl_dump_hw_state,
};
4125 
4126 /**
4127  * intel_shared_dpll_init - Initialize shared DPLLs
4128  * @dev_priv: i915 device
4129  *
4130  * Initialize shared DPLLs for @dev_priv.
4131  */
void intel_shared_dpll_init(struct drm_i915_private *dev_priv)
{
	const struct intel_dpll_mgr *dpll_mgr = NULL;
	const struct dpll_info *dpll_info;
	int i;

	mutex_init(&dev_priv->display.dpll.lock);

	/*
	 * Select the DPLL manager for this platform. The checks run from
	 * newest to oldest; the more specific platform checks must stay
	 * ahead of the generic DISPLAY_VER() fallbacks that would
	 * otherwise match them.
	 */
	if (DISPLAY_VER(dev_priv) >= 14 || IS_DG2(dev_priv))
		/* No shared DPLLs on DG2; port PLLs are part of the PHY */
		dpll_mgr = NULL;
	else if (IS_ALDERLAKE_P(dev_priv))
		dpll_mgr = &adlp_pll_mgr;
	else if (IS_ALDERLAKE_S(dev_priv))
		dpll_mgr = &adls_pll_mgr;
	else if (IS_DG1(dev_priv))
		dpll_mgr = &dg1_pll_mgr;
	else if (IS_ROCKETLAKE(dev_priv))
		dpll_mgr = &rkl_pll_mgr;
	else if (DISPLAY_VER(dev_priv) >= 12)
		dpll_mgr = &tgl_pll_mgr;
	else if (IS_JSL_EHL(dev_priv))
		dpll_mgr = &ehl_pll_mgr;
	else if (DISPLAY_VER(dev_priv) >= 11)
		dpll_mgr = &icl_pll_mgr;
	else if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv))
		dpll_mgr = &bxt_pll_mgr;
	else if (DISPLAY_VER(dev_priv) == 9)
		dpll_mgr = &skl_pll_mgr;
	else if (HAS_DDI(dev_priv))
		dpll_mgr = &hsw_pll_mgr;
	else if (HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv))
		dpll_mgr = &pch_pll_mgr;

	if (!dpll_mgr) {
		dev_priv->display.dpll.num_shared_dpll = 0;
		return;
	}

	dpll_info = dpll_mgr->dpll_info;

	/* Populate the per-device PLL array from the manager's table. */
	for (i = 0; dpll_info[i].name; i++) {
		if (drm_WARN_ON(&dev_priv->drm,
				i >= ARRAY_SIZE(dev_priv->display.dpll.shared_dplls)))
			break;

		/* The table is expected to be indexed by DPLL ID. */
		drm_WARN_ON(&dev_priv->drm, i != dpll_info[i].id);
		dev_priv->display.dpll.shared_dplls[i].info = &dpll_info[i];
	}

	dev_priv->display.dpll.mgr = dpll_mgr;
	dev_priv->display.dpll.num_shared_dpll = i;
}
4185 
4186 /**
 * intel_compute_shared_dplls - compute DPLL state for CRTC and encoder combination
4188  * @state: atomic state
4189  * @crtc: CRTC to compute DPLLs for
4190  * @encoder: encoder
4191  *
4192  * This function computes the DPLL state for the given CRTC and encoder.
4193  *
4194  * The new configuration in the atomic commit @state is made effective by
4195  * calling intel_shared_dpll_swap_state().
4196  *
4197  * Returns:
 * 0 on success, negative error code on failure.
4199  */
4200 int intel_compute_shared_dplls(struct intel_atomic_state *state,
4201 			       struct intel_crtc *crtc,
4202 			       struct intel_encoder *encoder)
4203 {
4204 	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
4205 	const struct intel_dpll_mgr *dpll_mgr = dev_priv->display.dpll.mgr;
4206 
4207 	if (drm_WARN_ON(&dev_priv->drm, !dpll_mgr))
4208 		return -EINVAL;
4209 
4210 	return dpll_mgr->compute_dplls(state, crtc, encoder);
4211 }
4212 
4213 /**
4214  * intel_reserve_shared_dplls - reserve DPLLs for CRTC and encoder combination
4215  * @state: atomic state
4216  * @crtc: CRTC to reserve DPLLs for
4217  * @encoder: encoder
4218  *
4219  * This function reserves all required DPLLs for the given CRTC and encoder
4220  * combination in the current atomic commit @state and the new @crtc atomic
4221  * state.
4222  *
4223  * The new configuration in the atomic commit @state is made effective by
4224  * calling intel_shared_dpll_swap_state().
4225  *
4226  * The reserved DPLLs should be released by calling
4227  * intel_release_shared_dplls().
4228  *
4229  * Returns:
4230  * 0 if all required DPLLs were successfully reserved,
4231  * negative error code otherwise.
4232  */
4233 int intel_reserve_shared_dplls(struct intel_atomic_state *state,
4234 			       struct intel_crtc *crtc,
4235 			       struct intel_encoder *encoder)
4236 {
4237 	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
4238 	const struct intel_dpll_mgr *dpll_mgr = dev_priv->display.dpll.mgr;
4239 
4240 	if (drm_WARN_ON(&dev_priv->drm, !dpll_mgr))
4241 		return -EINVAL;
4242 
4243 	return dpll_mgr->get_dplls(state, crtc, encoder);
4244 }
4245 
4246 /**
4247  * intel_release_shared_dplls - end use of DPLLs by CRTC in atomic state
4248  * @state: atomic state
4249  * @crtc: crtc from which the DPLLs are to be released
4250  *
4251  * This function releases all DPLLs reserved by intel_reserve_shared_dplls()
4252  * from the current atomic commit @state and the old @crtc atomic state.
4253  *
4254  * The new configuration in the atomic commit @state is made effective by
4255  * calling intel_shared_dpll_swap_state().
4256  */
4257 void intel_release_shared_dplls(struct intel_atomic_state *state,
4258 				struct intel_crtc *crtc)
4259 {
4260 	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
4261 	const struct intel_dpll_mgr *dpll_mgr = dev_priv->display.dpll.mgr;
4262 
4263 	/*
4264 	 * FIXME: this function is called for every platform having a
4265 	 * compute_clock hook, even though the platform doesn't yet support
4266 	 * the shared DPLL framework and intel_reserve_shared_dplls() is not
4267 	 * called on those.
4268 	 */
4269 	if (!dpll_mgr)
4270 		return;
4271 
4272 	dpll_mgr->put_dplls(state, crtc);
4273 }
4274 
4275 /**
4276  * intel_update_active_dpll - update the active DPLL for a CRTC/encoder
4277  * @state: atomic state
4278  * @crtc: the CRTC for which to update the active DPLL
4279  * @encoder: encoder determining the type of port DPLL
4280  *
4281  * Update the active DPLL for the given @crtc/@encoder in @crtc's atomic state,
4282  * from the port DPLLs reserved previously by intel_reserve_shared_dplls(). The
4283  * DPLL selected will be based on the current mode of the encoder's port.
4284  */
4285 void intel_update_active_dpll(struct intel_atomic_state *state,
4286 			      struct intel_crtc *crtc,
4287 			      struct intel_encoder *encoder)
4288 {
4289 	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
4290 	const struct intel_dpll_mgr *dpll_mgr = dev_priv->display.dpll.mgr;
4291 
4292 	if (drm_WARN_ON(&dev_priv->drm, !dpll_mgr))
4293 		return;
4294 
4295 	dpll_mgr->update_active_dpll(state, crtc, encoder);
4296 }
4297 
4298 /**
4299  * intel_dpll_get_freq - calculate the DPLL's output frequency
4300  * @i915: i915 device
4301  * @pll: DPLL for which to calculate the output frequency
4302  * @pll_state: DPLL state from which to calculate the output frequency
4303  *
4304  * Return the output frequency corresponding to @pll's passed in @pll_state.
4305  */
4306 int intel_dpll_get_freq(struct drm_i915_private *i915,
4307 			const struct intel_shared_dpll *pll,
4308 			const struct intel_dpll_hw_state *pll_state)
4309 {
4310 	if (drm_WARN_ON(&i915->drm, !pll->info->funcs->get_freq))
4311 		return 0;
4312 
4313 	return pll->info->funcs->get_freq(i915, pll, pll_state);
4314 }
4315 
4316 /**
4317  * intel_dpll_get_hw_state - readout the DPLL's hardware state
4318  * @i915: i915 device
4319  * @pll: DPLL for which to calculate the output frequency
4320  * @hw_state: DPLL's hardware state
4321  *
4322  * Read out @pll's hardware state into @hw_state.
4323  */
4324 bool intel_dpll_get_hw_state(struct drm_i915_private *i915,
4325 			     struct intel_shared_dpll *pll,
4326 			     struct intel_dpll_hw_state *hw_state)
4327 {
4328 	return pll->info->funcs->get_hw_state(i915, pll, hw_state);
4329 }
4330 
/*
 * Read out @pll's current hw state and seed the sw tracking from it:
 * whether it is on and which pipes reference it.
 */
static void readout_dpll_hw_state(struct drm_i915_private *i915,
				  struct intel_shared_dpll *pll)
{
	struct intel_crtc *crtc;

	pll->on = intel_dpll_get_hw_state(i915, pll, &pll->state.hw_state);

	/*
	 * Mirror combo_pll_enable(): on JSL/EHL an enabled DPLL4 must hold
	 * a DC_OFF power reference to keep DC states disabled.
	 */
	if (IS_JSL_EHL(i915) && pll->on &&
	    pll->info->id == DPLL_ID_EHL_DPLL4) {
		pll->wakeref = intel_display_power_get(i915,
						       POWER_DOMAIN_DC_OFF);
	}

	/* Rebuild the pipe reference mask from the active crtc states. */
	pll->state.pipe_mask = 0;
	for_each_intel_crtc(&i915->drm, crtc) {
		struct intel_crtc_state *crtc_state =
			to_intel_crtc_state(crtc->base.state);

		if (crtc_state->hw.active && crtc_state->shared_dpll == pll)
			intel_reference_shared_dpll_crtc(crtc, pll, &pll->state);
	}
	pll->active_mask = pll->state.pipe_mask;

	drm_dbg_kms(&i915->drm,
		    "%s hw state readout: pipe_mask 0x%x, on %i\n",
		    pll->info->name, pll->state.pipe_mask, pll->on);
}
4358 
4359 void intel_dpll_update_ref_clks(struct drm_i915_private *i915)
4360 {
4361 	if (i915->display.dpll.mgr && i915->display.dpll.mgr->update_ref_clks)
4362 		i915->display.dpll.mgr->update_ref_clks(i915);
4363 }
4364 
4365 void intel_dpll_readout_hw_state(struct drm_i915_private *i915)
4366 {
4367 	int i;
4368 
4369 	for (i = 0; i < i915->display.dpll.num_shared_dpll; i++)
4370 		readout_dpll_hw_state(i915, &i915->display.dpll.shared_dplls[i]);
4371 }
4372 
/*
 * Sanitize @pll after hw state readout: apply the ADL-P CMTG clock
 * gating workaround to any PLL found enabled, and disable a PLL that
 * is on in hardware but not used by any active pipe.
 */
static void sanitize_dpll_state(struct drm_i915_private *i915,
				struct intel_shared_dpll *pll)
{
	if (!pll->on)
		return;

	adlp_cmtg_clock_gating_wa(i915, pll);

	if (pll->active_mask)
		return;

	drm_dbg_kms(&i915->drm,
		    "%s enabled but not in use, disabling\n",
		    pll->info->name);

	pll->info->funcs->disable(i915, pll);
	pll->on = false;
}
4391 
4392 void intel_dpll_sanitize_state(struct drm_i915_private *i915)
4393 {
4394 	int i;
4395 
4396 	for (i = 0; i < i915->display.dpll.num_shared_dpll; i++)
4397 		sanitize_dpll_state(i915, &i915->display.dpll.shared_dplls[i]);
4398 }
4399 
4400 /**
4401  * intel_dpll_dump_hw_state - write hw_state to dmesg
4402  * @dev_priv: i915 drm device
4403  * @hw_state: hw state to be written to the log
4404  *
4405  * Write the relevant values in @hw_state to dmesg using drm_dbg_kms.
4406  */
4407 void intel_dpll_dump_hw_state(struct drm_i915_private *dev_priv,
4408 			      const struct intel_dpll_hw_state *hw_state)
4409 {
4410 	if (dev_priv->display.dpll.mgr) {
4411 		dev_priv->display.dpll.mgr->dump_hw_state(dev_priv, hw_state);
4412 	} else {
4413 		/* fallback for platforms that don't use the shared dpll
4414 		 * infrastructure
4415 		 */
4416 		drm_dbg_kms(&dev_priv->drm,
4417 			    "dpll_hw_state: dpll: 0x%x, dpll_md: 0x%x, "
4418 			    "fp0: 0x%x, fp1: 0x%x\n",
4419 			    hw_state->dpll,
4420 			    hw_state->dpll_md,
4421 			    hw_state->fp0,
4422 			    hw_state->fp1);
4423 	}
4424 }
4425 
/*
 * Cross-check the sw tracking of @pll against its hw state. With a
 * non-NULL @crtc/@new_crtc_state, also verify that the pipe's presence
 * in the PLL's active/enabled masks matches the crtc's state; with
 * @crtc == NULL, only the global reference/active mask consistency is
 * checked.
 */
static void
verify_single_dpll_state(struct drm_i915_private *dev_priv,
			 struct intel_shared_dpll *pll,
			 struct intel_crtc *crtc,
			 struct intel_crtc_state *new_crtc_state)
{
	struct intel_dpll_hw_state dpll_hw_state;
	u8 pipe_mask;
	bool active;

	memset(&dpll_hw_state, 0, sizeof(dpll_hw_state));

	drm_dbg_kms(&dev_priv->drm, "%s\n", pll->info->name);

	active = intel_dpll_get_hw_state(dev_priv, pll, &dpll_hw_state);

	/* Always-on PLLs may legitimately be on without any user. */
	if (!(pll->info->flags & INTEL_DPLL_ALWAYS_ON)) {
		I915_STATE_WARN(dev_priv, !pll->on && pll->active_mask,
				"pll in active use but not on in sw tracking\n");
		I915_STATE_WARN(dev_priv, pll->on && !pll->active_mask,
				"pll is on but not used by any active pipe\n");
		I915_STATE_WARN(dev_priv, pll->on != active,
				"pll on state mismatch (expected %i, found %i)\n",
				pll->on, active);
	}

	if (!crtc) {
		I915_STATE_WARN(dev_priv,
				pll->active_mask & ~pll->state.pipe_mask,
				"more active pll users than references: 0x%x vs 0x%x\n",
				pll->active_mask, pll->state.pipe_mask);

		return;
	}

	pipe_mask = BIT(crtc->pipe);

	/* An active crtc must be in the PLL's active mask, and vice versa. */
	if (new_crtc_state->hw.active)
		I915_STATE_WARN(dev_priv, !(pll->active_mask & pipe_mask),
				"pll active mismatch (expected pipe %c in active mask 0x%x)\n",
				pipe_name(crtc->pipe), pll->active_mask);
	else
		I915_STATE_WARN(dev_priv, pll->active_mask & pipe_mask,
				"pll active mismatch (didn't expect pipe %c in active mask 0x%x)\n",
				pipe_name(crtc->pipe), pll->active_mask);

	I915_STATE_WARN(dev_priv, !(pll->state.pipe_mask & pipe_mask),
			"pll enabled crtcs mismatch (expected 0x%x in 0x%x)\n",
			pipe_mask, pll->state.pipe_mask);

	I915_STATE_WARN(dev_priv,
			pll->on && memcmp(&pll->state.hw_state, &dpll_hw_state,
					  sizeof(dpll_hw_state)),
			"pll hw state mismatch\n");
}
4481 
4482 void intel_shared_dpll_state_verify(struct intel_crtc *crtc,
4483 				    struct intel_crtc_state *old_crtc_state,
4484 				    struct intel_crtc_state *new_crtc_state)
4485 {
4486 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
4487 
4488 	if (new_crtc_state->shared_dpll)
4489 		verify_single_dpll_state(dev_priv, new_crtc_state->shared_dpll,
4490 					 crtc, new_crtc_state);
4491 
4492 	if (old_crtc_state->shared_dpll &&
4493 	    old_crtc_state->shared_dpll != new_crtc_state->shared_dpll) {
4494 		u8 pipe_mask = BIT(crtc->pipe);
4495 		struct intel_shared_dpll *pll = old_crtc_state->shared_dpll;
4496 
4497 		I915_STATE_WARN(dev_priv, pll->active_mask & pipe_mask,
4498 				"pll active mismatch (didn't expect pipe %c in active mask (0x%x))\n",
4499 				pipe_name(crtc->pipe), pll->active_mask);
4500 		I915_STATE_WARN(dev_priv, pll->state.pipe_mask & pipe_mask,
4501 				"pll enabled crtcs mismatch (found %x in enabled mask (0x%x))\n",
4502 				pipe_name(crtc->pipe), pll->state.pipe_mask);
4503 	}
4504 }
4505 
4506 void intel_shared_dpll_verify_disabled(struct drm_i915_private *i915)
4507 {
4508 	int i;
4509 
4510 	for (i = 0; i < i915->display.dpll.num_shared_dpll; i++)
4511 		verify_single_dpll_state(i915, &i915->display.dpll.shared_dplls[i],
4512 					 NULL, NULL);
4513 }
4514