xref: /linux/drivers/clk/rockchip/clk.c (revision 3ad0876554cafa368f574d4d408468510543e9ff)
1 /*
2  * Copyright (c) 2014 MundoReader S.L.
3  * Author: Heiko Stuebner <heiko@sntech.de>
4  *
5  * Copyright (c) 2016 Rockchip Electronics Co. Ltd.
6  * Author: Xing Zheng <zhengxing@rock-chips.com>
7  *
8  * based on
9  *
10  * samsung/clk.c
11  * Copyright (c) 2013 Samsung Electronics Co., Ltd.
12  * Copyright (c) 2013 Linaro Ltd.
13  * Author: Thomas Abraham <thomas.ab@samsung.com>
14  *
15  * This program is free software; you can redistribute it and/or modify
16  * it under the terms of the GNU General Public License as published by
17  * the Free Software Foundation; either version 2 of the License, or
18  * (at your option) any later version.
19  *
20  * This program is distributed in the hope that it will be useful,
21  * but WITHOUT ANY WARRANTY; without even the implied warranty of
22  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
23  * GNU General Public License for more details.
24  */
25 
26 #include <linux/slab.h>
27 #include <linux/clk.h>
28 #include <linux/clk-provider.h>
29 #include <linux/mfd/syscon.h>
30 #include <linux/regmap.h>
31 #include <linux/reboot.h>
32 #include <linux/rational.h>
33 #include "clk.h"
34 
35 /**
36  * Register a clock branch.
37  * Most clock branches have a form like
38  *
39  * src1 --|--\
40  *        |M |--[GATE]-[DIV]-
41  * src2 --|--/
42  *
43  * sometimes without one of those components.
44  */
45 static struct clk *rockchip_clk_register_branch(const char *name,
46 		const char *const *parent_names, u8 num_parents,
47 		void __iomem *base,
48 		int muxdiv_offset, u8 mux_shift, u8 mux_width, u8 mux_flags,
49 		u8 div_shift, u8 div_width, u8 div_flags,
50 		struct clk_div_table *div_table, int gate_offset,
51 		u8 gate_shift, u8 gate_flags, unsigned long flags,
52 		spinlock_t *lock)
53 {
54 	struct clk *clk;
55 	struct clk_mux *mux = NULL;
56 	struct clk_gate *gate = NULL;
57 	struct clk_divider *div = NULL;
58 	const struct clk_ops *mux_ops = NULL, *div_ops = NULL,
59 			     *gate_ops = NULL;
60 	int ret;
61 
62 	if (num_parents > 1) {
63 		mux = kzalloc(sizeof(*mux), GFP_KERNEL);
64 		if (!mux)
65 			return ERR_PTR(-ENOMEM);
66 
67 		mux->reg = base + muxdiv_offset;
68 		mux->shift = mux_shift;
69 		mux->mask = BIT(mux_width) - 1;
70 		mux->flags = mux_flags;
71 		mux->lock = lock;
72 		mux_ops = (mux_flags & CLK_MUX_READ_ONLY) ? &clk_mux_ro_ops
73 							: &clk_mux_ops;
74 	}
75 
76 	if (gate_offset >= 0) {
77 		gate = kzalloc(sizeof(*gate), GFP_KERNEL);
78 		if (!gate) {
79 			ret = -ENOMEM;
80 			goto err_gate;
81 		}
82 
83 		gate->flags = gate_flags;
84 		gate->reg = base + gate_offset;
85 		gate->bit_idx = gate_shift;
86 		gate->lock = lock;
87 		gate_ops = &clk_gate_ops;
88 	}
89 
90 	if (div_width > 0) {
91 		div = kzalloc(sizeof(*div), GFP_KERNEL);
92 		if (!div) {
93 			ret = -ENOMEM;
94 			goto err_div;
95 		}
96 
97 		div->flags = div_flags;
98 		div->reg = base + muxdiv_offset;
99 		div->shift = div_shift;
100 		div->width = div_width;
101 		div->lock = lock;
102 		div->table = div_table;
103 		div_ops = (div_flags & CLK_DIVIDER_READ_ONLY)
104 						? &clk_divider_ro_ops
105 						: &clk_divider_ops;
106 	}
107 
108 	clk = clk_register_composite(NULL, name, parent_names, num_parents,
109 				     mux ? &mux->hw : NULL, mux_ops,
110 				     div ? &div->hw : NULL, div_ops,
111 				     gate ? &gate->hw : NULL, gate_ops,
112 				     flags);
113 
114 	if (IS_ERR(clk)) {
115 		ret = PTR_ERR(clk);
116 		goto err_composite;
117 	}
118 
119 	return clk;
120 err_composite:
121 	kfree(div);
122 err_div:
123 	kfree(gate);
124 err_gate:
125 	kfree(mux);
126 	return ERR_PTR(ret);
127 }
128 
/*
 * Fractional-divider branch, optionally gated and optionally followed by a
 * child mux that has this fractional divider as one of its parents.  The
 * notifier switches the child mux to the fractional divider while the
 * divider's rate is changed and restores the previous parent afterwards
 * (see rockchip_clk_frac_notifier_cb below).
 */
struct rockchip_clk_frac {
	/* notifier registered on the fractional divider for rate changes */
	struct notifier_block			clk_nb;
	struct clk_fractional_divider		div;
	struct clk_gate				gate;

	/* optional child mux fed (among others) by this fractional divider */
	struct clk_mux				mux;
	const struct clk_ops			*mux_ops;
	/* index of this divider among the mux parents, -1 if not found */
	int					mux_frac_idx;

	/* state carried from PRE_RATE_CHANGE to POST_RATE_CHANGE */
	bool					rate_change_remuxed;
	int					rate_change_idx;
};

/* recover the containing rockchip_clk_frac from its embedded notifier */
#define to_rockchip_clk_frac_nb(nb) \
			container_of(nb, struct rockchip_clk_frac, clk_nb)
144 
145 static int rockchip_clk_frac_notifier_cb(struct notifier_block *nb,
146 					 unsigned long event, void *data)
147 {
148 	struct clk_notifier_data *ndata = data;
149 	struct rockchip_clk_frac *frac = to_rockchip_clk_frac_nb(nb);
150 	struct clk_mux *frac_mux = &frac->mux;
151 	int ret = 0;
152 
153 	pr_debug("%s: event %lu, old_rate %lu, new_rate: %lu\n",
154 		 __func__, event, ndata->old_rate, ndata->new_rate);
155 	if (event == PRE_RATE_CHANGE) {
156 		frac->rate_change_idx =
157 				frac->mux_ops->get_parent(&frac_mux->hw);
158 		if (frac->rate_change_idx != frac->mux_frac_idx) {
159 			frac->mux_ops->set_parent(&frac_mux->hw,
160 						  frac->mux_frac_idx);
161 			frac->rate_change_remuxed = 1;
162 		}
163 	} else if (event == POST_RATE_CHANGE) {
164 		/*
165 		 * The POST_RATE_CHANGE notifier runs directly after the
166 		 * divider clock is set in clk_change_rate, so we'll have
167 		 * remuxed back to the original parent before clk_change_rate
168 		 * reaches the mux itself.
169 		 */
170 		if (frac->rate_change_remuxed) {
171 			frac->mux_ops->set_parent(&frac_mux->hw,
172 						  frac->rate_change_idx);
173 			frac->rate_change_remuxed = 0;
174 		}
175 	}
176 
177 	return notifier_from_errno(ret);
178 }
179 
/*
 * The fractional divider needs the denominator to be at least 20 times
 * larger than the numerator to generate a precise clock frequency.
 */
184 static void rockchip_fractional_approximation(struct clk_hw *hw,
185 		unsigned long rate, unsigned long *parent_rate,
186 		unsigned long *m, unsigned long *n)
187 {
188 	struct clk_fractional_divider *fd = to_clk_fd(hw);
189 	unsigned long p_rate, p_parent_rate;
190 	struct clk_hw *p_parent;
191 	unsigned long scale;
192 
193 	p_rate = clk_hw_get_rate(clk_hw_get_parent(hw));
194 	if ((rate * 20 > p_rate) && (p_rate % rate != 0)) {
195 		p_parent = clk_hw_get_parent(clk_hw_get_parent(hw));
196 		p_parent_rate = clk_hw_get_rate(p_parent);
197 		*parent_rate = p_parent_rate;
198 	}
199 
200 	/*
201 	 * Get rate closer to *parent_rate to guarantee there is no overflow
202 	 * for m and n. In the result it will be the nearest rate left shifted
203 	 * by (scale - fd->nwidth) bits.
204 	 */
205 	scale = fls_long(*parent_rate / rate - 1);
206 	if (scale > fd->nwidth)
207 		rate <<= scale - fd->nwidth;
208 
209 	rational_best_approximation(rate, *parent_rate,
210 			GENMASK(fd->mwidth - 1, 0), GENMASK(fd->nwidth - 1, 0),
211 			m, n);
212 }
213 
214 static struct clk *rockchip_clk_register_frac_branch(
215 		struct rockchip_clk_provider *ctx, const char *name,
216 		const char *const *parent_names, u8 num_parents,
217 		void __iomem *base, int muxdiv_offset, u8 div_flags,
218 		int gate_offset, u8 gate_shift, u8 gate_flags,
219 		unsigned long flags, struct rockchip_clk_branch *child,
220 		spinlock_t *lock)
221 {
222 	struct rockchip_clk_frac *frac;
223 	struct clk *clk;
224 	struct clk_gate *gate = NULL;
225 	struct clk_fractional_divider *div = NULL;
226 	const struct clk_ops *div_ops = NULL, *gate_ops = NULL;
227 
228 	if (muxdiv_offset < 0)
229 		return ERR_PTR(-EINVAL);
230 
231 	if (child && child->branch_type != branch_mux) {
232 		pr_err("%s: fractional child clock for %s can only be a mux\n",
233 		       __func__, name);
234 		return ERR_PTR(-EINVAL);
235 	}
236 
237 	frac = kzalloc(sizeof(*frac), GFP_KERNEL);
238 	if (!frac)
239 		return ERR_PTR(-ENOMEM);
240 
241 	if (gate_offset >= 0) {
242 		gate = &frac->gate;
243 		gate->flags = gate_flags;
244 		gate->reg = base + gate_offset;
245 		gate->bit_idx = gate_shift;
246 		gate->lock = lock;
247 		gate_ops = &clk_gate_ops;
248 	}
249 
250 	div = &frac->div;
251 	div->flags = div_flags;
252 	div->reg = base + muxdiv_offset;
253 	div->mshift = 16;
254 	div->mwidth = 16;
255 	div->mmask = GENMASK(div->mwidth - 1, 0) << div->mshift;
256 	div->nshift = 0;
257 	div->nwidth = 16;
258 	div->nmask = GENMASK(div->nwidth - 1, 0) << div->nshift;
259 	div->lock = lock;
260 	div->approximation = rockchip_fractional_approximation;
261 	div_ops = &clk_fractional_divider_ops;
262 
263 	clk = clk_register_composite(NULL, name, parent_names, num_parents,
264 				     NULL, NULL,
265 				     &div->hw, div_ops,
266 				     gate ? &gate->hw : NULL, gate_ops,
267 				     flags | CLK_SET_RATE_UNGATE);
268 	if (IS_ERR(clk)) {
269 		kfree(frac);
270 		return clk;
271 	}
272 
273 	if (child) {
274 		struct clk_mux *frac_mux = &frac->mux;
275 		struct clk_init_data init;
276 		struct clk *mux_clk;
277 		int i, ret;
278 
279 		frac->mux_frac_idx = -1;
280 		for (i = 0; i < child->num_parents; i++) {
281 			if (!strcmp(name, child->parent_names[i])) {
282 				pr_debug("%s: found fractional parent in mux at pos %d\n",
283 					 __func__, i);
284 				frac->mux_frac_idx = i;
285 				break;
286 			}
287 		}
288 
289 		frac->mux_ops = &clk_mux_ops;
290 		frac->clk_nb.notifier_call = rockchip_clk_frac_notifier_cb;
291 
292 		frac_mux->reg = base + child->muxdiv_offset;
293 		frac_mux->shift = child->mux_shift;
294 		frac_mux->mask = BIT(child->mux_width) - 1;
295 		frac_mux->flags = child->mux_flags;
296 		frac_mux->lock = lock;
297 		frac_mux->hw.init = &init;
298 
299 		init.name = child->name;
300 		init.flags = child->flags | CLK_SET_RATE_PARENT;
301 		init.ops = frac->mux_ops;
302 		init.parent_names = child->parent_names;
303 		init.num_parents = child->num_parents;
304 
305 		mux_clk = clk_register(NULL, &frac_mux->hw);
306 		if (IS_ERR(mux_clk)) {
307 			kfree(frac);
308 			return clk;
309 		}
310 
311 		rockchip_clk_add_lookup(ctx, mux_clk, child->id);
312 
313 		/* notifier on the fraction divider to catch rate changes */
314 		if (frac->mux_frac_idx >= 0) {
315 			ret = clk_notifier_register(clk, &frac->clk_nb);
316 			if (ret)
317 				pr_err("%s: failed to register clock notifier for %s\n",
318 						__func__, name);
319 		} else {
320 			pr_warn("%s: could not find %s as parent of %s, rate changes may not work\n",
321 				__func__, name, child->name);
322 		}
323 	}
324 
325 	return clk;
326 }
327 
328 static struct clk *rockchip_clk_register_factor_branch(const char *name,
329 		const char *const *parent_names, u8 num_parents,
330 		void __iomem *base, unsigned int mult, unsigned int div,
331 		int gate_offset, u8 gate_shift, u8 gate_flags,
332 		unsigned long flags, spinlock_t *lock)
333 {
334 	struct clk *clk;
335 	struct clk_gate *gate = NULL;
336 	struct clk_fixed_factor *fix = NULL;
337 
338 	/* without gate, register a simple factor clock */
339 	if (gate_offset == 0) {
340 		return clk_register_fixed_factor(NULL, name,
341 				parent_names[0], flags, mult,
342 				div);
343 	}
344 
345 	gate = kzalloc(sizeof(*gate), GFP_KERNEL);
346 	if (!gate)
347 		return ERR_PTR(-ENOMEM);
348 
349 	gate->flags = gate_flags;
350 	gate->reg = base + gate_offset;
351 	gate->bit_idx = gate_shift;
352 	gate->lock = lock;
353 
354 	fix = kzalloc(sizeof(*fix), GFP_KERNEL);
355 	if (!fix) {
356 		kfree(gate);
357 		return ERR_PTR(-ENOMEM);
358 	}
359 
360 	fix->mult = mult;
361 	fix->div = div;
362 
363 	clk = clk_register_composite(NULL, name, parent_names, num_parents,
364 				     NULL, NULL,
365 				     &fix->hw, &clk_fixed_factor_ops,
366 				     &gate->hw, &clk_gate_ops, flags);
367 	if (IS_ERR(clk)) {
368 		kfree(fix);
369 		kfree(gate);
370 	}
371 
372 	return clk;
373 }
374 
375 struct rockchip_clk_provider * __init rockchip_clk_init(struct device_node *np,
376 			void __iomem *base, unsigned long nr_clks)
377 {
378 	struct rockchip_clk_provider *ctx;
379 	struct clk **clk_table;
380 	int i;
381 
382 	ctx = kzalloc(sizeof(struct rockchip_clk_provider), GFP_KERNEL);
383 	if (!ctx)
384 		return ERR_PTR(-ENOMEM);
385 
386 	clk_table = kcalloc(nr_clks, sizeof(struct clk *), GFP_KERNEL);
387 	if (!clk_table)
388 		goto err_free;
389 
390 	for (i = 0; i < nr_clks; ++i)
391 		clk_table[i] = ERR_PTR(-ENOENT);
392 
393 	ctx->reg_base = base;
394 	ctx->clk_data.clks = clk_table;
395 	ctx->clk_data.clk_num = nr_clks;
396 	ctx->cru_node = np;
397 	spin_lock_init(&ctx->lock);
398 
399 	ctx->grf = syscon_regmap_lookup_by_phandle(ctx->cru_node,
400 						   "rockchip,grf");
401 
402 	return ctx;
403 
404 err_free:
405 	kfree(ctx);
406 	return ERR_PTR(-ENOMEM);
407 }
408 
409 void __init rockchip_clk_of_add_provider(struct device_node *np,
410 				struct rockchip_clk_provider *ctx)
411 {
412 	if (of_clk_add_provider(np, of_clk_src_onecell_get,
413 				&ctx->clk_data))
414 		pr_err("%s: could not register clk provider\n", __func__);
415 }
416 
417 void rockchip_clk_add_lookup(struct rockchip_clk_provider *ctx,
418 			     struct clk *clk, unsigned int id)
419 {
420 	if (ctx->clk_data.clks && id)
421 		ctx->clk_data.clks[id] = clk;
422 }
423 
424 void __init rockchip_clk_register_plls(struct rockchip_clk_provider *ctx,
425 				struct rockchip_pll_clock *list,
426 				unsigned int nr_pll, int grf_lock_offset)
427 {
428 	struct clk *clk;
429 	int idx;
430 
431 	for (idx = 0; idx < nr_pll; idx++, list++) {
432 		clk = rockchip_clk_register_pll(ctx, list->type, list->name,
433 				list->parent_names, list->num_parents,
434 				list->con_offset, grf_lock_offset,
435 				list->lock_shift, list->mode_offset,
436 				list->mode_shift, list->rate_table,
437 				list->flags, list->pll_flags);
438 		if (IS_ERR(clk)) {
439 			pr_err("%s: failed to register clock %s\n", __func__,
440 				list->name);
441 			continue;
442 		}
443 
444 		rockchip_clk_add_lookup(ctx, clk, list->id);
445 	}
446 }
447 
448 void __init rockchip_clk_register_branches(
449 				      struct rockchip_clk_provider *ctx,
450 				      struct rockchip_clk_branch *list,
451 				      unsigned int nr_clk)
452 {
453 	struct clk *clk = NULL;
454 	unsigned int idx;
455 	unsigned long flags;
456 
457 	for (idx = 0; idx < nr_clk; idx++, list++) {
458 		flags = list->flags;
459 
460 		/* catch simple muxes */
461 		switch (list->branch_type) {
462 		case branch_mux:
463 			clk = clk_register_mux(NULL, list->name,
464 				list->parent_names, list->num_parents,
465 				flags, ctx->reg_base + list->muxdiv_offset,
466 				list->mux_shift, list->mux_width,
467 				list->mux_flags, &ctx->lock);
468 			break;
469 		case branch_muxgrf:
470 			clk = rockchip_clk_register_muxgrf(list->name,
471 				list->parent_names, list->num_parents,
472 				flags, ctx->grf, list->muxdiv_offset,
473 				list->mux_shift, list->mux_width,
474 				list->mux_flags);
475 			break;
476 		case branch_divider:
477 			if (list->div_table)
478 				clk = clk_register_divider_table(NULL,
479 					list->name, list->parent_names[0],
480 					flags,
481 					ctx->reg_base + list->muxdiv_offset,
482 					list->div_shift, list->div_width,
483 					list->div_flags, list->div_table,
484 					&ctx->lock);
485 			else
486 				clk = clk_register_divider(NULL, list->name,
487 					list->parent_names[0], flags,
488 					ctx->reg_base + list->muxdiv_offset,
489 					list->div_shift, list->div_width,
490 					list->div_flags, &ctx->lock);
491 			break;
492 		case branch_fraction_divider:
493 			clk = rockchip_clk_register_frac_branch(ctx, list->name,
494 				list->parent_names, list->num_parents,
495 				ctx->reg_base, list->muxdiv_offset,
496 				list->div_flags,
497 				list->gate_offset, list->gate_shift,
498 				list->gate_flags, flags, list->child,
499 				&ctx->lock);
500 			break;
501 		case branch_gate:
502 			flags |= CLK_SET_RATE_PARENT;
503 
504 			clk = clk_register_gate(NULL, list->name,
505 				list->parent_names[0], flags,
506 				ctx->reg_base + list->gate_offset,
507 				list->gate_shift, list->gate_flags, &ctx->lock);
508 			break;
509 		case branch_composite:
510 			clk = rockchip_clk_register_branch(list->name,
511 				list->parent_names, list->num_parents,
512 				ctx->reg_base, list->muxdiv_offset,
513 				list->mux_shift,
514 				list->mux_width, list->mux_flags,
515 				list->div_shift, list->div_width,
516 				list->div_flags, list->div_table,
517 				list->gate_offset, list->gate_shift,
518 				list->gate_flags, flags, &ctx->lock);
519 			break;
520 		case branch_mmc:
521 			clk = rockchip_clk_register_mmc(
522 				list->name,
523 				list->parent_names, list->num_parents,
524 				ctx->reg_base + list->muxdiv_offset,
525 				list->div_shift
526 			);
527 			break;
528 		case branch_inverter:
529 			clk = rockchip_clk_register_inverter(
530 				list->name, list->parent_names,
531 				list->num_parents,
532 				ctx->reg_base + list->muxdiv_offset,
533 				list->div_shift, list->div_flags, &ctx->lock);
534 			break;
535 		case branch_factor:
536 			clk = rockchip_clk_register_factor_branch(
537 				list->name, list->parent_names,
538 				list->num_parents, ctx->reg_base,
539 				list->div_shift, list->div_width,
540 				list->gate_offset, list->gate_shift,
541 				list->gate_flags, flags, &ctx->lock);
542 			break;
543 		case branch_ddrclk:
544 			clk = rockchip_clk_register_ddrclk(
545 				list->name, list->flags,
546 				list->parent_names, list->num_parents,
547 				list->muxdiv_offset, list->mux_shift,
548 				list->mux_width, list->div_shift,
549 				list->div_width, list->div_flags,
550 				ctx->reg_base, &ctx->lock);
551 			break;
552 		}
553 
554 		/* none of the cases above matched */
555 		if (!clk) {
556 			pr_err("%s: unknown clock type %d\n",
557 			       __func__, list->branch_type);
558 			continue;
559 		}
560 
561 		if (IS_ERR(clk)) {
562 			pr_err("%s: failed to register clock %s: %ld\n",
563 			       __func__, list->name, PTR_ERR(clk));
564 			continue;
565 		}
566 
567 		rockchip_clk_add_lookup(ctx, clk, list->id);
568 	}
569 }
570 
571 void __init rockchip_clk_register_armclk(struct rockchip_clk_provider *ctx,
572 			unsigned int lookup_id,
573 			const char *name, const char *const *parent_names,
574 			u8 num_parents,
575 			const struct rockchip_cpuclk_reg_data *reg_data,
576 			const struct rockchip_cpuclk_rate_table *rates,
577 			int nrates)
578 {
579 	struct clk *clk;
580 
581 	clk = rockchip_clk_register_cpuclk(name, parent_names, num_parents,
582 					   reg_data, rates, nrates,
583 					   ctx->reg_base, &ctx->lock);
584 	if (IS_ERR(clk)) {
585 		pr_err("%s: failed to register clock %s: %ld\n",
586 		       __func__, name, PTR_ERR(clk));
587 		return;
588 	}
589 
590 	rockchip_clk_add_lookup(ctx, clk, lookup_id);
591 }
592 
593 void __init rockchip_clk_protect_critical(const char *const clocks[],
594 					  int nclocks)
595 {
596 	int i;
597 
598 	/* Protect the clocks that needs to stay on */
599 	for (i = 0; i < nclocks; i++) {
600 		struct clk *clk = __clk_lookup(clocks[i]);
601 
602 		if (clk)
603 			clk_prepare_enable(clk);
604 	}
605 }
606 
/* state shared with the single, global restart handler below */
static void __iomem *rst_base;		/* CRU register base (ctx->reg_base) */
static unsigned int reg_restart;	/* offset of the reset register */
static void (*cb_restart)(void);	/* optional pre-reset callback */

/*
 * Restart handler: run the optional callback, then write the reset
 * trigger value to the CRU register configured via
 * rockchip_register_restart_notifier().
 */
static int rockchip_restart_notify(struct notifier_block *this,
				   unsigned long mode, void *cmd)
{
	if (cb_restart)
		cb_restart();

	/* 0xfdb9 is presumably the SoC's reset magic value — see the TRM */
	writel(0xfdb9, rst_base + reg_restart);
	return NOTIFY_DONE;
}

static struct notifier_block rockchip_restart_handler = {
	.notifier_call = rockchip_restart_notify,
	/* above the default priority of 0, so this runs first */
	.priority = 128,
};
624 
625 void __init
626 rockchip_register_restart_notifier(struct rockchip_clk_provider *ctx,
627 					       unsigned int reg,
628 					       void (*cb)(void))
629 {
630 	int ret;
631 
632 	rst_base = ctx->reg_base;
633 	reg_restart = reg;
634 	cb_restart = cb;
635 	ret = register_restart_handler(&rockchip_restart_handler);
636 	if (ret)
637 		pr_err("%s: cannot register restart handler, %d\n",
638 		       __func__, ret);
639 }
640