// SPDX-License-Identifier: GPL-2.0-only
/*
 * Time-related functions for the Hexagon architecture
 *
 * Copyright (c) 2010-2011, The Linux Foundation. All rights reserved.
 */

#include <linux/init.h>
#include <linux/clockchips.h>
#include <linux/clocksource.h>
#include <linux/interrupt.h>
#include <linux/err.h>
#include <linux/platform_device.h>
#include <linux/ioport.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/module.h>

#include <asm/timer-regs.h>
#include <asm/hexagon_vm.h>

/*
 * For the clocksource we need:
 *	pcycle frequency (600MHz)
 * For the loops_per_jiffy we need:
 *	thread/cpu frequency (100MHz)
 * And for the timer, we need:
 *	sleep clock rate
 */

cycles_t	pcycle_freq_mhz;
cycles_t	thread_freq_mhz;
cycles_t	sleep_clk_freq;

static struct resource rtos_timer_resources[] = {
	{
		.start	= RTOS_TIMER_REGS_ADDR,
		.end	= RTOS_TIMER_REGS_ADDR+PAGE_SIZE-1,
		.flags	= IORESOURCE_MEM,
	},
};

static struct platform_device rtos_timer_device = {
	.name		= "rtos_timer",
	.id		= -1,
	.num_resources	= ARRAY_SIZE(rtos_timer_resources),
	.resource	= rtos_timer_resources,
};

/*  A lot of this stuff should move into a platform-specific section.  */
struct adsp_hw_timer_struct {
	u32 match;   /*  Match value  */
	u32 count;   /*  Current count  */
	u32 enable;  /*  [1] - CLR_ON_MATCH_EN, [0] - EN  */
	u32 clear;   /*  one-shot register that clears the count  */
};

/*  Look for "TCX0" for related constants.  */
static struct adsp_hw_timer_struct __iomem *rtos_timer;

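/*
 * Clocksource read callback: return the free-running pcycle count,
 * obtained from the hypervisor via __vmgettime().
 */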
static u64 timer_get_cycles(struct clocksource *cs)
{
	return (u64) __vmgettime();
}

static struct clocksource hexagon_clocksource = {
	.name		= "pcycles",
	.rating		= 250,
	.read		= timer_get_cycles,
	.mask		= CLOCKSOURCE_MASK(64),
	.flags		= CLOCK_SOURCE_IS_CONTINUOUS,
};

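/*
 * Program a one-shot expiry 'delta' timer ticks in the future:
 * reset the count, set the match value, then enable the timer so
 * its interrupt is raised when count reaches match.
 */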
static int set_next_event(unsigned long delta, struct clock_event_device *evt)
{
	/*  Assuming the timer will be disabled when we enter here.  */

	iowrite32(1, &rtos_timer->clear);
	iowrite32(0, &rtos_timer->clear);

	iowrite32(delta, &rtos_timer->match);
	iowrite32(1 << TIMER_ENABLE, &rtos_timer->enable);
	return 0;
}

#ifdef CONFIG_SMP
/*  Broadcast mechanism  */
static void broadcast(const struct cpumask *mask)
{
	send_ipi(mask, IPI_TIMER);
}
#endif

/* XXX Implement set_state_shutdown() */
static struct clock_event_device hexagon_clockevent_dev = {
	.name		= "clockevent",
	.features	= CLOCK_EVT_FEAT_ONESHOT,
	.rating		= 400,
	.irq		= RTOS_TIMER_INT,
	.set_next_event = set_next_event,
#ifdef CONFIG_SMP
	.broadcast	= broadcast,
#endif
};

#ifdef CONFIG_SMP
static DEFINE_PER_CPU(struct clock_event_device, clock_events);

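/*
 * Register a per-cpu dummy clockevent device.  Timer broadcasts sent
 * as IPI_TIMER are delivered to these dummy devices by ipi_timer()
 * below.
 */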
void setup_percpu_clockdev(void)
{
	int cpu = smp_processor_id();
	struct clock_event_device *ce_dev = &hexagon_clockevent_dev;
	struct clock_event_device *dummy_clock_dev =
		&per_cpu(clock_events, cpu);

	memcpy(dummy_clock_dev, ce_dev, sizeof(*dummy_clock_dev));
	INIT_LIST_HEAD(&dummy_clock_dev->list);

	dummy_clock_dev->features = CLOCK_EVT_FEAT_DUMMY;
	dummy_clock_dev->cpumask = cpumask_of(cpu);

	clockevents_register_device(dummy_clock_dev);
}

/*  Called from smp.c for each CPU's timer ipi call  */
void ipi_timer(void)
{
	int cpu = smp_processor_id();
	struct clock_event_device *ce_dev = &per_cpu(clock_events, cpu);

	ce_dev->event_handler(ce_dev);
}
#endif /* CONFIG_SMP */

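/*
 * Timer interrupt handler: the timer is one-shot, so disable it before
 * handing the event to the clockevent layer, which re-arms it through
 * set_next_event() if another tick is needed.
 */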
static irqreturn_t timer_interrupt(int irq, void *devid)
{
	struct clock_event_device *ce_dev = &hexagon_clockevent_dev;

	iowrite32(0, &rtos_timer->enable);
	ce_dev->event_handler(ce_dev);

	return IRQ_HANDLED;
}

/*
 * time_init_deferred - called from start_kernel() via late_time_init to
 * set up the timer and clocksource
 *
 * Install the IRQ handler for the clock and set up the timers.
 * This is done late so that ioremap() can be used.
 *
 * This runs just before the delay loop is calibrated and is
 * relied on by the delay calibration.
 */
void __init time_init_deferred(void)
{
	struct resource *resource = NULL;
	struct clock_event_device *ce_dev = &hexagon_clockevent_dev;
	unsigned long flag = IRQF_TIMER | IRQF_TRIGGER_RISING;

	ce_dev->cpumask = cpu_all_mask;

	if (!resource)
		resource = rtos_timer_device.resource;

	/*  ioremap here means this has to run later, after paging init  */
	rtos_timer = ioremap(resource->start, resource_size(resource));

	if (!rtos_timer)
		release_mem_region(resource->start, resource_size(resource));

	clocksource_register_khz(&hexagon_clocksource, pcycle_freq_mhz * 1000);

	/*  Note: the sim generic RTOS clock is apparently really 18750Hz  */

	/*
	 * Last arg is some guaranteed seconds for which the conversion will
	 * work without overflow.
	 */
	clockevents_calc_mult_shift(ce_dev, sleep_clk_freq, 4);

	ce_dev->max_delta_ns = clockevent_delta2ns(0x7fffffff, ce_dev);
	ce_dev->max_delta_ticks = 0x7fffffff;
	ce_dev->min_delta_ns = clockevent_delta2ns(0xf, ce_dev);
	ce_dev->min_delta_ticks = 0xf;

#ifdef CONFIG_SMP
	setup_percpu_clockdev();
#endif

	clockevents_register_device(ce_dev);
	if (request_irq(ce_dev->irq, timer_interrupt, flag, "rtos_timer", NULL))
		pr_err("Failed to register rtos_timer interrupt\n");
}

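/*
 * Early entry point from start_kernel(); the real setup is deferred to
 * late_time_init so that ioremap() is available (see above).
 */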
void __init time_init(void)
{
	late_time_init = time_init_deferred;
}

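/*
 * Busy-wait for 'cycles' pcycles, as measured by __vmgettime().
 */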
void __delay(unsigned long cycles)
{
	unsigned long long start = __vmgettime();

	while ((__vmgettime() - start) < cycles)
		cpu_relax();
}
EXPORT_SYMBOL(__delay);

/*
 * This could become parametric or perhaps even computed at run-time,
 * but for now we take the observed simulator jitter.
 */
static long long fudgefactor = 350;  /* Maybe lower if kernel optimized. */

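/*
 * Busy-wait for approximately 'usecs' microseconds: pcycle_freq_mhz
 * pcycles elapse per microsecond, minus the fudge factor above for the
 * observed simulator jitter.
 */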
void __udelay(unsigned long usecs)
{
	unsigned long long start = __vmgettime();
	unsigned long long finish = (pcycle_freq_mhz * usecs) - fudgefactor;

	while ((__vmgettime() - start) < finish)
		cpu_relax(); /*  not sure how this improves readability  */
}
EXPORT_SYMBOL(__udelay);