xref: /linux/kernel/locking/qspinlock.c (revision 3ad0876554cafa368f574d4d408468510543e9ff)
/*
 * Queued spinlock
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * (C) Copyright 2013-2015 Hewlett-Packard Development Company, L.P.
 * (C) Copyright 2013-2014 Red Hat, Inc.
 * (C) Copyright 2015 Intel Corp.
 * (C) Copyright 2015 Hewlett-Packard Enterprise Development LP
 *
 * Authors: Waiman Long <waiman.long@hpe.com>
 *          Peter Zijlstra <peterz@infradead.org>
 */

#ifndef _GEN_PV_LOCK_SLOWPATH

#include <linux/smp.h>
#include <linux/bug.h>
#include <linux/cpumask.h>
#include <linux/percpu.h>
#include <linux/hardirq.h>
#include <linux/mutex.h>
#include <linux/prefetch.h>
#include <asm/byteorder.h>
#include <asm/qspinlock.h>

/*
 * The basic principle of a queue-based spinlock can best be understood
 * by studying a classic queue-based spinlock implementation called the
 * MCS lock. The paper below provides a good description of this kind
 * of lock.
 *
 * http://www.cise.ufl.edu/tr/DOC/REP-1992-71.pdf
 *
 * This queued spinlock implementation is based on the MCS lock; however,
 * to make it fit the 4 bytes that spinlock_t is assumed to be, and to
 * preserve the existing spinlock API, we must modify it.
 *
 * In particular, where the traditional MCS lock consists of a tail pointer
 * (8 bytes) and needs the next pointer (another 8 bytes) of its own node to
 * unlock the next pending waiter (next->locked), we compress both of these,
 * {tail, next->locked}, into a single u32 value.
 *
 * A spinlock disables recursion within its own context, and there is a
 * limit to the contexts that can nest: task, softirq, hardirq and nmi. As
 * there are at most 4 nesting levels, the level can be encoded in a 2-bit
 * number, and the tail can then be encoded by combining that 2-bit nesting
 * level with the cpu number. With one byte for the lock value and 3 bytes
 * for the tail, only a 32-bit word is needed. Even though we only need 1
 * bit for the lock, we extend it to a full byte to achieve better
 * performance on architectures that support atomic byte writes.
 *
 * We also change the first spinner to spin on the lock bit instead of its
 * node, thereby avoiding the need to carry a node from lock to unlock and
 * preserving the existing lock API. This also makes the unlock code simpler
 * and faster.
 *
 * N.B. The current implementation only supports architectures that allow
 *      atomic operations on 8-bit and 16-bit data types.
 */
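
/*
 * Illustrative decomposition (a sketch, assuming the common NR_CPUS < 16K
 * layout from asm-generic/qspinlock_types.h):
 *
 *	bits  0- 7: locked byte
 *	bits  8-15: pending
 *	bits 16-17: tail index (2-bit nesting level)
 *	bits 18-31: tail cpu (+1, so that 0 can mean "no tail")
 *
 * For example, a value of 0x00180101 would decompose into: locked (0x01),
 * a pending waiter (0x01) and a queue tail of cpu 5/idx 0, since
 * (5 + 1) << 18 == 0x00180000.
 */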

#include "mcs_spinlock.h"

#ifdef CONFIG_PARAVIRT_SPINLOCKS
#define MAX_NODES	8
#else
#define MAX_NODES	4
#endif

/*
 * Per-CPU queue node structures; we can never have more than 4 nested
 * contexts: task, softirq, hardirq, nmi.
 *
 * Exactly fits one 64-byte cacheline on a 64-bit architecture.
 *
 * PV doubles the storage and uses the second cacheline for PV state.
 */
static DEFINE_PER_CPU_ALIGNED(struct mcs_spinlock, mcs_nodes[MAX_NODES]);
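
/*
 * Example of how the nodes are consumed (an illustrative scenario, not
 * from the original source): a task contending on a qspinlock claims
 * mcs_nodes[0] of its CPU; if a hardirq then interrupts the spin and its
 * handler contends on another qspinlock, node->count has already been
 * incremented, so the handler claims mcs_nodes[1], and so on, one node
 * per nesting level.
 */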

/*
 * We must be able to distinguish between no-tail and the tail at 0:0,
 * therefore increment the cpu number by one.
 */

static inline __pure u32 encode_tail(int cpu, int idx)
{
	u32 tail;

#ifdef CONFIG_DEBUG_SPINLOCK
	BUG_ON(idx > 3);
#endif
	tail  = (cpu + 1) << _Q_TAIL_CPU_OFFSET;
	tail |= idx << _Q_TAIL_IDX_OFFSET; /* assume < 4 */

	return tail;
}

static inline __pure struct mcs_spinlock *decode_tail(u32 tail)
{
	int cpu = (tail >> _Q_TAIL_CPU_OFFSET) - 1;
	int idx = (tail &  _Q_TAIL_IDX_MASK) >> _Q_TAIL_IDX_OFFSET;

	return per_cpu_ptr(&mcs_nodes[idx], cpu);
}
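
#if 0
/*
 * Round-trip sketch (illustrative only, never compiled): assuming
 * _Q_TAIL_IDX_OFFSET == 16 and _Q_TAIL_CPU_OFFSET == 18, as in the
 * NR_CPUS < 16K layout of asm-generic/qspinlock_types.h.
 */
static void encode_decode_example(void)
{
	u32 tail = encode_tail(5, 2);	/* ((5 + 1) << 18) | (2 << 16) == 0x001a0000 */
	struct mcs_spinlock *node = decode_tail(tail);

	/* node == per_cpu_ptr(&mcs_nodes[2], 5) */
	(void)node;
}
#endif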

#define _Q_LOCKED_PENDING_MASK (_Q_LOCKED_MASK | _Q_PENDING_MASK)

/*
 * By using the whole second least significant byte for the pending bit,
 * the lock acquisition by the pending bit holder can be better optimized.
 *
 * This internal structure is also used by the set_locked() function,
 * which is not restricted to _Q_PENDING_BITS == 8.
 */
struct __qspinlock {
	union {
		atomic_t val;
#ifdef __LITTLE_ENDIAN
		struct {
			u8	locked;
			u8	pending;
		};
		struct {
			u16	locked_pending;
			u16	tail;
		};
#else
		struct {
			u16	tail;
			u16	locked_pending;
		};
		struct {
			u8	reserved[2];
			u8	pending;
			u8	locked;
		};
#endif
	};
};
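
/*
 * Worked overlay example (a sketch, assuming little-endian and
 * _Q_PENDING_BITS == 8): if atomic_read(&l->val) == 0x001a0100, then
 * l->locked == 0x00, l->pending == 0x01, l->locked_pending == 0x0100 and
 * l->tail == 0x001a, i.e. the lock is free, pending is set, and the tail
 * encodes cpu 5/idx 2 as in encode_tail() above.
 */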

#if _Q_PENDING_BITS == 8
/**
 * clear_pending_set_locked - take ownership and clear the pending bit.
 * @lock: Pointer to queued spinlock structure
 *
 * *,1,0 -> *,0,1
 *
 * Lock stealing is not allowed if this function is used.
 */
static __always_inline void clear_pending_set_locked(struct qspinlock *lock)
{
	struct __qspinlock *l = (void *)lock;

	WRITE_ONCE(l->locked_pending, _Q_LOCKED_VAL);
}
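
/*
 * E.g. (sketch, using the layout assumed above): with val == 0x001a0100,
 * i.e. (n,1,0), the halfword store above yields 0x001a0001, i.e. (n,0,1),
 * in a single write, without disturbing a concurrent xchg_tail() on the
 * other halfword.
 */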

/**
 * xchg_tail - Put in the new queue tail code word & retrieve previous one
 * @lock : Pointer to queued spinlock structure
 * @tail : The new queue tail code word
 * Return: The previous queue tail code word
 *
 * xchg(lock, tail), which heads an address dependency
 *
 * p,*,* -> n,*,* ; prev = xchg(lock, node)
 */
static __always_inline u32 xchg_tail(struct qspinlock *lock, u32 tail)
{
	struct __qspinlock *l = (void *)lock;

	/*
	 * Use release semantics to make sure that the MCS node is properly
	 * initialized before changing the tail code.
	 */
	return (u32)xchg_release(&l->tail,
				 tail >> _Q_TAIL_OFFSET) << _Q_TAIL_OFFSET;
}
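
/*
 * E.g. (sketch, assuming _Q_TAIL_OFFSET == 16): for tail == 0x001a0000
 * only the high u16 (0x001a) is exchanged, so the locked and pending
 * bytes are never touched; the old halfword is shifted back up to
 * reconstruct the previous tail code word.
 */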

#else /* _Q_PENDING_BITS == 8 */

/**
 * clear_pending_set_locked - take ownership and clear the pending bit.
 * @lock: Pointer to queued spinlock structure
 *
 * *,1,0 -> *,0,1
 */
static __always_inline void clear_pending_set_locked(struct qspinlock *lock)
{
	atomic_add(-_Q_PENDING_VAL + _Q_LOCKED_VAL, &lock->val);
}
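
/*
 * E.g. (sketch, assuming _Q_PENDING_VAL == (1 << 8) and _Q_LOCKED_VAL == 1):
 * the atomic_add() above adds -256 + 1 == -255, turning (*,1,0) into
 * (*,0,1) in one atomic operation. The tail bits are unaffected because
 * pending is known to be set here, so the subtraction cannot borrow out
 * of the pending field.
 */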

/**
 * xchg_tail - Put in the new queue tail code word & retrieve previous one
 * @lock : Pointer to queued spinlock structure
 * @tail : The new queue tail code word
 * Return: The previous queue tail code word
 *
 * xchg(lock, tail)
 *
 * p,*,* -> n,*,* ; prev = xchg(lock, node)
 */
static __always_inline u32 xchg_tail(struct qspinlock *lock, u32 tail)
{
	u32 old, new, val = atomic_read(&lock->val);

	for (;;) {
		new = (val & _Q_LOCKED_PENDING_MASK) | tail;
		/*
		 * Use release semantics to make sure that the MCS node is
		 * properly initialized before changing the tail code.
		 */
		old = atomic_cmpxchg_release(&lock->val, val, new);
		if (old == val)
			break;

		val = old;
	}
	return old;
}
#endif /* _Q_PENDING_BITS == 8 */

/**
 * set_locked - Set the lock bit and own the lock
 * @lock: Pointer to queued spinlock structure
 *
 * *,*,0 -> *,0,1
 */
static __always_inline void set_locked(struct qspinlock *lock)
{
	struct __qspinlock *l = (void *)lock;

	WRITE_ONCE(l->locked, _Q_LOCKED_VAL);
}


/*
 * Generate the native code for queued_spin_lock_slowpath(); provide NOPs
 * for all the PV callbacks.
 */

static __always_inline void __pv_init_node(struct mcs_spinlock *node) { }
static __always_inline void __pv_wait_node(struct mcs_spinlock *node,
					   struct mcs_spinlock *prev) { }
static __always_inline void __pv_kick_node(struct qspinlock *lock,
					   struct mcs_spinlock *node) { }
static __always_inline u32  __pv_wait_head_or_lock(struct qspinlock *lock,
						   struct mcs_spinlock *node)
						   { return 0; }

#define pv_enabled()		false

#define pv_init_node		__pv_init_node
#define pv_wait_node		__pv_wait_node
#define pv_kick_node		__pv_kick_node
#define pv_wait_head_or_lock	__pv_wait_head_or_lock

#ifdef CONFIG_PARAVIRT_SPINLOCKS
#define queued_spin_lock_slowpath	native_queued_spin_lock_slowpath
#endif

#endif /* _GEN_PV_LOCK_SLOWPATH */

/**
 * queued_spin_lock_slowpath - acquire the queued spinlock
 * @lock: Pointer to queued spinlock structure
 * @val: Current value of the queued spinlock 32-bit word
 *
 * (queue tail, pending bit, lock value)
 *
 *              fast     :    slow                                  :    unlock
 *                       :                                          :
 * uncontended  (0,0,0) -:--> (0,0,1) ------------------------------:--> (*,*,0)
 *                       :       | ^--------.------.             /  :
 *                       :       v           \      \            |  :
 * pending               :    (0,1,1) +--> (0,1,0)   \           |  :
 *                       :       | ^--'              |           |  :
 *                       :       v                   |           |  :
 * uncontended           :    (n,x,y) +--> (n,0,0) --'           |  :
 *   queue               :       | ^--'                          |  :
 *                       :       v                               |  :
 * contended             :    (*,x,y) +--> (*,0,0) ---> (*,0,1) -'  :
 *   queue               :         ^--'                             :
 */
void queued_spin_lock_slowpath(struct qspinlock *lock, u32 val)
{
	struct mcs_spinlock *prev, *next, *node;
	u32 new, old, tail;
	int idx;

	BUILD_BUG_ON(CONFIG_NR_CPUS >= (1U << _Q_TAIL_CPU_BITS));

	if (pv_enabled())
		goto queue;

	if (virt_spin_lock(lock))
		return;

	/*
	 * wait for in-progress pending->locked hand-overs
	 *
	 * 0,1,0 -> 0,0,1
	 */
	if (val == _Q_PENDING_VAL) {
		while ((val = atomic_read(&lock->val)) == _Q_PENDING_VAL)
			cpu_relax();
	}

	/*
	 * trylock || pending
	 *
	 * 0,0,0 -> 0,0,1 ; trylock
	 * 0,0,1 -> 0,1,1 ; pending
	 */
	for (;;) {
		/*
		 * If we observe any contention; queue.
		 */
		if (val & ~_Q_LOCKED_MASK)
			goto queue;

		new = _Q_LOCKED_VAL;
		if (val == new)
			new |= _Q_PENDING_VAL;

		/*
		 * Acquire semantics are required here as the function may
		 * return immediately if the lock was free.
		 */
		old = atomic_cmpxchg_acquire(&lock->val, val, new);
		if (old == val)
			break;

		val = old;
	}

	/*
	 * we won the trylock
	 */
	if (new == _Q_LOCKED_VAL)
		return;

	/*
	 * we're pending, wait for the owner to go away.
	 *
	 * *,1,1 -> *,1,0
	 *
	 * this wait loop must be a load-acquire such that we match the
	 * store-release that clears the locked bit and create lock
	 * sequentiality; this is because not all clear_pending_set_locked()
	 * implementations imply full barriers.
	 */
	smp_cond_load_acquire(&lock->val.counter, !(VAL & _Q_LOCKED_MASK));

	/*
	 * take ownership and clear the pending bit.
	 *
	 * *,1,0 -> *,0,1
	 */
	clear_pending_set_locked(lock);
	return;

	/*
	 * End of pending bit optimistic spinning and beginning of MCS
	 * queuing.
	 */
queue:
	node = this_cpu_ptr(&mcs_nodes[0]);
	idx = node->count++;
	tail = encode_tail(smp_processor_id(), idx);

	node += idx;

	/*
	 * Ensure that we increment the head node->count before initialising
	 * the actual node. If the compiler were to reorder these stores, an
	 * IRQ could overwrite our assignments.
	 */
	barrier();

	node->locked = 0;
	node->next = NULL;
	pv_init_node(node);

	/*
	 * We touched a (possibly) cold cacheline in the per-cpu queue node;
	 * attempt the trylock once more in the hope someone let go while we
	 * weren't watching.
	 */
	if (queued_spin_trylock(lock))
		goto release;

	/*
	 * We have already touched the queueing cacheline; don't bother with
	 * pending stuff.
	 *
	 * p,*,* -> n,*,*
	 *
	 * RELEASE, such that the stores to @node must be complete.
	 */
	old = xchg_tail(lock, tail);
	next = NULL;

	/*
	 * if there was a previous node; link it and wait until reaching the
	 * head of the waitqueue.
	 */
	if (old & _Q_TAIL_MASK) {
		prev = decode_tail(old);

		/*
		 * We must ensure that the stores to @node are observed before
		 * the write to prev->next. The address dependency from
		 * xchg_tail is not sufficient to ensure this because the read
		 * component of xchg_tail is unordered with respect to the
		 * initialisation of @node.
		 */
		smp_store_release(&prev->next, node);

		pv_wait_node(node, prev);
		arch_mcs_spin_lock_contended(&node->locked);

		/*
		 * While waiting for the MCS lock, the next pointer may have
		 * been set by another lock waiter. We optimistically load
		 * the next pointer & prefetch the cacheline for writing
		 * to reduce latency in the upcoming MCS unlock operation.
		 */
		next = READ_ONCE(node->next);
		if (next)
			prefetchw(next);
	}

	/*
	 * we're at the head of the waitqueue, wait for the owner & pending to
	 * go away.
	 *
	 * *,x,y -> *,0,0
	 *
	 * this wait loop must use a load-acquire such that we match the
	 * store-release that clears the locked bit and create lock
	 * sequentiality; this is because the set_locked() function below
	 * does not imply a full barrier.
	 *
	 * The PV pv_wait_head_or_lock function, if active, will acquire
	 * the lock and return a non-zero value. So we have to skip the
	 * smp_cond_load_acquire() call. As the next PV queue head hasn't been
	 * designated yet, there is no way for the locked value to become
	 * _Q_SLOW_VAL. So both the set_locked() and the
	 * atomic_cmpxchg_relaxed() calls will be safe.
	 *
	 * If PV isn't active, 0 will be returned instead.
	 */
	if ((val = pv_wait_head_or_lock(lock, node)))
		goto locked;

	val = smp_cond_load_acquire(&lock->val.counter, !(VAL & _Q_LOCKED_PENDING_MASK));

locked:
	/*
	 * claim the lock:
	 *
	 * n,0,0 -> 0,0,1 : lock, uncontended
	 * *,0,0 -> *,0,1 : lock, contended
	 *
	 * If the queue head is the only one in the queue (lock value == tail),
	 * clear the tail code and grab the lock. Otherwise, we only need
	 * to grab the lock.
	 */
	for (;;) {
		/* In the PV case we might already have _Q_LOCKED_VAL set */
		if ((val & _Q_TAIL_MASK) != tail) {
			set_locked(lock);
			break;
		}
		/*
		 * The smp_cond_load_acquire() call above has provided the
		 * necessary acquire semantics required for locking. At most
		 * two iterations of this loop may run.
		 */
		old = atomic_cmpxchg_relaxed(&lock->val, val, _Q_LOCKED_VAL);
		if (old == val)
			goto release;	/* No contention */

		val = old;
	}

	/*
	 * contended path; wait for next if not observed yet, release.
	 */
	if (!next) {
		while (!(next = READ_ONCE(node->next)))
			cpu_relax();
	}

	arch_mcs_spin_unlock_contended(&next->locked);
	pv_kick_node(lock, next);

release:
	/*
	 * release the node
	 */
	__this_cpu_dec(mcs_nodes[0].count);
}
EXPORT_SYMBOL(queued_spin_lock_slowpath);
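
#if 0
/*
 * For context (an illustrative sketch, not built here): the uncontended
 * fastpath lives in include/asm-generic/qspinlock.h and only calls into
 * the slowpath above when the lock word is nonzero; it looks roughly
 * like this:
 */
static __always_inline void queued_spin_lock(struct qspinlock *lock)
{
	u32 val;

	val = atomic_cmpxchg_acquire(&lock->val, 0, _Q_LOCKED_VAL);
	if (likely(val == 0))
		return;
	queued_spin_lock_slowpath(lock, val);
}
#endif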

/*
 * Generate the paravirt code for queued_spin_lock_slowpath().
 */
#if !defined(_GEN_PV_LOCK_SLOWPATH) && defined(CONFIG_PARAVIRT_SPINLOCKS)
#define _GEN_PV_LOCK_SLOWPATH

#undef  pv_enabled
#define pv_enabled()	true

#undef pv_init_node
#undef pv_wait_node
#undef pv_kick_node
#undef pv_wait_head_or_lock

#undef  queued_spin_lock_slowpath
#define queued_spin_lock_slowpath	__pv_queued_spin_lock_slowpath

#include "qspinlock_paravirt.h"
#include "qspinlock.c"

#endif
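
/*
 * Note on the re-inclusion above (a sketch of the mechanism): on the
 * second pass _GEN_PV_LOCK_SLOWPATH is defined, so everything between the
 * first #ifndef and its matching #endif is skipped; pv_enabled() now
 * returns true, the pv_*() hooks resolve to the implementations from
 * qspinlock_paravirt.h, and the slowpath body above is compiled a second
 * time under the name __pv_queued_spin_lock_slowpath().
 */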