/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_U64_STATS_SYNC_H
#define _LINUX_U64_STATS_SYNC_H

/*
 * Protect against 64-bit values tearing on 32-bit architectures. This is
 * typically used for statistics read/update in different subsystems.
 *
 * Key points:
 *
 * - Use a seqcount on 32-bit SMP, only disable preemption for 32-bit UP.
 * - The whole thing is a no-op on 64-bit architectures.
 *
 * Usage constraints:
 *
 * 1) Write side must ensure mutual exclusion, or one seqcount update could
 *    be lost, thus blocking readers forever.
 *
 * 2) Write side must disable preemption, or a seqcount reader can preempt the
 *    writer and also spin forever.
 *
 * 3) Write side must use the _irqsave() variant if other writers, or a reader,
 *    can be invoked from an IRQ context.
 *
 * 4) If a reader fetches several counters, there is no guarantee the whole
 *    values are consistent w.r.t. each other (remember point #2: seqcounts
 *    are not used for 64-bit architectures).
 *
 * 5) Readers are allowed to sleep or be preempted/interrupted: they perform
 *    pure reads.
 *
 * 6) Readers must use both u64_stats_fetch_{begin,retry}_irq() if the stats
 *    might be updated from a hardirq or softirq context (remember point #1:
 *    seqcounts are not used for UP kernels). 32-bit UP stat readers could read
 *    corrupted 64-bit values otherwise.
 *
 * Usage:
 *
 * A stats producer (writer) should use the following template, provided it
 * already has exclusive access to the counters (a lock is already taken, or
 * per cpu data is used [in a non preemptible context]):
 *
 *   spin_lock_bh(...) or other synchronization to get exclusive access
 *   ...
 *   u64_stats_update_begin(&stats->syncp);
 *   u64_stats_add(&stats->bytes64, len);   // non atomic operation
 *   u64_stats_inc(&stats->packets64);      // non atomic operation
 *   u64_stats_update_end(&stats->syncp);
 *
 * A consumer (reader) should use the following template to get a consistent
 * snapshot of each variable (but no consistency guarantee across several of
 * them):
 *
 *   u64 tbytes, tpackets;
 *   unsigned int start;
 *
 *   do {
 *           start = u64_stats_fetch_begin(&stats->syncp);
 *           tbytes = u64_stats_read(&stats->bytes64);     // non atomic operation
 *           tpackets = u64_stats_read(&stats->packets64); // non atomic operation
 *   } while (u64_stats_fetch_retry(&stats->syncp, start));
 *
 *
 * Example of use in drivers/net/loopback.c, using per_cpu containers,
 * in BH disabled context.
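 *
 * As an illustration only (a sketch; the structure and function names below
 * are hypothetical, loosely modeled on the loopback.c pattern, and assume
 * <linux/percpu.h> for DEFINE_PER_CPU()/this_cpu_ptr()), such a per_cpu
 * container and its writer could look like:
 *
 *   struct pcpu_example_stats {
 *           u64_stats_t             packets;
 *           u64_stats_t             bytes;
 *           struct u64_stats_sync   syncp;  // set up with u64_stats_init()
 *   };
 *
 *   DEFINE_PER_CPU(struct pcpu_example_stats, example_stats);
 *
 *   static void example_count_packet(unsigned int len)
 *   {
 *           struct pcpu_example_stats *stats;
 *
 *           local_bh_disable();                     // writer: no preemption, no BH
 *           stats = this_cpu_ptr(&example_stats);
 *           u64_stats_update_begin(&stats->syncp);
 *           u64_stats_add(&stats->bytes, len);      // non atomic operation
 *           u64_stats_inc(&stats->packets);         // non atomic operation
 *           u64_stats_update_end(&stats->syncp);
 *           local_bh_enable();
 *   }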
 */
#include <linux/seqlock.h>

struct u64_stats_sync {
#if BITS_PER_LONG == 32 && defined(CONFIG_SMP)
	seqcount_t	seq;
#endif
};

#if BITS_PER_LONG == 64
#include <asm/local64.h>

typedef struct {
	local64_t	v;
} u64_stats_t;

static inline u64 u64_stats_read(const u64_stats_t *p)
{
	return local64_read(&p->v);
}

static inline void u64_stats_add(u64_stats_t *p, unsigned long val)
{
	local64_add(val, &p->v);
}

static inline void u64_stats_inc(u64_stats_t *p)
{
	local64_inc(&p->v);
}

#else

typedef struct {
	u64	v;
} u64_stats_t;

static inline u64 u64_stats_read(const u64_stats_t *p)
{
	return p->v;
}

static inline void u64_stats_add(u64_stats_t *p, unsigned long val)
{
	p->v += val;
}

static inline void u64_stats_inc(u64_stats_t *p)
{
	p->v++;
}
#endif

static inline void u64_stats_init(struct u64_stats_sync *syncp)
{
#if BITS_PER_LONG == 32 && defined(CONFIG_SMP)
	seqcount_init(&syncp->seq);
#endif
}

static inline void u64_stats_update_begin(struct u64_stats_sync *syncp)
{
#if BITS_PER_LONG == 32 && defined(CONFIG_SMP)
	write_seqcount_begin(&syncp->seq);
#endif
}

static inline void u64_stats_update_end(struct u64_stats_sync *syncp)
{
#if BITS_PER_LONG == 32 && defined(CONFIG_SMP)
	write_seqcount_end(&syncp->seq);
#endif
}

static inline unsigned long
u64_stats_update_begin_irqsave(struct u64_stats_sync *syncp)
{
	unsigned long flags = 0;

#if BITS_PER_LONG == 32 && defined(CONFIG_SMP)
	local_irq_save(flags);
	write_seqcount_begin(&syncp->seq);
#endif
	return flags;
}

static inline void
u64_stats_update_end_irqrestore(struct u64_stats_sync *syncp,
				unsigned long flags)
{
#if BITS_PER_LONG == 32 && defined(CONFIG_SMP)
	write_seqcount_end(&syncp->seq);
	local_irq_restore(flags);
#endif
}

static inline unsigned int __u64_stats_fetch_begin(const struct u64_stats_sync *syncp)
{
#if BITS_PER_LONG == 32 && defined(CONFIG_SMP)
	return read_seqcount_begin(&syncp->seq);
#else
	return 0;
#endif
}

static inline unsigned int u64_stats_fetch_begin(const struct u64_stats_sync *syncp)
{
#if BITS_PER_LONG == 32 && !defined(CONFIG_SMP)
	preempt_disable();
#endif
	return __u64_stats_fetch_begin(syncp);
}

static inline bool __u64_stats_fetch_retry(const struct u64_stats_sync *syncp,
					   unsigned int start)
{
#if BITS_PER_LONG == 32 && defined(CONFIG_SMP)
	return read_seqcount_retry(&syncp->seq, start);
#else
	return false;
#endif
}

static inline bool u64_stats_fetch_retry(const struct u64_stats_sync *syncp,
					 unsigned int start)
{
#if BITS_PER_LONG == 32 && !defined(CONFIG_SMP)
	preempt_enable();
#endif
	return __u64_stats_fetch_retry(syncp, start);
}

/*
 * If irq handlers can update the u64 counters, readers can use the following
 * helpers:
 * - 32-bit SMP arches use seqcount protection, irq safe.
 * - 32-bit UP must disable irqs.
 * - 64-bit arches can atomically read u64 values, irq safe.
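 *
 * A sketch of such an irq-safe reader (stats, bytes64 and packets64 mirror
 * the hypothetical names used in the reader template above); it is the same
 * retry loop, only with the _irq variants:
 *
 *   u64 tbytes, tpackets;
 *   unsigned int start;
 *
 *   do {
 *           start = u64_stats_fetch_begin_irq(&stats->syncp);
 *           tbytes = u64_stats_read(&stats->bytes64);     // non atomic operation
 *           tpackets = u64_stats_read(&stats->packets64); // non atomic operation
 *   } while (u64_stats_fetch_retry_irq(&stats->syncp, start));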
 */
static inline unsigned int u64_stats_fetch_begin_irq(const struct u64_stats_sync *syncp)
{
#if BITS_PER_LONG == 32 && !defined(CONFIG_SMP)
	local_irq_disable();
#endif
	return __u64_stats_fetch_begin(syncp);
}

static inline bool u64_stats_fetch_retry_irq(const struct u64_stats_sync *syncp,
					     unsigned int start)
{
#if BITS_PER_LONG == 32 && !defined(CONFIG_SMP)
	local_irq_enable();
#endif
	return __u64_stats_fetch_retry(syncp, start);
}

#endif /* _LINUX_U64_STATS_SYNC_H */