2 | 2 | #ifndef _ASM_GENERIC_BITOPS_ATOMIC_H_ |
3 | 3 | #define _ASM_GENERIC_BITOPS_ATOMIC_H_ |
4 | 4 |
5 | | -#include <asm/types.h> |
6 | | -#include <linux/irqflags.h> |
7 | | - |
8 | | -#ifdef CONFIG_SMP |
9 | | -#include <asm/spinlock.h> |
10 | | -#include <asm/cache.h> /* we use L1_CACHE_BYTES */ |
11 | | - |
12 | | -/* Use an array of spinlocks for our atomic_ts. |
13 | | - * Hash function to index into a different SPINLOCK. |
14 | | - * Since "a" is usually an address, use one spinlock per cacheline. |
15 | | - */ |
16 | | -# define ATOMIC_HASH_SIZE 4 |
17 | | -# define ATOMIC_HASH(a) (&(__atomic_hash[ (((unsigned long) a)/L1_CACHE_BYTES) & (ATOMIC_HASH_SIZE-1) ])) |
18 | | - |
19 | | -extern arch_spinlock_t __atomic_hash[ATOMIC_HASH_SIZE] __lock_aligned; |
20 | | - |
21 | | -/* Can't use raw_spin_lock_irq because of #include problems, so |
22 | | - * this is the substitute */ |
23 | | -#define _atomic_spin_lock_irqsave(l,f) do { \ |
24 | | - arch_spinlock_t *s = ATOMIC_HASH(l); \ |
25 | | - local_irq_save(f); \ |
26 | | - arch_spin_lock(s); \ |
27 | | -} while(0) |
28 | | - |
29 | | -#define _atomic_spin_unlock_irqrestore(l,f) do { \ |
30 | | - arch_spinlock_t *s = ATOMIC_HASH(l); \ |
31 | | - arch_spin_unlock(s); \ |
32 | | - local_irq_restore(f); \ |
33 | | -} while(0) |
34 | | - |
35 | | - |
36 | | -#else |
37 | | -# define _atomic_spin_lock_irqsave(l,f) do { local_irq_save(f); } while (0) |
38 | | -# define _atomic_spin_unlock_irqrestore(l,f) do { local_irq_restore(f); } while (0) |
39 | | -#endif |
| 5 | +#include <linux/atomic.h> |
| 6 | +#include <linux/compiler.h> |
| 7 | +#include <asm/barrier.h> |
40 | 8 |
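For reference, a worked example of how the removed ATOMIC_HASH() scheme above picked its lock. The 32-byte line size is an assumption for illustration only; L1_CACHE_BYTES differs per architecture.

/* Illustrative only, assuming L1_CACHE_BYTES == 32 and ATOMIC_HASH_SIZE == 4:
 *
 *        addr              = 0x1234
 *        0x1234 / 32       = 0x91   (cache-line index)
 *        0x91 & (4 - 1)    = 1      -> __atomic_hash[1]
 *
 * Every word in one cache line therefore serialised on the same
 * arch_spinlock_t, while words in other lines usually took a different
 * lock, keeping contention per cache line rather than global.
 */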
41 | 9 | /* |
42 | | - * NMI events can occur at any time, including when interrupts have been |
43 | | - * disabled by *_irqsave(). So you can get NMI events occurring while a |
44 | | - * *_bit function is holding a spin lock. If the NMI handler also wants |
45 | | - * to do bit manipulation (and they do) then you can get a deadlock |
46 | | - * between the original caller of *_bit() and the NMI handler. |
47 | | - * |
48 | | - * by Keith Owens |
| 10 | + * Implementation of atomic bitops using atomic-fetch ops. |
| 11 | + * See Documentation/atomic_bitops.txt for details. |
49 | 12 | */ |
50 | 13 |
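All of the rewritten helpers below follow one pattern: BIT_WORD() selects the unsigned long that holds the bit, BIT_MASK() builds a one-bit mask within that word, and the read-modify-write itself is delegated to the atomic_long_*() operations pulled in from <linux/atomic.h>. A minimal sketch with illustrative numbers, assuming a 64-bit kernel (BITS_PER_LONG == 64):

/* Illustrative values only, for nr == 70 on a 64-bit kernel:
 *
 *        BIT_WORD(70) == 70 / 64 == 1           -> operate on p[1]
 *        BIT_MASK(70) == 1UL << (70 % 64)       -> 1UL << 6
 *
 * so set_bit(70, p) reduces to
 * atomic_long_or(1UL << 6, (atomic_long_t *)&p[1]): a single lockless
 * atomic RMW, which also removes the NMI-vs-spinlock deadlock the
 * deleted comment warned about.
 */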
51 | | -/** |
52 | | - * set_bit - Atomically set a bit in memory |
53 | | - * @nr: the bit to set |
54 | | - * @addr: the address to start counting from |
55 | | - * |
56 | | - * This function is atomic and may not be reordered. See __set_bit() |
57 | | - * if you do not require the atomic guarantees. |
58 | | - * |
59 | | - * Note: there are no guarantees that this function will not be reordered |
60 | | - * on non x86 architectures, so if you are writing portable code, |
61 | | - * make sure not to rely on its reordering guarantees. |
62 | | - * |
63 | | - * Note that @nr may be almost arbitrarily large; this function is not |
64 | | - * restricted to acting on a single-word quantity. |
65 | | - */ |
66 | | -static inline void set_bit(int nr, volatile unsigned long *addr) |
| 14 | +static inline void set_bit(unsigned int nr, volatile unsigned long *p) |
67 | 15 | { |
68 | | - unsigned long mask = BIT_MASK(nr); |
69 | | - unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr); |
70 | | - unsigned long flags; |
71 | | - |
72 | | - _atomic_spin_lock_irqsave(p, flags); |
73 | | - *p |= mask; |
74 | | - _atomic_spin_unlock_irqrestore(p, flags); |
| 16 | + p += BIT_WORD(nr); |
| 17 | + atomic_long_or(BIT_MASK(nr), (atomic_long_t *)p); |
75 | 18 | } |
76 | 19 |
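As the removed kerneldoc noted, @nr is not limited to a single word. A minimal usage sketch; the bitmap name and size are made up for illustration:

#include <linux/bitmap.h>
#include <linux/bitops.h>

#define NR_SLOTS 128                            /* illustrative: two longs on 64-bit */
static DECLARE_BITMAP(slot_in_use, NR_SLOTS);   /* hypothetical bitmap */

static void mark_slot_used(unsigned int slot)
{
        /* BIT_WORD() steers the atomic OR to whichever word holds 'slot' */
        set_bit(slot, slot_in_use);
}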
77 | | -/** |
78 | | - * clear_bit - Clears a bit in memory |
79 | | - * @nr: Bit to clear |
80 | | - * @addr: Address to start counting from |
81 | | - * |
82 | | - * clear_bit() is atomic and may not be reordered. However, it does |
83 | | - * not contain a memory barrier, so if it is used for locking purposes, |
84 | | - * you should call smp_mb__before_atomic() and/or smp_mb__after_atomic() |
85 | | - * in order to ensure changes are visible on other processors. |
86 | | - */ |
87 | | -static inline void clear_bit(int nr, volatile unsigned long *addr) |
| 20 | +static inline void clear_bit(unsigned int nr, volatile unsigned long *p) |
88 | 21 | { |
89 | | - unsigned long mask = BIT_MASK(nr); |
90 | | - unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr); |
91 | | - unsigned long flags; |
92 | | - |
93 | | - _atomic_spin_lock_irqsave(p, flags); |
94 | | - *p &= ~mask; |
95 | | - _atomic_spin_unlock_irqrestore(p, flags); |
| 22 | + p += BIT_WORD(nr); |
| 23 | + atomic_long_andnot(BIT_MASK(nr), (atomic_long_t *)p); |
96 | 24 | } |
97 | 25 |
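The barrier caveat from the removed kerneldoc still applies to the new code: clear_bit() is atomic but implies no memory barrier, so ordering must be added explicitly when clearing the bit publishes earlier stores. A hedged sketch; struct my_work, WORK_BUSY and the field names are hypothetical:

#include <linux/bitops.h>

#define WORK_BUSY 0                     /* hypothetical bit number */

struct my_work {                        /* hypothetical structure */
        unsigned long flags;
        int result;
};

static void finish_work(struct my_work *w)
{
        w->result = 42;                  /* stand-in for the real work */
        smp_mb__before_atomic();         /* order the store above before the clear */
        clear_bit(WORK_BUSY, &w->flags); /* readers still need their own barrier */
}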
98 | | -/** |
99 | | - * change_bit - Toggle a bit in memory |
100 | | - * @nr: Bit to change |
101 | | - * @addr: Address to start counting from |
102 | | - * |
103 | | - * change_bit() is atomic and may not be reordered. It may be |
104 | | - * reordered on other architectures than x86. |
105 | | - * Note that @nr may be almost arbitrarily large; this function is not |
106 | | - * restricted to acting on a single-word quantity. |
107 | | - */ |
108 | | -static inline void change_bit(int nr, volatile unsigned long *addr) |
| 26 | +static inline void change_bit(unsigned int nr, volatile unsigned long *p) |
109 | 27 | { |
110 | | - unsigned long mask = BIT_MASK(nr); |
111 | | - unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr); |
112 | | - unsigned long flags; |
113 | | - |
114 | | - _atomic_spin_lock_irqsave(p, flags); |
115 | | - *p ^= mask; |
116 | | - _atomic_spin_unlock_irqrestore(p, flags); |
| 28 | + p += BIT_WORD(nr); |
| 29 | + atomic_long_xor(BIT_MASK(nr), (atomic_long_t *)p); |
117 | 30 | } |
118 | 31 |
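change_bit() is the unconditional toggle: one atomic XOR whose old value is discarded. A tiny hedged sketch; POLARITY and ctrl_flags are illustrative:

#include <linux/bitops.h>

#define POLARITY 3                      /* hypothetical bit number */
static unsigned long ctrl_flags;        /* hypothetical control word */

static void invert_polarity(void)
{
        /* racing callers compose: each call flips the bit exactly once */
        change_bit(POLARITY, &ctrl_flags);
}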
119 | | -/** |
120 | | - * test_and_set_bit - Set a bit and return its old value |
121 | | - * @nr: Bit to set |
122 | | - * @addr: Address to count from |
123 | | - * |
124 | | - * This operation is atomic and cannot be reordered. |
125 | | - * It may be reordered on other architectures than x86. |
126 | | - * It also implies a memory barrier. |
127 | | - */ |
128 | | -static inline int test_and_set_bit(int nr, volatile unsigned long *addr) |
| 32 | +static inline int test_and_set_bit(unsigned int nr, volatile unsigned long *p) |
129 | 33 | { |
| 34 | + long old; |
130 | 35 | unsigned long mask = BIT_MASK(nr); |
131 | | - unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr); |
132 | | - unsigned long old; |
133 | | - unsigned long flags; |
134 | 36 |
135 | | - _atomic_spin_lock_irqsave(p, flags); |
136 | | - old = *p; |
137 | | - *p = old | mask; |
138 | | - _atomic_spin_unlock_irqrestore(p, flags); |
| 37 | + p += BIT_WORD(nr); |
| 38 | + if (READ_ONCE(*p) & mask) |
| 39 | + return 1; |
139 | 40 |
140 | | - return (old & mask) != 0; |
| 41 | + old = atomic_long_fetch_or(mask, (atomic_long_t *)p); |
| 42 | + return !!(old & mask); |
141 | 43 | } |
142 | 44 |
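Note the new READ_ONCE() fast path: when the bit is already set, test_and_set_bit() reports that without issuing the atomic RMW at all, which can spare the cacheline an exclusive transfer in the common already-set case. A hedged sketch of the usual one-shot pattern; INIT_DONE, state and do_one_time_setup() are made up:

#include <linux/bitops.h>

#define INIT_DONE 0                     /* hypothetical bit number */
static unsigned long state;             /* hypothetical state word */

static void do_one_time_setup(void);    /* hypothetical helper */

static void maybe_init(void)
{
        /* only the caller that atomically flips 0 -> 1 does the setup;
         * later callers hit the READ_ONCE() fast path and bail out */
        if (test_and_set_bit(INIT_DONE, &state))
                return;

        do_one_time_setup();
}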
143 | | -/** |
144 | | - * test_and_clear_bit - Clear a bit and return its old value |
145 | | - * @nr: Bit to clear |
146 | | - * @addr: Address to count from |
147 | | - * |
148 | | - * This operation is atomic and cannot be reordered. |
149 | | - * It can be reordered on architectures other than x86. |
150 | | - * It also implies a memory barrier. |
151 | | - */ |
152 | | -static inline int test_and_clear_bit(int nr, volatile unsigned long *addr) |
| 45 | +static inline int test_and_clear_bit(unsigned int nr, volatile unsigned long *p) |
153 | 46 | { |
| 47 | + long old; |
154 | 48 | unsigned long mask = BIT_MASK(nr); |
155 | | - unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr); |
156 | | - unsigned long old; |
157 | | - unsigned long flags; |
158 | 49 |
159 | | - _atomic_spin_lock_irqsave(p, flags); |
160 | | - old = *p; |
161 | | - *p = old & ~mask; |
162 | | - _atomic_spin_unlock_irqrestore(p, flags); |
| 50 | + p += BIT_WORD(nr); |
| 51 | + if (!(READ_ONCE(*p) & mask)) |
| 52 | + return 0; |
163 | 53 |
164 | | - return (old & mask) != 0; |
| 54 | + old = atomic_long_fetch_andnot(mask, (atomic_long_t *)p); |
| 55 | + return !!(old & mask); |
165 | 56 | } |
166 | 57 |
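test_and_clear_bit() gets the mirror-image shortcut: a bit that is already clear is reported as such without touching the word atomically. A hedged sketch of the usual claim-pending-work pattern; the names are illustrative:

#include <linux/bitops.h>

#define NR_EVENTS 32                    /* illustrative */
static unsigned long pending;           /* hypothetical pending-event mask */

static void handle_event(unsigned int ev);      /* hypothetical handler */

static void drain_events(void)
{
        unsigned int ev;

        for (ev = 0; ev < NR_EVENTS; ev++) {
                /* each set bit is claimed by exactly one caller, even if
                 * drain_events() races with itself on another CPU */
                if (test_and_clear_bit(ev, &pending))
                        handle_event(ev);
        }
}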
167 | | -/** |
168 | | - * test_and_change_bit - Change a bit and return its old value |
169 | | - * @nr: Bit to change |
170 | | - * @addr: Address to count from |
171 | | - * |
172 | | - * This operation is atomic and cannot be reordered. |
173 | | - * It also implies a memory barrier. |
174 | | - */ |
175 | | -static inline int test_and_change_bit(int nr, volatile unsigned long *addr) |
| 58 | +static inline int test_and_change_bit(unsigned int nr, volatile unsigned long *p) |
176 | 59 | { |
| 60 | + long old; |
177 | 61 | unsigned long mask = BIT_MASK(nr); |
178 | | - unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr); |
179 | | - unsigned long old; |
180 | | - unsigned long flags; |
181 | | - |
182 | | - _atomic_spin_lock_irqsave(p, flags); |
183 | | - old = *p; |
184 | | - *p = old ^ mask; |
185 | | - _atomic_spin_unlock_irqrestore(p, flags); |
186 | 62 |
187 | | - return (old & mask) != 0; |
| 63 | + p += BIT_WORD(nr); |
| 64 | + old = atomic_long_fetch_xor(mask, (atomic_long_t *)p); |
| 65 | + return !!(old & mask); |
188 | 66 | } |
189 | 67 |
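test_and_change_bit() has no fast path: a toggle always modifies the word, so no current value lets the atomic XOR be skipped, and the old bit simply falls out of atomic_long_fetch_xor(). A small hedged sketch; ACTIVE_BUF and buf_state are made up:

#include <linux/bitops.h>

#define ACTIVE_BUF 0                    /* hypothetical bit: selects buffer 0 or 1 */
static unsigned long buf_state;         /* hypothetical state word */

static unsigned int flip_active_buffer(void)
{
        /* atomically flip and report which buffer was active before the flip */
        return test_and_change_bit(ACTIVE_BUF, &buf_state);
}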
190 | 68 | #endif /* _ASM_GENERIC_BITOPS_ATOMIC_H */ |