linux-zen-server/arch/arc/include/asm/atomic-spinlock.h

/* SPDX-License-Identifier: GPL-2.0-only */
#ifndef _ASM_ARC_ATOMIC_SPLOCK_H
#define _ASM_ARC_ATOMIC_SPLOCK_H

/*
 * Non-hardware-assisted atomic R-M-W
 * Locking amounts to irq-disabling alone (UP) or a spinlock (SMP)
 */
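
/*
 * A rough sketch of the locking primitives this file relies on; the
 * real definitions live elsewhere (e.g. arch/arc/include/asm/smp.h),
 * so treat the bodies below as illustrative, not authoritative:
 *
 *	// UP: disabling interrupts alone makes the R-M-W atomic
 *	#define atomic_ops_lock(flags)		local_irq_save(flags)
 *	#define atomic_ops_unlock(flags)	local_irq_restore(flags)
 *
 *	// SMP: additionally serialize against other CPUs via a
 *	// global arch spinlock
 *	#define atomic_ops_lock(flags)	do {			\
 *		local_irq_save(flags);				\
 *		arch_spin_lock(&smp_atomic_ops_lock);		\
 *	} while (0)
 */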
static inline void arch_atomic_set(atomic_t *v, int i)
{
	/*
	 * Independent of hardware support, all of the atomic_xxx() APIs need
	 * to follow the same locking rules to make sure that a "hardware"
	 * atomic insn (e.g. LD) doesn't clobber an "emulated" atomic insn
	 * sequence.
	 *
	 * Thus atomic_set(), despite being a single insn (and seemingly
	 * atomic), requires the locking.
	 */
	unsigned long flags;

	atomic_ops_lock(flags);
	WRITE_ONCE(v->counter, i);
	atomic_ops_unlock(flags);
}

#define arch_atomic_set_release(v, i) arch_atomic_set((v), (i))
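
/*
 * The plain set doubles as the _release variant: per the comments in
 * the R-M-W helpers below, atomic_ops_lock()/unlock() already provide
 * the needed smp_mb() before/after, so no extra ordering is required.
 */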

#define ATOMIC_OP(op, c_op, asm_op)					\
static inline void arch_atomic_##op(int i, atomic_t *v)		\
{									\
	unsigned long flags;						\
									\
	atomic_ops_lock(flags);						\
	v->counter c_op i;						\
	atomic_ops_unlock(flags);					\
}
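
/*
 * For illustration, ATOMIC_OP(add, +=, add) (instantiated through
 * ATOMIC_OPS() further down) expands to:
 *
 *	static inline void arch_atomic_add(int i, atomic_t *v)
 *	{
 *		unsigned long flags;
 *
 *		atomic_ops_lock(flags);
 *		v->counter += i;
 *		atomic_ops_unlock(flags);
 *	}
 */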

#define ATOMIC_OP_RETURN(op, c_op, asm_op)				\
static inline int arch_atomic_##op##_return(int i, atomic_t *v)	\
{									\
	unsigned long flags;						\
	unsigned int temp;						\
									\
	/*								\
	 * spin lock/unlock provides the needed smp_mb() before/after	\
	 */								\
	atomic_ops_lock(flags);						\
	temp = v->counter;						\
	temp c_op i;							\
	v->counter = temp;						\
	atomic_ops_unlock(flags);					\
									\
	return temp;							\
}

#define ATOMIC_FETCH_OP(op, c_op, asm_op)				\
static inline int arch_atomic_fetch_##op(int i, atomic_t *v)		\
{									\
	unsigned long flags;						\
	unsigned int orig;						\
									\
	/*								\
	 * spin lock/unlock provides the needed smp_mb() before/after	\
	 */								\
	atomic_ops_lock(flags);						\
	orig = v->counter;						\
	v->counter c_op i;						\
	atomic_ops_unlock(flags);					\
									\
	return orig;							\
}
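
/*
 * Note the return-value difference between the two flavours above.
 * With a hypothetical v->counter of 5 in each case:
 *
 *	arch_atomic_add_return(3, v);	// returns 8, the new value
 *	arch_atomic_fetch_add(3, v);	// returns 5, the original value
 *
 * Both leave v->counter == 8.
 */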

#define ATOMIC_OPS(op, c_op, asm_op)					\
	ATOMIC_OP(op, c_op, asm_op)					\
	ATOMIC_OP_RETURN(op, c_op, asm_op)				\
	ATOMIC_FETCH_OP(op, c_op, asm_op)

ATOMIC_OPS(add, +=, add)
ATOMIC_OPS(sub, -=, sub)

#undef ATOMIC_OPS
#define ATOMIC_OPS(op, c_op, asm_op)					\
	ATOMIC_OP(op, c_op, asm_op)					\
	ATOMIC_FETCH_OP(op, c_op, asm_op)

ATOMIC_OPS(and, &=, and)
ATOMIC_OPS(andnot, &= ~, bic)
ATOMIC_OPS(or, |=, or)
ATOMIC_OPS(xor, ^=, xor)
#define arch_atomic_andnot arch_atomic_andnot
#define arch_atomic_fetch_andnot arch_atomic_fetch_andnot
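
/*
 * The two defines above advertise to the generic atomic layer that
 * this arch supplies its own andnot variants, so the generic fallback
 * (roughly arch_atomic_and(~i, v)) is not generated on top of them.
 */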
#undef ATOMIC_OPS
#undef ATOMIC_FETCH_OP
#undef ATOMIC_OP_RETURN
#undef ATOMIC_OP
#endif /* _ASM_ARC_ATOMIC_SPLOCK_H */