path: root/include/linux/percpu-rwsem.h
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_PERCPU_RWSEM_H
#define _LINUX_PERCPU_RWSEM_H

#include <linux/atomic.h>
#include <linux/rwsem.h>
#include <linux/percpu.h>
#include <linux/rcuwait.h>
#include <linux/rcu_sync.h>
#include <linux/lockdep.h>

struct percpu_rw_semaphore {
	struct rcu_sync		rss;
	unsigned int __percpu	*read_count;
	struct rw_semaphore	rw_sem; /* slowpath */
	struct rcuwait          writer; /* blocked writer */
	int			readers_block;
};

#define DEFINE_STATIC_PERCPU_RWSEM(name)				\
static DEFINE_PER_CPU(unsigned int, __percpu_rwsem_rc_##name);		\
static struct percpu_rw_semaphore name = {				\
	.rss = __RCU_SYNC_INITIALIZER(name.rss, RCU_SCHED_SYNC),	\
	.read_count = &__percpu_rwsem_rc_##name,			\
	.rw_sem = __RWSEM_INITIALIZER(name.rw_sem),			\
	.writer = __RCUWAIT_INITIALIZER(name.writer),			\
}
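
/*
 * Usage sketch (illustrative, not part of this header): a file-scope
 * semaphore defined with the macro above.  The name "my_subsys_rwsem"
 * is hypothetical.
 *
 *	DEFINE_STATIC_PERCPU_RWSEM(my_subsys_rwsem);
 *
 * The macro expands to a static per-CPU read counter plus a statically
 * initialized struct percpu_rw_semaphore, so the static form needs no
 * percpu_init_rwsem() call at runtime.
 */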

extern int __percpu_down_read(struct percpu_rw_semaphore *, int);
extern void __percpu_up_read(struct percpu_rw_semaphore *);

static inline void percpu_down_read_preempt_disable(struct percpu_rw_semaphore *sem)
{
	might_sleep();

	rwsem_acquire_read(&sem->rw_sem.dep_map, 0, 0, _RET_IP_);

	preempt_disable();
	/*
	 * We are in an RCU-sched read-side critical section, so the writer
	 * cannot both change sem->state from readers_fast and start checking
	 * counters while we are here. So if we see !sem->state, we know that
	 * the writer won't be checking until we're past the preempt_enable()
	 * and that once the synchronize_sched() is done, the writer will see
	 * anything we did within this RCU-sched read-side critical section.
	 */
	__this_cpu_inc(*sem->read_count);
	if (unlikely(!rcu_sync_is_idle(&sem->rss)))
		__percpu_down_read(sem, false); /* Unconditional memory barrier */
	barrier();
	/*
	 * The barrier() prevents the compiler from
	 * bleeding the critical section out.
	 */
}

static inline void percpu_down_read(struct percpu_rw_semaphore *sem)
{
	percpu_down_read_preempt_disable(sem);
	preempt_enable();
}

static inline int percpu_down_read_trylock(struct percpu_rw_semaphore *sem)
{
	int ret = 1;

	preempt_disable();
	/*
	 * Same as in percpu_down_read().
	 */
	__this_cpu_inc(*sem->read_count);
	if (unlikely(!rcu_sync_is_idle(&sem->rss)))
		ret = __percpu_down_read(sem, true); /* Unconditional memory barrier */
	preempt_enable();
	/*
	 * The barrier() from preempt_enable() prevents the compiler from
	 * bleeding the critical section out.
	 */

	if (ret)
		rwsem_acquire_read(&sem->rw_sem.dep_map, 0, 1, _RET_IP_);

	return ret;
}
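
/*
 * Illustrative trylock sketch (assumed caller-side code, not part of this
 * header): take the read side opportunistically and back off on failure.
 *
 *	if (percpu_down_read_trylock(&my_subsys_rwsem)) {
 *		... read-side critical section ...
 *		percpu_up_read(&my_subsys_rwsem);
 *	} else {
 *		... fall back, e.g. retry later ...
 *	}
 */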

static inline void percpu_up_read_preempt_enable(struct percpu_rw_semaphore *sem)
{
	/*
	 * The barrier() prevents the compiler from
	 * bleeding the critical section out.
	 */
	barrier();
	/*
	 * Same as in percpu_down_read().
	 */
	if (likely(rcu_sync_is_idle(&sem->rss)))
		__this_cpu_dec(*sem->read_count);
	else
		__percpu_up_read(sem); /* Unconditional memory barrier */
	preempt_enable();

	rwsem_release(&sem->rw_sem.dep_map, 1, _RET_IP_);
}

static inline void percpu_up_read(struct percpu_rw_semaphore *sem)
{
	preempt_disable();
	percpu_up_read_preempt_enable(sem);
}
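
/*
 * Illustrative reader sketch (assumed caller-side code): the common
 * sleepable read-side pairing.  Readers stay on the per-CPU fast path
 * unless a writer has forced the rcu_sync state out of idle.
 *
 *	percpu_down_read(&my_subsys_rwsem);
 *	... read-side critical section, may sleep ...
 *	percpu_up_read(&my_subsys_rwsem);
 */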

extern void percpu_down_write(struct percpu_rw_semaphore *);
extern void percpu_up_write(struct percpu_rw_semaphore *);
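
/*
 * Illustrative writer sketch (assumed caller-side code): the write side
 * is exclusive and waits for all per-CPU readers to drain.
 *
 *	percpu_down_write(&my_subsys_rwsem);
 *	... exclusive section ...
 *	percpu_up_write(&my_subsys_rwsem);
 */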

extern int __percpu_init_rwsem(struct percpu_rw_semaphore *,
				const char *, struct lock_class_key *);

extern void percpu_free_rwsem(struct percpu_rw_semaphore *);

#define percpu_init_rwsem(sem)					\
({								\
	static struct lock_class_key rwsem_key;			\
	__percpu_init_rwsem(sem, #sem, &rwsem_key);		\
})
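
/*
 * Illustrative dynamic-init sketch (assumed caller-side code): semaphores
 * embedded in dynamically allocated objects are initialized at runtime
 * and must free their per-CPU counter when done.  "obj" is hypothetical.
 *
 *	int ret = percpu_init_rwsem(&obj->rwsem);
 *
 *	if (ret)
 *		return ret;
 *	...
 *	percpu_free_rwsem(&obj->rwsem);
 */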

#define percpu_rwsem_is_held(sem) lockdep_is_held(&(sem)->rw_sem)

#define percpu_rwsem_assert_held(sem)				\
	lockdep_assert_held(&(sem)->rw_sem)
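
/*
 * Illustrative lockdep sketch (assumed caller-side code): annotate a
 * helper that must only run with the semaphore held.
 *
 *	static void my_helper(void)
 *	{
 *		percpu_rwsem_assert_held(&my_subsys_rwsem);
 *		...
 *	}
 */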

static inline void percpu_rwsem_release(struct percpu_rw_semaphore *sem,
					bool read, unsigned long ip)
{
	lock_release(&sem->rw_sem.dep_map, 1, ip);
#ifdef CONFIG_RWSEM_SPIN_ON_OWNER
	if (!read)
		sem->rw_sem.owner = RWSEM_OWNER_UNKNOWN;
#endif
}

static inline void percpu_rwsem_acquire(struct percpu_rw_semaphore *sem,
					bool read, unsigned long ip)
{
	lock_acquire(&sem->rw_sem.dep_map, 0, 1, read, 1, NULL, ip);
#ifdef CONFIG_RWSEM_SPIN_ON_OWNER
	if (!read)
		sem->rw_sem.owner = current;
#endif
}
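
/*
 * percpu_rwsem_release()/percpu_rwsem_acquire() hand the lockdep state
 * (and, with CONFIG_RWSEM_SPIN_ON_OWNER, the recorded owner) across a
 * context switch, for cases where a lock acquired in one task is released
 * from another.  Illustrative sketch (assumed caller-side code):
 *
 *	percpu_down_write(&my_subsys_rwsem);
 *	...
 *	percpu_rwsem_release(&my_subsys_rwsem, false, _THIS_IP_);
 *	[ later, possibly in a different task ]
 *	percpu_rwsem_acquire(&my_subsys_rwsem, false, _THIS_IP_);
 *	...
 *	percpu_up_write(&my_subsys_rwsem);
 */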

#endif	/* _LINUX_PERCPU_RWSEM_H */