Skip to content

Commit 3ab2ee3

Browse files
committed
locking/rwsem: Always try to wake waiters in out_nolock path
jira LE-1907
Rebuild_History Non-Buildable kernel-rt-5.14.0-284.30.1.rt14.315.el9_2
commit-author Waiman Long <longman@redhat.com>
commit 1ee3261
Empty-Commit: Cherry-Pick Conflicts during history rebuild. Will be included in final tarball splat.
Ref for failed cherry-pick at: ciq/ciq_backports/kernel-rt-5.14.0-284.30.1.rt14.315.el9_2/1ee32619.failed

For writers, the out_nolock path will always attempt to wake up waiters. This may not be really necessary if the waiter to be removed is not the first one.

For readers, no attempt to wake up a waiter is being made. However, if the HANDOFF bit is set and the reader to be removed is the first waiter, the waiter behind it will inherit the HANDOFF bit, and for a write-lock waiter, waking it up will allow it to spin on the lock to acquire it faster. So it can be beneficial to do a wakeup in this case.

Add a new rwsem_del_wake_waiter() helper function to do that consistently for both reader and writer out_nolock paths.

Signed-off-by: Waiman Long <longman@redhat.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Link: https://lkml.kernel.org/r/20220322152059.2182333-4-longman@redhat.com
(cherry picked from commit 1ee3261)
Signed-off-by: Jonathan Maple <jmaple@ciq.com>

# Conflicts:
#	kernel/locking/rwsem.c
1 parent fd137e9 commit 3ab2ee3

File tree

1 file changed

+115
-0
lines changed

1 file changed

+115
-0
lines changed
Lines changed: 115 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,115 @@
1+
locking/rwsem: Always try to wake waiters in out_nolock path
2+
3+
jira LE-1907
4+
Rebuild_History Non-Buildable kernel-rt-5.14.0-284.30.1.rt14.315.el9_2
5+
commit-author Waiman Long <longman@redhat.com>
6+
commit 1ee326196c66583006b0c95356a4b7dc51bf3531
7+
Empty-Commit: Cherry-Pick Conflicts during history rebuild.
8+
Will be included in final tarball splat. Ref for failed cherry-pick at:
9+
ciq/ciq_backports/kernel-rt-5.14.0-284.30.1.rt14.315.el9_2/1ee32619.failed
10+
11+
For writers, the out_nolock path will always attempt to wake up waiters.
12+
This may not be really necessary if the waiter to be removed is not the
13+
first one.
14+
15+
For readers, no attempt to wake up waiter is being made. However, if
16+
the HANDOFF bit is set and the reader to be removed is the first waiter,
17+
the waiter behind it will inherit the HANDOFF bit and for a write lock
18+
waiter waking it up will allow it to spin on the lock to acquire it
19+
faster. So it can be beneficial to do a wakeup in this case.
20+
21+
Add a new rwsem_del_wake_waiter() helper function to do that consistently
22+
for both reader and writer out_nolock paths.
23+
24+
Signed-off-by: Waiman Long <longman@redhat.com>
25+
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
26+
Link: https://lkml.kernel.org/r/20220322152059.2182333-4-longman@redhat.com
27+
(cherry picked from commit 1ee326196c66583006b0c95356a4b7dc51bf3531)
28+
Signed-off-by: Jonathan Maple <jmaple@ciq.com>
29+
30+
# Conflicts:
31+
# kernel/locking/rwsem.c
32+
diff --cc kernel/locking/rwsem.c
33+
index 186ad9eda88d,16b532bb5b92..000000000000
34+
--- a/kernel/locking/rwsem.c
35+
+++ b/kernel/locking/rwsem.c
36+
@@@ -373,6 -362,34 +373,37 @@@ enum writer_wait_state
37+
*/
38+
#define MAX_READERS_WAKEUP 0x100
39+
40+
++<<<<<<< HEAD
41+
++=======
42+
+ static inline void
43+
+ rwsem_add_waiter(struct rw_semaphore *sem, struct rwsem_waiter *waiter)
44+
+ {
45+
+ lockdep_assert_held(&sem->wait_lock);
46+
+ list_add_tail(&waiter->list, &sem->wait_list);
47+
+ /* caller will set RWSEM_FLAG_WAITERS */
48+
+ }
49+
+
50+
+ /*
51+
+ * Remove a waiter from the wait_list and clear flags.
52+
+ *
53+
+ * Both rwsem_mark_wake() and rwsem_try_write_lock() contain a full 'copy' of
54+
+ * this function. Modify with care.
55+
+ *
56+
+ * Return: true if wait_list isn't empty and false otherwise
57+
+ */
58+
+ static inline bool
59+
+ rwsem_del_waiter(struct rw_semaphore *sem, struct rwsem_waiter *waiter)
60+
+ {
61+
+ lockdep_assert_held(&sem->wait_lock);
62+
+ list_del(&waiter->list);
63+
+ if (likely(!list_empty(&sem->wait_list)))
64+
+ return true;
65+
+
66+
+ atomic_long_andnot(RWSEM_FLAG_HANDOFF | RWSEM_FLAG_WAITERS, &sem->count);
67+
+ return false;
68+
+ }
69+
+
70+
++>>>>>>> 1ee326196c66 (locking/rwsem: Always try to wake waiters in out_nolock path)
71+
/*
72+
* handle the lock release when processes blocked on it that can now run
73+
* - if we come here from up_xxxx(), then the RWSEM_FLAG_WAITERS bit must
74+
@@@ -1013,12 -1080,7 +1071,16 @@@ queue
75+
return sem;
76+
77+
out_nolock:
78+
++<<<<<<< HEAD
79+
+ list_del(&waiter.list);
80+
+ if (list_empty(&sem->wait_list)) {
81+
+ atomic_long_andnot(RWSEM_FLAG_WAITERS|RWSEM_FLAG_HANDOFF,
82+
+ &sem->count);
83+
+ }
84+
+ raw_spin_unlock_irq(&sem->wait_lock);
85+
++=======
86+
+ rwsem_del_wake_waiter(sem, &waiter, &wake_q);
87+
++>>>>>>> 1ee326196c66 (locking/rwsem: Always try to wake waiters in out_nolock path)
88+
__set_current_state(TASK_RUNNING);
89+
lockevent_inc(rwsem_rlock_fail);
90+
return ERR_PTR(-EINTR);
91+
@@@ -1151,19 -1176,8 +1212,23 @@@ trylock_again
92+
out_nolock:
93+
__set_current_state(TASK_RUNNING);
94+
raw_spin_lock_irq(&sem->wait_lock);
95+
++<<<<<<< HEAD
96+
+ list_del(&waiter.list);
97+
+
98+
+ if (unlikely(wstate == WRITER_HANDOFF))
99+
+ atomic_long_add(-RWSEM_FLAG_HANDOFF, &sem->count);
100+
+
101+
+ if (list_empty(&sem->wait_list))
102+
+ atomic_long_andnot(RWSEM_FLAG_WAITERS, &sem->count);
103+
+ else
104+
+ rwsem_mark_wake(sem, RWSEM_WAKE_ANY, &wake_q);
105+
+ raw_spin_unlock_irq(&sem->wait_lock);
106+
+ wake_up_q(&wake_q);
107+
++=======
108+
+ rwsem_del_wake_waiter(sem, &waiter, &wake_q);
109+
++>>>>>>> 1ee326196c66 (locking/rwsem: Always try to wake waiters in out_nolock path)
110+
lockevent_inc(rwsem_wlock_fail);
111+
+
112+
return ERR_PTR(-EINTR);
113+
}
114+
115+
* Unmerged path kernel/locking/rwsem.c

0 commit comments

Comments (0)