Commit 3a79a19

smp: avoid redundancy

Signed-off-by: Axel Heider <axelheider@gmx.de>
axel-h committed Nov 10, 2024
1 parent 29a9ec3

Showing 1 changed file with 6 additions and 5 deletions.

include/smp/lock.h
@@ -59,8 +59,9 @@ static inline bool_t FORCE_INLINE clh_is_ipi_pending(word_t cpu)
 
 static inline void FORCE_INLINE clh_lock_acquire(word_t cpu, bool_t irqPath)
 {
-    big_kernel_lock.node_owners[cpu].node->value = CLHState_Pending;
-
+    clh_qnode_p_t volatile *node_owner = &big_kernel_lock.node_owners[cpu];
+    node_owner->node->value = CLHState_Pending;
+
     __atomic_thread_fence(__ATOMIC_RELEASE); /* writes must finish */
     /* Unfortunately, the compiler builtin __atomic_exchange_n() cannot be used
      * here, because some architectures lack an actual atomic swap instruction
@@ -73,15 +74,15 @@ static inline void FORCE_INLINE clh_lock_acquire(word_t cpu, bool_t irqPath)
      * untouched.
      */
     while (!try_arch_atomic_exchange_rlx(&big_kernel_lock.head,
-                                         big_kernel_lock.node_owners[cpu].node,
-                                         &big_kernel_lock.node_owners[cpu].next)) {
+                                         node_owner->node,
+                                         &node_owner->next)) {
         /* busy waiting */
     }
     __atomic_thread_fence(__ATOMIC_ACQUIRE); /* prevent reads before passing here */
 
     /* We do not have an __atomic_thread_fence here as this is already handled by the
      * atomic_exchange just above */
-    while (big_kernel_lock.node_owners[cpu].next->value != CLHState_Granted) {
+    while (node_owner->next->value != CLHState_Granted) {
         /* As we are in a loop we need to ensure that any loads of future iterations of the
          * loop are performed after this one */
         __atomic_thread_fence(__ATOMIC_ACQUIRE);
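A note on the retry loop in the hunk above: the in-source comment explains that
__atomic_exchange_n() cannot be used because some architectures only provide
load-exclusive/store-exclusive (LL/SC) pairs rather than a true atomic swap
instruction, so an exchange attempt can fail with memory left untouched. The
sketch below shows what such a single-attempt exchange could look like on
AArch64. It is an illustration only, not seL4's actual per-architecture
implementation; the helper name try_llsc_exchange_rlx is hypothetical.

/* One LL/SC exchange attempt with relaxed ordering (hypothetical sketch).
 * Returns true if the store-exclusive succeeded; on failure the target
 * memory is unchanged and the caller is expected to retry in a loop,
 * mirroring the busy-wait around try_arch_atomic_exchange_rlx above.
 */
static inline _Bool try_llsc_exchange_rlx(void *volatile *ptr,
                                          void *new_val, void **prev)
{
    unsigned int failed;
    void *old;
    asm volatile(
        "ldxr %0, [%2]\n"      /* load-exclusive: read current value */
        "stxr %w1, %3, [%2]\n" /* store-exclusive: status 0 = success, */
                               /* 1 = exclusive monitor was lost       */
        : "=&r"(old), "=&r"(failed)
        : "r"(ptr), "r"(new_val)
        : "memory");
    *prev = old; /* value observed by the load-exclusive */
    return !failed;
}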
