DPDK 19.11.6
rte_mcslock.h
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2019 Arm Limited
 */

#ifndef _RTE_MCSLOCK_H_
#define _RTE_MCSLOCK_H_

#include <rte_lcore.h>
#include <rte_common.h>
#include <rte_pause.h>

typedef struct rte_mcslock {
        struct rte_mcslock *next;
        int locked; /* 1 if the queue is locked, 0 otherwise */
} rte_mcslock_t;

__rte_experimental
static inline void
rte_mcslock_lock(rte_mcslock_t **msl, rte_mcslock_t *me)
{
        rte_mcslock_t *prev;

        /* Init me node */
        __atomic_store_n(&me->locked, 1, __ATOMIC_RELAXED);
        __atomic_store_n(&me->next, NULL, __ATOMIC_RELAXED);

        /* If the queue is empty, the exchange operation is enough to acquire
         * the lock. Hence, the exchange operation requires acquire semantics.
         * The store to me->next above should complete before the node is
         * visible to other CPUs/threads. Hence, the exchange operation requires
         * release semantics as well.
         */
        prev = __atomic_exchange_n(msl, me, __ATOMIC_ACQ_REL);
        if (likely(prev == NULL)) {
                /* Queue was empty, no further action required,
                 * proceed with lock taken.
                 */
                return;
        }
        /* The store to me->next above should also complete before the node is
         * visible to the predecessor thread releasing the lock. Hence, the
         * store to prev->next also requires release semantics. Note that, for
         * example, on ARM, the release semantics in the exchange operation is
         * not as strong as a release fence and is not sufficient to enforce
         * the desired order here.
         */
        __atomic_store_n(&prev->next, me, __ATOMIC_RELEASE);

        /* The while-load of me->locked should not move above the previous
         * store to prev->next. Otherwise it will cause a deadlock. Need a
         * store-load barrier.
         */
        __atomic_thread_fence(__ATOMIC_ACQ_REL);
        /* If the lock has already been acquired, the node has been atomically
         * placed at the end of the queue above; proceed to spin on me->locked
         * until the previous lock holder resets it in rte_mcslock_unlock().
         */
        while (__atomic_load_n(&me->locked, __ATOMIC_ACQUIRE))
                rte_pause();
}
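
/* Editorial usage sketch, not part of the original header: the caller owns
 * the queue node and must keep it valid until the matching unlock. The names
 * below (mcs_tail, me) are illustrative assumptions, not API names:
 *
 *      rte_mcslock_t *mcs_tail = NULL; // shared lock, NULL when free
 *      rte_mcslock_t me;               // per-thread node, e.g. on the stack
 *
 *      rte_mcslock_lock(&mcs_tail, &me);
 *      // ... critical section ...
 *      rte_mcslock_unlock(&mcs_tail, &me);
 */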

__rte_experimental
static inline void
rte_mcslock_unlock(rte_mcslock_t **msl, rte_mcslock_t *me)
{
        /* Check if there are more nodes in the queue. */
        if (likely(__atomic_load_n(&me->next, __ATOMIC_RELAXED) == NULL)) {
                /* No, this is the last member in the queue. */
                rte_mcslock_t *save_me = __atomic_load_n(&me, __ATOMIC_RELAXED);

                /* Release the lock by setting it to NULL */
                if (likely(__atomic_compare_exchange_n(msl, &save_me, NULL, 0,
                                __ATOMIC_RELEASE, __ATOMIC_RELAXED)))
                        return;

                /* Speculative execution would be allowed to read me->next in
                 * the while-loop first. This has the potential to cause a
                 * deadlock. Need a load barrier.
                 */
                __atomic_thread_fence(__ATOMIC_ACQUIRE);
                /* More nodes added to the queue by other CPUs.
                 * Wait until the next pointer is set.
                 */
                while (__atomic_load_n(&me->next, __ATOMIC_RELAXED) == NULL)
                        rte_pause();
        }

        /* Pass lock to next waiter. */
        __atomic_store_n(&me->next->locked, 0, __ATOMIC_RELEASE);
}

__rte_experimental
static inline int
rte_mcslock_trylock(rte_mcslock_t **msl, rte_mcslock_t *me)
{
        /* Init me node */
        __atomic_store_n(&me->next, NULL, __ATOMIC_RELAXED);

        /* Try to lock */
        rte_mcslock_t *expected = NULL;

        /* The lock can be taken only when the queue is empty. Hence,
         * the compare-exchange operation requires acquire semantics.
         * The store to me->next above should complete before the node
         * is visible to other CPUs/threads. Hence, the compare-exchange
         * operation requires release semantics as well.
         */
        return __atomic_compare_exchange_n(msl, &expected, me, 0,
                        __ATOMIC_ACQ_REL, __ATOMIC_RELAXED);
}
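
/* Editorial usage sketch, not from the original header: trylock returns
 * nonzero on success and does not spin, so a non-blocking attempt (with the
 * illustrative names from the sketch above) looks like:
 *
 *      if (rte_mcslock_trylock(&mcs_tail, &me)) {
 *              // ... critical section ...
 *              rte_mcslock_unlock(&mcs_tail, &me);
 *      }
 */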

__rte_experimental
static inline int
rte_mcslock_is_locked(rte_mcslock_t *msl)
{
        return (__atomic_load_n(&msl, __ATOMIC_RELAXED) != NULL);
}

#endif /* _RTE_MCSLOCK_H_ */
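
For context, a minimal sketch of how this API is typically exercised on DPDK
19.11: every lcore supplies its own queue node and passes the same node to the
matching unlock. The scaffolding below (worker, mcs_tail, count) is an
illustrative assumption, not part of this header, and because the functions
are experimental the build needs ALLOW_EXPERIMENTAL_API defined (normally via
CFLAGS).

#define ALLOW_EXPERIMENTAL_API  /* rte_mcslock_* are __rte_experimental */

#include <stdio.h>
#include <inttypes.h>

#include <rte_eal.h>
#include <rte_launch.h>
#include <rte_lcore.h>
#include <rte_mcslock.h>

static rte_mcslock_t *mcs_tail; /* shared tail pointer, NULL when unlocked */
static uint64_t count;          /* shared state protected by the MCS lock */

static int
worker(__rte_unused void *arg)
{
        rte_mcslock_t me;       /* per-lcore queue node, owned by this thread */
        int i;

        for (i = 0; i < 10000; i++) {
                rte_mcslock_lock(&mcs_tail, &me);
                count++;        /* critical section */
                rte_mcslock_unlock(&mcs_tail, &me);
        }
        return 0;
}

int
main(int argc, char **argv)
{
        if (rte_eal_init(argc, argv) < 0)
                return -1;

        /* Run worker on every lcore, including the master (19.11 naming). */
        rte_eal_mp_remote_launch(worker, NULL, CALL_MASTER);
        rte_eal_mp_wait_lcore();

        printf("count = %" PRIu64 "\n", count);
        return 0;
}

Note that each node lives on the worker's stack, so it must not go out of
scope while still linked into the queue; pairing lock and unlock within the
same function guarantees that.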