libcoap 4.3.5rc1 — coap_threadsafe_internal.h (documentation extract)
/*
 * coap_threadsafe_internal.h -- Mapping of threadsafe functions
 *
 * Copyright (C) 2023-2024 Jon Shallow <supjps-libcoap@jpshallow.com>
 *
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * This file is part of the CoAP library libcoap. Please see README for terms
 * of use.
 */
11
17#ifndef COAP_THREADSAFE_INTERNAL_H_
18#define COAP_THREADSAFE_INTERNAL_H_
19
/*
 * Support thread safe access into libcoap
 *
 * Locking at different component levels (i.e. context and session) is
 * problematic in that coap_process_io() needs to lock the context as
 * it scans for all the sessions and then could lock the session being
 * processed as well - but context needs to remain locked as a list is
 * being scanned.
 *
 * Then if the session process needs to update context (e.g. delayqueue),
 * context needs to be locked. So, if coap_send() is done on a session,
 * it has to be locked, but a retransmission of a PDU by coap_process_io()
 * has the context already locked.
 *
 * So the initial support for thread safe is done at the context level.
 *
 * Any public API call needs to potentially lock context, as there may be
 * multiple contexts. If a public API needs thread safe protection, the
 * coap_X() function locks the context lock, calls the coap_X_lkd() function
 * that does all the work and on return unlocks the context before returning
 * to the caller of coap_X().
 *
 * Any internal libcoap calls that are to the public API coap_X() must call
 * coap_X_lkd() if the calling code is already locked.
 *
 * Any call-back into app space must be done by using the coap_lock_callback()
 * (or coap_lock_callback_ret()) wrapper where the context remains locked.
 *
 * Note:
 * libcoap may call a handler, which may in turn call into libcoap, which may
 * then call a handler. context will remain locked throughout this process
 * by the same thread.
 *
 * Alternatively, coap_lock_callback_release() (or
 * coap_lock_callback_ret_release()), is used where the context is unlocked
 * for the duration of the call-back. Used for things like a request
 * handler which could be busy for some time.
 *
 * Note: On return from the call-back, the code has to be careful not to
 * use memory locations that may have been updated in the call-back by
 * calling a Public API.
 *
 * Any wait on select() or equivalent when a thread is waiting on an event
 * must be preceded by unlock context, and then context re-locked after
 * return.
 *
 * To check for recursive deadlock coding errors, COAP_THREAD_RECURSIVE_CHECK
 * needs to be defined.
 *
 * If thread safe is not enabled, then locking of the context does not take
 * place.
 */
72#if COAP_THREAD_SAFE
73# if COAP_THREAD_RECURSIVE_CHECK
74
75/*
76 * Locking, with deadlock detection
77 */
78typedef struct coap_lock_t {
79 coap_mutex_t mutex;
81 coap_thread_pid_t freeing_pid;
82 const char *lock_file;
83 unsigned int lock_line;
84 unsigned int unlock_line;
85 const char *unlock_file;
86 const char *callback_file;
87 unsigned int callback_line;
88 unsigned int being_freed;
89 unsigned int in_callback;
90 unsigned int lock_count;
92
102void coap_lock_unlock_func(coap_lock_t *lock, const char *file, int line);
103
117int coap_lock_lock_func(coap_lock_t *lock, int force, const char *file, int line);
118
/*
 * coap_lock_lock(c, failed)
 *
 * Lock context 'c'.  If the lock cannot be obtained, the 'failed'
 * statement(s) are executed.  Caller location is recorded for diagnostics.
 */
#define coap_lock_lock(c,failed) do { \
    assert(c); \
    if (!coap_lock_lock_func(&(c)->lock, 0, __FILE__, __LINE__)) { \
      failed; \
    } \
  } while (0)
140
/*
 * coap_lock_unlock(c)
 *
 * Unlock context 'c', recording the caller location for diagnostics.
 */
#define coap_lock_unlock(c) do { \
    assert(c); \
    coap_lock_unlock_func(&(c)->lock, __FILE__, __LINE__); \
  } while (0)
152
/*
 * coap_lock_callback(c, func)
 *
 * Invoke the application call-back 'func' with context 'c' kept locked.
 * If this same thread is in the middle of freeing the context, the lock
 * is forcibly re-taken for the duration of the call-back and released
 * afterwards.  Call-back nesting depth and location are tracked for
 * deadlock diagnostics.
 */
#define coap_lock_callback(c,func) do { \
    int being_freed = (c)->lock.being_freed && coap_thread_pid == (c)->lock.freeing_pid; \
    if (being_freed) { \
      coap_lock_lock_func(&(c)->lock, 1, __FILE__, __LINE__); \
    } else { \
      coap_lock_check_locked(c); \
    } \
    (c)->lock.in_callback++; \
    (c)->lock.callback_file = __FILE__; \
    (c)->lock.callback_line = __LINE__; \
    func; \
    (c)->lock.in_callback--; \
    if (being_freed) { \
      coap_lock_unlock_func(&(c)->lock, __FILE__, __LINE__); \
    } \
  } while (0)
178
/*
 * coap_lock_callback_ret(r, c, func)
 *
 * As coap_lock_callback(), but the call-back's return value is stored
 * in 'r'.  The context remains locked across the call-back.
 */
#define coap_lock_callback_ret(r,c,func) do { \
    int being_freed = (c)->lock.being_freed && coap_thread_pid == (c)->lock.freeing_pid; \
    if (being_freed) { \
      coap_lock_lock_func(&(c)->lock, 1, __FILE__, __LINE__); \
    } else { \
      coap_lock_check_locked(c); \
    } \
    (c)->lock.in_callback++; \
    (c)->lock.callback_file = __FILE__; \
    (c)->lock.callback_line = __LINE__; \
    (r) = func; \
    (c)->lock.in_callback--; \
    if (being_freed) { \
      coap_lock_unlock_func(&(c)->lock, __FILE__, __LINE__); \
    } \
  } while (0)
205
/*
 * coap_lock_callback_release(c, func, failed)
 *
 * Invoke the application call-back 'func' with context 'c' UNLOCKED for
 * the duration of the call (used for potentially long-running handlers).
 * On return the context is re-locked; if re-locking fails, the 'failed'
 * statement(s) are executed.  If this thread is freeing the context, no
 * unlock/relock is done.
 */
#define coap_lock_callback_release(c,func,failed) do { \
    int being_freed = (c)->lock.being_freed && coap_thread_pid == (c)->lock.freeing_pid; \
    if (!being_freed) { \
      coap_lock_check_locked(c); \
      coap_lock_unlock(c); \
      func; \
      coap_lock_lock(c,failed); \
    } else { \
      func; \
    } \
  } while (0)
227
/*
 * coap_lock_callback_ret_release(r, c, func, failed)
 *
 * As coap_lock_callback_release(), but the call-back's return value is
 * stored in 'r'.
 */
#define coap_lock_callback_ret_release(r,c,func,failed) do { \
    int being_freed = (c)->lock.being_freed && coap_thread_pid == (c)->lock.freeing_pid; \
    if (!being_freed) { \
      coap_lock_check_locked(c); \
      coap_lock_unlock(c); \
      (r) = func; \
      coap_lock_lock(c,failed); \
    } else { \
      (r) = func; \
    } \
  } while (0)
250
251# else /* ! COAP_THREAD_RECURSIVE_CHECK */
252
253/*
254 * Locking, but no deadlock detection
255 */
256typedef struct coap_lock_t {
257 coap_mutex_t mutex;
259 coap_thread_pid_t freeing_pid;
260 uint32_t being_freed;
261 uint32_t in_callback;
262 volatile uint32_t lock_count;
264
272void coap_lock_unlock_func(coap_lock_t *lock);
273
285int coap_lock_lock_func(coap_lock_t *lock, int force);
286
/*
 * coap_lock_lock(c, failed)
 *
 * Lock context 'c'.  If the lock cannot be obtained, the 'failed'
 * statement(s) are executed.
 */
#define coap_lock_lock(c,failed) do { \
    assert(c); \
    if (!coap_lock_lock_func(&(c)->lock, 0)) { \
      failed; \
    } \
  } while (0)
308
/*
 * coap_lock_unlock(c)
 *
 * Unlock context 'c'.
 */
#define coap_lock_unlock(c) do { \
    assert(c); \
    coap_lock_unlock_func(&(c)->lock); \
  } while (0)
320
/*
 * coap_lock_callback(c, func)
 *
 * Invoke the application call-back 'func' with context 'c' kept locked.
 * If this same thread is in the middle of freeing the context, the lock
 * is forcibly re-taken for the duration of the call-back and released
 * afterwards.  Call-back nesting depth is tracked in in_callback.
 */
#define coap_lock_callback(c,func) do { \
    int being_freed = (c)->lock.being_freed && coap_thread_pid == (c)->lock.freeing_pid; \
    if (being_freed) { \
      coap_lock_lock_func(&(c)->lock, 1); \
    } else { \
      coap_lock_check_locked(c); \
    } \
    (c)->lock.in_callback++; \
    func; \
    (c)->lock.in_callback--; \
    if (being_freed) { \
      coap_lock_unlock_func(&(c)->lock); \
    } \
  } while (0)
344
/*
 * coap_lock_callback_ret(r, c, func)
 *
 * As coap_lock_callback(), but the call-back's return value is stored
 * in 'r'.  The context remains locked across the call-back.
 *
 * Fix: in_callback was incremented twice but decremented only once,
 * leaving the call-back nesting counter permanently elevated after every
 * invocation (the COAP_THREAD_RECURSIVE_CHECK variant and
 * coap_lock_callback() both increment exactly once).
 */
#define coap_lock_callback_ret(r,c,func) do { \
    int being_freed = (c)->lock.being_freed && coap_thread_pid == (c)->lock.freeing_pid; \
    if (being_freed) { \
      coap_lock_lock_func(&(c)->lock, 1); \
    } else { \
      coap_lock_check_locked(c); \
    } \
    (c)->lock.in_callback++; \
    (r) = func; \
    (c)->lock.in_callback--; \
    if (being_freed) { \
      coap_lock_unlock_func(&(c)->lock); \
    } \
  } while (0)
370
/*
 * coap_lock_callback_release(c, func, failed)
 *
 * Invoke the application call-back 'func' with context 'c' UNLOCKED for
 * the duration of the call (used for potentially long-running handlers).
 * On return the context is re-locked; if re-locking fails, the 'failed'
 * statement(s) are executed.  If this thread is freeing the context, no
 * unlock/relock is done.
 */
#define coap_lock_callback_release(c,func,failed) do { \
    int being_freed = (c)->lock.being_freed && coap_thread_pid == (c)->lock.freeing_pid; \
    if (!being_freed) { \
      coap_lock_check_locked(c); \
      coap_lock_unlock(c); \
      func; \
      coap_lock_lock(c,failed); \
    } else { \
      func; \
    } \
  } while (0)
392
/*
 * coap_lock_callback_ret_release(r, c, func, failed)
 *
 * As coap_lock_callback_release(), but the call-back's return value is
 * stored in 'r'.
 */
#define coap_lock_callback_ret_release(r,c,func,failed) do { \
    int being_freed = (c)->lock.being_freed && coap_thread_pid == (c)->lock.freeing_pid; \
    if (!being_freed) { \
      coap_lock_check_locked(c); \
      coap_lock_unlock(c); \
      (r) = func; \
      coap_lock_lock(c,failed); \
    } else { \
      (r) = func; \
    } \
  } while (0)
415
416# endif /* ! COAP_THREAD_RECURSIVE_CHECK */
417
/*
 * coap_lock_init(c)
 *
 * Zero all lock state of context 'c' and initialize the underlying mutex.
 */
#define coap_lock_init(c) do { \
    assert(c); \
    memset(&((c)->lock), 0, sizeof((c)->lock)); \
    coap_mutex_init(&(c)->lock.mutex); \
  } while (0)
423
/*
 * coap_lock_being_freed(c, failed)
 *
 * Mark context 'c' as being freed by the current thread (recording the
 * freeing thread's pid), so subsequent lock attempts by other threads
 * can fail out.  Executes 'failed' if the initial lock cannot be taken.
 */
#define coap_lock_being_freed(c,failed) do { \
    coap_lock_lock(c,failed); \
    (c)->lock.being_freed = 1; \
    (c)->lock.freeing_pid = coap_thread_pid; \
    coap_lock_unlock(c); \
  } while (0)
430
/*
 * coap_lock_check_locked(c)
 *
 * Assert that the current thread is the one holding context 'c' locked
 * (or, when the context is being freed, that it is the freeing thread).
 */
#define coap_lock_check_locked(c) do { \
    assert((c) && \
           coap_thread_pid == ((c)->lock.being_freed ? (c)->lock.freeing_pid : \
                               (c)->lock.pid)); \
  } while (0)
436
/*
 * coap_lock_invert(c, func, f)
 *
 * Execute 'func' with context 'c' temporarily unlocked (to avoid lock
 * order inversion), then re-lock; on re-lock failure execute 'f'.  If the
 * context is being freed, 'func' runs without unlocking.
 */
#define coap_lock_invert(c,func,f) do { \
    coap_lock_check_locked(c); \
    if (!(c)->lock.being_freed) { \
      coap_lock_unlock(c); \
      func; \
      coap_lock_lock(c,f); \
    } else { \
      func; \
    } \
  } while (0)
447
448#else /* ! COAP_THREAD_SAFE */
449
/*
 * No locking - single thread
 *
 * With COAP_THREAD_SAFE disabled, every locking primitive compiles away:
 * the lock/unlock/init macros expand to nothing and the call-back wrappers
 * reduce to a plain invocation of 'func'.
 */

#define coap_lock_lock(c,failed)
#define coap_lock_unlock(c)
#define coap_lock_init(c)
#define coap_lock_being_freed(c,failed)
#define coap_lock_check_locked(c) {}
#define coap_lock_callback(c,func) func
#define coap_lock_callback_ret(r,c,func) (r) = func
#define coap_lock_callback_release(c,func,failed) func
#define coap_lock_callback_ret_release(r,c,func,failed) (r) = func
#define coap_lock_invert(c,func,f) func
465
466#endif /* ! COAP_THREAD_SAFE */
467
468#endif /* COAP_THREADSAFE_INTERNAL_H_ */
int coap_mutex_t
#define coap_thread_pid_t
coap_mutex_t coap_lock_t