/*
 * Copyright 2016-2025 The OpenSSL Project Authors. All Rights Reserved.
 *
 * Licensed under the Apache License 2.0 (the "License"). You may not use
 * this file except in compliance with the License. You can obtain a copy
 * in the file LICENSE in the source distribution or at
 * https://www.openssl.org/source/license.html
 */

/* We need to use the OPENSSL_fork_*() deprecated APIs */
#define OPENSSL_SUPPRESS_DEPRECATED

#if !defined(__GNUC__) || !defined(__ATOMIC_ACQ_REL) || \
    defined(BROKEN_CLANG_ATOMICS) || defined(OPENSSL_NO_STDIO)
/*
 * we only enable REPORT_RWLOCK_CONTENTION on clang/gcc when we have
 * atomics available. We do this because we need to use an atomic to track
 * when we can close the log file. We could use the CRYPTO_atomic_ api
 * but that requires lock creation which gets us into a bad recursive loop
 * when we try to initialize the file pointer
 */
# ifdef REPORT_RWLOCK_CONTENTION
#  warning "RWLOCK CONTENTION REPORTING NOT SUPPORTED, Disabling"
#  undef REPORT_RWLOCK_CONTENTION
# endif
#endif

#ifdef REPORT_RWLOCK_CONTENTION
# define _GNU_SOURCE
# include <execinfo.h>
# include <unistd.h>
#endif

#include <openssl/crypto.h>
#include <crypto/cryptlib.h>
#include <crypto/sparse_array.h>
#include "internal/cryptlib.h"
#include "internal/threads_common.h"
#include "internal/rcu.h"
#ifdef REPORT_RWLOCK_CONTENTION
# include "internal/time.h"
#endif
#include "rcu_internal.h"
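
/*
 * Background (summarized from the change that introduced the TSAN_FAKE_*
 * macros below): tsan tracks data races by recording memory references made
 * while locks are held. RCU writers update data under write_lock while
 * readers, by design, hold no lock, so tsan reports a false positive. The
 * workaround is to "fake it": when thread sanitization is active,
 * ossl_rcu_write_lock() pretends to immediately release the lock and
 * ossl_rcu_write_unlock() pretends to re-acquire it, so tsan sees no lock
 * held around accesses to RCU-protected data on either side. Atomics are
 * still used so that tsan can verify the accesses themselves are safe.
 */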

#if defined(__clang__) && defined(__has_feature)
# if __has_feature(thread_sanitizer)
#  define __SANITIZE_THREAD__
# endif
#endif

#if defined(__SANITIZE_THREAD__)
# include <sanitizer/tsan_interface.h>
# define TSAN_FAKE_UNLOCK(x)   __tsan_mutex_pre_unlock((x), 0); \
                               __tsan_mutex_post_unlock((x), 0)

# define TSAN_FAKE_LOCK(x)     __tsan_mutex_pre_lock((x), 0); \
                               __tsan_mutex_post_lock((x), 0, 0)
#else
# define TSAN_FAKE_UNLOCK(x)
# define TSAN_FAKE_LOCK(x)
#endif

#if defined(__sun)
# include <atomic.h>
#endif

#if defined(__apple_build_version__) && __apple_build_version__ < 6000000
/*
 * OS/X 10.7 and 10.8 had a weird version of clang which has __ATOMIC_ACQUIRE and
 * __ATOMIC_ACQ_REL but which expects only one parameter for __atomic_is_lock_free()
 * rather than two, with the signature __atomic_is_lock_free(sizeof(_Atomic(T))).
 * All of this makes it impossible to use __atomic_is_lock_free here.
 *
 * See: https://github.com/llvm/llvm-project/commit/a4c2602b714e6c6edb98164550a5ae829b2de760
 */
# define BROKEN_CLANG_ATOMICS
#endif

#if defined(OPENSSL_THREADS) && !defined(CRYPTO_TDEBUG) && !defined(OPENSSL_SYS_WINDOWS)

# if defined(OPENSSL_SYS_UNIX)
#  include <sys/types.h>
#  include <unistd.h>
# endif

# include <assert.h>

/*
 * The Non-Stop KLT thread model currently seems broken in its rwlock
 * implementation
 */
# if defined(PTHREAD_RWLOCK_INITIALIZER) && !defined(_KLT_MODEL_)
#  define USE_RWLOCK
# endif

/*
 * For all GNU/clang atomic builtins, we also need fallbacks, to cover all
 * other compilers.
 *
 * Unfortunately, we can't do that with some "generic type", because there's no
 * guarantee that the chosen generic type is large enough to cover all cases.
 * Therefore, we implement fallbacks for each applicable type, with composed
 * names that include the type they handle.
 *
 * (an anecdote: we previously tried to use |void *| as the generic type, with
 * the thought that the pointer itself is the largest type. However, this is
 * not true on 32-bit pointer platforms, as a |uint64_t| is twice as large)
 *
 * All applicable ATOMIC_ macros take the intended type as first parameter, so
 * they can map to the correct fallback function. In the GNU/clang case, that
 * parameter is simply ignored.
 */

/*
 * Internal types used with the ATOMIC_ macros, to make it possible to compose
 * fallback function names.
 */
typedef void *pvoid;
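
/*
 * Illustrative only (not used by the code below): every ATOMIC_ macro takes
 * the operand type as its first argument so that the fallback implementation
 * can select the matching fallback_* function, e.g.
 *
 *     uint32_t idx = ATOMIC_LOAD_N(uint32_t, &lock->reader_idx, __ATOMIC_RELAXED);
 *     ATOMIC_STORE_N(uint32_t, &lock->reader_idx, idx + 1, __ATOMIC_RELAXED);
 *
 * With the GNU/clang builtins the type argument is ignored; with the
 * fallbacks it expands to fallback_atomic_load_n_uint32_t() and
 * fallback_atomic_store_n_uint32_t() respectively.
 */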

# if defined(__GNUC__) && defined(__ATOMIC_ACQUIRE) && !defined(BROKEN_CLANG_ATOMICS) \
     && !defined(USE_ATOMIC_FALLBACKS)
#  define ATOMIC_LOAD_N(t, p, o) __atomic_load_n(p, o)
#  define ATOMIC_STORE_N(t, p, v, o) __atomic_store_n(p, v, o)
#  define ATOMIC_STORE(t, p, v, o) __atomic_store(p, v, o)
#  define ATOMIC_ADD_FETCH(p, v, o) __atomic_add_fetch(p, v, o)
#  define ATOMIC_SUB_FETCH(p, v, o) __atomic_sub_fetch(p, v, o)
# else
static pthread_mutex_t atomic_sim_lock = PTHREAD_MUTEX_INITIALIZER;

#  define IMPL_fallback_atomic_load_n(t)                        \
    static ossl_inline t fallback_atomic_load_n_##t(t *p)       \
    {                                                           \
        t ret;                                                  \
                                                                \
        pthread_mutex_lock(&atomic_sim_lock);                   \
        ret = *p;                                               \
        pthread_mutex_unlock(&atomic_sim_lock);                 \
        return ret;                                             \
    }
IMPL_fallback_atomic_load_n(uint32_t)
IMPL_fallback_atomic_load_n(uint64_t)
IMPL_fallback_atomic_load_n(pvoid)

#  define ATOMIC_LOAD_N(t, p, o) fallback_atomic_load_n_##t(p)

#  define IMPL_fallback_atomic_store_n(t)                       \
    static ossl_inline t fallback_atomic_store_n_##t(t *p, t v) \
    {                                                           \
        t ret;                                                  \
                                                                \
        pthread_mutex_lock(&atomic_sim_lock);                   \
        ret = *p;                                               \
        *p = v;                                                 \
        pthread_mutex_unlock(&atomic_sim_lock);                 \
        return ret;                                             \
    }
IMPL_fallback_atomic_store_n(uint32_t)

#  define ATOMIC_STORE_N(t, p, v, o) fallback_atomic_store_n_##t(p, v)

#  define IMPL_fallback_atomic_store(t)                           \
    static ossl_inline void fallback_atomic_store_##t(t *p, t *v) \
    {                                                             \
        pthread_mutex_lock(&atomic_sim_lock);                     \
        *p = *v;                                                  \
        pthread_mutex_unlock(&atomic_sim_lock);                   \
    }
IMPL_fallback_atomic_store(pvoid)

#  define ATOMIC_STORE(t, p, v, o) fallback_atomic_store_##t(p, v)

/*
 * The fallbacks that follow don't need any per type implementation, as
 * they are designed for uint64_t only. If there comes a time when multiple
 * types need to be covered, it's relatively easy to refactor them the same
 * way as the fallbacks above.
 */

static ossl_inline uint64_t fallback_atomic_add_fetch(uint64_t *p, uint64_t v)
{
    uint64_t ret;

    pthread_mutex_lock(&atomic_sim_lock);
    *p += v;
    ret = *p;
    pthread_mutex_unlock(&atomic_sim_lock);
    return ret;
}

#  define ATOMIC_ADD_FETCH(p, v, o) fallback_atomic_add_fetch(p, v)

static ossl_inline uint64_t fallback_atomic_sub_fetch(uint64_t *p, uint64_t v)
{
    uint64_t ret;

    pthread_mutex_lock(&atomic_sim_lock);
    *p -= v;
    ret = *p;
    pthread_mutex_unlock(&atomic_sim_lock);
    return ret;
}

#  define ATOMIC_SUB_FETCH(p, v, o) fallback_atomic_sub_fetch(p, v)
# endif

/*
 * This is the core of an rcu lock. It tracks the readers and writers for the
 * current quiescence point for a given lock. Users is the 64 bit value that
 * stores the READERS/ID as defined above
 */
struct rcu_qp {
    uint64_t users;
};

struct thread_qp {
    struct rcu_qp *qp;
    unsigned int depth;
    CRYPTO_RCU_LOCK *lock;
};

# define MAX_QPS 10
/*
 * This is the per thread tracking data
 * that is assigned to each thread participating
 * in an rcu qp
 *
 * qp points to the qp that it last acquired
 */
struct rcu_thr_data {
    struct thread_qp thread_qps[MAX_QPS];
};

/*
 * This is the internal version of a CRYPTO_RCU_LOCK
 * it is cast from CRYPTO_RCU_LOCK
 */
struct rcu_lock_st {
    /* Callbacks to call for next ossl_synchronize_rcu */
    struct rcu_cb_item *cb_items;

    /* The context we are being created against */
    OSSL_LIB_CTX *ctx;

    /* Array of quiescent points for synchronization */
    struct rcu_qp *qp_group;

    /* rcu generation counter for in-order retirement */
    uint32_t id_ctr;

    /* Number of elements in qp_group array */
    uint32_t group_count;

    /* Index of the current qp in the qp_group array */
    uint32_t reader_idx;

    /* value of the next id_ctr value to be retired */
    uint32_t next_to_retire;

    /* index of the next free rcu_qp in the qp_group */
    uint32_t current_alloc_idx;

    /* number of qp's in qp_group array currently being retired */
    uint32_t writers_alloced;

    /* lock protecting write side operations */
    pthread_mutex_t write_lock;

    /* lock protecting updates to writers_alloced/current_alloc_idx */
    pthread_mutex_t alloc_lock;

    /* signal to wake threads waiting on alloc_lock */
    pthread_cond_t alloc_signal;

    /* lock to enforce in-order retirement */
    pthread_mutex_t prior_lock;

    /* signal to wake threads waiting on prior_lock */
    pthread_cond_t prior_signal;
};
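
/*
 * Rough usage sketch for the RCU API implemented below (illustrative only;
 * "shared" stands for any pointer published with ossl_rcu_assign_uptr and
 * read with ossl_rcu_uptr_deref, and build_replacement() is a hypothetical
 * caller-side helper):
 *
 *     // reader side
 *     ossl_rcu_read_lock(lock);
 *     data = ossl_rcu_uptr_deref((void **)&shared);
 *     ... read through data, never write through it ...
 *     ossl_rcu_read_unlock(lock);
 *
 *     // writer side
 *     ossl_rcu_write_lock(lock);
 *     newp = build_replacement();
 *     ossl_rcu_assign_uptr((void **)&shared, (void **)&newp);
 *     ossl_rcu_write_unlock(lock);
 *     ossl_synchronize_rcu(lock);   // wait for readers, then free the old value
 */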

/* Read side acquisition of the current qp */
static struct rcu_qp *get_hold_current_qp(struct rcu_lock_st *lock)
{
    uint32_t qp_idx;

    /* get the current qp index */
    for (;;) {
        qp_idx = ATOMIC_LOAD_N(uint32_t, &lock->reader_idx, __ATOMIC_RELAXED);

        /*
         * Notes on use of __ATOMIC_ACQUIRE
         * We need to ensure the following:
         * 1) That subsequent operations aren't optimized by hoisting them above
         * this operation. Specifically, we don't want the below re-load of
         * qp_idx to get optimized away
         * 2) We want to ensure that any updating of reader_idx on the write side
         * of the lock is flushed from a local cpu cache so that we see any
         * updates prior to the load. This is a non-issue on cache coherent
         * systems like x86, but is relevant on other arches
         */
        ATOMIC_ADD_FETCH(&lock->qp_group[qp_idx].users, (uint64_t)1,
                         __ATOMIC_ACQUIRE);

        /* if the idx hasn't changed, we're good, else try again */
        if (qp_idx == ATOMIC_LOAD_N(uint32_t, &lock->reader_idx,
                                    __ATOMIC_RELAXED))
            break;

        ATOMIC_SUB_FETCH(&lock->qp_group[qp_idx].users, (uint64_t)1,
                         __ATOMIC_RELAXED);
    }

    return &lock->qp_group[qp_idx];
}

static void ossl_rcu_free_local_data(void *arg)
{
    OSSL_LIB_CTX *ctx = arg;
    struct rcu_thr_data *data = CRYPTO_THREAD_get_local_ex(CRYPTO_THREAD_LOCAL_RCU_KEY, ctx);

    CRYPTO_THREAD_set_local_ex(CRYPTO_THREAD_LOCAL_RCU_KEY, ctx, NULL);
    OPENSSL_free(data);
}

void ossl_rcu_read_lock(CRYPTO_RCU_LOCK *lock)
{
    struct rcu_thr_data *data;
    int i, available_qp = -1;

    /*
     * we're going to access current_qp here so ask the
     * processor to fetch it
     */
    data = CRYPTO_THREAD_get_local_ex(CRYPTO_THREAD_LOCAL_RCU_KEY, lock->ctx);

    if (data == NULL) {
        data = OPENSSL_zalloc(sizeof(*data));
        OPENSSL_assert(data != NULL);
        CRYPTO_THREAD_set_local_ex(CRYPTO_THREAD_LOCAL_RCU_KEY, lock->ctx, data);
        ossl_init_thread_start(NULL, lock->ctx, ossl_rcu_free_local_data);
    }

    for (i = 0; i < MAX_QPS; i++) {
        if (data->thread_qps[i].qp == NULL && available_qp == -1)
            available_qp = i;
        /* If we have a hold on this lock already, we're good */
        if (data->thread_qps[i].lock == lock) {
            data->thread_qps[i].depth++;
            return;
        }
    }

    /*
     * if we get here, then we don't have a hold on this lock yet
     */
    assert(available_qp != -1);

    data->thread_qps[available_qp].qp = get_hold_current_qp(lock);
    data->thread_qps[available_qp].depth = 1;
    data->thread_qps[available_qp].lock = lock;
}

void ossl_rcu_read_unlock(CRYPTO_RCU_LOCK *lock)
{
    int i;
    struct rcu_thr_data *data = CRYPTO_THREAD_get_local_ex(CRYPTO_THREAD_LOCAL_RCU_KEY, lock->ctx);
    uint64_t ret;

    assert(data != NULL);

    for (i = 0; i < MAX_QPS; i++) {
        if (data->thread_qps[i].lock == lock) {
            /*
             * we have to use __ATOMIC_RELEASE here
             * to ensure that all preceding read instructions complete
             * before the decrement is visible to ossl_synchronize_rcu
             */
            data->thread_qps[i].depth--;
            if (data->thread_qps[i].depth == 0) {
                ret = ATOMIC_SUB_FETCH(&data->thread_qps[i].qp->users,
                                       (uint64_t)1, __ATOMIC_RELEASE);
                OPENSSL_assert(ret != UINT64_MAX);
                data->thread_qps[i].qp = NULL;
                data->thread_qps[i].lock = NULL;
            }
            return;
        }
    }
    /*
     * If we get here, we're trying to unlock a lock that we never acquired -
     * that's fatal.
     */
    assert(0);
}

/*
 * Write side allocation routine to get the current qp
 * and replace it with a new one
 */
static struct rcu_qp *update_qp(CRYPTO_RCU_LOCK *lock, uint32_t *curr_id)
{
    uint32_t current_idx;

    pthread_mutex_lock(&lock->alloc_lock);

    /*
     * we need at least one qp to be available with one
     * left over, so that readers can start working on
     * one that isn't yet being waited on
     */
    while (lock->group_count - lock->writers_alloced < 2)
        /* we have to wait for one to be free */
        pthread_cond_wait(&lock->alloc_signal, &lock->alloc_lock);

    current_idx = lock->current_alloc_idx;

    /* Allocate the qp */
    lock->writers_alloced++;

    /* increment the allocation index */
    lock->current_alloc_idx =
        (lock->current_alloc_idx + 1) % lock->group_count;

    *curr_id = lock->id_ctr;
    lock->id_ctr++;

    ATOMIC_STORE_N(uint32_t, &lock->reader_idx, lock->current_alloc_idx,
                   __ATOMIC_RELAXED);

    /*
     * this should make sure that the new value of reader_idx is visible in
     * get_hold_current_qp, directly after incrementing the users count
     */
    ATOMIC_ADD_FETCH(&lock->qp_group[current_idx].users, (uint64_t)0,
                     __ATOMIC_RELEASE);

    /* wake up any waiters */
    pthread_cond_signal(&lock->alloc_signal);
    pthread_mutex_unlock(&lock->alloc_lock);
    return &lock->qp_group[current_idx];
}

static void retire_qp(CRYPTO_RCU_LOCK *lock, struct rcu_qp *qp)
{
    pthread_mutex_lock(&lock->alloc_lock);
    lock->writers_alloced--;
    pthread_cond_signal(&lock->alloc_signal);
    pthread_mutex_unlock(&lock->alloc_lock);
}

static struct rcu_qp *allocate_new_qp_group(CRYPTO_RCU_LOCK *lock,
                                            uint32_t count)
{
    struct rcu_qp *new =
        OPENSSL_zalloc(sizeof(*new) * count);

    lock->group_count = count;
    return new;
}

void ossl_rcu_write_lock(CRYPTO_RCU_LOCK *lock)
{
    pthread_mutex_lock(&lock->write_lock);
    TSAN_FAKE_UNLOCK(&lock->write_lock);
}

void ossl_rcu_write_unlock(CRYPTO_RCU_LOCK *lock)
{
    TSAN_FAKE_LOCK(&lock->write_lock);
    pthread_mutex_unlock(&lock->write_lock);
}

void ossl_synchronize_rcu(CRYPTO_RCU_LOCK *lock)
{
    struct rcu_qp *qp;
    uint64_t count;
    uint32_t curr_id;
    struct rcu_cb_item *cb_items, *tmpcb;

    pthread_mutex_lock(&lock->write_lock);
    cb_items = lock->cb_items;
    lock->cb_items = NULL;
    pthread_mutex_unlock(&lock->write_lock);

    qp = update_qp(lock, &curr_id);

    /* retire in order */
    pthread_mutex_lock(&lock->prior_lock);
    while (lock->next_to_retire != curr_id)
        pthread_cond_wait(&lock->prior_signal, &lock->prior_lock);

    /*
     * wait for the reader count to reach zero
     * Note the use of __ATOMIC_ACQUIRE here to ensure that any
     * prior __ATOMIC_RELEASE write operation in ossl_rcu_read_unlock
     * is visible prior to our read
     * however this is likely just necessary to silence a tsan warning
     * because the read side should not do any write operation
     * outside the atomic itself
     */
    do {
        count = ATOMIC_LOAD_N(uint64_t, &qp->users, __ATOMIC_ACQUIRE);
    } while (count != (uint64_t)0);

    lock->next_to_retire++;
    pthread_cond_broadcast(&lock->prior_signal);
    pthread_mutex_unlock(&lock->prior_lock);

    retire_qp(lock, qp);

    /* handle any callbacks that we have */
    while (cb_items != NULL) {
        tmpcb = cb_items;
        cb_items = cb_items->next;
        tmpcb->fn(tmpcb->data);
        OPENSSL_free(tmpcb);
    }
}

/*
 * Note: This call assumes it's made under the protection of
 * ossl_rcu_write_lock
 */
int ossl_rcu_call(CRYPTO_RCU_LOCK *lock, rcu_cb_fn cb, void *data)
{
    struct rcu_cb_item *new =
        OPENSSL_zalloc(sizeof(*new));

    if (new == NULL)
        return 0;

    new->data = data;
    new->fn = cb;

    new->next = lock->cb_items;
    lock->cb_items = new;

    return 1;
}

void *ossl_rcu_uptr_deref(void **p)
{
    return ATOMIC_LOAD_N(pvoid, p, __ATOMIC_ACQUIRE);
}

void ossl_rcu_assign_uptr(void **p, void **v)
{
    ATOMIC_STORE(pvoid, p, v, __ATOMIC_RELEASE);
}

CRYPTO_RCU_LOCK *ossl_rcu_lock_new(int num_writers, OSSL_LIB_CTX *ctx)
{
    struct rcu_lock_st *new;

    /*
     * We need a minimum of 2 qp's
     */
    if (num_writers < 2)
        num_writers = 2;

    ctx = ossl_lib_ctx_get_concrete(ctx);
    if (ctx == NULL)
        return NULL;

    new = OPENSSL_zalloc(sizeof(*new));
    if (new == NULL)
        return NULL;

    new->ctx = ctx;
    pthread_mutex_init(&new->write_lock, NULL);
    pthread_mutex_init(&new->prior_lock, NULL);
    pthread_mutex_init(&new->alloc_lock, NULL);
    pthread_cond_init(&new->prior_signal, NULL);
    pthread_cond_init(&new->alloc_signal, NULL);

    new->qp_group = allocate_new_qp_group(new, num_writers);
    if (new->qp_group == NULL) {
        OPENSSL_free(new);
        new = NULL;
    }

    return new;
}

void ossl_rcu_lock_free(CRYPTO_RCU_LOCK *lock)
{
    struct rcu_lock_st *rlock = (struct rcu_lock_st *)lock;

    if (lock == NULL)
        return;

    /* make sure we're synchronized */
    ossl_synchronize_rcu(rlock);

    OPENSSL_free(rlock->qp_group);
    /* There should only be a single qp left now */
    OPENSSL_free(rlock);
}

# ifdef REPORT_RWLOCK_CONTENTION
/*
 * Normally we would use a BIO here to do this, but we create locks during
 * library initialization, and creating a bio too early creates a recursive set
 * of stack calls that leads us to call CRYPTO_thread_run_once while currently
 * executing the init routine for various run_once functions, which leads to
 * deadlock. Avoid that by just using a FILE pointer. Also note that we
 * directly use a pthread_mutex_t to protect access from multiple threads
 * to the contention log file. We do this because we want to avoid use
 * of the CRYPTO_THREAD api so as to prevent recursive blocking reports.
 */
static FILE *contention_fp = NULL;
static CRYPTO_ONCE init_contention_fp = CRYPTO_ONCE_STATIC_INIT;
static int rwlock_count = 0;
pthread_mutex_t log_lock = PTHREAD_MUTEX_INITIALIZER;
CRYPTO_THREAD_LOCAL thread_contention_data;

static void destroy_contention_data(void *data)
{
    OPENSSL_free(data);
}

struct stack_info {
    unsigned int nptrs;
    int write;
    OSSL_TIME start;
    OSSL_TIME duration;
    char **strings;
};

#  define STACKS_COUNT 32
struct stack_traces {
    int lock_depth;
    size_t idx;
    struct stack_info stacks[STACKS_COUNT];
};
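
/*
 * Each contended acquisition records one stack_info entry in the calling
 * thread's stack_traces list; the entries are flushed to the contention log
 * by print_stack_traces() below once the outermost lock is released (or when
 * the per-thread buffer overflows). A record starts with a header line of
 * the form
 *
 *     lock blocked on WRITE for <usec> usec at time <usec> tid <tid>
 *
 * followed by one line per backtrace_symbols() frame (or
 * "No stack trace available"), then a blank line.
 */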

static void init_contention_fp_once(void)
{
#  ifdef FIPS_MODULE
    contention_fp = fopen("lock-contention-log-fips.txt", "w");
#  else
    contention_fp = fopen("lock-contention-log.txt", "w");
#  endif
    if (contention_fp == NULL)
        fprintf(stderr, "Contention log file could not be opened, log will not be recorded\n");

    /*
     * Create a thread local key here to store our list of stack traces
     * to be printed when we unlock the lock we are holding
     */
    CRYPTO_THREAD_init_local(&thread_contention_data, destroy_contention_data);
    return;
}
# endif

CRYPTO_RWLOCK *CRYPTO_THREAD_lock_new(void)
{
# ifdef USE_RWLOCK
    CRYPTO_RWLOCK *lock;

#  ifdef REPORT_RWLOCK_CONTENTION
    CRYPTO_THREAD_run_once(&init_contention_fp, init_contention_fp_once);
    __atomic_add_fetch(&rwlock_count, 1, __ATOMIC_ACQ_REL);
    {
        struct stack_info *thread_stack_info;

        thread_stack_info = CRYPTO_THREAD_get_local(&thread_contention_data);
        if (thread_stack_info == NULL) {
            thread_stack_info = OPENSSL_zalloc(sizeof(struct stack_traces));
            CRYPTO_THREAD_set_local(&thread_contention_data, thread_stack_info);
        }
    }
#  endif

    if ((lock = OPENSSL_zalloc(sizeof(pthread_rwlock_t))) == NULL)
        /* Don't set error, to avoid recursion blowup. */
        return NULL;

    if (pthread_rwlock_init(lock, NULL) != 0) {
        OPENSSL_free(lock);
        return NULL;
    }
# else
    pthread_mutexattr_t attr;
    CRYPTO_RWLOCK *lock;

    if ((lock = OPENSSL_zalloc(sizeof(pthread_mutex_t))) == NULL)
        /* Don't set error, to avoid recursion blowup. */
        return NULL;

    /*
     * We don't use recursive mutexes, but try to catch errors if we do.
     */
    pthread_mutexattr_init(&attr);
#  if !defined (__TANDEM) && !defined (_SPT_MODEL_)
#   if !defined(NDEBUG) && !defined(OPENSSL_NO_MUTEX_ERRORCHECK)
    pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_ERRORCHECK);
#   endif
#  else
    /* The SPT Thread Library does not define MUTEX attributes. */
#  endif

    if (pthread_mutex_init(lock, &attr) != 0) {
        pthread_mutexattr_destroy(&attr);
        OPENSSL_free(lock);
        return NULL;
    }

    pthread_mutexattr_destroy(&attr);
# endif

    return lock;
}

# ifdef REPORT_RWLOCK_CONTENTION
static void print_stack_traces(struct stack_traces *traces, FILE *fptr)
{
    unsigned int j;

    pthread_mutex_lock(&log_lock);
    while (traces != NULL && traces->idx >= 1) {
        traces->idx--;
        fprintf(fptr, "lock blocked on %s for %zu usec at time %zu tid %d\n",
                traces->stacks[traces->idx].write == 1 ? "WRITE" : "READ",
                ossl_time2us(traces->stacks[traces->idx].duration),
                ossl_time2us(traces->stacks[traces->idx].start),
                gettid());
        if (traces->stacks[traces->idx].strings != NULL) {
            for (j = 0; j < traces->stacks[traces->idx].nptrs; j++)
                fprintf(fptr, "%s\n", traces->stacks[traces->idx].strings[j]);
            free(traces->stacks[traces->idx].strings);
        } else {
            fprintf(fptr, "No stack trace available\n");
        }
        fprintf(contention_fp, "\n");
    }
    pthread_mutex_unlock(&log_lock);
}
# endif

# define BT_BUF_SIZE 1024
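
/*
 * When contention reporting is enabled, the read- and write-lock paths below
 * follow the same pattern: try the non-blocking trylock first and, only if
 * that fails, fall back to the blocking call while timing the wait and
 * capturing a backtrace of up to BT_BUF_SIZE frames into the calling
 * thread's stack_traces slot for later logging.
 */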

__owur int CRYPTO_THREAD_read_lock(CRYPTO_RWLOCK *lock)
{
# ifdef USE_RWLOCK
#  ifdef REPORT_RWLOCK_CONTENTION
    struct stack_traces *traces = CRYPTO_THREAD_get_local(&thread_contention_data);

    if (ossl_unlikely(traces == NULL)) {
        traces = OPENSSL_zalloc(sizeof(struct stack_traces));
        CRYPTO_THREAD_set_local(&thread_contention_data, traces);
        if (ossl_unlikely(traces == NULL))
            return 0;
    }

    traces->lock_depth++;
    if (pthread_rwlock_tryrdlock(lock)) {
        void *buffer[BT_BUF_SIZE];
        OSSL_TIME start, end;

        start = ossl_time_now();
        if (!ossl_assert(pthread_rwlock_rdlock(lock) == 0)) {
            traces->lock_depth--;
            return 0;
        }
        end = ossl_time_now();
        traces->stacks[traces->idx].nptrs = backtrace(buffer, BT_BUF_SIZE);
        traces->stacks[traces->idx].strings = backtrace_symbols(buffer,
                                                                traces->stacks[traces->idx].nptrs);
        traces->stacks[traces->idx].duration = ossl_time_subtract(end, start);
        traces->stacks[traces->idx].start = start;
        traces->stacks[traces->idx].write = 0;
        traces->idx++;
        if (traces->idx >= STACKS_COUNT) {
            fprintf(stderr, "STACK RECORD OVERFLOW!\n");
            print_stack_traces(traces, contention_fp);
        }
    }
#  else
    if (!ossl_assert(pthread_rwlock_rdlock(lock) == 0))
        return 0;
#  endif
# else
    if (pthread_mutex_lock(lock) != 0) {
        assert(errno != EDEADLK && errno != EBUSY);
        return 0;
    }
# endif

    return 1;
}

__owur int CRYPTO_THREAD_write_lock(CRYPTO_RWLOCK *lock)
{
# ifdef USE_RWLOCK
#  ifdef REPORT_RWLOCK_CONTENTION
    struct stack_traces *traces = CRYPTO_THREAD_get_local(&thread_contention_data);

    if (ossl_unlikely(traces == NULL)) {
        traces = OPENSSL_zalloc(sizeof(struct stack_traces));
        CRYPTO_THREAD_set_local(&thread_contention_data, traces);
        if (ossl_unlikely(traces == NULL))
            return 0;
    }

    traces->lock_depth++;
    if (pthread_rwlock_trywrlock(lock)) {
        void *buffer[BT_BUF_SIZE];
        OSSL_TIME start, end;

        start = ossl_time_now();
        if (!ossl_assert(pthread_rwlock_wrlock(lock) == 0)) {
            traces->lock_depth--;
            return 0;
        }
        end = ossl_time_now();
        traces->stacks[traces->idx].nptrs = backtrace(buffer, BT_BUF_SIZE);
        traces->stacks[traces->idx].strings = backtrace_symbols(buffer,
                                                                traces->stacks[traces->idx].nptrs);
        traces->stacks[traces->idx].duration = ossl_time_subtract(end, start);
        traces->stacks[traces->idx].start = start;
        traces->stacks[traces->idx].write = 1;
        traces->idx++;
        if (traces->idx >= STACKS_COUNT) {
            fprintf(stderr, "STACK RECORD OVERFLOW!\n");
            print_stack_traces(traces, contention_fp);
        }
    }
#  else
    if (!ossl_assert(pthread_rwlock_wrlock(lock) == 0))
        return 0;
#  endif
# else
    if (pthread_mutex_lock(lock) != 0) {
        assert(errno != EDEADLK && errno != EBUSY);
        return 0;
    }
# endif

    return 1;
}

int CRYPTO_THREAD_unlock(CRYPTO_RWLOCK *lock)
{
# ifdef USE_RWLOCK
    if (pthread_rwlock_unlock(lock) != 0)
        return 0;
#  ifdef REPORT_RWLOCK_CONTENTION
    {
        struct stack_traces *traces = CRYPTO_THREAD_get_local(&thread_contention_data);

        if (contention_fp != NULL && traces != NULL) {
            traces->lock_depth--;
            assert(traces->lock_depth >= 0);
            if (traces->lock_depth == 0)
                print_stack_traces(traces, contention_fp);
        }
    }
#  endif
# else
    if (pthread_mutex_unlock(lock) != 0) {
        assert(errno != EPERM);
        return 0;
    }
# endif

    return 1;
}

void CRYPTO_THREAD_lock_free(CRYPTO_RWLOCK *lock)
{
    if (lock == NULL)
        return;
# ifdef REPORT_RWLOCK_CONTENTION

    /*
     * Note: It's possible here that OpenSSL may allocate a lock and immediately
     * free it, in which case we would erroneously close the contention log
     * prior to the library going on to do more real work. In practice
     * that never happens though, and since this is a debug facility
     * we don't worry about that here.
     */
    if (__atomic_add_fetch(&rwlock_count, -1, __ATOMIC_ACQ_REL) == 0) {
        fclose(contention_fp);
        contention_fp = NULL;
    }
# endif

# ifdef USE_RWLOCK
    pthread_rwlock_destroy(lock);
# else
    pthread_mutex_destroy(lock);
# endif
    OPENSSL_free(lock);

    return;
}

int CRYPTO_THREAD_run_once(CRYPTO_ONCE *once, void (*init)(void))
{
    if (pthread_once(once, init) != 0)
        return 0;

    return 1;
}

int CRYPTO_THREAD_init_local(CRYPTO_THREAD_LOCAL *key, void (*cleanup)(void *))
{
    if (pthread_key_create(key, cleanup) != 0)
        return 0;

    return 1;
}

void *CRYPTO_THREAD_get_local(CRYPTO_THREAD_LOCAL *key)
{
    return pthread_getspecific(*key);
}

int CRYPTO_THREAD_set_local(CRYPTO_THREAD_LOCAL *key, void *val)
{
    if (pthread_setspecific(*key, val) != 0)
        return 0;

    return 1;
}

int CRYPTO_THREAD_cleanup_local(CRYPTO_THREAD_LOCAL *key)
{
    if (pthread_key_delete(*key) != 0)
        return 0;

    return 1;
}

CRYPTO_THREAD_ID CRYPTO_THREAD_get_current_id(void)
{
    return pthread_self();
}

int CRYPTO_THREAD_compare_id(CRYPTO_THREAD_ID a, CRYPTO_THREAD_ID b)
{
    return pthread_equal(a, b);
}

int CRYPTO_atomic_add(int *val, int amount, int *ret, CRYPTO_RWLOCK *lock)
{
# if defined(__GNUC__) && defined(__ATOMIC_ACQ_REL) && !defined(BROKEN_CLANG_ATOMICS)
    if (__atomic_is_lock_free(sizeof(*val), val)) {
        *ret = __atomic_add_fetch(val, amount, __ATOMIC_ACQ_REL);
        return 1;
    }
# elif defined(__sun) && (defined(__SunOS_5_10) || defined(__SunOS_5_11))
    /* This will work for all future Solaris versions. */
    if (ret != NULL) {
        *ret = atomic_add_int_nv((volatile unsigned int *)val, amount);
        return 1;
    }
# endif
    if (lock == NULL || !CRYPTO_THREAD_write_lock(lock))
        return 0;

    *val += amount;
    *ret = *val;

    if (!CRYPTO_THREAD_unlock(lock))
        return 0;

    return 1;
}
|
|
|
|
|
2024-06-28 13:16:10 +08:00
|
|
|
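/*
 * The 64-bit helpers below (add64, and, or) follow the same fallback chain
 * as CRYPTO_atomic_add() above and hand the post-operation value back
 * through *ret.
 */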
int CRYPTO_atomic_add64(uint64_t *val, uint64_t op, uint64_t *ret,
                        CRYPTO_RWLOCK *lock)
{
# if defined(__GNUC__) && defined(__ATOMIC_ACQ_REL) && !defined(BROKEN_CLANG_ATOMICS)
    if (__atomic_is_lock_free(sizeof(*val), val)) {
        *ret = __atomic_add_fetch(val, op, __ATOMIC_ACQ_REL);
        return 1;
    }
# elif defined(__sun) && (defined(__SunOS_5_10) || defined(__SunOS_5_11))
    /* This will work for all future Solaris versions. */
    if (ret != NULL) {
        *ret = atomic_add_64_nv(val, op);
        return 1;
    }
# endif
    if (lock == NULL || !CRYPTO_THREAD_write_lock(lock))
        return 0;
    *val += op;
    *ret = *val;

    if (!CRYPTO_THREAD_unlock(lock))
        return 0;

    return 1;
}

int CRYPTO_atomic_and(uint64_t *val, uint64_t op, uint64_t *ret,
                      CRYPTO_RWLOCK *lock)
{
# if defined(__GNUC__) && defined(__ATOMIC_ACQ_REL) && !defined(BROKEN_CLANG_ATOMICS)
    if (__atomic_is_lock_free(sizeof(*val), val)) {
        *ret = __atomic_and_fetch(val, op, __ATOMIC_ACQ_REL);
        return 1;
    }
# elif defined(__sun) && (defined(__SunOS_5_10) || defined(__SunOS_5_11))
    /* This will work for all future Solaris versions. */
    if (ret != NULL) {
        *ret = atomic_and_64_nv(val, op);
        return 1;
    }
# endif
    if (lock == NULL || !CRYPTO_THREAD_write_lock(lock))
        return 0;
    *val &= op;
    *ret = *val;

    if (!CRYPTO_THREAD_unlock(lock))
        return 0;

    return 1;
}

int CRYPTO_atomic_or(uint64_t *val, uint64_t op, uint64_t *ret,
                     CRYPTO_RWLOCK *lock)
{
# if defined(__GNUC__) && defined(__ATOMIC_ACQ_REL) && !defined(BROKEN_CLANG_ATOMICS)
    if (__atomic_is_lock_free(sizeof(*val), val)) {
        *ret = __atomic_or_fetch(val, op, __ATOMIC_ACQ_REL);
        return 1;
    }
# elif defined(__sun) && (defined(__SunOS_5_10) || defined(__SunOS_5_11))
    /* This will work for all future Solaris versions. */
    if (ret != NULL) {
        *ret = atomic_or_64_nv(val, op);
        return 1;
    }
# endif
    if (lock == NULL || !CRYPTO_THREAD_write_lock(lock))
        return 0;
    *val |= op;
    *ret = *val;

    if (!CRYPTO_THREAD_unlock(lock))
        return 0;

    return 1;
}

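/*
 * CRYPTO_atomic_load() reads *val with acquire semantics.  The Solaris path
 * has no plain atomic load, so it ORs in 0 to get the same effect, and the
 * lock-based fallback only needs a read lock.
 */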
int CRYPTO_atomic_load(uint64_t *val, uint64_t *ret, CRYPTO_RWLOCK *lock)
{
# if defined(__GNUC__) && defined(__ATOMIC_ACQ_REL) && !defined(BROKEN_CLANG_ATOMICS)
    if (__atomic_is_lock_free(sizeof(*val), val)) {
        __atomic_load(val, ret, __ATOMIC_ACQUIRE);
        return 1;
    }
# elif defined(__sun) && (defined(__SunOS_5_10) || defined(__SunOS_5_11))
    /* This will work for all future Solaris versions. */
    if (ret != NULL) {
        *ret = atomic_or_64_nv(val, 0);
        return 1;
    }
# endif
    if (lock == NULL || !CRYPTO_THREAD_read_lock(lock))
        return 0;
    *ret = *val;
    if (!CRYPTO_THREAD_unlock(lock))
        return 0;

    return 1;
}

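/*
 * CRYPTO_atomic_store() writes val into *dst with release semantics; the
 * Solaris path emulates it with atomic_swap_64(), discarding the old value.
 */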
int CRYPTO_atomic_store(uint64_t *dst, uint64_t val, CRYPTO_RWLOCK *lock)
{
# if defined(__GNUC__) && defined(__ATOMIC_ACQ_REL) && !defined(BROKEN_CLANG_ATOMICS)
    if (__atomic_is_lock_free(sizeof(*dst), dst)) {
        __atomic_store(dst, &val, __ATOMIC_RELEASE);
        return 1;
    }
# elif defined(__sun) && (defined(__SunOS_5_10) || defined(__SunOS_5_11))
    /* This will work for all future Solaris versions. */
    if (dst != NULL) {
        atomic_swap_64(dst, val);
        return 1;
    }
# endif
    if (lock == NULL || !CRYPTO_THREAD_write_lock(lock))
        return 0;
    *dst = val;
    if (!CRYPTO_THREAD_unlock(lock))
        return 0;

    return 1;
}

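/*
 * CRYPTO_atomic_load_int() is the plain-int counterpart of
 * CRYPTO_atomic_load(), provided for callers whose counters are ints rather
 * than uint64_t.
 */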
int CRYPTO_atomic_load_int(int *val, int *ret, CRYPTO_RWLOCK *lock)
{
# if defined(__GNUC__) && defined(__ATOMIC_ACQ_REL) && !defined(BROKEN_CLANG_ATOMICS)
    if (__atomic_is_lock_free(sizeof(*val), val)) {
        __atomic_load(val, ret, __ATOMIC_ACQUIRE);
        return 1;
    }
# elif defined(__sun) && (defined(__SunOS_5_10) || defined(__SunOS_5_11))
    /* This will work for all future Solaris versions. */
    if (ret != NULL) {
        *ret = (int)atomic_or_uint_nv((unsigned int *)val, 0);
        return 1;
    }
# endif
    if (lock == NULL || !CRYPTO_THREAD_read_lock(lock))
        return 0;
    *ret = *val;
    if (!CRYPTO_THREAD_unlock(lock))
        return 0;

    return 1;
}

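/*
 * Fork support: on the pthreads build there is nothing that needs to be
 * registered at this point, so openssl_init_fork_handlers() simply reports
 * success.
 */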
# ifndef FIPS_MODULE
int openssl_init_fork_handlers(void)
{
    return 1;
}
# endif /* FIPS_MODULE */

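/*
 * openssl_get_fork_id() returns a value that changes across fork(), allowing
 * callers (for instance fork-detection logic in the RNG) to notice that they
 * are now running in a child process.
 */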
int openssl_get_fork_id(void)
{
    return getpid();
}
#endif