PMDK C++ bindings 1.13.0
This is the C++ bindings documentation for PMDK's libpmemobj.
Loading...
Searching...
No Matches
ebr.hpp
Go to the documentation of this file.
1/*-
2 * Copyright (c) 2015-2018 Mindaugas Rasiukevicius <rmind at noxt eu>
3 * All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
7 * are met:
8 * 1. Redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer.
10 * 2. Redistributions in binary form must reproduce the above copyright
11 * notice, this list of conditions and the following disclaimer in the
12 * documentation and/or other materials provided with the distribution.
13 *
14 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
15 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
16 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
17 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
18 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
19 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
20 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
21 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
22 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
23 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
24 * SUCH DAMAGE.
25 */
26
27// SPDX-License-Identifier: BSD-3-Clause
28/* Copyright 2021, Intel Corporation */
29
35#ifndef LIBPMEMOBJ_EBR_HPP
36#define LIBPMEMOBJ_EBR_HPP
37
38#include <atomic>
39#include <cassert>
40#include <functional>
41#include <mutex>
42#include <thread>
43#include <unordered_map>
44
45#include <libpmemobj++/detail/common.hpp>
46
47namespace pmem
48{
49
50namespace detail
51{
52
/*
 * Epoch-based reclamation (EBR) engine.  Each participating thread
 * registers a worker (one per thread, enforced in register_worker())
 * and wraps access to shared data in worker::critical().  sync()
 * advances the global epoch (modulo EPOCHS_NUMBER) only when every
 * active worker has already observed the current epoch; objects
 * retired in gc_epoch() are then safe to reclaim.
 * NOTE(review): this is a Doxygen source dump -- the numeric prefixes
 * are the original file's line numbers, and gaps in them mark doc
 * comments stripped by the extractor.
 */
71class ebr {
72 using atomic = std::atomic<size_t>; /* per-worker epoch slot */
73 using reference = std::reference_wrapper<atomic>; /* non-owning handle to a slot */
74
75public:
76 class worker;
77
78 ebr();
79
80 worker register_worker();
81 bool sync();
82 void full_sync();
83 size_t staging_epoch();
84 size_t gc_epoch();
85
86 class worker { /* RAII per-thread handle; erases its slot on destruction */
87 public:
88 worker(const worker &w) = delete;
89 worker(worker &&w) = default;
90 ~worker();
91
92 worker &operator=(worker &w) = delete;
93 worker &operator=(worker &&w) = default;
94
95 template <typename F>
96 void critical(F &&f);
97
98 private:
99 worker(ebr *e_, reference ref); /* only ebr::register_worker() constructs */
100
101 reference local_epoch; /* this thread's slot inside ebr::workers */
102 ebr *e;
103
104 friend ebr;
105 };
106
107private:
108 static const size_t ACTIVE_FLAG = static_cast<size_t>(1)
109 << (sizeof(size_t) * 8 - 1); /* MSB marks "inside critical section" */
110 static const size_t EPOCHS_NUMBER = 3;
111
112 atomic global_epoch; /* current epoch, always in [0, EPOCHS_NUMBER) */
113
114 std::unordered_map<std::thread::id, atomic> workers; /* guarded by mtx */
115 std::mutex mtx;
116};
117
/*
 * Default and only ebr constructor; the global epoch starts at 0.
 * Under Helgrind, race checking on global_epoch is disabled because
 * the algorithm's atomics are annotated manually below.
 */
121ebr::ebr() : global_epoch(0)
122{
123#if LIBPMEMOBJ_CPP_VG_HELGRIND_ENABLED
124 VALGRIND_HG_DISABLE_CHECKING(&global_epoch, sizeof(global_epoch));
125#endif
126}
127
/*
 * Registers and returns a new worker for the calling thread.
 * Throws std::runtime_error if this thread already has a worker
 * (at most one worker per thread id is allowed).
 * NOTE(review): line 140 (the qualified function name) was dropped by
 * the documentation extractor; restored from the member index entry
 * "worker register_worker() ... Definition: ebr.hpp:140".
 */
139ebr::worker
140ebr::register_worker()
141{
142 std::lock_guard<std::mutex> lock(mtx);
143 auto res = workers.emplace(std::this_thread::get_id(), 0); /* slot starts inactive (0) */
144 if (!res.second) {
145 throw std::runtime_error(
146 "There can be only one worker per thread");
147 }
148
149 return worker{this, reference{res.first->second}};
150}
151
164bool
166{
167 auto current_epoch = global_epoch.load();
168
169 std::lock_guard<std::mutex> lock(mtx);
170 for (auto &w : workers) {
171 LIBPMEMOBJ_CPP_ANNOTATE_HAPPENS_BEFORE(
172 std::memory_order_seq_cst, &w.second);
173 auto local_e = w.second.load();
174 bool active = local_e & ACTIVE_FLAG;
175 if (active && (local_e != (current_epoch | ACTIVE_FLAG))) {
176 return false;
177 }
178 }
179
180 LIBPMEMOBJ_CPP_ANNOTATE_HAPPENS_BEFORE(std::memory_order_seq_cst,
181 &global_epoch);
182 global_epoch.store((current_epoch + 1) % EPOCHS_NUMBER);
183
184 return true;
185}
186
194void
196{
197 size_t syncs_cnt = 0;
198 while (true) {
199 if (sync() && ++syncs_cnt == EPOCHS_NUMBER) {
200 break;
201 }
202 }
203}
204
213size_t
215{
216 auto res = global_epoch.load();
217 LIBPMEMOBJ_CPP_ANNOTATE_HAPPENS_AFTER(std::memory_order_seq_cst,
218 &global_epoch);
219 return res;
220}
221
230size_t
232{
233 auto res = (global_epoch.load() + 1) % EPOCHS_NUMBER;
234 LIBPMEMOBJ_CPP_ANNOTATE_HAPPENS_AFTER(std::memory_order_seq_cst,
235 &global_epoch);
236 return res;
237}
238
/*
 * Private worker constructor; binds this worker to its epoch slot
 * (ref) inside e_->workers.  Only ebr::register_worker() calls it.
 * Under Helgrind the slot is excluded from race checking because the
 * algorithm annotates its accesses manually.
 */
239ebr::worker::worker(ebr *e_, reference ref) : local_epoch(ref), e(e_)
240{
241#if LIBPMEMOBJ_CPP_VG_HELGRIND_ENABLED
242 VALGRIND_HG_DISABLE_CHECKING(&ref.get(), sizeof(ref.get()));
243#endif
244}
245
/*
 * Worker destructor: unregisters the calling thread by erasing its
 * epoch slot from the owning ebr's workers map (under ebr::mtx).
 * NOTE(review): must run on the same thread that registered the
 * worker -- the erase key is std::this_thread::get_id().
 */
250ebr::worker::~worker()
251{
252 std::lock_guard<std::mutex> lock(e->mtx);
253 e->workers.erase(std::this_thread::get_id());
254}
255
/*
 * Executes f inside a read-side critical section: publishes the
 * current global epoch with ACTIVE_FLAG set into this worker's slot
 * (which blocks sync() from advancing past it), runs f, then stores 0
 * to mark the worker inactive again.  The statement order (publish,
 * run, clear) is essential to the algorithm's correctness.
 */
264template <typename F>
265void
266ebr::worker::critical(F &&f)
267{
268 auto new_epoch = e->global_epoch.load() | ACTIVE_FLAG;
269 LIBPMEMOBJ_CPP_ANNOTATE_HAPPENS_AFTER(std::memory_order_seq_cst,
270 &(e->global_epoch));
271
272 local_epoch.get().store(new_epoch);
273 LIBPMEMOBJ_CPP_ANNOTATE_HAPPENS_AFTER(std::memory_order_seq_cst,
274 &local_epoch);
275
276 f();
277
278 local_epoch.get().store(0); /* 0 == inactive: no epoch, no ACTIVE_FLAG */
279}
280
281} /* namespace detail */
282
283} /* namespace pmem */
284
285#endif /* LIBPMEMOBJ_EBR_HPP */
Epoch-based reclamation (EBR).
Definition: ebr.hpp:71
bool sync()
Attempts to synchronise and announce a new epoch.
Definition: ebr.hpp:165
worker register_worker()
Registers and returns a new worker, which can perform critical operations (accessing some shared data...
Definition: ebr.hpp:140
size_t staging_epoch()
Returns the epoch where objects can be staged for reclamation.
Definition: ebr.hpp:214
ebr()
Default and only ebr constructor.
Definition: ebr.hpp:121
void full_sync()
Perform full synchronisation ensuring that all objects which are no longer globally visible (and pote...
Definition: ebr.hpp:195
size_t gc_epoch()
Returns the epoch available for reclamation, i.e.
Definition: ebr.hpp:231
Commonly used functionality.
Persistent memory namespace.
Definition: allocation_flag.hpp:15