@@ -58,6 +58,8 @@ using namespace v8;
 
 namespace dd {
 
+using ContextPtr = std::shared_ptr<Global<Value>>;
+
 // Maximum number of rounds in the GetV8ToEpochOffset
 static constexpr int MAX_EPOCH_OFFSET_ATTEMPTS = 20;
 
@@ -492,7 +494,6 @@ WallProfiler::WallProfiler(std::chrono::microseconds samplingPeriod,
     contexts_.reserve(duration * 2 / samplingPeriod);
   }
 
-  curContext_.store(&context1_, std::memory_order_relaxed);
   collectionMode_.store(CollectionMode::kNoCollect, std::memory_order_relaxed);
 
   auto isolate = v8::Isolate::GetCurrent();
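Note: the deleted curContext_.store(&context1_, ...) seeding is no longer needed because curContext_ is presumably now a single ContextPtr rather than an atomic pointer flipped between context1_ and context2_ (the header change is not part of this excerpt), and a default-constructed shared_ptr is already empty, which GetContext() below reports as undefined. A minimal standalone illustration, with an int payload standing in for Global<Value>:

#include <cassert>
#include <memory>

int main() {
  std::shared_ptr<int> cur;         // default-constructed: empty, no explicit init needed
  assert(!cur);                     // "no context", cf. `if (!context)` in GetContext
  cur = std::make_shared<int>(42);  // analogous to SetContext installing a value
  assert(cur && *cur == 42);
  return 0;
}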
@@ -952,28 +953,26 @@ v8::CpuProfiler* WallProfiler::CreateV8CpuProfiler() {
 }
 
 v8::Local<v8::Value> WallProfiler::GetContext(Isolate* isolate) {
-  auto context = *curContext_.load(std::memory_order_relaxed);
+  auto context = GetContextPtr();
   if (!context) return v8::Undefined(isolate);
   return context->Get(isolate);
 }
 
 void WallProfiler::SetContext(Isolate* isolate, Local<Value> value) {
-  // Need to be careful here, because we might be interrupted by a
-  // signal handler that will make use of curContext_.
-  // Update of shared_ptr is not atomic, so instead we use a pointer
-  // (curContext_) that points on two shared_ptr (context1_ and context2_),
-  // update the shared_ptr that is not currently in use and then atomically
-  // update curContext_.
-  auto newCurContext = curContext_.load(std::memory_order_relaxed) == &context1_
-                           ? &context2_
-                           : &context1_;
-  if (!value->IsNullOrUndefined()) {
-    *newCurContext = std::make_shared<Global<Value>>(isolate, value);
-  } else {
-    newCurContext->reset();
-  }
   std::atomic_signal_fence(std::memory_order_release);
-  curContext_.store(newCurContext, std::memory_order_relaxed);
+  std::atomic_store_explicit(
+      &curContext_,
+      value->IsNullOrUndefined()
+          ? std::shared_ptr<Global<Value>>()
+          : std::make_shared<Global<Value>>(isolate, value),
+      std::memory_order_relaxed);
+}
+
+ContextPtr WallProfiler::GetContextPtr() {
+  auto contextPtr =
+      atomic_load_explicit(&curContext_, std::memory_order_relaxed);
+  std::atomic_signal_fence(std::memory_order_acquire);
+  return contextPtr;
 }
 
 NAN_GETTER(WallProfiler::GetContext) {
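The rewritten SetContext/GetContextPtr pair leans on the free-function atomic overloads for std::shared_ptr (std::atomic_store_explicit / std::atomic_load_explicit), so a single shared_ptr can be swapped and read without the two-buffer scheme the deleted comment described; the signal fences constrain compiler reordering between the interrupted thread and a signal handler running on it. These overloads live in <memory> and are deprecated in C++20 in favor of std::atomic<std::shared_ptr<T>>. A minimal standalone sketch of the same pattern, with an int payload and illustrative names:

#include <atomic>
#include <memory>

// Illustrative holder mirroring the SetContext/GetContextPtr pair above.
struct ContextHolder {
  std::shared_ptr<int> cur_;  // stands in for ContextPtr curContext_

  void Set(int value) {
    std::atomic_signal_fence(std::memory_order_release);
    // Free-function overload: replaces the shared_ptr atomically with
    // respect to concurrent atomic_load_explicit calls.
    std::atomic_store_explicit(
        &cur_, std::make_shared<int>(value), std::memory_order_relaxed);
  }

  std::shared_ptr<int> Get() {
    auto p = std::atomic_load_explicit(&cur_, std::memory_order_relaxed);
    std::atomic_signal_fence(std::memory_order_acquire);
    return p;  // caller keeps the object alive even if Set() replaces it
  }
};

int main() {
  ContextHolder h;
  h.Set(7);
  auto p = h.Get();
  return (p && *p == 7) ? 0 : 1;
}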
@@ -1007,10 +1006,8 @@ void WallProfiler::PushContext(int64_t time_from,
   // Be careful this is called in a signal handler context therefore all
   // operations must be async signal safe (in particular no allocations).
   // Our ring buffer avoids allocations.
-  auto context = curContext_.load(std::memory_order_relaxed);
-  std::atomic_signal_fence(std::memory_order_acquire);
   if (contexts_.size() < contexts_.capacity()) {
-    contexts_.push_back({*context, time_from, time_to, cpu_time});
+    contexts_.push_back({GetContextPtr(), time_from, time_to, cpu_time});
     std::atomic_fetch_add_explicit(
         reinterpret_cast<std::atomic<uint32_t>*>(&fields_[kSampleCount]),
         1U,
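PushContext now stores the shared_ptr returned by GetContextPtr() directly in the preallocated buffer; since capacity was reserved in the constructor (see contexts_.reserve above) and push_back only runs while size() < capacity(), no allocation happens in the signal handler. A small standalone sketch of that discipline, with illustrative names:

#include <cstdint>
#include <memory>
#include <vector>

// Illustrative stand-in for the ring-buffer entry holding a sampled context.
struct SampleContext {
  std::shared_ptr<int> context;  // stands in for ContextPtr
  int64_t time_from;
  int64_t time_to;
  int64_t cpu_time;
};

// Append only while spare capacity exists, so push_back cannot reallocate here.
void PushSample(std::vector<SampleContext>& contexts,
                std::shared_ptr<int> ctx,
                int64_t from,
                int64_t to,
                int64_t cpu) {
  if (contexts.size() < contexts.capacity()) {
    contexts.push_back({std::move(ctx), from, to, cpu});
  }
  // Otherwise nothing is appended, so the handler never triggers a reallocation.
}

int main() {
  std::vector<SampleContext> contexts;
  contexts.reserve(16);  // mirrors contexts_.reserve(...) in the constructor
  PushSample(contexts, std::make_shared<int>(1), 0, 10, 5);
  return contexts.size() == 1 ? 0 : 1;
}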