diff --git a/doc/api/report.md b/doc/api/report.md index 984f6c6f403b70..b37183d1d384d4 100644 --- a/doc/api/report.md +++ b/doc/api/report.md @@ -297,6 +297,7 @@ is provided below for reference. "address": "0x000055fc7b2cb180" } ], + "workers": [], "environmentVariables": { "REMOTEHOST": "REMOVED", "MANPATH": "/opt/rh/devtoolset-3/root/usr/share/man:", @@ -578,4 +579,24 @@ NODE_OPTIONS="--experimental-report --report-uncaught-exception \ Specific API documentation can be found under [`process API documentation`][] section. +## Interaction with Workers + + +[`Worker`][] threads can create reports in the same way that the main thread +does. + +Reports will include information on any Workers that are children of the current +thread as part of the `workers` section, with each Worker generating a report +in the standard report format. + +The thread which is generating the report will wait for the reports from Worker +threads to finish. However, the latency for this will usually be low, as both +running JavaScript and the event loop are interrupted to generate the report. + [`process API documentation`]: process.html +[`Worker`]: worker_threads.html diff --git a/node.gyp b/node.gyp index b49f6bfb42eb29..ee99fbe7c522e9 100644 --- a/node.gyp +++ b/node.gyp @@ -1117,6 +1117,7 @@ 'test/cctest/node_test_fixture.h', 'test/cctest/test_aliased_buffer.cc', 'test/cctest/test_base64.cc', + 'test/cctest/test_base_object_ptr.cc', 'test/cctest/test_node_postmortem_metadata.cc', 'test/cctest/test_environment.cc', 'test/cctest/test_linked_binding.cc', diff --git a/src/base_object-inl.h b/src/base_object-inl.h index a5a92128cf6967..efd8f17e9db5bb 100644 --- a/src/base_object-inl.h +++ b/src/base_object-inl.h @@ -40,16 +40,25 @@ namespace node { BaseObject::BaseObject(Environment* env, v8::Local object) - : persistent_handle_(env->isolate(), object), - env_(env) { + : persistent_handle_(env->isolate(), object), env_(env) { CHECK_EQ(false, object.IsEmpty()); CHECK_GT(object->InternalFieldCount(), 0); object->SetAlignedPointerInInternalField(0, static_cast(this)); - env_->AddCleanupHook(DeleteMe, static_cast(this)); + env->AddCleanupHook(DeleteMe, static_cast(this)); + env->modify_base_object_count(1); } BaseObject::~BaseObject() { - RemoveCleanupHook(); + env()->modify_base_object_count(-1); + env()->RemoveCleanupHook(DeleteMe, static_cast(this)); + + if (UNLIKELY(has_pointer_data())) { + PointerData* metadata = pointer_data(); + CHECK_EQ(metadata->strong_ptr_count, 0); + metadata->self = nullptr; + if (metadata->weak_ptr_count == 0) + delete metadata; + } if (persistent_handle_.IsEmpty()) { // This most likely happened because the weak callback below cleared it. 
@@ -57,13 +66,14 @@ BaseObject::~BaseObject() { } { - v8::HandleScope handle_scope(env_->isolate()); + v8::HandleScope handle_scope(env()->isolate()); object()->SetAlignedPointerInInternalField(0, nullptr); } } -void BaseObject::RemoveCleanupHook() { - env_->RemoveCleanupHook(DeleteMe, static_cast(this)); +void BaseObject::Detach() { + CHECK_GT(pointer_data()->strong_ptr_count, 0); + pointer_data()->is_detached = true; } v8::Global& BaseObject::persistent() { @@ -72,14 +82,14 @@ v8::Global& BaseObject::persistent() { v8::Local BaseObject::object() const { - return PersistentToLocal::Default(env_->isolate(), persistent_handle_); + return PersistentToLocal::Default(env()->isolate(), persistent_handle_); } v8::Local BaseObject::object(v8::Isolate* isolate) const { v8::Local handle = object(); DCHECK_EQ(handle->CreationContext()->GetIsolate(), isolate); - DCHECK_EQ(env_->isolate(), isolate); + DCHECK_EQ(env()->isolate(), isolate); return handle; } @@ -88,7 +98,6 @@ Environment* BaseObject::env() const { return env_; } - BaseObject* BaseObject::FromJSObject(v8::Local obj) { CHECK_GT(obj->InternalFieldCount(), 0); return static_cast(obj->GetAlignedPointerFromInternalField(0)); @@ -102,20 +111,34 @@ T* BaseObject::FromJSObject(v8::Local object) { void BaseObject::MakeWeak() { + if (has_pointer_data()) { + pointer_data()->wants_weak_jsobj = true; + if (pointer_data()->strong_ptr_count > 0) return; + } + persistent_handle_.SetWeak( this, [](const v8::WeakCallbackInfo& data) { - std::unique_ptr obj(data.GetParameter()); + BaseObject* obj = data.GetParameter(); // Clear the persistent handle so that ~BaseObject() doesn't attempt // to mess with internal fields, since the JS object may have // transitioned into an invalid state. // Refs: https://github.com/nodejs/node/issues/18897 obj->persistent_handle_.Reset(); + CHECK_IMPLIES(obj->has_pointer_data(), + obj->pointer_data()->strong_ptr_count == 0); + obj->OnGCCollect(); }, v8::WeakCallbackType::kParameter); } +void BaseObject::OnGCCollect() { + delete this; +} void BaseObject::ClearWeak() { + if (has_pointer_data()) + pointer_data()->wants_weak_jsobj = false; + persistent_handle_.ClearWeak(); } @@ -149,6 +172,176 @@ void BaseObject::InternalFieldSet(v8::Local property, info.This()->SetInternalField(Field, value); } +bool BaseObject::has_pointer_data() const { + return pointer_data_ != nullptr; +} + +BaseObject::PointerData* BaseObject::pointer_data() { + if (!has_pointer_data()) { + PointerData* metadata = new PointerData(); + metadata->wants_weak_jsobj = persistent_handle_.IsWeak(); + metadata->self = this; + pointer_data_ = metadata; + } + CHECK(has_pointer_data()); + return pointer_data_; +} + +void BaseObject::decrease_refcount() { + CHECK(has_pointer_data()); + PointerData* metadata = pointer_data(); + CHECK_GT(metadata->strong_ptr_count, 0); + unsigned int new_refcount = --metadata->strong_ptr_count; + if (new_refcount == 0) { + if (metadata->is_detached) { + delete this; + } else if (metadata->wants_weak_jsobj && !persistent_handle_.IsEmpty()) { + MakeWeak(); + } + } +} + +void BaseObject::increase_refcount() { + unsigned int prev_refcount = pointer_data()->strong_ptr_count++; + if (prev_refcount == 0 && !persistent_handle_.IsEmpty()) + persistent_handle_.ClearWeak(); +} + +template +BaseObject::PointerData* +BaseObjectPtrImpl::pointer_data() const { + if (kIsWeak) { + return data_.pointer_data; + } else { + if (get_base_object() == nullptr) return nullptr; + return get_base_object()->pointer_data(); + } +} + +template +BaseObject* 
BaseObjectPtrImpl::get_base_object() const { + if (kIsWeak) { + if (pointer_data() == nullptr) return nullptr; + return pointer_data()->self; + } else { + return data_.target; + } +} + +template +BaseObjectPtrImpl::~BaseObjectPtrImpl() { + if (kIsWeak) { + if (pointer_data() != nullptr && + --pointer_data()->weak_ptr_count == 0 && + pointer_data()->self == nullptr) { + delete pointer_data(); + } + } else if (get() != nullptr) { + get()->decrease_refcount(); + } +} + +template +BaseObjectPtrImpl::BaseObjectPtrImpl() { + data_.target = nullptr; +} + +template +BaseObjectPtrImpl::BaseObjectPtrImpl(T* target) + : BaseObjectPtrImpl() { + if (target == nullptr) return; + if (kIsWeak) { + data_.pointer_data = target->pointer_data(); + CHECK_NOT_NULL(pointer_data()); + pointer_data()->weak_ptr_count++; + } else { + data_.target = target; + CHECK_NOT_NULL(pointer_data()); + get()->increase_refcount(); + } +} + +template +template +BaseObjectPtrImpl::BaseObjectPtrImpl( + const BaseObjectPtrImpl& other) + : BaseObjectPtrImpl(other.get()) {} + +template +BaseObjectPtrImpl::BaseObjectPtrImpl(const BaseObjectPtrImpl& other) + : BaseObjectPtrImpl(other.get()) {} + +template +template +BaseObjectPtrImpl& BaseObjectPtrImpl::operator=( + const BaseObjectPtrImpl& other) { + if (other.get() == get()) return *this; + this->~BaseObjectPtrImpl(); + return *new (this) BaseObjectPtrImpl(other); +} + +template +BaseObjectPtrImpl& BaseObjectPtrImpl::operator=( + const BaseObjectPtrImpl& other) { + if (other.get() == get()) return *this; + this->~BaseObjectPtrImpl(); + return *new (this) BaseObjectPtrImpl(other); +} + +template +BaseObjectPtrImpl::BaseObjectPtrImpl(BaseObjectPtrImpl&& other) + : data_(other.data_) { + if (kIsWeak) + other.data_.target = nullptr; + else + other.data_.pointer_data = nullptr; +} + +template +BaseObjectPtrImpl& BaseObjectPtrImpl::operator=( + BaseObjectPtrImpl&& other) { + if (&other == this) return *this; + this->~BaseObjectPtrImpl(); + return *new (this) BaseObjectPtrImpl(std::move(other)); +} + +template +void BaseObjectPtrImpl::reset(T* ptr) { + *this = BaseObjectPtrImpl(ptr); +} + +template +T* BaseObjectPtrImpl::get() const { + return static_cast(get_base_object()); +} + +template +T& BaseObjectPtrImpl::operator*() const { + return *get(); +} + +template +T* BaseObjectPtrImpl::operator->() const { + return get(); +} + +template +BaseObjectPtrImpl::operator bool() const { + return get() != nullptr; +} + +template +BaseObjectPtr MakeBaseObject(Args&&... args) { + return BaseObjectPtr(new T(std::forward(args)...)); +} + +template +BaseObjectPtr MakeDetachedBaseObject(Args&&... args) { + BaseObjectPtr target = MakeBaseObject(std::forward(args)...); + target->Detach(); + return target; +} + } // namespace node #endif // defined(NODE_WANT_INTERNALS) && NODE_WANT_INTERNALS diff --git a/src/base_object.h b/src/base_object.h index 0b202cd3a51324..daf40b7c1eb7b4 100644 --- a/src/base_object.h +++ b/src/base_object.h @@ -31,6 +31,8 @@ namespace node { class Environment; +template +class BaseObjectPtrImpl; class BaseObject : public MemoryRetainer { public: @@ -62,10 +64,12 @@ class BaseObject : public MemoryRetainer { static inline T* FromJSObject(v8::Local object); // Make the `v8::Global` a weak reference and, `delete` this object once - // the JS object has been garbage collected. + // the JS object has been garbage collected and there are no (strong) + // BaseObjectPtr references to it. inline void MakeWeak(); - // Undo `MakeWeak()`, i.e. turn this into a strong reference. 
+ // Undo `MakeWeak()`, i.e. turn this into a strong reference that is a GC + // root and will not be touched by the garbage collector. inline void ClearWeak(); // Utility to create a FunctionTemplate with one internal field (used for @@ -86,11 +90,14 @@ class BaseObject : public MemoryRetainer { // This is a bit of a hack. See the override in async_wrap.cc for details. virtual bool IsDoneInitializing() const; + // Can be used to avoid this object keeping itself alive as a GC root + // indefinitely, for example when this object is owned and deleted by another + // BaseObject once that is torn down. This can only be called when there is + // a BaseObjectPtr to this object. + inline void Detach(); + protected: - // Can be used to avoid the automatic object deletion when the Environment - // exits, for example when this object is owned and deleted by another - // BaseObject at that point. - inline void RemoveCleanupHook(); + virtual inline void OnGCCollect(); private: v8::Local WrappedObject() const override; @@ -103,12 +110,44 @@ class BaseObject : public MemoryRetainer { // refer to `doc/guides/node-postmortem-support.md` friend int GenDebugSymbols(); friend class CleanupHookCallback; + template + friend class BaseObjectPtrImpl; v8::Global persistent_handle_; + + // Metadata that is associated with this BaseObject if there are BaseObjectPtr + // or BaseObjectWeakPtr references to it. + // This object is deleted when the BaseObject itself is destroyed, and there + // are no weak references to it. + struct PointerData { + // Number of BaseObjectPtr instances that refer to this object. If this + // is non-zero, the BaseObject is always a GC root and will not be destroyed + // during cleanup until the count drops to zero again. + unsigned int strong_ptr_count = 0; + // Number of BaseObjectWeakPtr instances that refer to this object. + unsigned int weak_ptr_count = 0; + // Indicates whether MakeWeak() has been called. + bool wants_weak_jsobj = false; + // Indicates whether Detach() has been called. If that is the case, this + // object will be destroyed once the strong pointer count drops to zero. + bool is_detached = false; + // Reference to the original BaseObject. This is used by weak pointers. + BaseObject* self = nullptr; + }; + + inline bool has_pointer_data() const; + // This creates a PointerData struct if none was associated with this + // BaseObject before. + inline PointerData* pointer_data(); + + // Functions that adjust the strong pointer count. + inline void decrease_refcount(); + inline void increase_refcount(); + Environment* env_; + PointerData* pointer_data_ = nullptr; }; - // Global alias for FromJSObject() to avoid churn. template inline T* Unwrap(v8::Local obj) { @@ -124,6 +163,63 @@ inline T* Unwrap(v8::Local obj) { return __VA_ARGS__; \ } while (0) +// Implementation of a generic strong or weak pointer to a BaseObject. +// If strong, this will keep the target BaseObject alive regardless of other +// circumstances such as GC or Environment cleanup. +// If weak, destruction behaviour is not affected, but the pointer will be +// reset to nullptr once the BaseObject is destroyed. +// The API matches std::shared_ptr closely. +template +class BaseObjectPtrImpl final { + public: + inline BaseObjectPtrImpl(); + inline ~BaseObjectPtrImpl(); + inline explicit BaseObjectPtrImpl(T* target); + + // Copy and move constructors. Note that the templated version is not a copy + // or move constructor in the C++ sense of the word, so an identical + // untemplated version is provided.
+ template + inline BaseObjectPtrImpl(const BaseObjectPtrImpl& other); + inline BaseObjectPtrImpl(const BaseObjectPtrImpl& other); + template + inline BaseObjectPtrImpl& operator=(const BaseObjectPtrImpl& other); + inline BaseObjectPtrImpl& operator=(const BaseObjectPtrImpl& other); + inline BaseObjectPtrImpl(BaseObjectPtrImpl&& other); + inline BaseObjectPtrImpl& operator=(BaseObjectPtrImpl&& other); + + inline void reset(T* ptr = nullptr); + inline T* get() const; + inline T& operator*() const; + inline T* operator->() const; + inline operator bool() const; + + private: + union { + BaseObject* target; // Used for strong pointers. + BaseObject::PointerData* pointer_data; // Used for weak pointers. + } data_; + + inline BaseObject* get_base_object() const; + inline BaseObject::PointerData* pointer_data() const; +}; + +template +using BaseObjectPtr = BaseObjectPtrImpl; +template +using BaseObjectWeakPtr = BaseObjectPtrImpl; + +// Create a BaseObject instance and return a pointer to it. +// This variant leaves the object as a GC root by default. +template +inline BaseObjectPtr MakeBaseObject(Args&&... args); +// Create a BaseObject instance and return a pointer to it. +// This variant detaches the object by default, meaning that the caller fully +// owns it, and once the last BaseObjectPtr to it is destroyed, the object +// itself is also destroyed. +template +inline BaseObjectPtr MakeDetachedBaseObject(Args&&... args); + } // namespace node #endif // defined(NODE_WANT_INTERNALS) && NODE_WANT_INTERNALS diff --git a/src/cares_wrap.cc b/src/cares_wrap.cc index 98ae98270b450d..8eeaeddf8300d8 100644 --- a/src/cares_wrap.cc +++ b/src/cares_wrap.cc @@ -575,10 +575,6 @@ class QueryWrap : public AsyncWrap { : AsyncWrap(channel->env(), req_wrap_obj, AsyncWrap::PROVIDER_QUERYWRAP), channel_(channel), trace_name_(name) { - // Make sure the channel object stays alive during the query lifetime. - req_wrap_obj->Set(env()->context(), - env()->channel_string(), - channel->object()).Check(); } ~QueryWrap() override { @@ -631,8 +627,6 @@ class QueryWrap : public AsyncWrap { } else { Parse(response_data_->host.get()); } - - delete this; } void* MakeCallbackPointer() { @@ -690,9 +684,13 @@ class QueryWrap : public AsyncWrap { } void QueueResponseCallback(int status) { - env()->SetImmediate([this](Environment*) { + BaseObjectPtr strong_ref{this}; + env()->SetImmediate([this, strong_ref](Environment*) { AfterResponse(); - }, object()); + + // Delete once strong_ref goes out of scope. 
+ Detach(); + }); channel_->set_query_last_ok(status != ARES_ECONNREFUSED); channel_->ModifyActivityQueryCount(-1); @@ -735,7 +733,7 @@ class QueryWrap : public AsyncWrap { UNREACHABLE(); } - ChannelWrap* channel_; + BaseObjectPtr channel_; private: std::unique_ptr response_data_; diff --git a/src/env-inl.h b/src/env-inl.h index 4276935e73633d..08df9c061ac91f 100644 --- a/src/env-inl.h +++ b/src/env-inl.h @@ -242,14 +242,6 @@ inline bool ImmediateInfo::has_outstanding() const { return fields_[kHasOutstanding] == 1; } -inline void ImmediateInfo::count_inc(uint32_t increment) { - fields_[kCount] += increment; -} - -inline void ImmediateInfo::count_dec(uint32_t decrement) { - fields_[kCount] -= decrement; -} - inline void ImmediateInfo::ref_count_inc(uint32_t increment) { fields_[kRefCount] += increment; } @@ -732,28 +724,56 @@ inline void IsolateData::set_options( options_ = std::move(options); } -template -void Environment::CreateImmediate(Fn&& cb, - v8::Local keep_alive, - bool ref) { - auto callback = std::make_unique>( - std::move(cb), - v8::Global(isolate(), keep_alive), - ref); - NativeImmediateCallback* prev_tail = native_immediate_callbacks_tail_; +std::unique_ptr +Environment::NativeImmediateQueue::Shift() { + std::unique_ptr ret = std::move(head_); + if (ret) { + head_ = ret->get_next(); + if (!head_) + tail_ = nullptr; // The queue is now empty. + } + size_--; + return ret; +} - native_immediate_callbacks_tail_ = callback.get(); +void Environment::NativeImmediateQueue::Push( + std::unique_ptr cb) { + NativeImmediateCallback* prev_tail = tail_; + + size_++; + tail_ = cb.get(); if (prev_tail != nullptr) - prev_tail->set_next(std::move(callback)); + prev_tail->set_next(std::move(cb)); else - native_immediate_callbacks_head_ = std::move(callback); + head_ = std::move(cb); +} + +void Environment::NativeImmediateQueue::ConcatMove( + NativeImmediateQueue&& other) { + size_ += other.size_; + if (tail_ != nullptr) + tail_->set_next(std::move(other.head_)); + else + head_ = std::move(other.head_); + tail_ = other.tail_; + other.tail_ = nullptr; + other.size_ = 0; +} - immediate_info()->count_inc(1); +size_t Environment::NativeImmediateQueue::size() const { + return size_.load(); +} + +template +void Environment::CreateImmediate(Fn&& cb, bool ref) { + auto callback = std::make_unique>( + std::move(cb), ref); + native_immediates_.Push(std::move(callback)); } template -void Environment::SetImmediate(Fn&& cb, v8::Local keep_alive) { - CreateImmediate(std::move(cb), keep_alive, true); +void Environment::SetImmediate(Fn&& cb) { + CreateImmediate(std::move(cb), true); if (immediate_info()->ref_count() == 0) ToggleImmediateRef(true); @@ -761,8 +781,31 @@ void Environment::SetImmediate(Fn&& cb, v8::Local keep_alive) { } template -void Environment::SetUnrefImmediate(Fn&& cb, v8::Local keep_alive) { - CreateImmediate(std::move(cb), keep_alive, false); +void Environment::SetUnrefImmediate(Fn&& cb) { + CreateImmediate(std::move(cb), false); +} + +template +void Environment::SetImmediateThreadsafe(Fn&& cb) { + auto callback = std::make_unique>( + std::move(cb), false); + { + Mutex::ScopedLock lock(native_immediates_threadsafe_mutex_); + native_immediates_threadsafe_.Push(std::move(callback)); + } + uv_async_send(&task_queues_async_); +} + +template +void Environment::RequestInterrupt(Fn&& cb) { + auto callback = std::make_unique>( + std::move(cb), false); + { + Mutex::ScopedLock lock(native_immediates_threadsafe_mutex_); + native_immediates_interrupts_.Push(std::move(callback)); + } + 
uv_async_send(&task_queues_async_); + RequestInterruptFromV8(); } Environment::NativeImmediateCallback::NativeImmediateCallback(bool refed) @@ -784,10 +827,9 @@ void Environment::NativeImmediateCallback::set_next( template Environment::NativeImmediateCallbackImpl::NativeImmediateCallbackImpl( - Fn&& callback, v8::Global&& keep_alive, bool refed) + Fn&& callback, bool refed) : NativeImmediateCallback(refed), - callback_(std::move(callback)), - keep_alive_(std::move(keep_alive)) {} + callback_(std::move(callback)) {} template void Environment::NativeImmediateCallbackImpl::Call(Environment* env) { @@ -851,8 +893,26 @@ inline void Environment::remove_sub_worker_context(worker::Worker* context) { sub_worker_contexts_.erase(context); } +template +inline void Environment::ForEachWorker(Fn&& iterator) { + for (worker::Worker* w : sub_worker_contexts_) iterator(w); +} + +inline void Environment::add_refs(int64_t diff) { + task_queues_async_refs_ += diff; + CHECK_GE(task_queues_async_refs_, 0); + if (task_queues_async_refs_ == 0) + uv_unref(reinterpret_cast(&task_queues_async_)); + else + uv_ref(reinterpret_cast(&task_queues_async_)); +} + inline bool Environment::is_stopping() const { - return thread_stopper_.is_stopped(); + return is_stopping_.load(); +} + +inline void Environment::set_stopping(bool value) { + is_stopping_.store(value); } inline std::list* Environment::extra_linked_bindings() { @@ -1125,7 +1185,7 @@ void Environment::RemoveCleanupHook(void (*fn)(void*), void* arg) { inline void Environment::RegisterFinalizationGroupForCleanup( v8::Local group) { cleanup_finalization_groups_.emplace_back(isolate(), group); - uv_async_send(&cleanup_finalization_groups_async_); + uv_async_send(&task_queues_async_); } size_t CleanupHookCallback::Hash::operator()( @@ -1154,12 +1214,12 @@ void Environment::ForEachBaseObject(T&& iterator) { } } -bool AsyncRequest::is_stopped() const { - return stopped_.load(); +void Environment::modify_base_object_count(int64_t delta) { + base_object_count_ += delta; } -void AsyncRequest::set_stopped(bool flag) { - stopped_.store(flag); +int64_t Environment::base_object_count() const { + return base_object_count_; } #define VP(PropertyName, StringValue) V(v8::Private, PropertyName) diff --git a/src/env.cc b/src/env.cc index dccf1c918edec1..2bdac964cb03ca 100644 --- a/src/env.cc +++ b/src/env.cc @@ -387,6 +387,8 @@ Environment::Environment(IsolateData* isolate_data, } Environment::~Environment() { + if (interrupt_data_ != nullptr) *interrupt_data_ = nullptr; + isolate()->GetHeapProfiler()->RemoveBuildEmbedderGraphCallback( BuildEmbedderGraph, this); @@ -431,6 +433,8 @@ Environment::~Environment() { addon.Close(); } } + + CHECK_EQ(base_object_count(), 0); } void Environment::InitializeLibuv(bool start_profiler_idle_notifier) { @@ -460,23 +464,16 @@ void Environment::InitializeLibuv(bool start_profiler_idle_notifier) { uv_check_init(event_loop(), &idle_check_handle_); uv_async_init( event_loop(), - &cleanup_finalization_groups_async_, + &task_queues_async_, [](uv_async_t* async) { Environment* env = ContainerOf( - &Environment::cleanup_finalization_groups_async_, async); + &Environment::task_queues_async_, async); env->CleanupFinalizationGroups(); + env->RunAndClearNativeImmediates(); }); uv_unref(reinterpret_cast(&idle_prepare_handle_)); uv_unref(reinterpret_cast(&idle_check_handle_)); - uv_unref(reinterpret_cast(&cleanup_finalization_groups_async_)); - - thread_stopper()->Install( - this, static_cast(this), [](uv_async_t* handle) { - Environment* env = 
static_cast(handle->data); - uv_stop(env->event_loop()); - }); - thread_stopper()->set_stopped(false); - uv_unref(reinterpret_cast(thread_stopper()->GetHandle())); + uv_unref(reinterpret_cast(&task_queues_async_)); // Register clean-up cb to be called to clean up the handles // when the environment is freed, note that they are not cleaned in @@ -495,8 +492,9 @@ void Environment::InitializeLibuv(bool start_profiler_idle_notifier) { void Environment::ExitEnv() { set_can_call_into_js(false); - thread_stopper()->Stop(); + set_stopping(true); isolate_->TerminateExecution(); + SetImmediateThreadsafe([](Environment* env) { uv_stop(env->event_loop()); }); } void Environment::RegisterHandleCleanups() { @@ -532,12 +530,17 @@ void Environment::RegisterHandleCleanups() { close_and_finish, nullptr); RegisterHandleCleanup( - reinterpret_cast(&cleanup_finalization_groups_async_), + reinterpret_cast(&task_queues_async_), close_and_finish, nullptr); } void Environment::CleanupHandles() { + Isolate::DisallowJavascriptExecutionScope disallow_js(isolate(), + Isolate::DisallowJavascriptExecutionScope::THROW_ON_FAILURE); + + RunAndClearNativeImmediates(true /* skip SetUnrefImmediate()s */); + for (ReqWrapBase* request : req_wrap_queue_) request->Cancel(); @@ -596,7 +599,6 @@ void Environment::RunCleanup() { started_cleanup_ = true; TraceEventScope trace_scope(TRACING_CATEGORY_NODE1(environment), "RunCleanup", this); - thread_stopper()->Uninstall(); CleanupHandles(); while (!cleanup_hooks_.empty()) { @@ -653,43 +655,89 @@ void Environment::AtExit(void (*cb)(void* arg), void* arg) { at_exit_functions_.push_front(ExitCallback{cb, arg}); } -void Environment::RunAndClearNativeImmediates() { +void Environment::RunAndClearInterrupts() { + while (native_immediates_interrupts_.size() > 0) { + NativeImmediateQueue queue; + { + Mutex::ScopedLock lock(native_immediates_threadsafe_mutex_); + queue.ConcatMove(std::move(native_immediates_interrupts_)); + } + DebugSealHandleScope seal_handle_scope(isolate()); + + while (std::unique_ptr head = queue.Shift()) + head->Call(this); + } +} + +void Environment::RunAndClearNativeImmediates(bool only_refed) { TraceEventScope trace_scope(TRACING_CATEGORY_NODE1(environment), "RunAndClearNativeImmediates", this); size_t ref_count = 0; - size_t count = 0; - std::unique_ptr head; - head.swap(native_immediate_callbacks_head_); - native_immediate_callbacks_tail_ = nullptr; + + // Handle interrupts first. These functions are not allowed to throw + // exceptions, so we do not need to handle that. + RunAndClearInterrupts(); + + // It is safe to check .size() first, because there is a causal relationship + // between pushes to the threadsafe and this function being called. + // For the common case, it's worth checking the size first before establishing + // a mutex lock. + if (native_immediates_threadsafe_.size() > 0) { + Mutex::ScopedLock lock(native_immediates_threadsafe_mutex_); + native_immediates_.ConcatMove(std::move(native_immediates_threadsafe_)); + } auto drain_list = [&]() { TryCatchScope try_catch(this); - for (; head; head = head->get_next()) { - DebugSealHandleScope seal_handle_scope(isolate()); - count++; + DebugSealHandleScope seal_handle_scope(isolate()); + while (std::unique_ptr head = + native_immediates_.Shift()) { if (head->is_refed()) ref_count++; - head->Call(this); + if (head->is_refed() || !only_refed) + head->Call(this); + + head.reset(); // Destroy now so that this is also observed by try_catch. 
+ if (UNLIKELY(try_catch.HasCaught())) { - if (!try_catch.HasTerminated()) + if (!try_catch.HasTerminated() && can_call_into_js()) errors::TriggerUncaughtException(isolate(), try_catch); - // We are done with the current callback. Move one iteration along, - // as if we had completed successfully. - head = head->get_next(); return true; } } return false; }; - while (head && drain_list()) {} + while (drain_list()) {} - DCHECK_GE(immediate_info()->count(), count); - immediate_info()->count_dec(count); immediate_info()->ref_count_dec(ref_count); + + if (immediate_info()->ref_count() == 0) + ToggleImmediateRef(false); } +void Environment::RequestInterruptFromV8() { + if (interrupt_data_ != nullptr) return; // Already scheduled. + + // The Isolate may outlive the Environment, so some logic to handle the + // situation in which the Environment is destroyed before the handler runs + // is required. + interrupt_data_ = new Environment*(this); + + isolate()->RequestInterrupt([](Isolate* isolate, void* data) { + std::unique_ptr env_ptr { static_cast(data) }; + Environment* env = *env_ptr; + if (env == nullptr) { + // The Environment has already been destroyed. That should be okay; any + // callback added before the Environment shuts down would have been + // handled during cleanup. + return; + } + env->interrupt_data_ = nullptr; + env->RunAndClearInterrupts(); + }, interrupt_data_); +} void Environment::ScheduleTimer(int64_t duration_ms) { if (started_cleanup_) return; @@ -773,15 +821,12 @@ void Environment::CheckImmediate(uv_check_t* handle) { TraceEventScope trace_scope(TRACING_CATEGORY_NODE1(environment), "CheckImmediate", env); - if (env->immediate_info()->count() == 0) - return; - HandleScope scope(env->isolate()); Context::Scope context_scope(env->context()); env->RunAndClearNativeImmediates(); - if (!env->can_call_into_js()) + if (env->immediate_info()->count() == 0 || !env->can_call_into_js()) return; do { @@ -981,7 +1026,6 @@ inline size_t Environment::SelfSize() const { // TODO(joyeecheung): refactor the MemoryTracker interface so // this can be done for common types within the Track* calls automatically // if a certain scope is entered. - size -= sizeof(thread_stopper_); size -= sizeof(async_hooks_); size -= sizeof(tick_info_); size -= sizeof(immediate_info_); @@ -1003,7 +1047,6 @@ void Environment::MemoryInfo(MemoryTracker* tracker) const { tracker->TrackField("fs_stats_field_array", fs_stats_field_array_); tracker->TrackField("fs_stats_field_bigint_array", fs_stats_field_bigint_array_); - tracker->TrackField("thread_stopper", thread_stopper_); tracker->TrackField("cleanup_hooks", cleanup_hooks_); tracker->TrackField("async_hooks", async_hooks_); tracker->TrackField("immediate_info", immediate_info_); @@ -1076,47 +1119,19 @@ void Environment::CleanupFinalizationGroups() { if (try_catch.HasCaught() && !try_catch.HasTerminated()) errors::TriggerUncaughtException(isolate(), try_catch); // Re-schedule the execution of the remainder of the queue. 
- uv_async_send(&cleanup_finalization_groups_async_); + uv_async_send(&task_queues_async_); return; } } } -void AsyncRequest::Install(Environment* env, void* data, uv_async_cb target) { - CHECK_NULL(async_); - env_ = env; - async_ = new uv_async_t; - async_->data = data; - CHECK_EQ(uv_async_init(env_->event_loop(), async_, target), 0); -} - -void AsyncRequest::Uninstall() { - if (async_ != nullptr) { - env_->CloseHandle(async_, [](uv_async_t* async) { delete async; }); - async_ = nullptr; - } -} - -void AsyncRequest::Stop() { - set_stopped(true); - if (async_ != nullptr) uv_async_send(async_); -} - -uv_async_t* AsyncRequest::GetHandle() { - return async_; -} - -void AsyncRequest::MemoryInfo(MemoryTracker* tracker) const { - if (async_ != nullptr) tracker->TrackField("async_request", *async_); -} - -AsyncRequest::~AsyncRequest() { - CHECK_NULL(async_); -} - // Not really any better place than env.cc at this moment. void BaseObject::DeleteMe(void* data) { BaseObject* self = static_cast(data); + if (self->has_pointer_data() && + self->pointer_data()->strong_ptr_count > 0) { + return self->Detach(); + } delete self; } diff --git a/src/env.h b/src/env.h index caeca8a87e6201..e5e8c107f6881d 100644 --- a/src/env.h +++ b/src/env.h @@ -585,34 +585,6 @@ struct AllocatedBuffer { friend class Environment; }; -class AsyncRequest : public MemoryRetainer { - public: - AsyncRequest() = default; - ~AsyncRequest() override; - - AsyncRequest(const AsyncRequest&) = delete; - AsyncRequest& operator=(const AsyncRequest&) = delete; - AsyncRequest(AsyncRequest&&) = delete; - AsyncRequest& operator=(AsyncRequest&&) = delete; - - void Install(Environment* env, void* data, uv_async_cb target); - void Uninstall(); - void Stop(); - inline void set_stopped(bool flag); - inline bool is_stopped() const; - uv_async_t* GetHandle(); - void MemoryInfo(MemoryTracker* tracker) const override; - - - SET_MEMORY_INFO_NAME(AsyncRequest) - SET_SELF_SIZE(AsyncRequest) - - private: - Environment* env_; - uv_async_t* async_ = nullptr; - std::atomic_bool stopped_ {true}; -}; - class KVStore { public: KVStore() = default; @@ -734,8 +706,6 @@ class ImmediateInfo : public MemoryRetainer { inline uint32_t count() const; inline uint32_t ref_count() const; inline bool has_outstanding() const; - inline void count_inc(uint32_t increment); - inline void count_dec(uint32_t decrement); inline void ref_count_inc(uint32_t increment); inline void ref_count_dec(uint32_t decrement); @@ -1060,6 +1030,14 @@ class Environment : public MemoryRetainer { inline bool can_call_into_js() const; inline void set_can_call_into_js(bool can_call_into_js); + // Increase or decrease a counter that manages whether this Environment + // keeps the event loop alive on its own or not. The counter starts out at 0, + // meaning it does not, and any positive value will make it keep the event + // loop alive. + // This is used by Workers to manage their own .ref()/.unref() implementation, + // as Workers aren't directly associated with their own libuv handles. 
+ inline void add_refs(int64_t diff); + inline bool has_run_bootstrapping_code() const; inline void set_has_run_bootstrapping_code(bool has_run_bootstrapping_code); @@ -1079,7 +1057,10 @@ class Environment : public MemoryRetainer { inline void add_sub_worker_context(worker::Worker* context); inline void remove_sub_worker_context(worker::Worker* context); void stop_sub_worker_contexts(); + template + inline void ForEachWorker(Fn&& iterator); inline bool is_stopping() const; + inline void set_stopping(bool value); inline std::list* extra_linked_bindings(); inline node_module* extra_linked_bindings_head(); inline const Mutex& extra_linked_bindings_mutex() const; @@ -1191,13 +1172,18 @@ class Environment : public MemoryRetainer { // cb will be called as cb(env) on the next event loop iteration. // keep_alive will be kept alive between now and after the callback has run. template - inline void SetImmediate(Fn&& cb, - v8::Local keep_alive = - v8::Local()); + inline void SetImmediate(Fn&& cb); template - inline void SetUnrefImmediate(Fn&& cb, - v8::Local keep_alive = - v8::Local()); + inline void SetUnrefImmediate(Fn&& cb); + template + // This behaves like SetImmediate() but can be called from any thread. + inline void SetImmediateThreadsafe(Fn&& cb); + // This behaves like V8's Isolate::RequestInterrupt(), but also accounts for + // the event loop (i.e. combines the V8 function with SetImmediate()). + // The passed callback may not throw exceptions. + // This function can be called from any thread. + template + inline void RequestInterrupt(Fn&& cb); // This needs to be available for the JS-land setImmediate(). void ToggleImmediateRef(bool ref); @@ -1222,10 +1208,14 @@ class Environment : public MemoryRetainer { inline std::shared_ptr options(); inline std::shared_ptr> inspector_host_port(); - inline AsyncRequest* thread_stopper() { return &thread_stopper_; } - inline int32_t stack_trace_limit() const { return 10; } + // The BaseObject count is a debugging helper that makes sure that there are + // no memory leaks caused by BaseObjects staying alive longer than expected + // (in particular, no circular BaseObjectPtr references). 
+ inline void modify_base_object_count(int64_t delta); + inline int64_t base_object_count() const; + #if HAVE_INSPECTOR void set_coverage_connection( std::unique_ptr connection); @@ -1268,9 +1258,7 @@ class Environment : public MemoryRetainer { private: template - inline void CreateImmediate(Fn&& cb, - v8::Local keep_alive, - bool ref); + inline void CreateImmediate(Fn&& cb, bool ref); inline void ThrowError(v8::Local (*fun)(v8::Local), const char* errmsg); @@ -1283,7 +1271,8 @@ class Environment : public MemoryRetainer { uv_idle_t immediate_idle_handle_; uv_prepare_t idle_prepare_handle_; uv_check_t idle_check_handle_; - uv_async_t cleanup_finalization_groups_async_; + uv_async_t task_queues_async_; + int64_t task_queues_async_refs_ = 0; bool profiler_idle_notifier_started_ = false; AsyncHooks async_hooks_; @@ -1341,7 +1330,7 @@ class Environment : public MemoryRetainer { bool has_run_bootstrapping_code_ = false; bool has_serialized_options_ = false; - bool can_call_into_js_ = true; + std::atomic_bool can_call_into_js_ { true }; Flags flags_; uint64_t thread_id_; std::unordered_set sub_worker_contexts_; @@ -1418,20 +1407,39 @@ class Environment : public MemoryRetainer { template class NativeImmediateCallbackImpl final : public NativeImmediateCallback { public: - NativeImmediateCallbackImpl(Fn&& callback, - v8::Global&& keep_alive, - bool refed); + NativeImmediateCallbackImpl(Fn&& callback, bool refed); void Call(Environment* env) override; private: Fn callback_; - v8::Global keep_alive_; }; - std::unique_ptr native_immediate_callbacks_head_; - NativeImmediateCallback* native_immediate_callbacks_tail_ = nullptr; + class NativeImmediateQueue { + public: + inline std::unique_ptr Shift(); + inline void Push(std::unique_ptr cb); + // ConcatMove adds elements from 'other' to the end of this list, and clears + // 'other' afterwards. + inline void ConcatMove(NativeImmediateQueue&& other); + + // size() is atomic and may be called from any thread. + inline size_t size() const; + + private: + std::atomic size_ {0}; + std::unique_ptr head_; + NativeImmediateCallback* tail_ = nullptr; + }; + + NativeImmediateQueue native_immediates_; + Mutex native_immediates_threadsafe_mutex_; + NativeImmediateQueue native_immediates_threadsafe_; + NativeImmediateQueue native_immediates_interrupts_; - void RunAndClearNativeImmediates(); + void RunAndClearNativeImmediates(bool only_refed = false); + void RunAndClearInterrupts(); + Environment** interrupt_data_ = nullptr; + void RequestInterruptFromV8(); static void CheckImmediate(uv_check_t* handle); // Use an unordered_set, so that we have efficient insertion and removal. @@ -1441,9 +1449,8 @@ class Environment : public MemoryRetainer { uint64_t cleanup_hook_counter_ = 0; bool started_cleanup_ = false; - // A custom async abstraction (a pair of async handle and a state variable) - // Used by embedders to shutdown running Node instance. 
- AsyncRequest thread_stopper_; + int64_t base_object_count_ = 0; + std::atomic_bool is_stopping_ { false }; typedef std::unordered_set> ArrayBufferAllocatorList; diff --git a/src/handle_wrap.cc b/src/handle_wrap.cc index 4e01722af606c4..f5d622fc255cdf 100644 --- a/src/handle_wrap.cc +++ b/src/handle_wrap.cc @@ -83,14 +83,8 @@ void HandleWrap::Close(Local close_callback) { } -void HandleWrap::MakeWeak() { - persistent().SetWeak( - this, - [](const v8::WeakCallbackInfo& data) { - HandleWrap* handle_wrap = data.GetParameter(); - handle_wrap->persistent().Reset(); - handle_wrap->Close(); - }, v8::WeakCallbackType::kParameter); +void HandleWrap::OnGCCollect() { + Close(); } @@ -121,7 +115,9 @@ HandleWrap::HandleWrap(Environment* env, void HandleWrap::OnClose(uv_handle_t* handle) { - std::unique_ptr wrap { static_cast(handle->data) }; + BaseObjectPtr wrap { static_cast(handle->data) }; + wrap->Detach(); + Environment* env = wrap->env(); HandleScope scope(env->isolate()); Context::Scope context_scope(env->context()); @@ -131,6 +127,7 @@ void HandleWrap::OnClose(uv_handle_t* handle) { wrap->state_ = kClosed; wrap->OnClose(); + wrap->handle_wrap_queue_.Remove(); if (!wrap->persistent().IsEmpty() && wrap->object()->Has(env->context(), env->handle_onclose_symbol()) diff --git a/src/handle_wrap.h b/src/handle_wrap.h index 0dba27077801fe..a555da9479de93 100644 --- a/src/handle_wrap.h +++ b/src/handle_wrap.h @@ -78,14 +78,13 @@ class HandleWrap : public AsyncWrap { static v8::Local GetConstructorTemplate( Environment* env); - void MakeWeak(); // This hides BaseObject::MakeWeak() - protected: HandleWrap(Environment* env, v8::Local object, uv_handle_t* handle, AsyncWrap::ProviderType provider); virtual void OnClose() {} + void OnGCCollect() final; void MarkAsInitialized(); void MarkAsUninitialized(); diff --git a/src/memory_tracker-inl.h b/src/memory_tracker-inl.h index bb91e8416610fd..2e39fa21a944d8 100644 --- a/src/memory_tracker-inl.h +++ b/src/memory_tracker-inl.h @@ -109,6 +109,14 @@ void MemoryTracker::TrackField(const char* edge_name, TrackField(edge_name, value.get(), node_name); } +template +void MemoryTracker::TrackField(const char* edge_name, + const BaseObjectPtrImpl& value, + const char* node_name) { + if (value.get() == nullptr) return; + TrackField(edge_name, value.get(), node_name); +} + template void MemoryTracker::TrackField(const char* edge_name, const T& value, diff --git a/src/memory_tracker.h b/src/memory_tracker.h index e666190c416e72..a400009aad6728 100644 --- a/src/memory_tracker.h +++ b/src/memory_tracker.h @@ -30,6 +30,8 @@ namespace node { class MemoryTracker; class MemoryRetainerNode; +template +class BaseObjectPtrImpl; namespace crypto { class NodeBIO; @@ -139,6 +141,11 @@ class MemoryTracker { const std::unique_ptr& value, const char* node_name = nullptr); + template + void TrackField(const char* edge_name, + const BaseObjectPtrImpl& value, + const char* node_name = nullptr); + // For containers, the elements will be graphed as grandchildren nodes // if the container is not empty. 
// By default, we assume the parent count the stack size of the container diff --git a/src/node_errors.cc b/src/node_errors.cc index 7161a2d1bb7167..9d038e3d1683e7 100644 --- a/src/node_errors.cc +++ b/src/node_errors.cc @@ -270,6 +270,9 @@ static void ReportFatalException(Environment* env, Local error, Local message, EnhanceFatalException enhance_stack) { + if (!env->can_call_into_js()) + enhance_stack = EnhanceFatalException::kDontEnhance; + Isolate* isolate = env->isolate(); CHECK(!error.IsEmpty()); CHECK(!message.IsEmpty()); @@ -914,7 +917,7 @@ void TriggerUncaughtException(Isolate* isolate, } MaybeLocal handled; - { + if (env->can_call_into_js()) { // We do not expect the global uncaught exception itself to throw any more // exceptions. If it does, exit the current Node.js instance. errors::TryCatchScope try_catch(env, diff --git a/src/node_http2.cc b/src/node_http2.cc index 150237709bc31e..0eebe2935e248b 100644 --- a/src/node_http2.cc +++ b/src/node_http2.cc @@ -243,7 +243,6 @@ Http2Session::Http2Settings::Http2Settings(Environment* env, : AsyncWrap(env, obj, PROVIDER_HTTP2SETTINGS), session_(session), startTime_(start_time) { - RemoveCleanupHook(); // This object is owned by the Http2Session. Init(); } @@ -586,8 +585,6 @@ Http2Session::Http2Session(Environment* env, Http2Session::~Http2Session() { CHECK_EQ(flags_ & SESSION_STATE_HAS_SCOPE, 0); Debug(this, "freeing nghttp2 session"); - for (const auto& iter : streams_) - iter.second->session_ = nullptr; nghttp2_session_del(session_); CHECK_EQ(current_nghttp2_memory_, 0); } @@ -695,7 +692,7 @@ void Http2Session::Close(uint32_t code, bool socket_closed) { // If there are outstanding pings, those will need to be canceled, do // so on the next iteration of the event loop to avoid calling out into // javascript since this may be called during garbage collection. - while (std::unique_ptr ping = PopPing()) { + while (BaseObjectPtr ping = PopPing()) { ping->DetachFromSession(); env()->SetImmediate( [ping = std::move(ping)](Environment* env) { @@ -1451,7 +1448,7 @@ void Http2Session::HandlePingFrame(const nghttp2_frame* frame) { Local arg; bool ack = frame->hd.flags & NGHTTP2_FLAG_ACK; if (ack) { - std::unique_ptr ping = PopPing(); + BaseObjectPtr ping = PopPing(); if (!ping) { // PING Ack is unsolicited. Treat as a connection error. The HTTP/2 @@ -1490,7 +1487,7 @@ void Http2Session::HandleSettingsFrame(const nghttp2_frame* frame) { // If this is an acknowledgement, we should have an Http2Settings // object for it. - std::unique_ptr settings = PopSettings(); + BaseObjectPtr settings = PopSettings(); if (settings) { settings->Done(true); return; @@ -1553,7 +1550,8 @@ void Http2Session::MaybeScheduleWrite() { HandleScope handle_scope(env()->isolate()); Debug(this, "scheduling write"); flags_ |= SESSION_STATE_WRITE_SCHEDULED; - env()->SetImmediate([this](Environment* env) { + BaseObjectPtr strong_ref{this}; + env()->SetImmediate([this, strong_ref](Environment* env) { if (session_ == nullptr || !(flags_ & SESSION_STATE_WRITE_SCHEDULED)) { // This can happen e.g. 
when a stream was reset before this turn // of the event loop, in which case SendPendingData() is called early, @@ -1566,7 +1564,7 @@ void Http2Session::MaybeScheduleWrite() { HandleScope handle_scope(env->isolate()); InternalCallbackScope callback_scope(this); SendPendingData(); - }, object()); + }); } } @@ -1951,12 +1949,11 @@ Http2Stream::~Http2Stream() { nghttp2_rcbuf_decref(header.value); } - if (session_ == nullptr) + if (!session_) return; Debug(this, "tearing down stream"); session_->DecrementCurrentSessionMemory(current_headers_length_); session_->RemoveStream(this); - session_ = nullptr; } std::string Http2Stream::diagnostic_name() const { @@ -2022,7 +2019,8 @@ void Http2Stream::Destroy() { // Wait until the start of the next loop to delete because there // may still be some pending operations queued for this stream. - env()->SetImmediate([this](Environment* env) { + BaseObjectPtr strong_ref{this}; + env()->SetImmediate([this, strong_ref](Environment* env) { // Free any remaining outgoing data chunks here. This should be done // here because it's possible for destroy to have been called while // we still have queued outbound writes. @@ -2036,9 +2034,11 @@ void Http2Stream::Destroy() { // We can destroy the stream now if there are no writes for it // already on the socket. Otherwise, we'll wait for the garbage collector // to take care of cleaning up. - if (session() == nullptr || !session()->HasWritesOnSocketForStream(this)) - delete this; - }, object()); + if (session() == nullptr || !session()->HasWritesOnSocketForStream(this)) { + // Delete once strong_ref goes out of scope. + Detach(); + } + }); statistics_.end_time = uv_hrtime(); session_->statistics_.stream_average_duration = @@ -2164,8 +2164,10 @@ Http2Stream* Http2Stream::SubmitPushPromise(nghttp2_nv* nva, id_, nva, len, nullptr); CHECK_NE(*ret, NGHTTP2_ERR_NOMEM); Http2Stream* stream = nullptr; - if (*ret > 0) - stream = Http2Stream::New(session_, *ret, NGHTTP2_HCAT_HEADERS, options); + if (*ret > 0) { + stream = Http2Stream::New( + session_.get(), *ret, NGHTTP2_HCAT_HEADERS, options); + } return stream; } @@ -2832,7 +2834,8 @@ void Http2Session::Ping(const FunctionCallbackInfo& args) { if (obj->Set(env->context(), env->ondone_string(), args[1]).IsNothing()) return; - Http2Ping* ping = session->AddPing(std::make_unique(session, obj)); + Http2Ping* ping = session->AddPing( + MakeDetachedBaseObject(session, obj)); // To prevent abuse, we strictly limit the number of unacknowledged PING // frames that may be sent at any given time. This is configurable in the // Options when creating a Http2Session. 
@@ -2861,16 +2864,16 @@ void Http2Session::Settings(const FunctionCallbackInfo& args) { if (obj->Set(env->context(), env->ondone_string(), args[0]).IsNothing()) return; - Http2Session::Http2Settings* settings = session->AddSettings( - std::make_unique(session->env(), session, obj, 0)); + Http2Settings* settings = session->AddSettings( + MakeDetachedBaseObject(session->env(), session, obj, 0)); if (settings == nullptr) return args.GetReturnValue().Set(false); settings->Send(); args.GetReturnValue().Set(true); } -std::unique_ptr Http2Session::PopPing() { - std::unique_ptr ping; +BaseObjectPtr Http2Session::PopPing() { + BaseObjectPtr ping; if (!outstanding_pings_.empty()) { ping = std::move(outstanding_pings_.front()); outstanding_pings_.pop(); @@ -2880,7 +2883,7 @@ std::unique_ptr Http2Session::PopPing() { } Http2Session::Http2Ping* Http2Session::AddPing( - std::unique_ptr ping) { + BaseObjectPtr ping) { if (outstanding_pings_.size() == max_outstanding_pings_) { ping->Done(false); return nullptr; @@ -2891,8 +2894,8 @@ Http2Session::Http2Ping* Http2Session::AddPing( return ptr; } -std::unique_ptr Http2Session::PopSettings() { - std::unique_ptr settings; +BaseObjectPtr Http2Session::PopSettings() { + BaseObjectPtr settings; if (!outstanding_settings_.empty()) { settings = std::move(outstanding_settings_.front()); outstanding_settings_.pop(); @@ -2902,7 +2905,7 @@ std::unique_ptr Http2Session::PopSettings() { } Http2Session::Http2Settings* Http2Session::AddSettings( - std::unique_ptr settings) { + BaseObjectPtr settings) { if (outstanding_settings_.size() == max_outstanding_settings_) { settings->Done(false); return nullptr; @@ -2917,7 +2920,6 @@ Http2Session::Http2Ping::Http2Ping(Http2Session* session, Local obj) : AsyncWrap(session->env(), obj, AsyncWrap::PROVIDER_HTTP2PING), session_(session), startTime_(uv_hrtime()) { - RemoveCleanupHook(); // This object is owned by the Http2Session. 
} void Http2Session::Http2Ping::Send(const uint8_t* payload) { diff --git a/src/node_http2.h b/src/node_http2.h index c6b9938d2d8ef8..045bdfd716da03 100644 --- a/src/node_http2.h +++ b/src/node_http2.h @@ -460,8 +460,8 @@ class Http2Stream : public AsyncWrap, nghttp2_stream* operator*(); - Http2Session* session() { return session_; } - const Http2Session* session() const { return session_; } + Http2Session* session() { return session_.get(); } + const Http2Session* session() const { return session_.get(); } void EmitStatistics(); @@ -614,7 +614,7 @@ class Http2Stream : public AsyncWrap, nghttp2_headers_category category, int options); - Http2Session* session_ = nullptr; // The Parent HTTP/2 Session + BaseObjectWeakPtr session_; // The Parent HTTP/2 Session int32_t id_ = 0; // The Stream Identifier int32_t code_ = NGHTTP2_NO_ERROR; // The RST_STREAM code (if any) int flags_ = NGHTTP2_STREAM_FLAG_NONE; // Internal state flags @@ -846,11 +846,11 @@ class Http2Session : public AsyncWrap, return env()->event_loop(); } - std::unique_ptr PopPing(); - Http2Ping* AddPing(std::unique_ptr ping); + BaseObjectPtr PopPing(); + Http2Ping* AddPing(BaseObjectPtr ping); - std::unique_ptr PopSettings(); - Http2Settings* AddSettings(std::unique_ptr settings); + BaseObjectPtr PopSettings(); + Http2Settings* AddSettings(BaseObjectPtr settings); void IncrementCurrentSessionMemory(uint64_t amount) { current_session_memory_ += amount; @@ -1027,10 +1027,10 @@ class Http2Session : public AsyncWrap, size_t stream_buf_offset_ = 0; size_t max_outstanding_pings_ = DEFAULT_MAX_PINGS; - std::queue> outstanding_pings_; + std::queue> outstanding_pings_; size_t max_outstanding_settings_ = DEFAULT_MAX_SETTINGS; - std::queue> outstanding_settings_; + std::queue> outstanding_settings_; std::vector outgoing_buffers_; std::vector outgoing_storage_; diff --git a/src/node_messaging.cc b/src/node_messaging.cc index b51607cc0ee371..f55cb7ed9bd08c 100644 --- a/src/node_messaging.cc +++ b/src/node_messaging.cc @@ -927,6 +927,10 @@ void MessagePort::Entangle(MessagePort* a, MessagePortData* b) { MessagePortData::Entangle(a->data_.get(), b); } +void MessagePort::MemoryInfo(MemoryTracker* tracker) const { + tracker->TrackField("data", data_); +} + Local GetMessagePortConstructorTemplate(Environment* env) { // Factor generating the MessagePort JS constructor into its own piece // of code, because it is needed early on in the child environment setup. diff --git a/src/node_messaging.h b/src/node_messaging.h index 054521b0563c42..d64bf23e086d05 100644 --- a/src/node_messaging.h +++ b/src/node_messaging.h @@ -192,10 +192,7 @@ class MessagePort : public HandleWrap { // NULL pointer to the C++ MessagePort object is also detached. 
inline bool IsDetached() const; - void MemoryInfo(MemoryTracker* tracker) const override { - tracker->TrackField("data", data_); - } - + void MemoryInfo(MemoryTracker* tracker) const override; SET_MEMORY_INFO_NAME(MessagePort) SET_SELF_SIZE(MessagePort) diff --git a/src/node_process_events.cc b/src/node_process_events.cc index b090b6c24b3383..d192ef19b7abad 100644 --- a/src/node_process_events.cc +++ b/src/node_process_events.cc @@ -35,6 +35,8 @@ Maybe ProcessEmitWarningGeneric(Environment* env, const char* warning, const char* type, const char* code) { + if (!env->can_call_into_js()) return Just(false); + HandleScope handle_scope(env->isolate()); Context::Scope context_scope(env->context()); diff --git a/src/node_report.cc b/src/node_report.cc index 3a6cb42556461d..c29f866f4a8dad 100644 --- a/src/node_report.cc +++ b/src/node_report.cc @@ -4,6 +4,8 @@ #include "diagnosticfilename-inl.h" #include "node_internals.h" #include "node_metadata.h" +#include "node_mutex.h" +#include "node_worker.h" #include "util.h" #ifdef _WIN32 @@ -19,18 +21,20 @@ #include #include -constexpr int NODE_REPORT_VERSION = 1; +constexpr int NODE_REPORT_VERSION = 2; constexpr int NANOS_PER_SEC = 1000 * 1000 * 1000; constexpr double SEC_PER_MICROS = 1e-6; namespace report { using node::arraysize; +using node::ConditionVariable; using node::DiagnosticFilename; using node::Environment; using node::Mutex; using node::NativeSymbolDebuggingContext; using node::PerIsolateOptions; using node::TIME_TYPE; +using node::worker::Worker; using v8::HeapSpaceStatistics; using v8::HeapStatistics; using v8::Isolate; @@ -210,6 +214,10 @@ static void WriteNodeReport(Isolate* isolate, // Report native process ID writer.json_keyvalue("processId", pid); + if (env != nullptr) + writer.json_keyvalue("threadId", env->thread_id()); + else + writer.json_keyvalue("threadId", JSONWriter::Null{}); { // Report the process cwd. @@ -259,6 +267,39 @@ static void WriteNodeReport(Isolate* isolate, writer.json_arrayend(); + writer.json_arraystart("workers"); + if (env != nullptr) { + Mutex workers_mutex; + ConditionVariable notify; + std::vector worker_infos; + size_t expected_results = 0; + + env->ForEachWorker([&](Worker* w) { + expected_results += w->RequestInterrupt([&](Environment* env) { + std::ostringstream os; + + GetNodeReport(env->isolate(), + env, + "Worker thread subreport", + trigger, + Local(), + os); + + Mutex::ScopedLock lock(workers_mutex); + worker_infos.emplace_back(os.str()); + notify.Signal(lock); + }); + }); + + Mutex::ScopedLock lock(workers_mutex); + worker_infos.reserve(expected_results); + while (worker_infos.size() < expected_results) + notify.Wait(lock); + for (const std::string& worker_info : worker_infos) + writer.json_element(JSONWriter::ForeignJSON { worker_info }); + } + writer.json_arrayend(); + // Report operating system information PrintSystemInformation(&writer); diff --git a/src/node_report.h b/src/node_report.h index 4cb82470f43594..46b69b9681db51 100644 --- a/src/node_report.h +++ b/src/node_report.h @@ -44,6 +44,7 @@ void GetNodeReport(v8::Isolate* isolate, // Function declarations - utility functions in src/node_report_utils.cc void WalkHandle(uv_handle_t* h, void* arg); std::string EscapeJsonChars(const std::string& str); +std::string Reindent(const std::string& str, int indentation); template std::string ValueToHexString(T value) { @@ -146,6 +147,10 @@ class JSONWriter { struct Null {}; // Usable as a JSON value. 
+ struct ForeignJSON { + std::string as_string; + }; + private: template env_ = nullptr; } - env_->thread_stopper()->set_stopped(true); + env_->set_stopping(true); env_->stop_sub_worker_contexts(); env_->RunCleanup(); RunAtExit(env_.get()); @@ -424,7 +424,6 @@ void Worker::JoinThread() { thread_joined_ = true; env()->remove_sub_worker_context(this); - on_thread_finished_.Uninstall(); { HandleScope handle_scope(env()->isolate()); @@ -630,18 +629,16 @@ void Worker::StartThread(const FunctionCallbackInfo& args) { w->stopped_ = false; w->thread_joined_ = false; - w->on_thread_finished_.Install(w->env(), w, [](uv_async_t* handle) { - Worker* w_ = static_cast(handle->data); - CHECK(w_->is_stopped()); - w_->parent_port_ = nullptr; - w_->JoinThread(); - delete w_; - }); + if (w->has_ref_) + w->env()->add_refs(1); uv_thread_options_t thread_options; thread_options.flags = UV_THREAD_HAS_STACK_SIZE; thread_options.stack_size = kStackSize; CHECK_EQ(uv_thread_create_ex(&w->tid_, &thread_options, [](void* arg) { + // XXX: This could become a std::unique_ptr, but that makes at least + // gcc 6.3 detect undefined behaviour when there shouldn't be any. + // gcc 7+ handles this well. Worker* w = static_cast(arg); const uintptr_t stack_top = reinterpret_cast(&arg); @@ -652,7 +649,13 @@ void Worker::StartThread(const FunctionCallbackInfo& args) { w->Run(); Mutex::ScopedLock lock(w->mutex_); - w->on_thread_finished_.Stop(); + w->env()->SetImmediateThreadsafe( + [w = std::unique_ptr(w)](Environment* env) { + if (w->has_ref_) + env->add_refs(-1); + w->JoinThread(); + // implicitly delete w + }); }, static_cast(w)), 0); } @@ -667,13 +670,19 @@ void Worker::StopThread(const FunctionCallbackInfo& args) { void Worker::Ref(const FunctionCallbackInfo& args) { Worker* w; ASSIGN_OR_RETURN_UNWRAP(&w, args.This()); - uv_ref(reinterpret_cast(w->on_thread_finished_.GetHandle())); + if (!w->has_ref_) { + w->has_ref_ = true; + w->env()->add_refs(1); + } } void Worker::Unref(const FunctionCallbackInfo& args) { Worker* w; ASSIGN_OR_RETURN_UNWRAP(&w, args.This()); - uv_unref(reinterpret_cast(w->on_thread_finished_.GetHandle())); + if (w->has_ref_) { + w->has_ref_ = false; + w->env()->add_refs(-1); + } } void Worker::GetResourceLimits(const FunctionCallbackInfo& args) { @@ -699,6 +708,10 @@ void Worker::Exit(int code) { } } +void Worker::MemoryInfo(MemoryTracker* tracker) const { + tracker->TrackField("parent_port", parent_port_); +} + namespace { // Return the MessagePort that is global for this Environment and communicates diff --git a/src/node_worker.h b/src/node_worker.h index ff2864e3193174..863e2cffde0f36 100644 --- a/src/node_worker.h +++ b/src/node_worker.h @@ -40,11 +40,10 @@ class Worker : public AsyncWrap { // Wait for the worker thread to stop (in a blocking manner). void JoinThread(); - void MemoryInfo(MemoryTracker* tracker) const override { - tracker->TrackField("parent_port", parent_port_); - tracker->TrackInlineField(&on_thread_finished_, "on_thread_finished_"); - } + template + inline bool RequestInterrupt(Fn&& cb); + void MemoryInfo(MemoryTracker* tracker) const override; SET_MEMORY_INFO_NAME(Worker) SET_SELF_SIZE(Worker) @@ -109,14 +108,14 @@ class Worker : public AsyncWrap { // instance refers to it via its [kPort] property. MessagePort* parent_port_ = nullptr; - AsyncRequest on_thread_finished_; - // A raw flag that is used by creator and worker threads to // sync up on pre-mature termination of worker - while in the // warmup phase. 
   // warmup phase. Once the worker is fully warmed up, use the
   // async handle of the worker's Environment for the same purpose.
   bool stopped_ = true;
 
+  bool has_ref_ = true;
+
   // The real Environment of the worker object. It has a lesser
   // lifespan than the worker object itself - comes to life
   // when the worker thread creates a new Environment, and gets
@@ -126,6 +125,14 @@
   friend class WorkerThreadData;
 };
 
+template <typename Fn>
+bool Worker::RequestInterrupt(Fn&& cb) {
+  Mutex::ScopedLock lock(mutex_);
+  if (env_ == nullptr) return false;
+  env_->RequestInterrupt(std::move(cb));
+  return true;
+}
+
 }  // namespace worker
 }  // namespace node
diff --git a/src/stream_pipe.cc b/src/stream_pipe.cc
index 504b840972f22d..5f7514b1b84790 100644
--- a/src/stream_pipe.cc
+++ b/src/stream_pipe.cc
@@ -42,7 +42,7 @@ StreamPipe::StreamPipe(StreamBase* source,
 }
 
 StreamPipe::~StreamPipe() {
-  Unpipe();
+  Unpipe(true);
 }
 
 StreamBase* StreamPipe::source() {
@@ -53,7 +53,7 @@ StreamBase* StreamPipe::sink() {
   return static_cast<StreamBase*>(writable_listener_.stream());
 }
 
-void StreamPipe::Unpipe() {
+void StreamPipe::Unpipe(bool is_in_deletion) {
   if (is_closed_)
     return;
@@ -69,10 +69,13 @@
   if (pending_writes_ == 0)
     sink()->RemoveStreamListener(&writable_listener_);
 
+  if (is_in_deletion) return;
+
   // Delay the JS-facing part with SetImmediate, because this might be from
   // inside the garbage collector, so we can’t run JS here.
   HandleScope handle_scope(env()->isolate());
-  env()->SetImmediate([this](Environment* env) {
+  BaseObjectPtr<StreamPipe> strong_ref{this};
+  env()->SetImmediate([this, strong_ref](Environment* env) {
     HandleScope handle_scope(env->isolate());
     Context::Scope context_scope(env->context());
     Local<Object> object = this->object();
@@ -106,7 +109,7 @@
         .IsNothing()) {
       return;
     }
-  }, object());
+  });
 }
 
 uv_buf_t StreamPipe::ReadableListener::OnStreamAlloc(size_t suggested_size) {
diff --git a/src/stream_pipe.h b/src/stream_pipe.h
index 4cc5668c4c5137..e22abab0115c8a 100644
--- a/src/stream_pipe.h
+++ b/src/stream_pipe.h
@@ -12,7 +12,7 @@ class StreamPipe : public AsyncWrap {
   StreamPipe(StreamBase* source, StreamBase* sink, v8::Local<v8::Object> obj);
   ~StreamPipe() override;
 
-  void Unpipe();
+  void Unpipe(bool is_in_deletion = false);
 
   static void New(const v8::FunctionCallbackInfo<v8::Value>& args);
   static void Start(const v8::FunctionCallbackInfo<v8::Value>& args);
diff --git a/src/tls_wrap.cc b/src/tls_wrap.cc
index 29b7cf3f0d60ad..36877424216ed4 100644
--- a/src/tls_wrap.cc
+++ b/src/tls_wrap.cc
@@ -320,9 +320,10 @@ void TLSWrap::EncOut() {
       // its not clear if it is always correct. Not calling Done() could block
       // data flow, so for now continue to call Done(), just do it in the next
       // tick.
-      env()->SetImmediate([this](Environment* env) {
+      BaseObjectPtr<TLSWrap> strong_ref{this};
+      env()->SetImmediate([this, strong_ref](Environment* env) {
         InvokeQueued(0);
-      }, object());
+      });
     }
   }
   return;
@@ -353,9 +354,10 @@
     HandleScope handle_scope(env()->isolate());
 
     // Simulate asynchronous finishing, TLS cannot handle this at the moment.
-    env()->SetImmediate([this](Environment* env) {
+    BaseObjectPtr<TLSWrap> strong_ref{this};
+    env()->SetImmediate([this, strong_ref](Environment* env) {
       OnStreamAfterWrite(nullptr, 0);
-    }, object());
+    });
   }
 }
 
@@ -730,9 +732,10 @@ int TLSWrap::DoWrite(WriteWrap* w,
   StreamWriteResult res = underlying_stream()->Write(bufs, count, send_handle);
   if (!res.async) {
-    env()->SetImmediate([this](Environment* env) {
+    BaseObjectPtr<TLSWrap> strong_ref{this};
+    env()->SetImmediate([this, strong_ref](Environment* env) {
       OnStreamAfterWrite(current_empty_write_, 0);
-    }, object());
+    });
   }
   return 0;
 }
diff --git a/test/cctest/node_test_fixture.h b/test/cctest/node_test_fixture.h
index 465f1c0b45b36c..291bc5962bed77 100644
--- a/test/cctest/node_test_fixture.h
+++ b/test/cctest/node_test_fixture.h
@@ -110,8 +110,8 @@ class NodeTestFixture : public ::testing::Test {
   }
 
   void TearDown() override {
-    isolate_->Exit();
     platform->DrainTasks(isolate_);
+    isolate_->Exit();
     platform->UnregisterIsolate(isolate_);
     isolate_->Dispose();
     isolate_ = nullptr;
diff --git a/test/cctest/test_base_object_ptr.cc b/test/cctest/test_base_object_ptr.cc
new file mode 100644
index 00000000000000..18e27edba8cd53
--- /dev/null
+++ b/test/cctest/test_base_object_ptr.cc
@@ -0,0 +1,176 @@
+#include "gtest/gtest.h"
+#include "node.h"
+#include "base_object-inl.h"
+#include "node_test_fixture.h"
+
+using node::BaseObject;
+using node::BaseObjectPtr;
+using node::BaseObjectWeakPtr;
+using node::Environment;
+using node::MakeBaseObject;
+using node::MakeDetachedBaseObject;
+using v8::HandleScope;
+using v8::Isolate;
+using v8::Local;
+using v8::Object;
+
+class BaseObjectPtrTest : public EnvironmentTestFixture {};
+
+class DummyBaseObject : public BaseObject {
+ public:
+  DummyBaseObject(Environment* env, Local<Object> obj) : BaseObject(env, obj) {}
+
+  static Local<Object> MakeJSObject(Environment* env) {
+    return BaseObject::MakeLazilyInitializedJSTemplate(env)
+        ->GetFunction(env->context()).ToLocalChecked()
+        ->NewInstance(env->context()).ToLocalChecked();
+  }
+
+  static BaseObjectPtr<DummyBaseObject> NewDetached(Environment* env) {
+    Local<Object> obj = MakeJSObject(env);
+    return MakeDetachedBaseObject<DummyBaseObject>(env, obj);
+  }
+
+  static BaseObjectPtr<DummyBaseObject> New(Environment* env) {
+    Local<Object> obj = MakeJSObject(env);
+    return MakeBaseObject<DummyBaseObject>(env, obj);
+  }
+
+  SET_NO_MEMORY_INFO()
+  SET_MEMORY_INFO_NAME(DummyBaseObject)
+  SET_SELF_SIZE(DummyBaseObject)
+};
+
+TEST_F(BaseObjectPtrTest, ScopedDetached) {
+  const HandleScope handle_scope(isolate_);
+  const Argv argv;
+  Env env_{handle_scope, argv};
+  Environment* env = *env_;
+
+  EXPECT_EQ(env->base_object_count(), 0);
+  {
+    BaseObjectPtr<DummyBaseObject> ptr = DummyBaseObject::NewDetached(env);
+    EXPECT_EQ(env->base_object_count(), 1);
+  }
+  EXPECT_EQ(env->base_object_count(), 0);
+}
+
+TEST_F(BaseObjectPtrTest, ScopedDetachedWithWeak) {
+  const HandleScope handle_scope(isolate_);
+  const Argv argv;
+  Env env_{handle_scope, argv};
+  Environment* env = *env_;
+
+  BaseObjectWeakPtr<DummyBaseObject> weak_ptr;
+
+  EXPECT_EQ(env->base_object_count(), 0);
+  {
+    BaseObjectPtr<DummyBaseObject> ptr = DummyBaseObject::NewDetached(env);
+    weak_ptr = ptr;
+    EXPECT_EQ(env->base_object_count(), 1);
+  }
+  EXPECT_EQ(weak_ptr.get(), nullptr);
+  EXPECT_EQ(env->base_object_count(), 0);
+}
+
+TEST_F(BaseObjectPtrTest, Undetached) {
+  const HandleScope handle_scope(isolate_);
+  const Argv argv;
+  Env env_{handle_scope, argv};
+  Environment* env = *env_;
+
+  node::AddEnvironmentCleanupHook(isolate_, [](void* arg) {
+    EXPECT_EQ(static_cast<Environment*>(arg)->base_object_count(), 0);
+  }, env);
+
+  BaseObjectPtr<DummyBaseObject> ptr = DummyBaseObject::New(env);
+  EXPECT_EQ(env->base_object_count(), 1);
+}
+
+TEST_F(BaseObjectPtrTest, GCWeak) {
+  const HandleScope handle_scope(isolate_);
+  const Argv argv;
+  Env env_{handle_scope, argv};
+  Environment* env = *env_;
+
+  BaseObjectWeakPtr<DummyBaseObject> weak_ptr;
+
+  {
+    const HandleScope handle_scope(isolate_);
+    BaseObjectPtr<DummyBaseObject> ptr = DummyBaseObject::New(env);
+    weak_ptr = ptr;
+    ptr->MakeWeak();
+
+    EXPECT_EQ(env->base_object_count(), 1);
+    EXPECT_EQ(weak_ptr.get(), ptr.get());
+    EXPECT_EQ(weak_ptr->persistent().IsWeak(), false);
+
+    ptr.reset();
+  }
+
+  EXPECT_EQ(env->base_object_count(), 1);
+  EXPECT_NE(weak_ptr.get(), nullptr);
+  EXPECT_EQ(weak_ptr->persistent().IsWeak(), true);
+
+  v8::V8::SetFlagsFromString("--expose-gc");
+  isolate_->RequestGarbageCollectionForTesting(Isolate::kFullGarbageCollection);
+
+  EXPECT_EQ(env->base_object_count(), 0);
+  EXPECT_EQ(weak_ptr.get(), nullptr);
+}
+
+TEST_F(BaseObjectPtrTest, Moveable) {
+  const HandleScope handle_scope(isolate_);
+  const Argv argv;
+  Env env_{handle_scope, argv};
+  Environment* env = *env_;
+
+  BaseObjectPtr<DummyBaseObject> ptr = DummyBaseObject::NewDetached(env);
+  EXPECT_EQ(env->base_object_count(), 1);
+  BaseObjectWeakPtr<DummyBaseObject> weak_ptr { ptr };
+  EXPECT_EQ(weak_ptr.get(), ptr.get());
+
+  BaseObjectPtr<DummyBaseObject> ptr2 = std::move(ptr);
+  EXPECT_EQ(weak_ptr.get(), ptr2.get());
+  EXPECT_EQ(ptr.get(), nullptr);
+
+  BaseObjectWeakPtr<DummyBaseObject> weak_ptr2 = std::move(weak_ptr);
+  EXPECT_EQ(weak_ptr2.get(), ptr2.get());
+  EXPECT_EQ(weak_ptr.get(), nullptr);
+  EXPECT_EQ(env->base_object_count(), 1);
+
+  ptr2.reset();
+
+  EXPECT_EQ(weak_ptr2.get(), nullptr);
+  EXPECT_EQ(env->base_object_count(), 0);
+}
+
+TEST_F(BaseObjectPtrTest, NestedClasses) {
+  class ObjectWithPtr : public BaseObject {
+   public:
+    ObjectWithPtr(Environment* env, Local<Object> obj) : BaseObject(env, obj) {}
+
+    BaseObjectPtr<BaseObject> ptr1;
+    BaseObjectPtr<BaseObject> ptr2;
+
+    SET_NO_MEMORY_INFO()
+    SET_MEMORY_INFO_NAME(ObjectWithPtr)
+    SET_SELF_SIZE(ObjectWithPtr)
+  };
+
+  const HandleScope handle_scope(isolate_);
+  const Argv argv;
+  Env env_{handle_scope, argv};
+  Environment* env = *env_;
+
+  node::AddEnvironmentCleanupHook(isolate_, [](void* arg) {
+    EXPECT_EQ(static_cast<Environment*>(arg)->base_object_count(), 0);
+  }, env);
+
+  ObjectWithPtr* obj =
+      new ObjectWithPtr(env, DummyBaseObject::MakeJSObject(env));
+  obj->ptr1 = DummyBaseObject::NewDetached(env);
+  obj->ptr2 = DummyBaseObject::New(env);
+
+  EXPECT_EQ(env->base_object_count(), 3);
+}
diff --git a/test/cctest/test_environment.cc b/test/cctest/test_environment.cc
index 8bc1611330be31..a15e56a772ed19 100644
--- a/test/cctest/test_environment.cc
+++ b/test/cctest/test_environment.cc
@@ -218,3 +218,26 @@ TEST_F(EnvironmentTest, BufferWithFreeCallbackIsDetached) {
   CHECK_EQ(callback_calls, 1);
   CHECK_EQ(ab->ByteLength(), 0);
 }
+
+TEST_F(EnvironmentTest, SetImmediateCleanup) {
+  int called = 0;
+  int called_unref = 0;
+
+  {
+    const v8::HandleScope handle_scope(isolate_);
+    const Argv argv;
+    Env env {handle_scope, argv};
+
+    (*env)->SetImmediate([&](node::Environment* env_arg) {
+      EXPECT_EQ(env_arg, *env);
+      called++;
+    });
+    (*env)->SetUnrefImmediate([&](node::Environment* env_arg) {
+      EXPECT_EQ(env_arg, *env);
+      called_unref++;
+    });
+  }
+
+  EXPECT_EQ(called, 1);
+  EXPECT_EQ(called_unref, 0);
+}
diff --git a/test/cctest/test_node_postmortem_metadata.cc b/test/cctest/test_node_postmortem_metadata.cc
index f33d40eb5c23fe..3fb67ecbca265e 100644
--- a/test/cctest/test_node_postmortem_metadata.cc
+++ b/test/cctest/test_node_postmortem_metadata.cc
@@ -93,14 +93,13 @@ TEST_F(DebugSymbolsTest, BaseObjectPersistentHandle) {
   v8::Local<v8::Object> object =
       obj_templ->NewInstance(env.context()).ToLocalChecked();
-  DummyBaseObject obj(*env, object);
+  node::BaseObjectPtr<DummyBaseObject> obj =
+      node::MakeDetachedBaseObject<DummyBaseObject>(*env, object);
 
-  auto expected = reinterpret_cast<uintptr_t>(&obj.persistent());
-  auto calculated = reinterpret_cast<uintptr_t>(&obj) +
+  auto expected = reinterpret_cast<uintptr_t>(&obj->persistent());
+  auto calculated = reinterpret_cast<uintptr_t>(obj.get()) +
       nodedbg_offset_BaseObject__persistent_handle___v8_Persistent_v8_Object;
   EXPECT_EQ(expected, calculated);
-
-  obj.persistent().Reset();  // ~BaseObject() expects an empty handle.
 }
diff --git a/test/common/report.js b/test/common/report.js
index fdb15924cab770..c117d7b76459bc 100644
--- a/test/common/report.js
+++ b/test/common/report.js
@@ -53,7 +53,7 @@ function _validateContent(report) {
   // Verify that all sections are present as own properties of the report.
   const sections = ['header', 'javascriptStack', 'nativeStack',
                     'javascriptHeap', 'libuv', 'environmentVariables',
-                    'sharedObjects', 'resourceUsage'];
+                    'sharedObjects', 'resourceUsage', 'workers'];
   if (!isWindows)
     sections.push('userLimits');
@@ -74,9 +74,9 @@
                         'componentVersions', 'release', 'osName', 'osRelease',
                         'osVersion', 'osMachine', 'cpus', 'host',
                         'glibcVersionRuntime', 'glibcVersionCompiler', 'cwd',
-                        'reportVersion', 'networkInterfaces'];
+                        'reportVersion', 'networkInterfaces', 'threadId'];
   checkForUnknownFields(header, headerFields);
-  assert.strictEqual(header.reportVersion, 1);  // Increment as needed.
+  assert.strictEqual(header.reportVersion, 2);  // Increment as needed.
   assert.strictEqual(typeof header.event, 'string');
   assert.strictEqual(typeof header.trigger, 'string');
   assert(typeof header.filename === 'string' || header.filename === null);
@@ -84,6 +84,7 @@
                          'Invalid Date');
   assert(String(+header.dumpEventTimeStamp), header.dumpEventTimeStamp);
   assert(Number.isSafeInteger(header.processId));
+  assert(Number.isSafeInteger(header.threadId) || header.threadId === null);
   assert.strictEqual(typeof header.cwd, 'string');
   assert(Array.isArray(header.commandLine));
   header.commandLine.forEach((arg) => {
@@ -253,6 +254,10 @@
   report.sharedObjects.forEach((sharedObject) => {
     assert.strictEqual(typeof sharedObject, 'string');
   });
+
+  // Verify the format of the workers section.
+  assert(Array.isArray(report.workers));
+  report.workers.forEach(_validateContent);
 }
 
 function checkForUnknownFields(actual, expected) {
diff --git a/test/parallel/test-worker-exit-event-error.js b/test/parallel/test-worker-exit-event-error.js
new file mode 100644
index 00000000000000..e2427c7dff726b
--- /dev/null
+++ b/test/parallel/test-worker-exit-event-error.js
@@ -0,0 +1,8 @@
+'use strict';
+const common = require('../common');
+const { Worker } = require('worker_threads');
+
+process.on('uncaughtException', common.mustCall());
+
+new Worker('', { eval: true })
+  .on('exit', common.mustCall(() => { throw new Error('foo'); }));
diff --git a/test/report/test-report-worker.js b/test/report/test-report-worker.js
new file mode 100644
index 00000000000000..a34c05f08431de
--- /dev/null
+++ b/test/report/test-report-worker.js
@@ -0,0 +1,50 @@
+// Flags: --experimental-report
+'use strict';
+const common = require('../common');
+common.skipIfReportDisabled();
+const assert = require('assert');
+const { Worker } = require('worker_threads');
+const { once } = require('events');
+const helper = require('../common/report');
+
+async function basic() {
+  // Test that the report includes basic information about Worker threads.
+
+  const w = new Worker(`
+  const { parentPort } = require('worker_threads');
+  parentPort.once('message', () => {
+    /* Wait for message to stop the Worker */
+  });
+  `, { eval: true });
+
+  await once(w, 'online');
+
+  const report = process.report.getReport();
+  helper.validateContent(report);
+  assert.strictEqual(report.workers.length, 1);
+  helper.validateContent(report.workers[0]);
+
+  w.postMessage({});
+
+  await once(w, 'exit');
+}
+
+async function interruptingJS() {
+  // Test that the report also works when Worker threads are busy in JS land.
+
+  const w = new Worker('while (true);', { eval: true });
+
+  await once(w, 'online');
+
+  const report = process.report.getReport();
+  helper.validateContent(report);
+  assert.strictEqual(report.workers.length, 1);
+  helper.validateContent(report.workers[0]);
+
+  await w.terminate();
+}
+
+(async function() {
+  await basic();
+  await interruptingJS();
+})().then(common.mustCall());
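For illustration only, and not part of the diff above: a minimal sketch of how the `workers` section introduced by this change might be consumed from ordinary application code. It follows the same pattern as `test/report/test-report-worker.js`; the worker body and the logged fields are hypothetical, and `process.report.getReport()` still requires the `--experimental-report` flag here.

```js
// Flags: --experimental-report
'use strict';
const { Worker } = require('worker_threads');
const { once } = require('events');

(async () => {
  // Hypothetical worker body; anything that keeps the thread alive will do.
  const w = new Worker('setInterval(() => {}, 1000);', { eval: true });
  await once(w, 'online');

  // The main thread's report now contains one sub-report per live Worker,
  // each in the standard report format, so header fields such as threadId
  // are available per thread.
  const report = process.report.getReport();
  for (const workerReport of report.workers)
    console.log('worker thread id:', workerReport.header.threadId);

  await w.terminate();
})();
```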