diff --git a/esphome/core/scheduler.cpp b/esphome/core/scheduler.cpp
index a907b89b02..7e2a805793 100644
--- a/esphome/core/scheduler.cpp
+++ b/esphome/core/scheduler.cpp
@@ -79,8 +79,22 @@ void HOT Scheduler::set_timer_common_(Component *component, SchedulerItem::Type
     return;
   }

+  // Get fresh timestamp BEFORE taking lock - millis_64_ may need to acquire lock itself
+  const uint64_t now = this->millis_64_(millis());
+
+  // Take lock early to protect scheduler_item_pool_ access
+  LockGuard guard{this->lock_};
+
   // Create and populate the scheduler item
-  auto item = make_unique<SchedulerItem>();
+  std::unique_ptr<SchedulerItem> item;
+  if (!this->scheduler_item_pool_.empty()) {
+    // Reuse from pool
+    item = std::move(this->scheduler_item_pool_.back());
+    this->scheduler_item_pool_.pop_back();
+  } else {
+    // Allocate new if pool is empty
+    item = make_unique<SchedulerItem>();
+  }
   item->component = component;
   item->set_name(name_cstr, !is_static_string);
   item->type = type;
@@ -99,7 +113,6 @@ void HOT Scheduler::set_timer_common_(Component *component, SchedulerItem::Type
   // Single-core platforms don't need thread-safe defer handling
   if (delay == 0 && type == SchedulerItem::TIMEOUT) {
     // Put in defer queue for guaranteed FIFO execution
-    LockGuard guard{this->lock_};
     if (!skip_cancel) {
       this->cancel_item_locked_(component, name_cstr, type);
     }
@@ -108,9 +121,6 @@ void HOT Scheduler::set_timer_common_(Component *component, SchedulerItem::Type
   }
 #endif /* not ESPHOME_THREAD_SINGLE */

-  // Get fresh timestamp for new timer/interval - ensures accurate scheduling
-  const auto now = this->millis_64_(millis());  // Fresh millis() call
-
   // Type-specific setup
   if (type == SchedulerItem::INTERVAL) {
     item->interval = delay;
@@ -142,8 +152,6 @@ void HOT Scheduler::set_timer_common_(Component *component, SchedulerItem::Type
   }
 #endif /* ESPHOME_DEBUG_SCHEDULER */

-  LockGuard guard{this->lock_};
-
   // For retries, check if there's a cancelled timeout first
   if (is_retry && name_cstr != nullptr && type == SchedulerItem::TIMEOUT &&
       (has_cancelled_timeout_in_container_(this->items_, component, name_cstr, /* match_retry= */ true) ||
@@ -335,11 +343,11 @@ void HOT Scheduler::call(uint32_t now) {
 #ifdef ESPHOME_THREAD_MULTI_ATOMICS
     const auto last_dbg = this->last_millis_.load(std::memory_order_relaxed);
     const auto major_dbg = this->millis_major_.load(std::memory_order_relaxed);
-    ESP_LOGD(TAG, "Items: count=%zu, now=%" PRIu64 " (%" PRIu16 ", %" PRIu32 ")", this->items_.size(), now_64,
-             major_dbg, last_dbg);
+    ESP_LOGD(TAG, "Items: count=%zu, pool=%zu, now=%" PRIu64 " (%" PRIu16 ", %" PRIu32 ")", this->items_.size(),
+             this->scheduler_item_pool_.size(), now_64, major_dbg, last_dbg);
 #else /* not ESPHOME_THREAD_MULTI_ATOMICS */
-    ESP_LOGD(TAG, "Items: count=%zu, now=%" PRIu64 " (%" PRIu16 ", %" PRIu32 ")", this->items_.size(), now_64,
-             this->millis_major_, this->last_millis_);
+    ESP_LOGD(TAG, "Items: count=%zu, pool=%zu, now=%" PRIu64 " (%" PRIu16 ", %" PRIu32 ")", this->items_.size(),
+             this->scheduler_item_pool_.size(), now_64, this->millis_major_, this->last_millis_);
 #endif /* else ESPHOME_THREAD_MULTI_ATOMICS */
     // Cleanup before debug output
     this->cleanup_();
@@ -380,10 +388,13 @@ void HOT Scheduler::call(uint32_t now) {

     std::vector<std::unique_ptr<SchedulerItem>> valid_items;

-    // Move all non-removed items to valid_items
+    // Move all non-removed items to valid_items, recycle removed ones
     for (auto &item : this->items_) {
-      if (!item->remove) {
+      if (!is_item_removed_(item.get())) {
         valid_items.push_back(std::move(item));
+      } else {
+        // Recycle removed items
+        this->recycle_item_(std::move(item));
       }
     }

@@ -469,6 +480,9 @@ void HOT Scheduler::call(uint32_t now) {
         // Add new item directly to to_add_
         // since we have the lock held
         this->to_add_.push_back(std::move(item));
+      } else {
+        // Timeout completed - recycle it
+        this->recycle_item_(std::move(item));
       }
     }
   }
@@ -518,6 +532,10 @@ size_t HOT Scheduler::cleanup_() {
 }

 void HOT Scheduler::pop_raw_() {
   std::pop_heap(this->items_.begin(), this->items_.end(), SchedulerItem::cmp);
+
+  // Instead of destroying, recycle the item
+  this->recycle_item_(std::move(this->items_.back()));
+
   this->items_.pop_back();
 }
@@ -552,18 +570,14 @@ bool HOT Scheduler::cancel_item_locked_(Component *component, const char *name_c
   // Check all containers for matching items

 #ifndef ESPHOME_THREAD_SINGLE
-  // Only check defer queue for timeouts (intervals never go there)
+  // Cancel and immediately recycle items in defer queue
   if (type == SchedulerItem::TIMEOUT) {
-    for (auto &item : this->defer_queue_) {
-      if (this->matches_item_(item, component, name_cstr, type, match_retry)) {
-        this->mark_item_removed_(item.get());
-        total_cancelled++;
-      }
-    }
+    total_cancelled +=
+        this->cancel_and_recycle_from_container_(this->defer_queue_, component, name_cstr, type, match_retry);
   }
 #endif /* not ESPHOME_THREAD_SINGLE */

-  // Cancel items in the main heap
+  // Cancel items in the main heap (can't recycle immediately due to heap structure)
   for (auto &item : this->items_) {
     if (this->matches_item_(item, component, name_cstr, type, match_retry)) {
       this->mark_item_removed_(item.get());
@@ -572,14 +586,8 @@ bool HOT Scheduler::cancel_item_locked_(Component *component, const char *name_c
     }
   }

-  // Cancel items in to_add_
-  for (auto &item : this->to_add_) {
-    if (this->matches_item_(item, component, name_cstr, type, match_retry)) {
-      this->mark_item_removed_(item.get());
-      total_cancelled++;
-      // Don't track removals for to_add_ items
-    }
-  }
+  // Cancel and immediately recycle items in to_add_ since they're not in heap yet
+  total_cancelled += this->cancel_and_recycle_from_container_(this->to_add_, component, name_cstr, type, match_retry);

   return total_cancelled > 0;
 }
@@ -747,4 +755,24 @@ bool HOT Scheduler::SchedulerItem::cmp(const std::unique_ptr<SchedulerItem> &a,
   return a->next_execution_ > b->next_execution_;
 }

+void Scheduler::recycle_item_(std::unique_ptr<SchedulerItem> item) {
+  if (!item)
+    return;
+
+  // Pool size of 8 is a balance between memory usage and performance:
+  // - Small enough to not waste memory on simple configs (1-2 timers)
+  // - Large enough to handle complex setups with multiple sensors/components
+  // - Prevents system-wide stalls from heap allocation/deallocation that can
+  //   disrupt task synchronization and cause dropped events
+  static constexpr size_t MAX_POOL_SIZE = 8;
+  if (this->scheduler_item_pool_.size() < MAX_POOL_SIZE) {
+    // Clear callback to release captured resources
+    item->callback = nullptr;
+    // Clear dynamic name if any
+    item->clear_dynamic_name();
+    this->scheduler_item_pool_.push_back(std::move(item));
+  }
+  // else: unique_ptr will delete the item when it goes out of scope
+}
+
 }  // namespace esphome
diff --git a/esphome/core/scheduler.h b/esphome/core/scheduler.h
index f469a60d5c..50cc8da366 100644
--- a/esphome/core/scheduler.h
+++ b/esphome/core/scheduler.h
@@ -5,6 +5,7 @@
 #include
 #include
 #include
+#include
 #ifdef ESPHOME_THREAD_MULTI_ATOMICS
 #include <atomic>
 #endif
@@ -142,11 +143,7 @@ class Scheduler {
   }

   // Destructor to clean up dynamic names
-  ~SchedulerItem() {
-    if (name_is_dynamic) {
-      delete[] name_.dynamic_name;
-    }
-  }
+  ~SchedulerItem() { clear_dynamic_name(); }

   // Delete copy operations to prevent accidental copies
   SchedulerItem(const SchedulerItem &) = delete;
@@ -159,13 +156,19 @@ class Scheduler {
   // Helper to get the name regardless of storage type
   const char *get_name() const { return name_is_dynamic ? name_.dynamic_name : name_.static_name; }

+  // Helper to clear dynamic name if allocated
+  void clear_dynamic_name() {
+    if (name_is_dynamic && name_.dynamic_name) {
+      delete[] name_.dynamic_name;
+      name_.dynamic_name = nullptr;
+      name_is_dynamic = false;
+    }
+  }
+
   // Helper to set name with proper ownership
   void set_name(const char *name, bool make_copy = false) {
     // Clean up old dynamic name if any
-    if (name_is_dynamic && name_.dynamic_name) {
-      delete[] name_.dynamic_name;
-      name_is_dynamic = false;
-    }
+    clear_dynamic_name();

     if (!name) {
       // nullptr case - no name provided
@@ -240,10 +243,13 @@ class Scheduler {
   void execute_item_(SchedulerItem *item, uint32_t now);

   // Helper to check if item should be skipped
-  bool should_skip_item_(const SchedulerItem *item) const {
-    return item->remove || (item->component != nullptr && item->component->is_failed());
+  bool should_skip_item_(SchedulerItem *item) const {
+    return is_item_removed_(item) || (item->component != nullptr && item->component->is_failed());
   }

+  // Helper to recycle a SchedulerItem
+  void recycle_item_(std::unique_ptr<SchedulerItem> item);
+
   // Helper to check if item is marked for removal (platform-specific)
   // Returns true if item should be skipped, handles platform-specific synchronization
   // For ESPHOME_THREAD_MULTI_NO_ATOMICS platforms, the caller must hold the scheduler lock before calling this
@@ -280,14 +286,33 @@ class Scheduler {
   bool has_cancelled_timeout_in_container_(const Container &container, Component *component, const char *name_cstr,
                                            bool match_retry) const {
     for (const auto &item : container) {
-      if (item->remove && this->matches_item_(item, component, name_cstr, SchedulerItem::TIMEOUT, match_retry,
-                                              /* skip_removed= */ false)) {
+      if (is_item_removed_(item.get()) &&
+          this->matches_item_(item, component, name_cstr, SchedulerItem::TIMEOUT, match_retry,
+                              /* skip_removed= */ false)) {
         return true;
       }
     }
     return false;
   }

+  // Template helper to cancel and recycle items from a container
+  template<typename Container>
+  size_t cancel_and_recycle_from_container_(Container &container, Component *component, const char *name_cstr,
+                                            SchedulerItem::Type type, bool match_retry) {
+    size_t cancelled = 0;
+    for (auto it = container.begin(); it != container.end();) {
+      if (this->matches_item_(*it, component, name_cstr, type, match_retry)) {
+        // Recycle the cancelled item immediately
+        this->recycle_item_(std::move(*it));
+        it = container.erase(it);
+        cancelled++;
+      } else {
+        ++it;
+      }
+    }
+    return cancelled;
+  }
+
   Mutex lock_;
   std::vector<std::unique_ptr<SchedulerItem>> items_;
   std::vector<std::unique_ptr<SchedulerItem>> to_add_;
@@ -297,6 +322,16 @@ class Scheduler {
 #endif /* ESPHOME_THREAD_SINGLE */
   uint32_t to_remove_{0};

+  // Memory pool for recycling SchedulerItem objects to reduce heap churn.
+  // Design decisions:
+  // - std::vector is used instead of a fixed array because many systems only need 1-2 scheduler items
+  // - The vector grows dynamically up to MAX_POOL_SIZE (8) only when needed, saving memory on simple setups
+  // - This approach balances memory efficiency for simple configs with performance for complex ones
+  // - The pool significantly reduces heap fragmentation, which is critical because heap allocation/deallocation
+  //   can stall the entire system, causing timing issues and dropped events for any components that need
+  //   to synchronize between tasks (see https://github.com/esphome/backlog/issues/52)
+  std::vector<std::unique_ptr<SchedulerItem>> scheduler_item_pool_;
+
 #ifdef ESPHOME_THREAD_MULTI_ATOMICS
   /*
    * Multi-threaded platforms with atomic support: last_millis_ needs atomic for lock-free updates
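
For reference, the acquire/recycle flow introduced by this patch can be summarized by the standalone sketch below. It is illustrative only and not part of the patch: the Item and ItemPool names are made up, and locking is omitted, whereas the real scheduler takes lock_ in set_timer_common_() before touching scheduler_item_pool_.

    // Minimal sketch of the bounded object-pool pattern (illustrative names, not ESPHome types).
    #include <cstddef>
    #include <cstdio>
    #include <functional>
    #include <memory>
    #include <utility>
    #include <vector>

    struct Item {
      std::function<void()> callback;
    };

    class ItemPool {
     public:
      // Reuse a pooled Item when available, otherwise heap-allocate a new one.
      std::unique_ptr<Item> acquire() {
        if (!pool_.empty()) {
          auto item = std::move(pool_.back());
          pool_.pop_back();
          return item;
        }
        return std::make_unique<Item>();
      }

      // Return an Item to the pool, capping its size so simple setups don't hold unused memory.
      void recycle(std::unique_ptr<Item> item) {
        if (!item)
          return;
        if (pool_.size() < MAX_POOL_SIZE) {
          item->callback = nullptr;  // release captured resources before reuse
          pool_.push_back(std::move(item));
        }
        // else: the unique_ptr deletes the item when it goes out of scope
      }

     private:
      static constexpr std::size_t MAX_POOL_SIZE = 8;
      std::vector<std::unique_ptr<Item>> pool_;
    };

    int main() {
      ItemPool pool;
      auto a = pool.acquire();  // pool empty: heap allocation
      a->callback = [] { std::puts("tick"); };
      a->callback();
      pool.recycle(std::move(a));  // callback cleared, object kept for reuse
      auto b = pool.acquire();  // reuses the recycled object, no new allocation
      b->callback = [] { std::puts("tock"); };
      b->callback();
      return 0;
    }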