J. Nick Koston
2025-09-02 11:10:46 -05:00
parent ef33f630c2
commit 05c71bda91
2 changed files with 22 additions and 10 deletions


@@ -14,7 +14,18 @@ namespace esphome {
 static const char *const TAG = "scheduler";
+// Memory pool configuration constants
+// Pool size of 10 is a balance between memory usage and performance:
+// - Small enough to not waste memory on simple configs (1-2 timers)
+// - Large enough to handle complex setups with multiple sensors/components
+// - Prevents system-wide stalls from heap allocation/deallocation that can
+//   disrupt task synchronization and cause dropped events
+static constexpr size_t MAX_POOL_SIZE = 10;
+// Maximum number of cancelled items to keep in the heap before forcing a cleanup.
+// Set to 6 to trigger cleanup relatively frequently, ensuring cancelled items are
+// recycled to the pool in a timely manner to maintain pool efficiency.
 static const uint32_t MAX_LOGICALLY_DELETED_ITEMS = 6;
 // Half the 32-bit range - used to detect rollovers vs normal time progression
 static constexpr uint32_t HALF_MAX_UINT32 = std::numeric_limits<uint32_t>::max() / 2;
 // max delay to start an interval sequence
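
The MAX_LOGICALLY_DELETED_ITEMS threshold implements lazy deletion: cancelling a timer only marks it in the heap, and once enough tombstones accumulate a single pass compacts the heap and (per the comment above) returns the items to the pool. A minimal sketch of that pattern, assuming a simplified Item type and a hypothetical cleanup helper, not the scheduler's actual internals:

```cpp
#include <algorithm>
#include <cstdint>
#include <memory>
#include <vector>

// Simplified stand-in for the scheduler's heap entries.
struct Item {
  uint32_t next_execution;
  bool removed{false};  // cancellation only marks the item
};

static const uint32_t MAX_LOGICALLY_DELETED_ITEMS = 6;

// Hypothetical cleanup: once enough tombstones accumulate, compact the
// heap in one pass instead of erasing (and re-heapifying) per cancel.
void cleanup_if_needed(std::vector<std::unique_ptr<Item>> &heap, uint32_t &num_removed) {
  if (num_removed < MAX_LOGICALLY_DELETED_ITEMS)
    return;
  heap.erase(std::remove_if(heap.begin(), heap.end(),
                            [](const std::unique_ptr<Item> &it) { return it->removed; }),
             heap.end());
  std::make_heap(heap.begin(), heap.end(),
                 [](const std::unique_ptr<Item> &a, const std::unique_ptr<Item> &b) {
                   return a->next_execution > b->next_execution;  // min-heap on time
                 });
  num_removed = 0;
}
```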
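HALF_MAX_UINT32 is the standard trick for telling a 32-bit millisecond-counter wrap apart from normal forward progress: a backwards jump larger than half the range can only be a rollover. A hedged sketch of that check (the helper name is illustrative, not the scheduler's API):

```cpp
#include <cstdint>
#include <limits>

static constexpr uint32_t HALF_MAX_UINT32 = std::numeric_limits<uint32_t>::max() / 2;

// Illustrative helper: a value read after `last` can only be smaller by
// more than half the range if the 32-bit counter wrapped around.
bool rolled_over(uint32_t last, uint32_t now) {
  return now < last && (last - now) > HALF_MAX_UINT32;
}
```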
@@ -765,12 +776,6 @@ void Scheduler::recycle_item_(std::unique_ptr<SchedulerItem> item) {
   if (!item)
     return;
-  // Pool size of 10 is a balance between memory usage and performance:
-  // - Small enough to not waste memory on simple configs (1-2 timers)
-  // - Large enough to handle complex setups with multiple sensors/components
-  // - Prevents system-wide stalls from heap allocation/deallocation that can
-  //   disrupt task synchronization and cause dropped events
-  static constexpr size_t MAX_POOL_SIZE = 10;
   if (this->scheduler_item_pool_.size() < MAX_POOL_SIZE) {
     // Clear callback to release captured resources
     item->callback = nullptr;
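
Together with the recycle path above, the pool turns schedule/cancel churn into pointer moves instead of heap traffic. A minimal sketch of both sides of the pattern, assuming a stripped-down SchedulerItem; acquire_item_ is a hypothetical counterpart, as only recycle_item_ and scheduler_item_pool_ appear in this diff:

```cpp
#include <cstddef>
#include <functional>
#include <memory>
#include <vector>

// Stripped-down stand-in for the scheduler's item type.
struct SchedulerItem {
  std::function<void()> callback;
};

static constexpr size_t MAX_POOL_SIZE = 10;

class Scheduler {
 public:
  // Mirrors the recycle path in the hunk above: cap the pool, and drop
  // the callback so captured resources are released while the item idles.
  void recycle_item_(std::unique_ptr<SchedulerItem> item) {
    if (!item)
      return;
    if (this->scheduler_item_pool_.size() < MAX_POOL_SIZE) {
      item->callback = nullptr;
      this->scheduler_item_pool_.push_back(std::move(item));
    }
    // Pool full: the unique_ptr frees the item when it goes out of scope.
  }

  // Hypothetical counterpart: reuse a pooled item when available so the
  // hot path avoids a fresh heap allocation.
  std::unique_ptr<SchedulerItem> acquire_item_() {
    if (!this->scheduler_item_pool_.empty()) {
      auto item = std::move(this->scheduler_item_pool_.back());
      this->scheduler_item_pool_.pop_back();
      return item;
    }
    return std::make_unique<SchedulerItem>();
  }

 private:
  std::vector<std::unique_ptr<SchedulerItem>> scheduler_item_pool_;
};
```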


@@ -47,6 +47,7 @@ async def test_scheduler_pool(
         3: loop.create_future(),
         4: loop.create_future(),
         5: loop.create_future(),
+        6: loop.create_future(),
     }

     def check_output(line: str) -> None:
@@ -68,7 +69,7 @@ async def test_scheduler_pool(
             new_alloc_count += 1

         # Track phase completion
-        for phase_num in range(1, 6):
+        for phase_num in range(1, 7):
             if (
                 f"Phase {phase_num} complete" in line
                 and not phase_futures[phase_num].done()
@@ -100,6 +101,7 @@ async def test_scheduler_pool(
         "run_phase_3",
         "run_phase_4",
         "run_phase_5",
+        "run_phase_6",
         "run_complete",
     }
     assert expected_services.issubset(service_names), (
@@ -109,7 +111,7 @@ async def test_scheduler_pool(
     # Get service objects
     phase_services = {
         num: next(s for s in services if s.name == f"run_phase_{num}")
-        for num in range(1, 6)
+        for num in range(1, 7)
     }

     complete_service = next(s for s in services if s.name == "run_complete")
@@ -137,7 +139,12 @@ async def test_scheduler_pool(
     # Phase 5: Pool reuse verification
     client.execute_service(phase_services[5], {})
     await asyncio.wait_for(phase_futures[5], timeout=3.0)
-    await asyncio.sleep(0.5)  # Let reuse tests complete
+    await asyncio.sleep(1.0)  # Let Phase 5 timeouts complete and recycle
+
+    # Phase 6: Full pool reuse verification
+    client.execute_service(phase_services[6], {})
+    await asyncio.wait_for(phase_futures[6], timeout=3.0)
+    await asyncio.sleep(1.0)  # Let Phase 6 timeouts complete

     # Complete test
     client.execute_service(complete_service, {})
@@ -159,7 +166,7 @@ async def test_scheduler_pool(
     )

     # Verify all test phases ran
-    for phase_num in range(1, 6):
+    for phase_num in range(1, 7):
         assert phase_futures[phase_num].done(), f"Phase {phase_num} did not complete"

     # Verify pool behavior