mirror of https://github.com/esphome/esphome.git, synced 2025-11-18 15:55:46 +00:00
fixes
tests/integration/fixtures/wait_until_mid_loop_timing.yaml (new file, 109 lines)
@@ -0,0 +1,109 @@
# Test for PR #11676 bug: wait_until timeout when triggered mid-component-loop
# This demonstrates that App.get_loop_component_start_time() is stale when
# wait_until is triggered partway through a component's loop execution

esphome:
  name: wait-mid-loop

host:

api:
  actions:
    - action: test_mid_loop_timeout
      then:
        - logger.log: "=== Test: wait_until triggered mid-loop should timeout correctly ==="

        # Reset test state
        - globals.set:
            id: test_complete
            value: 'false'

        # Trigger the slow script that will call wait_until mid-execution
        - script.execute: slow_script

        # Wait for test to complete (should take ~300ms: 100ms delay + 200ms timeout)
        - wait_until:
            condition:
              lambda: return id(test_complete);
            timeout: 2s

        - if:
            condition:
              lambda: return id(test_complete);
            then:
              - logger.log: "✓ Test PASSED: wait_until timed out correctly"
            else:
              - logger.log: "✗ Test FAILED: wait_until did not complete properly"

logger:
  level: DEBUG

globals:
  - id: test_complete
    type: bool
    restore_value: false
    initial_value: 'false'

  - id: test_condition
    type: bool
    restore_value: false
    initial_value: 'false'

  - id: timeout_start_time
    type: uint32_t
    restore_value: false
    initial_value: '0'

  - id: timeout_end_time
    type: uint32_t
    restore_value: false
    initial_value: '0'

script:
  # This script simulates a component that takes time during its execution
  # When wait_until is triggered mid-script, the loop_component_start_time
  # will be stale (from when the script's component loop started)
  - id: slow_script
    then:
      - logger.log: "Script: Starting, about to do some work..."

      # Simulate component doing work for 100ms
      # This represents time spent in a component's loop() before triggering wait_until
      - delay: 100ms

      - logger.log: "Script: 100ms elapsed, now starting wait_until with 200ms timeout"
      - lambda: |-
          // Record when timeout starts
          id(timeout_start_time) = millis();
          id(test_condition) = false;

      # At this point:
      # - Script component's loop started 100ms ago
      # - App.loop_component_start_time_ = time from 100ms ago (stale!)
      # - wait_until will capture millis() NOW (fresh)
      # - BUG: loop() will use stale loop_component_start_time, causing immediate timeout

      - wait_until:
          condition:
            lambda: return id(test_condition);
          timeout: 200ms

      - lambda: |-
          // Record when timeout completes
          id(timeout_end_time) = millis();
          uint32_t elapsed = id(timeout_end_time) - id(timeout_start_time);

          ESP_LOGD("TEST", "wait_until completed after %u ms (expected ~200ms)", elapsed);

          // Check if timeout took approximately correct time
          // Should be ~200ms, not <50ms (immediate timeout)
          if (elapsed >= 150 && elapsed <= 250) {
            ESP_LOGD("TEST", "✓ Timeout duration correct: %u ms", elapsed);
            id(test_complete) = true;
          } else {
            ESP_LOGE("TEST", "✗ Timeout duration WRONG: %u ms (expected 150-250ms)", elapsed);
            if (elapsed < 50) {
              ESP_LOGE("TEST", "→ Likely BUG: Immediate timeout due to stale loop_component_start_time");
            }
            id(test_complete) = false;
          }
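The comments in the fixture above describe the failure mode in prose; the small standalone C++ sketch below makes the arithmetic concrete. It is illustrative only and not part of the commit or of the actual ESPHome sources: the variable names mirror the comments (loop_component_start_time, start_time), and the millisecond values are assumptions chosen to match the fixture's 100ms delay and 200ms timeout.

#include <cstdint>
#include <cstdio>

int main() {
  const uint32_t timeout = 200;  // wait_until timeout from the fixture (200 ms)

  // Top of the script component's loop(): the application caches the loop start time.
  uint32_t loop_component_start_time = 1000;  // stand-in for App.get_loop_component_start_time()

  // The script then spends 100 ms in `delay: 100ms` before triggering wait_until,
  // which records its start from a fresh millis() reading.
  uint32_t start_time = 1100;  // fresh timestamp, 100 ms after the loop began

  // Buggy elapsed computation: a stale "now" minus the fresh start underflows uint32_t.
  uint32_t elapsed = loop_component_start_time - start_time;  // 1000 - 1100 wraps to 4294967196
  printf("elapsed = %u ms, timeout fires on the first pass: %s\n",
         elapsed, elapsed >= timeout ? "yes" : "no");
  return 0;
}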
tests/integration/test_wait_until_mid_loop_timing.py (new file, 115 lines)
@@ -0,0 +1,115 @@
"""Integration test for PR #11676 mid-loop timing bug.

This test validates that wait_until timeouts work correctly when triggered
mid-component-loop, where App.get_loop_component_start_time() is stale.

The bug: When wait_until is triggered partway through a component's loop execution
(e.g., from a script or automation), the cached loop_component_start_time_ is stale
relative to when the action was actually triggered. This causes timeout calculations
to underflow and time out immediately instead of waiting the specified duration.
"""

from __future__ import annotations

import asyncio
import re

import pytest

from .types import APIClientConnectedFactory, RunCompiledFunction


@pytest.mark.asyncio
@pytest.mark.xfail(
    reason="PR #11676 causes wait_until to timeout immediately when triggered mid-loop"
)
async def test_wait_until_mid_loop_timing(
    yaml_config: str,
    run_compiled: RunCompiledFunction,
    api_client_connected: APIClientConnectedFactory,
) -> None:
    """Test that wait_until timeout works when triggered mid-component-loop.

    This test:
    1. Executes a script that delays 100ms (simulating component work)
    2. Then starts wait_until with 200ms timeout
    3. Verifies timeout takes ~200ms, not <50ms (immediate timeout bug)
    """
    loop = asyncio.get_running_loop()

    # Track test results
    test_results = {
        "timeout_duration": None,
        "passed": False,
        "failed": False,
        "bug_detected": False,
    }

    # Patterns for log messages
    timeout_duration = re.compile(r"wait_until completed after (\d+) ms")
    test_pass = re.compile(r"✓ Timeout duration correct")
    test_fail = re.compile(r"✗ Timeout duration WRONG")
    bug_pattern = re.compile(r"Likely BUG: Immediate timeout")
    test_passed = re.compile(r"✓ Test PASSED")
    test_failed = re.compile(r"✗ Test FAILED")

    test_complete = loop.create_future()

    def check_output(line: str) -> None:
        """Check log output for test results."""
        # Extract timeout duration
        match = timeout_duration.search(line)
        if match:
            test_results["timeout_duration"] = int(match.group(1))

        if test_pass.search(line):
            test_results["passed"] = True
        if test_fail.search(line):
            test_results["failed"] = True
        if bug_pattern.search(line):
            test_results["bug_detected"] = True

        # Final test result
        if (
            test_passed.search(line) or test_failed.search(line)
        ) and not test_complete.done():
            test_complete.set_result(True)

    async with (
        run_compiled(yaml_config, line_callback=check_output),
        api_client_connected() as client,
    ):
        # Get the test service
        _, services = await client.list_entities_services()
        test_service = next(
            (s for s in services if s.name == "test_mid_loop_timeout"), None
        )
        assert test_service is not None, "test_mid_loop_timeout service not found"

        # Execute the test
        client.execute_service(test_service, {})

        # Wait for test to complete (100ms delay + 200ms timeout + margins = ~500ms)
        await asyncio.wait_for(test_complete, timeout=5.0)

        # Verify results
        assert test_results["timeout_duration"] is not None, (
            "Timeout duration not reported"
        )
        assert test_results["passed"], (
            f"Test failed: wait_until took {test_results['timeout_duration']}ms, expected ~200ms. "
            f"Bug detected: {test_results['bug_detected']}"
        )
        assert not test_results["bug_detected"], (
            f"BUG DETECTED: wait_until timed out immediately ({test_results['timeout_duration']}ms) "
            "instead of waiting 200ms. This indicates stale loop_component_start_time."
        )

        # Additional validation: timeout should be ~200ms (150-250ms range)
        duration = test_results["timeout_duration"]
        assert 150 <= duration <= 250, (
            f"Timeout duration {duration}ms outside expected range (150-250ms). "
            f"This suggests timing regression from PR #11676."
        )
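For contrast with the sketch after the fixture, the healthy case the test's docstring and 150-250ms assertion window describe reads both timestamps from the same advancing millis() clock, so the timeout completes after roughly 200ms. Again a minimal illustrative sketch, not part of the commit or the ESPHome sources; the 16ms loop period is an arbitrary assumption.

#include <cstdint>
#include <cstdio>

int main() {
  const uint32_t timeout = 200;   // wait_until timeout from the fixture
  uint32_t now = 1100;            // fresh millis() reading when wait_until is triggered
  const uint32_t start = now;     // start recorded from the same clock

  // Each loop() pass re-reads a fresh timestamp; the condition never becomes
  // true, so the action finishes once the elapsed time reaches the timeout.
  while (now - start < timeout) {
    now += 16;  // assume the main loop runs roughly every 16 ms
  }

  printf("wait_until completed after %u ms (expected ~200ms)\n", now - start);
  return 0;
}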