Mirror of https://github.com/esphome/esphome.git (synced 2025-11-01 15:41:52 +00:00)

Compare commits: climate_st...scheduler_ (7 commits)
| Author | SHA1 | Date |
|---|---|---|
|  | d2249ff8be |  |
|  | d9d2d2f6b9 |  |
|  | 30f2a4395f |  |
|  | 292abd1187 |  |
|  | 6d0527ff2a |  |
|  | fd64585f99 |  |
|  | 077cce9848 |  |
.github/workflows/codeql.yml (vendored, 4 lines changed)
@@ -58,7 +58,7 @@ jobs:
       # Initializes the CodeQL tools for scanning.
       - name: Initialize CodeQL
-        uses: github/codeql-action/init@4e94bd11f71e507f7f87df81788dff88d1dacbfb # v4.31.0
+        uses: github/codeql-action/init@0499de31b99561a6d14a36a5f662c2a54f91beee # v4.31.2
         with:
           languages: ${{ matrix.language }}
           build-mode: ${{ matrix.build-mode }}
@@ -86,6 +86,6 @@ jobs:
           exit 1

       - name: Perform CodeQL Analysis
-        uses: github/codeql-action/analyze@4e94bd11f71e507f7f87df81788dff88d1dacbfb # v4.31.0
+        uses: github/codeql-action/analyze@0499de31b99561a6d14a36a5f662c2a54f91beee # v4.31.2
         with:
           category: "/language:${{matrix.language}}"
.pre-commit-config.yaml

@@ -11,7 +11,7 @@ ci:
 repos:
   - repo: https://github.com/astral-sh/ruff-pre-commit
     # Ruff version.
-    rev: v0.14.2
+    rev: v0.14.3
     hooks:
       # Run the linter.
       - id: ruff
esphome/__main__.py

@@ -207,14 +207,14 @@ def choose_upload_log_host(
         if has_mqtt_logging():
             resolved.append("MQTT")

-        if has_api() and has_non_ip_address():
+        if has_api() and has_non_ip_address() and has_resolvable_address():
             resolved.extend(_resolve_with_cache(CORE.address, purpose))

     elif purpose == Purpose.UPLOADING:
         if has_ota() and has_mqtt_ip_lookup():
             resolved.append("MQTTIP")

-        if has_ota() and has_non_ip_address():
+        if has_ota() and has_non_ip_address() and has_resolvable_address():
             resolved.extend(_resolve_with_cache(CORE.address, purpose))
     else:
         resolved.append(device)
@@ -318,7 +318,17 @@ def has_resolvable_address() -> bool:
     """Check if CORE.address is resolvable (via mDNS, DNS, or is an IP address)."""
-    # Any address (IP, mDNS hostname, or regular DNS hostname) is resolvable
-    # The resolve_ip_address() function in helpers.py handles all types via AsyncResolver
-    return CORE.address is not None
+    if CORE.address is None:
+        return False
+
+    if has_ip_address():
+        return True
+
+    if has_mdns():
+        return True
+
+    # .local mDNS hostnames are only resolvable if mDNS is enabled
+    return not CORE.address.endswith(".local")


 def mqtt_get_ip(config: ConfigType, username: str, password: str, client_id: str):
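The behavioral change in this hunk: an address now counts as resolvable only if it is a literal IP, mDNS is enabled, or it is a non-`.local` DNS hostname. A minimal standalone sketch of that rule (the function signature and `mdns_enabled` flag are illustrative, not the esphome API):

```python
import ipaddress


def is_resolvable(address: str | None, mdns_enabled: bool) -> bool:
    """Illustrative restatement of the new has_resolvable_address() logic."""
    if address is None:
        return False
    try:
        ipaddress.ip_address(address)  # literal IPs need no name resolution
        return True
    except ValueError:
        pass
    if mdns_enabled:
        return True  # mDNS resolves .local names; DNS handles the rest
    # With mDNS disabled, .local names cannot be resolved, but DNS names still can
    return not address.endswith(".local")


# Mirrors the updated expectations in test_has_resolvable_address() below
assert is_resolvable("192.168.1.10", mdns_enabled=False) is True
assert is_resolvable("esphome-device.local", mdns_enabled=False) is False
assert is_resolvable("device.example.com", mdns_enabled=False) is True
```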
esphome/automation.py

@@ -182,7 +182,7 @@ def validate_automation(extra_schema=None, extra_validators=None, single=False):
         value = cv.Schema([extra_validators])(value)
     if single:
         if len(value) != 1:
-            raise cv.Invalid("Cannot have more than 1 automation for templates")
+            raise cv.Invalid("This trigger allows only a single automation")
         return value[0]
     return value
esphome/components/image/__init__.py

@@ -671,18 +671,33 @@ async def write_image(config, all_frames=False):
     resize = config.get(CONF_RESIZE)
     if is_svg_file(path):
         # Local import so use of non-SVG files needn't require cairosvg installed
+        from pyexpat import ExpatError
+        from xml.etree.ElementTree import ParseError
+
         from cairosvg import svg2png
+        from cairosvg.helpers import PointError

         if not resize:
             resize = (None, None)
-        with open(path, "rb") as file:
-            image = svg2png(
-                file_obj=file,
-                output_width=resize[0],
-                output_height=resize[1],
-            )
-        image = Image.open(io.BytesIO(image))
-        width, height = image.size
+        try:
+            with open(path, "rb") as file:
+                image = svg2png(
+                    file_obj=file,
+                    output_width=resize[0],
+                    output_height=resize[1],
+                )
+            image = Image.open(io.BytesIO(image))
+            width, height = image.size
+        except (
+            ValueError,
+            ParseError,
+            IndexError,
+            ExpatError,
+            AttributeError,
+            TypeError,
+            PointError,
+        ) as e:
+            raise core.EsphomeError(f"Could not load SVG image {path}: {e}") from e
     else:
         image = Image.open(path)
         width, height = image.size
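The except list above is broad because cairosvg surfaces malformed input through several unrelated exception types rather than one wrapper error. A small sketch of the kind of failure being caught (assuming cairosvg is installed; the truncated SVG payload is illustrative):

```python
from xml.etree.ElementTree import ParseError

from cairosvg import svg2png

try:
    svg2png(bytestring=b"<svg><rect")  # truncated XML, no closing tags
except ParseError as e:
    # Before this change such errors propagated as raw tracebacks;
    # now they are re-raised as one EsphomeError carrying the file path.
    print(f"Could not load SVG image: {e}")
```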
Jinja expression evaluation (_concat_nodes_override)

@@ -138,6 +138,7 @@ def _concat_nodes_override(values: Iterator[Any]) -> Any:
     values = chain(head, values)
     raw = "".join([str(v) for v in values])

+    result = None
     try:
         # Attempt to parse the concatenated string into a Python literal.
         # This allows expressions like "1 + 2" to be evaluated to the integer 3.

@@ -145,11 +146,16 @@ def _concat_nodes_override(values: Iterator[Any]) -> Any:
         # fall back to returning the raw string. This is consistent with
         # Home Assistant's behavior when evaluating templates
         result = literal_eval(raw)
+    except (ValueError, SyntaxError, MemoryError, TypeError):
+        pass
+    else:
         if isinstance(result, set):
             # Sets are not supported, return raw string
             return raw

         if not isinstance(result, str):
             return result

-    except (ValueError, SyntaxError, MemoryError, TypeError):
-        pass
     return raw
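Two things happen in this hunk: the isinstance checks move into an `else:` clause so the `except` no longer masks errors raised while inspecting the parsed result, and sets are still explicitly rejected. For reference, the underlying literal_eval behavior the code relies on:

```python
from ast import literal_eval

print(literal_eval("1 + 2"))         # 3: numeric literal folding, as the comment notes
print(literal_eval("[1, 2, 3]"))     # [1, 2, 3]: containers parse into real Python values
print(type(literal_eval("{1, 2}")))  # <class 'set'>: parses, but the override returns raw instead

try:
    literal_eval("turn on the light")  # arbitrary prose is not a literal
except (ValueError, SyntaxError):
    print("not a literal: fall back to the raw string")
```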
esphome/core/scheduler.cpp

@@ -316,59 +316,37 @@ optional<uint32_t> HOT Scheduler::next_schedule_in(uint32_t now) {
     return 0;
   return next_exec - now_64;
 }

+void Scheduler::full_cleanup_removed_items_() {
+  // We hold the lock for the entire cleanup operation because:
+  // 1. We're rebuilding the entire items_ list, so we need exclusive access throughout
+  // 2. Other threads must see either the old state or the new state, not intermediate states
+  // 3. The operation is already expensive (O(n)), so lock overhead is negligible
+  // 4. No operations inside can block or take other locks, so no deadlock risk
+  LockGuard guard{this->lock_};
+
+  std::vector<std::unique_ptr<SchedulerItem>> valid_items;
+
+  // Move all non-removed items to valid_items, recycle removed ones
+  for (auto &item : this->items_) {
+    if (!is_item_removed_(item.get())) {
+      valid_items.push_back(std::move(item));
+    } else {
+      // Recycle removed items
+      this->recycle_item_(std::move(item));
+    }
+  }
+
+  // Replace items_ with the filtered list
+  this->items_ = std::move(valid_items);
+  // Rebuild the heap structure since items are no longer in heap order
+  std::make_heap(this->items_.begin(), this->items_.end(), SchedulerItem::cmp);
+  this->to_remove_ = 0;
+}
+
 void HOT Scheduler::call(uint32_t now) {
 #ifndef ESPHOME_THREAD_SINGLE
-  // Process defer queue first to guarantee FIFO execution order for deferred items.
-  // Previously, defer() used the heap which gave undefined order for equal timestamps,
-  // causing race conditions on multi-core systems (ESP32, BK7200).
-  // With the defer queue:
-  // - Deferred items (delay=0) go directly to defer_queue_ in set_timer_common_
-  // - Items execute in exact order they were deferred (FIFO guarantee)
-  // - No deferred items exist in to_add_, so processing order doesn't affect correctness
-  // Single-core platforms don't use this queue and fall back to the heap-based approach.
-  //
-  // Note: Items cancelled via cancel_item_locked_() are marked with remove=true but still
-  // processed here. They are skipped during execution by should_skip_item_().
-  // This is intentional - no memory leak occurs.
-  //
-  // We use an index (defer_queue_front_) to track the read position instead of calling
-  // erase() on every pop, which would be O(n). The queue is processed once per loop -
-  // any items added during processing are left for the next loop iteration.
-
-  // Snapshot the queue end point - only process items that existed at loop start
-  // Items added during processing (by callbacks or other threads) run next loop
-  // No lock needed: single consumer (main loop), stale read just means we process less this iteration
-  size_t defer_queue_end = this->defer_queue_.size();
-
-  while (this->defer_queue_front_ < defer_queue_end) {
-    std::unique_ptr<SchedulerItem> item;
-    {
-      LockGuard lock(this->lock_);
-      // SAFETY: Moving out the unique_ptr leaves a nullptr in the vector at defer_queue_front_.
-      // This is intentional and safe because:
-      // 1. The vector is only cleaned up by cleanup_defer_queue_locked_() at the end of this function
-      // 2. Any code iterating defer_queue_ MUST check for nullptr items (see mark_matching_items_removed_
-      //    and has_cancelled_timeout_in_container_ in scheduler.h)
-      // 3. The lock protects concurrent access, but the nullptr remains until cleanup
-      item = std::move(this->defer_queue_[this->defer_queue_front_]);
-      this->defer_queue_front_++;
-    }
-
-    // Execute callback without holding lock to prevent deadlocks
-    // if the callback tries to call defer() again
-    if (!this->should_skip_item_(item.get())) {
-      now = this->execute_item_(item.get(), now);
-    }
-    // Recycle the defer item after execution
-    this->recycle_item_(std::move(item));
-  }
-
-  // If we've consumed all items up to the snapshot point, clean up the dead space
-  // Single consumer (main loop), so no lock needed for this check
-  if (this->defer_queue_front_ >= defer_queue_end) {
-    LockGuard lock(this->lock_);
-    this->cleanup_defer_queue_locked_();
-  }
+  this->process_defer_queue_(now);
 #endif /* not ESPHOME_THREAD_SINGLE */

   // Convert the fresh timestamp from main loop to 64-bit for scheduler operations
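The extracted full_cleanup_removed_items_() is a lazy-deletion sweep: filter out logically removed entries, then rebuild the heap invariant in one pass. The same pattern in a short Python analogue (names illustrative):

```python
import heapq


def full_cleanup(heap: list, is_removed) -> list:
    """Drop logically deleted entries and restore the heap invariant.

    Analogue of full_cleanup_removed_items_: one O(n) filter plus one
    O(n) heapify, rather than O(log n) sift work per removed item.
    """
    valid = [item for item in heap if not is_removed(item)]
    heapq.heapify(valid)  # filtering broke heap order; rebuild in place
    return valid


# Example: a lazy-deletion heap where cancelled entries are flagged, not popped
items = [(1, "a"), (2, "cancelled"), (3, "b"), (4, "cancelled")]
heapq.heapify(items)
items = full_cleanup(items, lambda it: it[1] == "cancelled")
print(items)  # only live entries remain, still a valid heap
```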
@@ -429,30 +407,7 @@ void HOT Scheduler::call(uint32_t now) {
   // If we still have too many cancelled items, do a full cleanup
   // This only happens if cancelled items are stuck in the middle/bottom of the heap
   if (this->to_remove_ >= MAX_LOGICALLY_DELETED_ITEMS) {
-    // We hold the lock for the entire cleanup operation because:
-    // 1. We're rebuilding the entire items_ list, so we need exclusive access throughout
-    // 2. Other threads must see either the old state or the new state, not intermediate states
-    // 3. The operation is already expensive (O(n)), so lock overhead is negligible
-    // 4. No operations inside can block or take other locks, so no deadlock risk
-    LockGuard guard{this->lock_};
-
-    std::vector<std::unique_ptr<SchedulerItem>> valid_items;
-
-    // Move all non-removed items to valid_items, recycle removed ones
-    for (auto &item : this->items_) {
-      if (!is_item_removed_(item.get())) {
-        valid_items.push_back(std::move(item));
-      } else {
-        // Recycle removed items
-        this->recycle_item_(std::move(item));
-      }
-    }
-
-    // Replace items_ with the filtered list
-    this->items_ = std::move(valid_items);
-    // Rebuild the heap structure since items are no longer in heap order
-    std::make_heap(this->items_.begin(), this->items_.end(), SchedulerItem::cmp);
-    this->to_remove_ = 0;
+    this->full_cleanup_removed_items_();
   }
   while (!this->items_.empty()) {
     // Don't copy-by value yet
esphome/core/scheduler.h

@@ -263,7 +263,65 @@ class Scheduler {
   // Helper to recycle a SchedulerItem
   void recycle_item_(std::unique_ptr<SchedulerItem> item);

+  // Helper to perform full cleanup when too many items are cancelled
+  void full_cleanup_removed_items_();
+
+#ifndef ESPHOME_THREAD_SINGLE
+  // Helper to process defer queue - inline for performance in hot path
+  inline void process_defer_queue_(uint32_t &now) {
+    // Process defer queue first to guarantee FIFO execution order for deferred items.
+    // Previously, defer() used the heap which gave undefined order for equal timestamps,
+    // causing race conditions on multi-core systems (ESP32, BK7200).
+    // With the defer queue:
+    // - Deferred items (delay=0) go directly to defer_queue_ in set_timer_common_
+    // - Items execute in exact order they were deferred (FIFO guarantee)
+    // - No deferred items exist in to_add_, so processing order doesn't affect correctness
+    // Single-core platforms don't use this queue and fall back to the heap-based approach.
+    //
+    // Note: Items cancelled via cancel_item_locked_() are marked with remove=true but still
+    // processed here. They are skipped during execution by should_skip_item_().
+    // This is intentional - no memory leak occurs.
+    //
+    // We use an index (defer_queue_front_) to track the read position instead of calling
+    // erase() on every pop, which would be O(n). The queue is processed once per loop -
+    // any items added during processing are left for the next loop iteration.
+
+    // Snapshot the queue end point - only process items that existed at loop start
+    // Items added during processing (by callbacks or other threads) run next loop
+    // No lock needed: single consumer (main loop), stale read just means we process less this iteration
+    size_t defer_queue_end = this->defer_queue_.size();
+
+    while (this->defer_queue_front_ < defer_queue_end) {
+      std::unique_ptr<SchedulerItem> item;
+      {
+        LockGuard lock(this->lock_);
+        // SAFETY: Moving out the unique_ptr leaves a nullptr in the vector at defer_queue_front_.
+        // This is intentional and safe because:
+        // 1. The vector is only cleaned up by cleanup_defer_queue_locked_() at the end of this function
+        // 2. Any code iterating defer_queue_ MUST check for nullptr items (see mark_matching_items_removed_
+        //    and has_cancelled_timeout_in_container_ in scheduler.h)
+        // 3. The lock protects concurrent access, but the nullptr remains until cleanup
+        item = std::move(this->defer_queue_[this->defer_queue_front_]);
+        this->defer_queue_front_++;
+      }
+
+      // Execute callback without holding lock to prevent deadlocks
+      // if the callback tries to call defer() again
+      if (!this->should_skip_item_(item.get())) {
+        now = this->execute_item_(item.get(), now);
+      }
+      // Recycle the defer item after execution
+      this->recycle_item_(std::move(item));
+    }
+
+    // If we've consumed all items up to the snapshot point, clean up the dead space
+    // Single consumer (main loop), so no lock needed for this check
+    if (this->defer_queue_front_ >= defer_queue_end) {
+      LockGuard lock(this->lock_);
+      this->cleanup_defer_queue_locked_();
+    }
+  }
+
   // Helper to cleanup defer_queue_ after processing
   // IMPORTANT: Caller must hold the scheduler lock before calling this function.
   inline void cleanup_defer_queue_locked_() {
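The consumption pattern documented in process_defer_queue_ (a read index instead of an erase per pop, a snapshot of the queue end, and the lock held only around the handoff) can be restated compactly. A single-consumer Python analogue, names illustrative:

```python
import threading
from collections.abc import Callable


class DeferQueue:
    """Illustrative analogue of defer_queue_ and defer_queue_front_."""

    def __init__(self) -> None:
        self._items: list[Callable[[], None] | None] = []
        self._front = 0  # read index: O(1) per pop instead of O(n) erase()
        self._lock = threading.Lock()

    def defer(self, callback: Callable[[], None]) -> None:
        with self._lock:
            self._items.append(callback)

    def process_once(self) -> None:
        # Snapshot the end point: items deferred during processing run next loop
        end = len(self._items)
        while self._front < end:
            with self._lock:
                # Take ownership, leaving None behind (like the moved-from unique_ptr)
                item = self._items[self._front]
                self._items[self._front] = None
                self._front += 1
            assert item is not None  # each slot is consumed exactly once
            item()  # run outside the lock so the callback may defer() again
        with self._lock:
            # Compact the consumed slots; keep anything deferred meanwhile
            del self._items[: self._front]
            self._front = 0
```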
requirements_test.txt

@@ -1,6 +1,6 @@
 pylint==4.0.2
 flake8==7.3.0 # also change in .pre-commit-config.yaml when updating
-ruff==0.14.2 # also change in .pre-commit-config.yaml when updating
+ruff==0.14.3 # also change in .pre-commit-config.yaml when updating
 pyupgrade==3.21.0 # also change in .pre-commit-config.yaml when updating
 pre-commit
Component test YAML (test_list rendering, two variants)

@@ -33,3 +33,4 @@ test_list:
       {{{ "x", "79"}, { "y", "82"}}}
   - '{{{"AA"}}}'
   - '"HELLO"'
+  - '{ 79, 82 }'

@@ -34,3 +34,4 @@ test_list:
       {{{ "x", "${ position.x }"}, { "y", "${ position.y }"}}}
   - ${ '{{{"AA"}}}' }
   - ${ '"HELLO"' }
+  - '{ ${position.x}, ${position.y} }'
tests/unit_tests/test_main.py

@@ -744,7 +744,7 @@ def test_choose_upload_log_host_ota_local_all_options() -> None:
         check_default=None,
         purpose=Purpose.UPLOADING,
     )
-    assert result == ["MQTTIP", "test.local"]
+    assert result == ["MQTTIP"]


 @pytest.mark.usefixtures("mock_serial_ports")

@@ -794,7 +794,7 @@ def test_choose_upload_log_host_ota_local_all_options_logging() -> None:
         check_default=None,
         purpose=Purpose.LOGGING,
     )
-    assert result == ["MQTTIP", "MQTT", "test.local"]
+    assert result == ["MQTTIP", "MQTT"]


 @pytest.mark.usefixtures("mock_no_mqtt_logging")

@@ -1564,7 +1564,7 @@ def test_has_resolvable_address() -> None:
     setup_core(
         config={CONF_MDNS: {CONF_DISABLED: True}}, address="esphome-device.local"
     )
-    assert has_resolvable_address() is True
+    assert has_resolvable_address() is False

     # Test with mDNS disabled and regular DNS hostname (resolvable)
     setup_core(config={CONF_MDNS: {CONF_DISABLED: True}}, address="device.example.com")