mirror of https://github.com/esphome/esphome.git synced 2025-11-03 16:41:50 +00:00

Compare commits


7 Commits

Author  SHA1  Message  Date
J. Nick Koston  7504219a2d  fix  2025-10-30 23:20:27 -05:00
J. Nick Koston  6947df56d5  wip  2025-10-30 23:14:20 -05:00
J. Nick Koston  29c97686fa  wip  2025-10-30 23:12:26 -05:00
J. Nick Koston  f3087abd55  [display] Optimize display writers with function pointers for stateless lambdas  2025-10-30 23:08:23 -05:00
J. Nick Koston  f55dfc7f5d  wip  2025-10-30 22:57:07 -05:00
J. Nick Koston  c5a9d30362  wip  2025-10-30 22:57:00 -05:00
J. Nick Koston  fae06133d4  wip  2025-10-30 22:53:14 -05:00
91 changed files with 806 additions and 2518 deletions

View File

@@ -192,11 +192,6 @@ jobs:
with:
python-version: ${{ env.DEFAULT_PYTHON }}
cache-key: ${{ needs.common.outputs.cache-key }}
- name: Restore components graph cache
uses: actions/cache/restore@0057852bfaa89a56745cba8c7296529d2fc39830 # v4.3.0
with:
path: .temp/components_graph.json
key: components-graph-${{ hashFiles('esphome/components/**/*.py') }}
- name: Determine which tests to run
id: determine
env:
@@ -221,12 +216,6 @@ jobs:
echo "cpp-unit-tests-run-all=$(echo "$output" | jq -r '.cpp_unit_tests_run_all')" >> $GITHUB_OUTPUT
echo "cpp-unit-tests-components=$(echo "$output" | jq -c '.cpp_unit_tests_components')" >> $GITHUB_OUTPUT
echo "component-test-batches=$(echo "$output" | jq -c '.component_test_batches')" >> $GITHUB_OUTPUT
- name: Save components graph cache
if: github.ref == 'refs/heads/dev'
uses: actions/cache/save@0057852bfaa89a56745cba8c7296529d2fc39830 # v4.3.0
with:
path: .temp/components_graph.json
key: components-graph-${{ hashFiles('esphome/components/**/*.py') }}
integration-tests:
name: Run integration tests

View File

@@ -11,7 +11,7 @@ ci:
repos:
- repo: https://github.com/astral-sh/ruff-pre-commit
# Ruff version.
rev: v0.14.3
rev: v0.14.2
hooks:
# Run the linter.
- id: ruff

View File

@@ -15,7 +15,7 @@ from esphome.const import (
CONF_TYPE_ID,
CONF_UPDATE_INTERVAL,
)
from esphome.core import ID, Lambda
from esphome.core import ID
from esphome.cpp_generator import (
LambdaExpression,
MockObj,
@@ -182,7 +182,7 @@ def validate_automation(extra_schema=None, extra_validators=None, single=False):
value = cv.Schema([extra_validators])(value)
if single:
if len(value) != 1:
raise cv.Invalid("This trigger allows only a single automation")
raise cv.Invalid("Cannot have more than 1 automation for templates")
return value[0]
return value
@@ -310,30 +310,6 @@ async def for_condition_to_code(
return var
@register_condition(
"component.is_idle",
LambdaCondition,
maybe_simple_id(
{
cv.Required(CONF_ID): cv.use_id(cg.Component),
}
),
)
async def component_is_idle_condition_to_code(
config: ConfigType,
condition_id: ID,
template_arg: cg.TemplateArguments,
args: TemplateArgsType,
) -> MockObj:
comp = await cg.get_variable(config[CONF_ID])
lambda_ = await cg.process_lambda(
Lambda(f"return {comp}->is_idle();"), args, return_type=bool
)
return new_lambda_pvariable(
condition_id, lambda_, StatelessLambdaCondition, template_arg
)
@register_action(
"delay", DelayAction, cv.templatable(cv.positive_time_period_milliseconds)
)

View File

@@ -425,7 +425,7 @@ message ListEntitiesFanResponse {
bool disabled_by_default = 9;
string icon = 10 [(field_ifdef) = "USE_ENTITY_ICON"];
EntityCategory entity_category = 11;
repeated string supported_preset_modes = 12 [(container_pointer_no_template) = "std::vector<const char *>"];
repeated string supported_preset_modes = 12 [(container_pointer) = "std::set"];
uint32 device_id = 13 [(field_ifdef) = "USE_DEVICES"];
}
// Deprecated in API version 1.6 - only used in deprecated fields

View File

@@ -423,7 +423,7 @@ uint16_t APIConnection::try_send_fan_info(EntityBase *entity, APIConnection *con
msg.supports_speed = traits.supports_speed();
msg.supports_direction = traits.supports_direction();
msg.supported_speed_count = traits.supported_speed_count();
msg.supported_preset_modes = &traits.supported_preset_modes();
msg.supported_preset_modes = &traits.supported_preset_modes_for_api_();
return fill_and_encode_entity_info(fan, msg, ListEntitiesFanResponse::MESSAGE_TYPE, conn, remaining_size, is_single);
}
void APIConnection::fan_command(const FanCommandRequest &msg) {

View File

@@ -434,7 +434,8 @@ APIError APINoiseFrameHelper::write_protobuf_packets(ProtoWriteBuffer buffer, st
return APIError::OK;
}
uint8_t *buffer_data = buffer.get_buffer()->data();
std::vector<uint8_t> *raw_buffer = buffer.get_buffer();
uint8_t *buffer_data = raw_buffer->data(); // Cache buffer pointer
this->reusable_iovs_.clear();
this->reusable_iovs_.reserve(packets.size());

View File

@@ -230,7 +230,8 @@ APIError APIPlaintextFrameHelper::write_protobuf_packets(ProtoWriteBuffer buffer
return APIError::OK;
}
uint8_t *buffer_data = buffer.get_buffer()->data();
std::vector<uint8_t> *raw_buffer = buffer.get_buffer();
uint8_t *buffer_data = raw_buffer->data(); // Cache buffer pointer
this->reusable_iovs_.clear();
this->reusable_iovs_.reserve(packets.size());

View File

@@ -355,8 +355,8 @@ void ListEntitiesFanResponse::encode(ProtoWriteBuffer buffer) const {
buffer.encode_string(10, this->icon_ref_);
#endif
buffer.encode_uint32(11, static_cast<uint32_t>(this->entity_category));
for (const char *it : *this->supported_preset_modes) {
buffer.encode_string(12, it, strlen(it), true);
for (const auto &it : *this->supported_preset_modes) {
buffer.encode_string(12, it, true);
}
#ifdef USE_DEVICES
buffer.encode_uint32(13, this->device_id);
@@ -376,8 +376,8 @@ void ListEntitiesFanResponse::calculate_size(ProtoSize &size) const {
#endif
size.add_uint32(1, static_cast<uint32_t>(this->entity_category));
if (!this->supported_preset_modes->empty()) {
for (const char *it : *this->supported_preset_modes) {
size.add_length_force(1, strlen(it));
for (const auto &it : *this->supported_preset_modes) {
size.add_length_force(1, it.size());
}
}
#ifdef USE_DEVICES
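
Note: the hunks above swap the container behind supported_preset_modes, but the wire format stays the same: each element of a repeated string field is emitted as its own length-delimited record. A standalone sketch of that encoding with generic names (this is not the ESPHome ProtoWriteBuffer API):

#include <cstdint>
#include <string>
#include <vector>

// Emit a protobuf varint (7 bits per byte, MSB set on all but the last byte).
static void write_varint(std::vector<uint8_t> &out, uint32_t v) {
  while (v >= 0x80) {
    out.push_back(static_cast<uint8_t>((v & 0x7F) | 0x80));
    v >>= 7;
  }
  out.push_back(static_cast<uint8_t>(v));
}

// Each string is a separate record: tag = (field << 3) | 2, then length, then bytes.
static void encode_repeated_string(std::vector<uint8_t> &out, uint32_t field,
                                   const std::vector<std::string> &values) {
  for (const auto &s : values) {
    write_varint(out, (field << 3) | 2);  // wire type 2 = length-delimited
    write_varint(out, static_cast<uint32_t>(s.size()));
    out.insert(out.end(), s.begin(), s.end());
  }
}

Whether the source container is a std::set<std::string> or a std::vector<const char *> only changes how the length and bytes are obtained (it.size() versus strlen(it)), which is exactly the difference visible in the encode() and calculate_size() hunks.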

View File

@@ -725,7 +725,7 @@ class ListEntitiesFanResponse final : public InfoResponseProtoMessage {
bool supports_speed{false};
bool supports_direction{false};
int32_t supported_speed_count{0};
const std::vector<const char *> *supported_preset_modes{};
const std::set<std::string> *supported_preset_modes{};
void encode(ProtoWriteBuffer buffer) const override;
void calculate_size(ProtoSize &size) const override;
#ifdef HAS_PROTO_MESSAGE_DUMP

View File

@@ -77,9 +77,6 @@ void BLESensor::gattc_event_handler(esp_gattc_cb_event_t event, esp_gatt_if_t ga
}
} else {
this->node_state = espbt::ClientState::ESTABLISHED;
// For non-notify characteristics, trigger an immediate read after service discovery
// to avoid peripherals disconnecting due to inactivity
this->update();
}
break;
}

View File

@@ -79,9 +79,6 @@ void BLETextSensor::gattc_event_handler(esp_gattc_cb_event_t event, esp_gatt_if_
}
} else {
this->node_state = espbt::ClientState::ESTABLISHED;
// For non-notify characteristics, trigger an immediate read after service discovery
// to avoid peripherals disconnecting due to inactivity
this->update();
}
break;
}

View File

@@ -176,7 +176,117 @@ class Display;
class DisplayPage;
class DisplayOnPageChangeTrigger;
using display_writer_t = std::function<void(Display &)>;
/** Optimized display writer that uses function pointers for stateless lambdas.
*
* Similar to TemplatableValue but specialized for display writer callbacks.
* Saves ~8 bytes per stateless lambda on 32-bit platforms (16 bytes std::function → ~8 bytes discriminator+pointer).
*
* Supports both:
* - Stateless lambdas (from YAML) → function pointer (4 bytes)
* - Stateful lambdas/std::function (from C++ code) → std::function* (heap allocated)
*
* @tparam T The display type (e.g., Display, Nextion, GPIOLCDDisplay)
*/
template<typename T> class DisplayWriter {
public:
DisplayWriter() : type_(NONE) {}
// For stateless lambdas (convertible to function pointer): use function pointer (4 bytes)
template<typename F>
DisplayWriter(F f) requires std::invocable<F, T &> && std::convertible_to<F, void (*)(T &)>
: type_(STATELESS_LAMBDA) {
this->stateless_f_ = f; // Implicit conversion to function pointer
}
// For stateful lambdas and std::function (not convertible to function pointer): use std::function* (heap allocated)
// This handles backwards compatibility with external components
template<typename F>
DisplayWriter(F f) requires std::invocable<F, T &> &&(!std::convertible_to<F, void (*)(T &)>) : type_(LAMBDA) {
this->f_ = new std::function<void(T &)>(std::move(f));
}
// Copy constructor
DisplayWriter(const DisplayWriter &other) : type_(other.type_) {
if (type_ == LAMBDA) {
this->f_ = new std::function<void(T &)>(*other.f_);
} else if (type_ == STATELESS_LAMBDA) {
this->stateless_f_ = other.stateless_f_;
}
}
// Move constructor
DisplayWriter(DisplayWriter &&other) noexcept : type_(other.type_) {
if (type_ == LAMBDA) {
this->f_ = other.f_;
other.f_ = nullptr;
} else if (type_ == STATELESS_LAMBDA) {
this->stateless_f_ = other.stateless_f_;
}
other.type_ = NONE;
}
// Assignment operators
DisplayWriter &operator=(const DisplayWriter &other) {
if (this != &other) {
this->~DisplayWriter();
new (this) DisplayWriter(other);
}
return *this;
}
DisplayWriter &operator=(DisplayWriter &&other) noexcept {
if (this != &other) {
this->~DisplayWriter();
new (this) DisplayWriter(std::move(other));
}
return *this;
}
~DisplayWriter() {
if (type_ == LAMBDA) {
delete this->f_;
}
// STATELESS_LAMBDA/NONE: no cleanup needed (function pointer or empty)
}
bool has_value() const { return this->type_ != NONE; }
void call(T &display) const {
switch (this->type_) {
case STATELESS_LAMBDA:
this->stateless_f_(display); // Direct function pointer call
break;
case LAMBDA:
(*this->f_)(display); // std::function call
break;
case NONE:
default:
break;
}
}
// Operator() for convenience
void operator()(T &display) const { this->call(display); }
// Operator* for backwards compatibility with (*writer_)(*this) pattern
DisplayWriter &operator*() { return *this; }
const DisplayWriter &operator*() const { return *this; }
protected:
enum : uint8_t {
NONE,
LAMBDA,
STATELESS_LAMBDA,
} type_;
union {
std::function<void(T &)> *f_;
void (*stateless_f_)(T &);
};
};
// Type alias for Display writer - uses optimized DisplayWriter instead of std::function
using display_writer_t = DisplayWriter<Display>;
#define LOG_DISPLAY(prefix, type, obj) \
if ((obj) != nullptr) { \
@@ -678,7 +788,7 @@ class Display : public PollingComponent {
void sort_triangle_points_by_y_(int *x1, int *y1, int *x2, int *y2, int *x3, int *y3);
DisplayRotation rotation_{DISPLAY_ROTATION_0_DEGREES};
optional<display_writer_t> writer_{};
display_writer_t writer_{};
DisplayPage *page_{nullptr};
DisplayPage *previous_page_{nullptr};
std::vector<DisplayOnPageChangeTrigger *> on_page_change_triggers_;
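
A self-contained sketch of the tagged-union technique the DisplayWriter comment above describes (simplified names; copy/move support omitted for brevity): a captureless lambda decays to a raw function pointer stored inline, while a capturing lambda falls back to a heap-allocated std::function.

#include <concepts>
#include <cstdint>
#include <functional>
#include <utility>

template<typename T> class SmallWriter {
 public:
  SmallWriter() = default;
  // Stateless lambdas convert to a plain function pointer: no heap allocation.
  template<typename F>
  SmallWriter(F f) requires std::convertible_to<F, void (*)(T &)> : type_(FN_PTR) {
    this->fn_ptr_ = f;
  }
  // Capturing lambdas fall back to a heap-allocated std::function.
  template<typename F>
  SmallWriter(F f) requires std::invocable<F, T &> && (!std::convertible_to<F, void (*)(T &)>) : type_(FUNC) {
    this->func_ = new std::function<void(T &)>(std::move(f));
  }
  SmallWriter(const SmallWriter &) = delete;  // copy/move omitted to keep the sketch short
  ~SmallWriter() {
    if (this->type_ == FUNC)
      delete this->func_;
  }
  bool has_value() const { return this->type_ != NONE; }
  void operator()(T &t) const {
    if (this->type_ == FN_PTR) {
      this->fn_ptr_(t);
    } else if (this->type_ == FUNC) {
      (*this->func_)(t);
    }
  }

 protected:
  enum : uint8_t { NONE, FN_PTR, FUNC } type_{NONE};
  union {
    void (*fn_ptr_)(T &);
    std::function<void(T &)> *func_;
  };
};

struct DemoDisplay { int value{0}; };

void demo() {
  SmallWriter<DemoDisplay> stateless([](DemoDisplay &d) { d.value = 1; });  // function-pointer path
  int offset = 5;
  SmallWriter<DemoDisplay> stateful([offset](DemoDisplay &d) { d.value = offset; });  // std::function path
  DemoDisplay d;
  if (stateless.has_value())
    stateless(d);
  stateful(d);
}

The discriminator plus a single pointer is what keeps the stateless path at roughly half the footprint of a full std::function on 32-bit targets.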

View File

@@ -96,11 +96,7 @@ void loop_task(void *pv_params) {
extern "C" void app_main() {
esp32::setup_preferences();
#if CONFIG_FREERTOS_UNICORE
xTaskCreate(loop_task, "loopTask", 8192, nullptr, 1, &loop_task_handle);
#else
xTaskCreatePinnedToCore(loop_task, "loopTask", 8192, nullptr, 1, &loop_task_handle, 1);
#endif
}
#endif // USE_ESP_IDF

View File

@@ -7,7 +7,6 @@ from typing import Any
from esphome import automation
import esphome.codegen as cg
from esphome.components import socket
from esphome.components.esp32 import add_idf_sdkconfig_option, const, get_esp32_variant
import esphome.config_validation as cv
from esphome.const import (
@@ -482,14 +481,6 @@ async def to_code(config):
cg.add(var.set_name(name))
await cg.register_component(var, config)
# BLE uses 1 UDP socket for event notification to wake up main loop from select()
# This enables low-latency (~12μs) BLE event processing instead of waiting for
# select() timeout (0-16ms). The socket is created in ble_setup_() and used to
# wake lwip_select() when BLE events arrive from the BLE thread.
# Note: Called during config generation, socket is created at runtime. In practice,
# always used since esp32_ble only runs on ESP32 which always has USE_SOCKET_SELECT_SUPPORT.
socket.consume_sockets(1, "esp32_ble")(config)
# Define max connections for use in C++ code (e.g., ble_server.h)
max_connections = config.get(CONF_MAX_CONNECTIONS, DEFAULT_MAX_CONNECTIONS)
cg.add_define("USE_ESP32_BLE_MAX_CONNECTIONS", max_connections)

View File

@@ -27,34 +27,10 @@ extern "C" {
#include <esp32-hal-bt.h>
#endif
#ifdef USE_SOCKET_SELECT_SUPPORT
#include <lwip/sockets.h>
#endif
namespace esphome::esp32_ble {
static const char *const TAG = "esp32_ble";
// GAP event groups for deduplication across gap_event_handler and dispatch_gap_event_
#define GAP_SCAN_COMPLETE_EVENTS \
case ESP_GAP_BLE_SCAN_PARAM_SET_COMPLETE_EVT: \
case ESP_GAP_BLE_SCAN_START_COMPLETE_EVT: \
case ESP_GAP_BLE_SCAN_STOP_COMPLETE_EVT
#define GAP_ADV_COMPLETE_EVENTS \
case ESP_GAP_BLE_ADV_DATA_SET_COMPLETE_EVT: \
case ESP_GAP_BLE_SCAN_RSP_DATA_SET_COMPLETE_EVT: \
case ESP_GAP_BLE_ADV_DATA_RAW_SET_COMPLETE_EVT: \
case ESP_GAP_BLE_ADV_START_COMPLETE_EVT: \
case ESP_GAP_BLE_ADV_STOP_COMPLETE_EVT
#define GAP_SECURITY_EVENTS \
case ESP_GAP_BLE_AUTH_CMPL_EVT: \
case ESP_GAP_BLE_SEC_REQ_EVT: \
case ESP_GAP_BLE_PASSKEY_NOTIF_EVT: \
case ESP_GAP_BLE_PASSKEY_REQ_EVT: \
case ESP_GAP_BLE_NC_REQ_EVT
void ESP32BLE::setup() {
global_ble = this;
if (!ble_pre_setup_()) {
@@ -297,21 +273,10 @@ bool ESP32BLE::ble_setup_() {
// BLE takes some time to be fully set up, 200ms should be more than enough
delay(200); // NOLINT
// Set up notification socket to wake main loop for BLE events
// This enables low-latency (~12μs) event processing instead of waiting for select() timeout
#ifdef USE_SOCKET_SELECT_SUPPORT
this->setup_event_notification_();
#endif
return true;
}
bool ESP32BLE::ble_dismantle_() {
// Clean up notification socket first before dismantling BLE stack
#ifdef USE_SOCKET_SELECT_SUPPORT
this->cleanup_event_notification_();
#endif
esp_err_t err = esp_bluedroid_disable();
if (err != ESP_OK) {
ESP_LOGE(TAG, "esp_bluedroid_disable failed: %d", err);
@@ -409,12 +374,6 @@ void ESP32BLE::loop() {
break;
}
#ifdef USE_SOCKET_SELECT_SUPPORT
// Drain any notification socket events first
// This clears the socket so it doesn't stay "ready" in subsequent select() calls
this->drain_event_notifications_();
#endif
BLEEvent *ble_event = this->ble_events_.pop();
while (ble_event != nullptr) {
switch (ble_event->type_) {
@@ -455,48 +414,60 @@ void ESP32BLE::loop() {
break;
// Scan complete events
GAP_SCAN_COMPLETE_EVENTS:
// Advertising complete events
GAP_ADV_COMPLETE_EVENTS:
// RSSI complete event
case ESP_GAP_BLE_READ_RSSI_COMPLETE_EVT:
// Security events
GAP_SECURITY_EVENTS:
case ESP_GAP_BLE_SCAN_PARAM_SET_COMPLETE_EVT:
case ESP_GAP_BLE_SCAN_START_COMPLETE_EVT:
case ESP_GAP_BLE_SCAN_STOP_COMPLETE_EVT:
// All three scan complete events have the same structure with just status
// The scan_complete struct matches ESP-IDF's layout exactly, so this reinterpret_cast is safe
// This is verified at compile-time by static_assert checks in ble_event.h
// The struct already contains our copy of the status (copied in BLEEvent constructor)
ESP_LOGV(TAG, "gap_event_handler - %d", gap_event);
#ifdef ESPHOME_ESP32_BLE_GAP_EVENT_HANDLER_COUNT
{
esp_ble_gap_cb_param_t *param;
// clang-format off
switch (gap_event) {
// All three scan complete events have the same structure with just status
// The scan_complete struct matches ESP-IDF's layout exactly, so this reinterpret_cast is safe
// This is verified at compile-time by static_assert checks in ble_event.h
// The struct already contains our copy of the status (copied in BLEEvent constructor)
GAP_SCAN_COMPLETE_EVENTS:
param = reinterpret_cast<esp_ble_gap_cb_param_t *>(&ble_event->event_.gap.scan_complete);
break;
for (auto *gap_handler : this->gap_event_handlers_) {
gap_handler->gap_event_handler(
gap_event, reinterpret_cast<esp_ble_gap_cb_param_t *>(&ble_event->event_.gap.scan_complete));
}
#endif
break;
// All advertising complete events have the same structure with just status
GAP_ADV_COMPLETE_EVENTS:
param = reinterpret_cast<esp_ble_gap_cb_param_t *>(&ble_event->event_.gap.adv_complete);
break;
// Advertising complete events
case ESP_GAP_BLE_ADV_DATA_SET_COMPLETE_EVT:
case ESP_GAP_BLE_SCAN_RSP_DATA_SET_COMPLETE_EVT:
case ESP_GAP_BLE_ADV_DATA_RAW_SET_COMPLETE_EVT:
case ESP_GAP_BLE_ADV_START_COMPLETE_EVT:
case ESP_GAP_BLE_ADV_STOP_COMPLETE_EVT:
// All advertising complete events have the same structure with just status
ESP_LOGV(TAG, "gap_event_handler - %d", gap_event);
#ifdef ESPHOME_ESP32_BLE_GAP_EVENT_HANDLER_COUNT
for (auto *gap_handler : this->gap_event_handlers_) {
gap_handler->gap_event_handler(
gap_event, reinterpret_cast<esp_ble_gap_cb_param_t *>(&ble_event->event_.gap.adv_complete));
}
#endif
break;
case ESP_GAP_BLE_READ_RSSI_COMPLETE_EVT:
param = reinterpret_cast<esp_ble_gap_cb_param_t *>(&ble_event->event_.gap.read_rssi_complete);
break;
// RSSI complete event
case ESP_GAP_BLE_READ_RSSI_COMPLETE_EVT:
ESP_LOGV(TAG, "gap_event_handler - %d", gap_event);
#ifdef ESPHOME_ESP32_BLE_GAP_EVENT_HANDLER_COUNT
for (auto *gap_handler : this->gap_event_handlers_) {
gap_handler->gap_event_handler(
gap_event, reinterpret_cast<esp_ble_gap_cb_param_t *>(&ble_event->event_.gap.read_rssi_complete));
}
#endif
break;
GAP_SECURITY_EVENTS:
param = reinterpret_cast<esp_ble_gap_cb_param_t *>(&ble_event->event_.gap.security);
break;
default:
break;
}
// clang-format on
// Dispatch to all registered handlers
for (auto *gap_handler : this->gap_event_handlers_) {
gap_handler->gap_event_handler(gap_event, param);
}
// Security events
case ESP_GAP_BLE_AUTH_CMPL_EVT:
case ESP_GAP_BLE_SEC_REQ_EVT:
case ESP_GAP_BLE_PASSKEY_NOTIF_EVT:
case ESP_GAP_BLE_PASSKEY_REQ_EVT:
case ESP_GAP_BLE_NC_REQ_EVT:
ESP_LOGV(TAG, "gap_event_handler - %d", gap_event);
#ifdef ESPHOME_ESP32_BLE_GAP_EVENT_HANDLER_COUNT
for (auto *gap_handler : this->gap_event_handlers_) {
gap_handler->gap_event_handler(
gap_event, reinterpret_cast<esp_ble_gap_cb_param_t *>(&ble_event->event_.gap.security));
}
#endif
break;
@@ -576,13 +547,23 @@ void ESP32BLE::gap_event_handler(esp_gap_ble_cb_event_t event, esp_ble_gap_cb_pa
// Queue GAP events that components need to handle
// Scanning events - used by esp32_ble_tracker
case ESP_GAP_BLE_SCAN_RESULT_EVT:
GAP_SCAN_COMPLETE_EVENTS:
case ESP_GAP_BLE_SCAN_PARAM_SET_COMPLETE_EVT:
case ESP_GAP_BLE_SCAN_START_COMPLETE_EVT:
case ESP_GAP_BLE_SCAN_STOP_COMPLETE_EVT:
// Advertising events - used by esp32_ble_beacon and esp32_ble server
GAP_ADV_COMPLETE_EVENTS:
case ESP_GAP_BLE_ADV_DATA_SET_COMPLETE_EVT:
case ESP_GAP_BLE_SCAN_RSP_DATA_SET_COMPLETE_EVT:
case ESP_GAP_BLE_ADV_DATA_RAW_SET_COMPLETE_EVT:
case ESP_GAP_BLE_ADV_START_COMPLETE_EVT:
case ESP_GAP_BLE_ADV_STOP_COMPLETE_EVT:
// Connection events - used by ble_client
case ESP_GAP_BLE_READ_RSSI_COMPLETE_EVT:
// Security events - used by ble_client and bluetooth_proxy
GAP_SECURITY_EVENTS:
case ESP_GAP_BLE_AUTH_CMPL_EVT:
case ESP_GAP_BLE_SEC_REQ_EVT:
case ESP_GAP_BLE_PASSKEY_NOTIF_EVT:
case ESP_GAP_BLE_PASSKEY_REQ_EVT:
case ESP_GAP_BLE_NC_REQ_EVT:
enqueue_ble_event(event, param);
return;
@@ -603,10 +584,6 @@ void ESP32BLE::gap_event_handler(esp_gap_ble_cb_event_t event, esp_ble_gap_cb_pa
void ESP32BLE::gatts_event_handler(esp_gatts_cb_event_t event, esp_gatt_if_t gatts_if,
esp_ble_gatts_cb_param_t *param) {
enqueue_ble_event(event, gatts_if, param);
// Wake up main loop to process GATT event immediately
#ifdef USE_SOCKET_SELECT_SUPPORT
global_ble->notify_main_loop_();
#endif
}
#endif
@@ -614,10 +591,6 @@ void ESP32BLE::gatts_event_handler(esp_gatts_cb_event_t event, esp_gatt_if_t gat
void ESP32BLE::gattc_event_handler(esp_gattc_cb_event_t event, esp_gatt_if_t gattc_if,
esp_ble_gattc_cb_param_t *param) {
enqueue_ble_event(event, gattc_if, param);
// Wake up main loop to process GATT event immediately
#ifdef USE_SOCKET_SELECT_SUPPORT
global_ble->notify_main_loop_();
#endif
}
#endif
@@ -657,89 +630,6 @@ void ESP32BLE::dump_config() {
}
}
#ifdef USE_SOCKET_SELECT_SUPPORT
void ESP32BLE::setup_event_notification_() {
// Create UDP socket for event notifications
this->notify_fd_ = lwip_socket(AF_INET, SOCK_DGRAM, IPPROTO_UDP);
if (this->notify_fd_ < 0) {
ESP_LOGW(TAG, "Event socket create failed: %d", errno);
return;
}
// Bind to loopback with auto-assigned port
struct sockaddr_in addr = {};
addr.sin_family = AF_INET;
addr.sin_addr.s_addr = lwip_htonl(INADDR_LOOPBACK);
addr.sin_port = 0; // Auto-assign port
if (lwip_bind(this->notify_fd_, (struct sockaddr *) &addr, sizeof(addr)) < 0) {
ESP_LOGW(TAG, "Event socket bind failed: %d", errno);
lwip_close(this->notify_fd_);
this->notify_fd_ = -1;
return;
}
// Get the assigned address and connect to it
// Connecting a UDP socket allows using send() instead of sendto() for better performance
struct sockaddr_in notify_addr;
socklen_t len = sizeof(notify_addr);
if (lwip_getsockname(this->notify_fd_, (struct sockaddr *) &notify_addr, &len) < 0) {
ESP_LOGW(TAG, "Event socket address failed: %d", errno);
lwip_close(this->notify_fd_);
this->notify_fd_ = -1;
return;
}
// Connect to self (loopback) - allows using send() instead of sendto()
// After connect(), no need to store notify_addr - the socket remembers it
if (lwip_connect(this->notify_fd_, (struct sockaddr *) &notify_addr, sizeof(notify_addr)) < 0) {
ESP_LOGW(TAG, "Event socket connect failed: %d", errno);
lwip_close(this->notify_fd_);
this->notify_fd_ = -1;
return;
}
// Set non-blocking mode
int flags = lwip_fcntl(this->notify_fd_, F_GETFL, 0);
lwip_fcntl(this->notify_fd_, F_SETFL, flags | O_NONBLOCK);
// Register with application's select() loop
if (!App.register_socket_fd(this->notify_fd_)) {
ESP_LOGW(TAG, "Event socket register failed");
lwip_close(this->notify_fd_);
this->notify_fd_ = -1;
return;
}
ESP_LOGD(TAG, "Event socket ready");
}
void ESP32BLE::cleanup_event_notification_() {
if (this->notify_fd_ >= 0) {
App.unregister_socket_fd(this->notify_fd_);
lwip_close(this->notify_fd_);
this->notify_fd_ = -1;
ESP_LOGD(TAG, "Event socket closed");
}
}
void ESP32BLE::drain_event_notifications_() {
// Called from main loop to drain any pending notifications
// Must check is_socket_ready() to avoid blocking on empty socket
if (this->notify_fd_ >= 0 && App.is_socket_ready(this->notify_fd_)) {
char buffer[BLE_EVENT_NOTIFY_DRAIN_BUFFER_SIZE];
// Drain all pending notifications with non-blocking reads
// Multiple BLE events may have triggered multiple writes, so drain until EWOULDBLOCK
// We control both ends of this loopback socket (always write 1 byte per event),
// so no error checking needed - any errors indicate catastrophic system failure
while (lwip_recvfrom(this->notify_fd_, buffer, sizeof(buffer), 0, nullptr, nullptr) > 0) {
// Just draining, no action needed - actual BLE events are already queued
}
}
}
#endif // USE_SOCKET_SELECT_SUPPORT
uint64_t ble_addr_to_uint64(const esp_bd_addr_t address) {
uint64_t u = 0;
u |= uint64_t(address[0] & 0xFF) << 40;
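
The notification helpers above follow a common self-wake pattern; here is a standalone sketch of the same idea with plain POSIX calls standing in for the lwip_* wrappers and for App.register_socket_fd(): a UDP socket bound and connected to its own loopback address lets a producer thread wake a blocked select() with a one-byte send, and the main loop drains those bytes once woken.

#include <arpa/inet.h>
#include <fcntl.h>
#include <sys/socket.h>
#include <unistd.h>

int make_notify_socket() {
  int fd = socket(AF_INET, SOCK_DGRAM, IPPROTO_UDP);
  if (fd < 0)
    return -1;
  sockaddr_in addr{};
  addr.sin_family = AF_INET;
  addr.sin_addr.s_addr = htonl(INADDR_LOOPBACK);
  addr.sin_port = 0;  // let the stack pick a port
  if (bind(fd, reinterpret_cast<sockaddr *>(&addr), sizeof(addr)) < 0) {
    close(fd);
    return -1;
  }
  socklen_t len = sizeof(addr);
  getsockname(fd, reinterpret_cast<sockaddr *>(&addr), &len);
  // Connecting to ourselves lets the producer use send() instead of sendto().
  if (connect(fd, reinterpret_cast<sockaddr *>(&addr), sizeof(addr)) < 0) {
    close(fd);
    return -1;
  }
  fcntl(fd, F_SETFL, fcntl(fd, F_GETFL, 0) | O_NONBLOCK);
  return fd;
}

// Producer thread: queue the event elsewhere, then nudge the select() loop.
void notify(int fd) {
  const char dummy = 1;
  send(fd, &dummy, 1, 0);  // best effort; select() would eventually time out anyway
}

// Consumer loop: once select() reports the fd readable, drain all pending bytes.
void drain(int fd) {
  char buf[16];
  while (recv(fd, buf, sizeof(buf), 0) > 0) {
    // nothing to do: the real events are already in the queue
  }
}

Because both ends of the socket are owned by the same process and each queued event writes exactly one byte, the drain loop can simply read until the socket would block.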

View File

@@ -25,10 +25,6 @@
#include <esp_gattc_api.h>
#include <esp_gatts_api.h>
#ifdef USE_SOCKET_SELECT_SUPPORT
#include <lwip/sockets.h>
#endif
namespace esphome::esp32_ble {
// Maximum size of the BLE event queue
@@ -166,13 +162,6 @@ class ESP32BLE : public Component {
void advertising_init_();
#endif
#ifdef USE_SOCKET_SELECT_SUPPORT
void setup_event_notification_(); // Create notification socket
void cleanup_event_notification_(); // Close and unregister socket
inline void notify_main_loop_(); // Wake up select() from BLE thread (hot path - inlined)
void drain_event_notifications_(); // Read pending notifications in main loop
#endif
private:
template<typename... Args> friend void enqueue_ble_event(Args... args);
@@ -207,13 +196,6 @@ class ESP32BLE : public Component {
esp_ble_io_cap_t io_cap_{ESP_IO_CAP_NONE}; // 4 bytes (enum)
uint32_t advertising_cycle_time_{}; // 4 bytes
#ifdef USE_SOCKET_SELECT_SUPPORT
// Event notification socket for waking up main loop from BLE thread
// Uses connected UDP loopback socket to wake lwip_select() with ~12μs latency vs 0-16ms timeout
// Socket is connected during setup, allowing use of send() instead of sendto() for efficiency
int notify_fd_{-1}; // 4 bytes (file descriptor)
#endif
// 2-byte aligned members
uint16_t appearance_{0}; // 2 bytes
@@ -225,29 +207,6 @@ class ESP32BLE : public Component {
// NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables)
extern ESP32BLE *global_ble;
#ifdef USE_SOCKET_SELECT_SUPPORT
// Inline implementations for hot-path functions
// These are called from BLE thread (notify) and main loop (drain) on every event
// Small buffer for draining notification bytes (1 byte sent per BLE event)
// Size allows draining multiple notifications per recvfrom() without wasting stack
static constexpr size_t BLE_EVENT_NOTIFY_DRAIN_BUFFER_SIZE = 16;
inline void ESP32BLE::notify_main_loop_() {
// Called from BLE thread context when events are queued
// Wakes up lwip_select() in main loop by writing to connected loopback socket
if (this->notify_fd_ >= 0) {
const char dummy = 1;
// Non-blocking send - if it fails (unlikely), select() will wake on timeout anyway
// No error checking needed: we control both ends of this loopback socket, and the
// BLE event is already queued. Notification is best-effort to reduce latency.
// This is safe to call from BLE thread - send() is thread-safe in lwip
// Socket is already connected to loopback address, so send() is faster than sendto()
lwip_send(this->notify_fd_, &dummy, 1, 0);
}
}
#endif // USE_SOCKET_SELECT_SUPPORT
template<typename... Ts> class BLEEnabledCondition : public Condition<Ts...> {
public:
bool check(Ts... x) override { return global_ble->is_active(); }

View File

@@ -281,15 +281,19 @@ void ESPHomeOTAComponent::handle_data_() {
#endif
// Acknowledge auth OK - 1 byte
this->write_byte_(ota::OTA_RESPONSE_AUTH_OK);
buf[0] = ota::OTA_RESPONSE_AUTH_OK;
this->writeall_(buf, 1);
// Read size, 4 bytes MSB first
if (!this->readall_(buf, 4)) {
this->log_read_error_(LOG_STR("size"));
goto error; // NOLINT(cppcoreguidelines-avoid-goto)
}
ota_size = (static_cast<size_t>(buf[0]) << 24) | (static_cast<size_t>(buf[1]) << 16) |
(static_cast<size_t>(buf[2]) << 8) | buf[3];
ota_size = 0;
for (uint8_t i = 0; i < 4; i++) {
ota_size <<= 8;
ota_size |= buf[i];
}
ESP_LOGV(TAG, "Size is %u bytes", ota_size);
// Now that we've passed authentication and are actually
@@ -309,7 +313,8 @@ void ESPHomeOTAComponent::handle_data_() {
update_started = true;
// Acknowledge prepare OK - 1 byte
this->write_byte_(ota::OTA_RESPONSE_UPDATE_PREPARE_OK);
buf[0] = ota::OTA_RESPONSE_UPDATE_PREPARE_OK;
this->writeall_(buf, 1);
// Read binary MD5, 32 bytes
if (!this->readall_(buf, 32)) {
@@ -321,7 +326,8 @@ void ESPHomeOTAComponent::handle_data_() {
this->backend_->set_update_md5(sbuf);
// Acknowledge MD5 OK - 1 byte
this->write_byte_(ota::OTA_RESPONSE_BIN_MD5_OK);
buf[0] = ota::OTA_RESPONSE_BIN_MD5_OK;
this->writeall_(buf, 1);
while (total < ota_size) {
// TODO: timeout check
@@ -348,7 +354,8 @@ void ESPHomeOTAComponent::handle_data_() {
total += read;
#if USE_OTA_VERSION == 2
while (size_acknowledged + OTA_BLOCK_SIZE <= total || (total == ota_size && size_acknowledged < ota_size)) {
this->write_byte_(ota::OTA_RESPONSE_CHUNK_OK);
buf[0] = ota::OTA_RESPONSE_CHUNK_OK;
this->writeall_(buf, 1);
size_acknowledged += OTA_BLOCK_SIZE;
}
#endif
@@ -367,7 +374,8 @@ void ESPHomeOTAComponent::handle_data_() {
}
// Acknowledge receive OK - 1 byte
this->write_byte_(ota::OTA_RESPONSE_RECEIVE_OK);
buf[0] = ota::OTA_RESPONSE_RECEIVE_OK;
this->writeall_(buf, 1);
error_code = this->backend_->end();
if (error_code != ota::OTA_RESPONSE_OK) {
@@ -376,7 +384,8 @@ void ESPHomeOTAComponent::handle_data_() {
}
// Acknowledge Update end OK - 1 byte
this->write_byte_(ota::OTA_RESPONSE_UPDATE_END_OK);
buf[0] = ota::OTA_RESPONSE_UPDATE_END_OK;
this->writeall_(buf, 1);
// Read ACK
if (!this->readall_(buf, 1) || buf[0] != ota::OTA_RESPONSE_OK) {
@@ -395,7 +404,8 @@ void ESPHomeOTAComponent::handle_data_() {
App.safe_reboot();
error:
this->write_byte_(static_cast<uint8_t>(error_code));
buf[0] = static_cast<uint8_t>(error_code);
this->writeall_(buf, 1);
this->cleanup_connection_();
if (this->backend_ != nullptr && update_started) {

View File

@@ -53,7 +53,6 @@ class ESPHomeOTAComponent : public ota::OTAComponent {
#endif // USE_OTA_PASSWORD
bool readall_(uint8_t *buf, size_t len);
bool writeall_(const uint8_t *buf, size_t len);
inline bool write_byte_(uint8_t byte) { return this->writeall_(&byte, 1); }
bool try_read_(size_t to_read, const LogString *desc);
bool try_write_(size_t to_write, const LogString *desc);
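
Two small details from the hunks above, as a standalone sketch (writeall_stub is a placeholder name, standing in for the component's writeall_): the one-byte acknowledge helper and the MSB-first decode of the 4-byte size field.

#include <cstddef>
#include <cstdint>

// Placeholder transport; the real code writes to the OTA client socket.
static bool writeall_stub(const uint8_t *buf, size_t len) { return buf != nullptr && len > 0; }

// Send a single response/acknowledge byte.
static inline bool write_byte(uint8_t byte) { return writeall_stub(&byte, 1); }

// The size field arrives as 4 bytes, most significant byte first.
static inline size_t decode_size_msb_first(const uint8_t buf[4]) {
  return (static_cast<size_t>(buf[0]) << 24) | (static_cast<size_t>(buf[1]) << 16) |
         (static_cast<size_t>(buf[2]) << 8) | buf[3];
}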

View File

@@ -51,14 +51,7 @@ void FanCall::validate_() {
if (!this->preset_mode_.empty()) {
const auto &preset_modes = traits.supported_preset_modes();
bool found = false;
for (const auto &mode : preset_modes) {
if (strcmp(mode, this->preset_mode_.c_str()) == 0) {
found = true;
break;
}
}
if (!found) {
if (preset_modes.find(this->preset_mode_) == preset_modes.end()) {
ESP_LOGW(TAG, "%s: Preset mode '%s' not supported", this->parent_.get_name().c_str(), this->preset_mode_.c_str());
this->preset_mode_.clear();
}
@@ -99,12 +92,11 @@ FanCall FanRestoreState::to_call(Fan &fan) {
call.set_speed(this->speed);
call.set_direction(this->direction);
auto traits = fan.get_traits();
if (traits.supports_preset_modes()) {
if (fan.get_traits().supports_preset_modes()) {
// Use stored preset index to get preset name
const auto &preset_modes = traits.supported_preset_modes();
const auto &preset_modes = fan.get_traits().supported_preset_modes();
if (this->preset_mode < preset_modes.size()) {
call.set_preset_mode(preset_modes[this->preset_mode]);
call.set_preset_mode(*std::next(preset_modes.begin(), this->preset_mode));
}
}
return call;
@@ -115,12 +107,11 @@ void FanRestoreState::apply(Fan &fan) {
fan.speed = this->speed;
fan.direction = this->direction;
auto traits = fan.get_traits();
if (traits.supports_preset_modes()) {
if (fan.get_traits().supports_preset_modes()) {
// Use stored preset index to get preset name
const auto &preset_modes = traits.supported_preset_modes();
const auto &preset_modes = fan.get_traits().supported_preset_modes();
if (this->preset_mode < preset_modes.size()) {
fan.preset_mode = preset_modes[this->preset_mode];
fan.preset_mode = *std::next(preset_modes.begin(), this->preset_mode);
}
}
fan.publish_state();
@@ -191,25 +182,18 @@ void Fan::save_state_() {
return;
}
auto traits = this->get_traits();
FanRestoreState state{};
state.state = this->state;
state.oscillating = this->oscillating;
state.speed = this->speed;
state.direction = this->direction;
if (traits.supports_preset_modes() && !this->preset_mode.empty()) {
const auto &preset_modes = traits.supported_preset_modes();
if (this->get_traits().supports_preset_modes() && !this->preset_mode.empty()) {
const auto &preset_modes = this->get_traits().supported_preset_modes();
// Store index of current preset mode
size_t i = 0;
for (const auto &mode : preset_modes) {
if (strcmp(mode, this->preset_mode.c_str()) == 0) {
state.preset_mode = i;
break;
}
i++;
}
auto preset_iterator = preset_modes.find(this->preset_mode);
if (preset_iterator != preset_modes.end())
state.preset_mode = std::distance(preset_modes.begin(), preset_iterator);
}
this->rtc_.save(&state);
@@ -232,8 +216,8 @@ void Fan::dump_traits_(const char *tag, const char *prefix) {
}
if (traits.supports_preset_modes()) {
ESP_LOGCONFIG(tag, "%s Supported presets:", prefix);
for (const char *s : traits.supported_preset_modes())
ESP_LOGCONFIG(tag, "%s - %s", prefix, s);
for (const std::string &s : traits.supported_preset_modes())
ESP_LOGCONFIG(tag, "%s - %s", prefix, s.c_str());
}
}
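
With the preset modes held in a std::set<std::string>, the save/restore code above maps a mode to a numeric index with std::distance and maps a stored index back to a mode with std::next; set iteration follows sorted string order, so the mapping stays stable as long as the configured mode list does not change. A compact sketch of that round trip:

#include <cstddef>
#include <iterator>
#include <set>
#include <string>

int main() {
  std::set<std::string> modes{"auto", "eco", "turbo"};  // iterated in sorted order

  // Save: store the index of the active mode.
  auto it = modes.find("eco");
  std::size_t index = (it != modes.end()) ? static_cast<std::size_t>(std::distance(modes.begin(), it)) : 0;

  // Restore: map the stored index back to a mode name (bounds-checked).
  std::string restored = (index < modes.size()) ? *std::next(modes.begin(), index) : std::string();

  return restored == "eco" ? 0 : 1;
}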

View File

@@ -1,10 +1,16 @@
#include <set>
#include <utility>
#pragma once
#include <vector>
#include <initializer_list>
namespace esphome {
#ifdef USE_API
namespace api {
class APIConnection;
} // namespace api
#endif
namespace fan {
class FanTraits {
@@ -30,27 +36,27 @@ class FanTraits {
/// Set whether this fan supports changing direction
void set_direction(bool direction) { this->direction_ = direction; }
/// Return the preset modes supported by the fan.
const std::vector<const char *> &supported_preset_modes() const { return this->preset_modes_; }
/// Set the preset modes supported by the fan (from initializer list).
void set_supported_preset_modes(std::initializer_list<const char *> preset_modes) {
this->preset_modes_ = preset_modes;
}
/// Set the preset modes supported by the fan (from vector).
void set_supported_preset_modes(const std::vector<const char *> &preset_modes) { this->preset_modes_ = preset_modes; }
// Deleted overloads to catch incorrect std::string usage at compile time with clear error messages
void set_supported_preset_modes(const std::vector<std::string> &preset_modes) = delete;
void set_supported_preset_modes(std::initializer_list<std::string> preset_modes) = delete;
std::set<std::string> supported_preset_modes() const { return this->preset_modes_; }
/// Set the preset modes supported by the fan.
void set_supported_preset_modes(const std::set<std::string> &preset_modes) { this->preset_modes_ = preset_modes; }
/// Return if preset modes are supported
bool supports_preset_modes() const { return !this->preset_modes_.empty(); }
protected:
#ifdef USE_API
// The API connection is a friend class to access internal methods
friend class api::APIConnection;
// This method returns a reference to the internal preset modes set.
// It is used by the API to avoid copying data when encoding messages.
// Warning: Do not use this method outside of the API connection code.
// It returns a reference to internal data that can be invalidated.
const std::set<std::string> &supported_preset_modes_for_api_() const { return this->preset_modes_; }
#endif
bool oscillation_{false};
bool speed_{false};
bool direction_{false};
int speed_count_{};
std::vector<const char *> preset_modes_{};
std::set<std::string> preset_modes_{};
};
} // namespace fan
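
The FanTraits change above keeps the public getter returning the set by value while adding a protected const-reference accessor that only the API connection (a friend) uses when encoding, avoiding a copy of every preset string per message. A hypothetical sketch of that accessor split (Traits and Encoder are illustration names, not the ESPHome classes):

#include <cstddef>
#include <set>
#include <string>

class Traits {
 public:
  // Public getter: returns a copy, safe for arbitrary callers.
  std::set<std::string> modes() const { return modes_; }

 protected:
  friend class Encoder;  // hypothetical stand-in for api::APIConnection
  // Internal accessor: no copy, but the reference is only valid while the set is unchanged.
  const std::set<std::string> &modes_ref() const { return modes_; }
  std::set<std::string> modes_;
};

class Encoder {
 public:
  static std::size_t total_size(const Traits &t) {
    std::size_t n = 0;
    for (const auto &m : t.modes_ref())  // iterates the original set, no container copy
      n += m.size();
    return n;
  }
};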

View File

@@ -39,7 +39,6 @@ CONFIG_SCHEMA = (
# due to hardware limitations or lack of reliable interrupt support. This ensures
# stable operation on these platforms. Future maintainers should verify platform
# capabilities before changing this default behavior.
# nrf52 has no gpio interrupts implemented yet
cv.SplitDefault(
CONF_USE_INTERRUPT,
bk72xx=False,
@@ -47,7 +46,7 @@ CONFIG_SCHEMA = (
esp8266=True,
host=True,
ln882x=False,
nrf52=False,
nrf52=True,
rp2040=True,
rtl87xx=False,
): cv.boolean,

View File

@@ -1,5 +1,7 @@
#pragma once
#include <set>
#include "esphome/core/automation.h"
#include "esphome/components/output/binary_output.h"
#include "esphome/components/output/float_output.h"
@@ -20,7 +22,7 @@ class HBridgeFan : public Component, public fan::Fan {
void set_pin_a(output::FloatOutput *pin_a) { pin_a_ = pin_a; }
void set_pin_b(output::FloatOutput *pin_b) { pin_b_ = pin_b; }
void set_enable_pin(output::FloatOutput *enable) { enable_ = enable; }
void set_preset_modes(std::initializer_list<const char *> presets) { preset_modes_ = presets; }
void set_preset_modes(const std::set<std::string> &presets) { preset_modes_ = presets; }
void setup() override;
void dump_config() override;
@@ -36,7 +38,7 @@ class HBridgeFan : public Component, public fan::Fan {
int speed_count_{};
DecayMode decay_mode_{DECAY_MODE_SLOW};
fan::FanTraits traits_;
std::vector<const char *> preset_modes_{};
std::set<std::string> preset_modes_{};
void control(const fan::FanCall &call) override;
void write_state_();

View File

@@ -671,33 +671,18 @@ async def write_image(config, all_frames=False):
resize = config.get(CONF_RESIZE)
if is_svg_file(path):
# Local import so use of non-SVG files needn't require cairosvg installed
from pyexpat import ExpatError
from xml.etree.ElementTree import ParseError
from cairosvg import svg2png
from cairosvg.helpers import PointError
if not resize:
resize = (None, None)
try:
with open(path, "rb") as file:
image = svg2png(
file_obj=file,
output_width=resize[0],
output_height=resize[1],
)
image = Image.open(io.BytesIO(image))
width, height = image.size
except (
ValueError,
ParseError,
IndexError,
ExpatError,
AttributeError,
TypeError,
PointError,
) as e:
raise core.EsphomeError(f"Could not load SVG image {path}: {e}") from e
with open(path, "rb") as file:
image = svg2png(
file_obj=file,
output_width=resize[0],
output_height=resize[1],
)
image = Image.open(io.BytesIO(image))
width, height = image.size
else:
image = Image.open(path)
width, height = image.size

View File

@@ -2,13 +2,18 @@
#include "esphome/core/hal.h"
#include "esphome/components/lcd_base/lcd_display.h"
#include "esphome/components/display/display.h"
namespace esphome {
namespace lcd_gpio {
class GPIOLCDDisplay;
using gpio_lcd_writer_t = display::DisplayWriter<GPIOLCDDisplay>;
class GPIOLCDDisplay : public lcd_base::LCDDisplay {
public:
void set_writer(std::function<void(GPIOLCDDisplay &)> &&writer) { this->writer_ = std::move(writer); }
void set_writer(gpio_lcd_writer_t &&writer) { this->writer_ = std::move(writer); }
void setup() override;
void set_data_pins(GPIOPin *d0, GPIOPin *d1, GPIOPin *d2, GPIOPin *d3) {
this->data_pins_[0] = d0;
@@ -43,7 +48,7 @@ class GPIOLCDDisplay : public lcd_base::LCDDisplay {
GPIOPin *rw_pin_{nullptr};
GPIOPin *enable_pin_{nullptr};
GPIOPin *data_pins_[8]{nullptr, nullptr, nullptr, nullptr, nullptr, nullptr, nullptr, nullptr};
std::function<void(GPIOLCDDisplay &)> writer_;
gpio_lcd_writer_t writer_;
};
} // namespace lcd_gpio

View File

@@ -3,13 +3,18 @@
#include "esphome/core/component.h"
#include "esphome/components/lcd_base/lcd_display.h"
#include "esphome/components/i2c/i2c.h"
#include "esphome/components/display/display.h"
namespace esphome {
namespace lcd_pcf8574 {
class PCF8574LCDDisplay;
using pcf8574_lcd_writer_t = display::DisplayWriter<PCF8574LCDDisplay>;
class PCF8574LCDDisplay : public lcd_base::LCDDisplay, public i2c::I2CDevice {
public:
void set_writer(std::function<void(PCF8574LCDDisplay &)> &&writer) { this->writer_ = std::move(writer); }
void set_writer(pcf8574_lcd_writer_t &&writer) { this->writer_ = std::move(writer); }
void setup() override;
void dump_config() override;
void backlight();
@@ -24,7 +29,7 @@ class PCF8574LCDDisplay : public lcd_base::LCDDisplay, public i2c::I2CDevice {
// Stores the current state of the backlight.
uint8_t backlight_value_;
std::function<void(PCF8574LCDDisplay &)> writer_;
pcf8574_lcd_writer_t writer_;
};
} // namespace lcd_pcf8574

View File

@@ -58,7 +58,7 @@ from .types import (
FontEngine,
IdleTrigger,
ObjUpdateAction,
PlainTrigger,
PauseTrigger,
lv_font_t,
lv_group_t,
lv_style_t,
@@ -151,13 +151,6 @@ for w_type in WIDGET_TYPES.values():
create_modify_schema(w_type),
)(update_to_code)
SIMPLE_TRIGGERS = (
df.CONF_ON_PAUSE,
df.CONF_ON_RESUME,
df.CONF_ON_DRAW_START,
df.CONF_ON_DRAW_END,
)
def as_macro(macro, value):
if value is None:
@@ -251,9 +244,9 @@ def final_validation(configs):
for w in refreshed_widgets:
path = global_config.get_path_for_id(w)
widget_conf = global_config.get_config_for_path(path[:-1])
if not any(isinstance(v, (Lambda, dict)) for v in widget_conf.values()):
if not any(isinstance(v, Lambda) for v in widget_conf.values()):
raise cv.Invalid(
f"Widget '{w}' does not have any dynamic properties to refresh",
f"Widget '{w}' does not have any templated properties to refresh",
)
@@ -373,16 +366,16 @@ async def to_code(configs):
conf[CONF_TRIGGER_ID], lv_component, templ
)
await build_automation(idle_trigger, [], conf)
for trigger_name in SIMPLE_TRIGGERS:
if conf := config.get(trigger_name):
trigger_var = cg.new_Pvariable(conf[CONF_TRIGGER_ID])
await build_automation(trigger_var, [], conf)
cg.add(
getattr(
lv_component,
f"set_{trigger_name.removeprefix('on_')}_trigger",
)(trigger_var)
)
for conf in config.get(df.CONF_ON_PAUSE, ()):
pause_trigger = cg.new_Pvariable(
conf[CONF_TRIGGER_ID], lv_component, True
)
await build_automation(pause_trigger, [], conf)
for conf in config.get(df.CONF_ON_RESUME, ()):
resume_trigger = cg.new_Pvariable(
conf[CONF_TRIGGER_ID], lv_component, False
)
await build_automation(resume_trigger, [], conf)
await add_on_boot_triggers(config.get(CONF_ON_BOOT, ()))
# This must be done after all widgets are created
@@ -450,15 +443,16 @@ LVGL_SCHEMA = cv.All(
),
}
),
**{
cv.Optional(x): validate_automation(
{
cv.GenerateID(CONF_TRIGGER_ID): cv.declare_id(PlainTrigger),
},
single=True,
)
for x in SIMPLE_TRIGGERS
},
cv.Optional(df.CONF_ON_PAUSE): validate_automation(
{
cv.GenerateID(CONF_TRIGGER_ID): cv.declare_id(PauseTrigger),
}
),
cv.Optional(df.CONF_ON_RESUME): validate_automation(
{
cv.GenerateID(CONF_TRIGGER_ID): cv.declare_id(PauseTrigger),
}
),
cv.Exclusive(df.CONF_WIDGETS, CONF_PAGES): cv.ensure_list(
WIDGET_SCHEMA
),

View File

@@ -137,11 +137,7 @@ async def lvgl_is_idle(config, condition_id, template_arg, args):
lvgl = config[CONF_LVGL_ID]
timeout = await lv_milliseconds.process(config[CONF_TIMEOUT])
async with LambdaContext(LVGL_COMP_ARG, return_type=cg.bool_) as context:
lv_add(
ReturnStatement(
lv_expr.disp_get_inactive_time(lvgl_comp.get_disp()) > timeout
)
)
lv_add(ReturnStatement(lvgl_comp.is_idle(timeout)))
var = cg.new_Pvariable(
condition_id,
TemplateArguments(LvglComponent, *template_arg),
@@ -404,8 +400,7 @@ async def obj_refresh_to_code(config, action_id, template_arg, args):
# must pass all widget-specific options here, even if not templated, but only do so if at least one is
# templated. First filter out common style properties.
config = {k: v for k, v in widget.config.items() if k not in ALL_STYLES}
# Check if v is a Lambda or a dict, implying it is dynamic
if any(isinstance(v, (Lambda, dict)) for v in config.values()):
if any(isinstance(v, Lambda) for v in config.values()):
await widget.type.to_code(widget, config)
if (
widget.type.w_type.value_property is not None

View File

@@ -31,7 +31,7 @@ async def to_code(config):
lvgl_static.add_event_cb(
widget.obj,
await pressed_ctx.get_lambda(),
LV_EVENT.PRESSED,
LV_EVENT.PRESSING,
LV_EVENT.RELEASED,
)
)

View File

@@ -483,8 +483,6 @@ CONF_MSGBOXES = "msgboxes"
CONF_OBJ = "obj"
CONF_ONE_CHECKED = "one_checked"
CONF_ONE_LINE = "one_line"
CONF_ON_DRAW_START = "on_draw_start"
CONF_ON_DRAW_END = "on_draw_end"
CONF_ON_PAUSE = "on_pause"
CONF_ON_RESUME = "on_resume"
CONF_ON_SELECT = "on_select"

View File

@@ -82,18 +82,6 @@ static void rounder_cb(lv_disp_drv_t *disp_drv, lv_area_t *area) {
area->y2 = (area->y2 + draw_rounding) / draw_rounding * draw_rounding - 1;
}
void LvglComponent::monitor_cb(lv_disp_drv_t *disp_drv, uint32_t time, uint32_t px) {
ESP_LOGVV(TAG, "Draw end: %" PRIu32 " pixels in %" PRIu32 " ms", px, time);
auto *comp = static_cast<LvglComponent *>(disp_drv->user_data);
comp->draw_end_();
}
void LvglComponent::render_start_cb(lv_disp_drv_t *disp_drv) {
ESP_LOGVV(TAG, "Draw start");
auto *comp = static_cast<LvglComponent *>(disp_drv->user_data);
comp->draw_start_();
}
lv_event_code_t lv_api_event; // NOLINT
lv_event_code_t lv_update_event; // NOLINT
void LvglComponent::dump_config() {
@@ -113,10 +101,7 @@ void LvglComponent::set_paused(bool paused, bool show_snow) {
lv_disp_trig_activity(this->disp_); // resets the inactivity time
lv_obj_invalidate(lv_scr_act());
}
if (paused && this->pause_callback_ != nullptr)
this->pause_callback_->trigger();
if (!paused && this->resume_callback_ != nullptr)
this->resume_callback_->trigger();
this->pause_callbacks_.call(paused);
}
void LvglComponent::esphome_lvgl_init() {
@@ -240,6 +225,13 @@ IdleTrigger::IdleTrigger(LvglComponent *parent, TemplatableValue<uint32_t> timeo
});
}
PauseTrigger::PauseTrigger(LvglComponent *parent, TemplatableValue<bool> paused) : paused_(std::move(paused)) {
parent->add_on_pause_callback([this](bool pausing) {
if (this->paused_.value() == pausing)
this->trigger();
});
}
#ifdef USE_LVGL_TOUCHSCREEN
LVTouchListener::LVTouchListener(uint16_t long_press_time, uint16_t long_press_repeat_time, LvglComponent *parent) {
this->set_parent(parent);
@@ -482,12 +474,6 @@ void LvglComponent::setup() {
return;
}
}
if (this->draw_start_callback_ != nullptr) {
this->disp_drv_.render_start_cb = render_start_cb;
}
if (this->draw_end_callback_ != nullptr) {
this->disp_drv_.monitor_cb = monitor_cb;
}
#if LV_USE_LOG
lv_log_register_print_cb([](const char *buf) {
auto next = strchr(buf, ')');
@@ -516,9 +502,8 @@ void LvglComponent::loop() {
if (this->paused_) {
if (this->show_snow_)
this->write_random_();
} else {
lv_timer_handler_run_in_period(5);
}
lv_timer_handler_run_in_period(5);
}
#ifdef USE_LVGL_ANIMIMG

View File

@@ -171,10 +171,9 @@ class LvglComponent : public PollingComponent {
void add_on_idle_callback(std::function<void(uint32_t)> &&callback) {
this->idle_callbacks_.add(std::move(callback));
}
static void monitor_cb(lv_disp_drv_t *disp_drv, uint32_t time, uint32_t px);
static void render_start_cb(lv_disp_drv_t *disp_drv);
void add_on_pause_callback(std::function<void(bool)> &&callback) { this->pause_callbacks_.add(std::move(callback)); }
void dump_config() override;
bool is_idle(uint32_t idle_ms) { return lv_disp_get_inactive_time(this->disp_) > idle_ms; }
lv_disp_t *get_disp() { return this->disp_; }
lv_obj_t *get_scr_act() { return lv_disp_get_scr_act(this->disp_); }
// Pause or resume the display.
@@ -214,20 +213,12 @@ class LvglComponent : public PollingComponent {
size_t draw_rounding{2};
display::DisplayRotation rotation{display::DISPLAY_ROTATION_0_DEGREES};
void set_pause_trigger(Trigger<> *trigger) { this->pause_callback_ = trigger; }
void set_resume_trigger(Trigger<> *trigger) { this->resume_callback_ = trigger; }
void set_draw_start_trigger(Trigger<> *trigger) { this->draw_start_callback_ = trigger; }
void set_draw_end_trigger(Trigger<> *trigger) { this->draw_end_callback_ = trigger; }
protected:
// these functions are never called unless the callbacks are non-null since the
// LVGL callbacks that call them are not set unless the start/end callbacks are non-null
void draw_start_() const { this->draw_start_callback_->trigger(); }
void draw_end_() const { this->draw_end_callback_->trigger(); }
void write_random_();
void draw_buffer_(const lv_area_t *area, lv_color_t *ptr);
void flush_cb_(lv_disp_drv_t *disp_drv, const lv_area_t *area, lv_color_t *color_p);
std::vector<display::Display *> displays_{};
size_t buffer_frac_{1};
bool full_refresh_{};
@@ -244,10 +235,7 @@ class LvglComponent : public PollingComponent {
std::map<lv_group_t *, lv_obj_t *> focus_marks_{};
CallbackManager<void(uint32_t)> idle_callbacks_{};
Trigger<> *pause_callback_{};
Trigger<> *resume_callback_{};
Trigger<> *draw_start_callback_{};
Trigger<> *draw_end_callback_{};
CallbackManager<void(bool)> pause_callbacks_{};
lv_color_t *rotate_buf_{};
};
@@ -260,6 +248,14 @@ class IdleTrigger : public Trigger<> {
bool is_idle_{};
};
class PauseTrigger : public Trigger<> {
public:
explicit PauseTrigger(LvglComponent *parent, TemplatableValue<bool> paused);
protected:
TemplatableValue<bool> paused_;
};
template<typename... Ts> class LvglAction : public Action<Ts...>, public Parented<LvglComponent> {
public:
explicit LvglAction(std::function<void(LvglComponent *)> &&lamb) : action_(std::move(lamb)) {}
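
The LVGL refactor above replaces the separate pause/resume Trigger pointers with a single CallbackManager<void(bool)> plus a PauseTrigger that fires only when the flag matches the value it was constructed with. A minimal sketch of that pattern (sketch names, not the ESPHome classes); as in the generated code, the trigger object is assumed to outlive the callback list.

#include <functional>
#include <utility>
#include <vector>

// Stand-in for CallbackManager<void(bool)>: a list of callbacks invoked with the paused flag.
class PauseCallbacks {
 public:
  void add(std::function<void(bool)> &&cb) { cbs_.push_back(std::move(cb)); }
  void call(bool paused) {
    for (auto &cb : cbs_)
      cb(paused);
  }

 private:
  std::vector<std::function<void(bool)>> cbs_;
};

// Stand-in for PauseTrigger: registers itself and filters on the edge it cares about.
struct PauseTriggerSketch {
  PauseTriggerSketch(PauseCallbacks &mgr, bool want_paused) {
    mgr.add([this, want_paused](bool pausing) {
      if (pausing == want_paused)
        this->fired = true;  // the real trigger would call trigger() here
    });
  }
  bool fired{false};
};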

View File

@@ -3,7 +3,6 @@ import sys
from esphome import automation, codegen as cg
from esphome.const import CONF_MAX_VALUE, CONF_MIN_VALUE, CONF_TEXT, CONF_VALUE
from esphome.cpp_generator import MockObj, MockObjClass
from esphome.cpp_types import esphome_ns
from .defines import lvgl_ns
from .lvcode import lv_expr
@@ -43,11 +42,8 @@ lv_event_code_t = cg.global_ns.enum("lv_event_code_t")
lv_indev_type_t = cg.global_ns.enum("lv_indev_type_t")
lv_key_t = cg.global_ns.enum("lv_key_t")
FontEngine = lvgl_ns.class_("FontEngine")
PlainTrigger = esphome_ns.class_("Trigger<>", automation.Trigger.template())
DrawEndTrigger = esphome_ns.class_(
"Trigger<uint32_t, uint32_t>", automation.Trigger.template(cg.uint32, cg.uint32)
)
IdleTrigger = lvgl_ns.class_("IdleTrigger", automation.Trigger.template())
PauseTrigger = lvgl_ns.class_("PauseTrigger", automation.Trigger.template())
ObjUpdateAction = lvgl_ns.class_("ObjUpdateAction", automation.Action)
LvglCondition = lvgl_ns.class_("LvglCondition", automation.Condition)
LvglAction = lvgl_ns.class_("LvglAction", automation.Action)

View File

@@ -4,13 +4,14 @@
#include "esphome/core/time.h"
#include "esphome/components/spi/spi.h"
#include "esphome/components/display/display.h"
namespace esphome {
namespace max7219 {
class MAX7219Component;
using max7219_writer_t = std::function<void(MAX7219Component &)>;
using max7219_writer_t = display::DisplayWriter<MAX7219Component>;
class MAX7219Component : public PollingComponent,
public spi::SPIDevice<spi::BIT_ORDER_MSB_FIRST, spi::CLOCK_POLARITY_LOW,
@@ -57,7 +58,7 @@ class MAX7219Component : public PollingComponent,
uint8_t num_chips_{1};
uint8_t *buffer_;
bool reverse_{false};
optional<max7219_writer_t> writer_{};
max7219_writer_t writer_{};
};
} // namespace max7219

View File

@@ -23,7 +23,7 @@ enum ScrollMode {
class MAX7219Component;
using max7219_writer_t = std::function<void(MAX7219Component &)>;
using max7219_writer_t = display::DisplayWriter<MAX7219Component>;
class MAX7219Component : public display::DisplayBuffer,
public spi::SPIDevice<spi::BIT_ORDER_MSB_FIRST, spi::CLOCK_POLARITY_LOW,
@@ -117,7 +117,7 @@ class MAX7219Component : public display::DisplayBuffer,
uint32_t last_scroll_ = 0;
uint16_t stepsleft_;
size_t get_buffer_length_();
optional<max7219_writer_t> writer_local_{};
max7219_writer_t writer_local_{};
};
} // namespace max7219digit

View File

@@ -55,7 +55,6 @@ CONFIG_SCHEMA = cv.Schema(
esp32=False,
rp2040=False,
bk72xx=False,
host=False,
): cv.All(
cv.boolean,
cv.Any(
@@ -65,7 +64,6 @@ CONFIG_SCHEMA = cv.Schema(
esp8266_arduino=cv.Version(0, 0, 0),
rp2040_arduino=cv.Version(0, 0, 0),
bk72xx_arduino=cv.Version(1, 7, 0),
host=cv.Version(0, 0, 0),
),
cv.boolean_false,
),

View File

@@ -323,8 +323,6 @@ void Nextion::loop() {
this->set_touch_sleep_timeout(this->touch_sleep_timeout_);
}
this->set_auto_wake_on_touch(this->connection_state_.auto_wake_on_touch_);
this->connection_state_.ignore_is_setup_ = false;
}

View File

@@ -9,6 +9,7 @@
#include "esphome/components/uart/uart.h"
#include "nextion_base.h"
#include "nextion_component.h"
#include "esphome/components/display/display.h"
#include "esphome/components/display/display_color_utils.h"
#ifdef USE_NEXTION_TFT_UPLOAD
@@ -31,7 +32,7 @@ namespace nextion {
class Nextion;
class NextionComponentBase;
using nextion_writer_t = std::function<void(Nextion &)>;
using nextion_writer_t = display::DisplayWriter<Nextion>;
static const std::string COMMAND_DELIMITER{static_cast<char>(255), static_cast<char>(255), static_cast<char>(255)};
@@ -1471,7 +1472,7 @@ class Nextion : public NextionBase, public PollingComponent, public uart::UARTDe
CallbackManager<void(uint8_t, uint8_t, bool)> touch_callback_{};
CallbackManager<void()> buffer_overflow_callback_{};
optional<nextion_writer_t> writer_;
nextion_writer_t writer_;
optional<float> brightness_;
#ifdef USE_NEXTION_CONFIG_DUMP_DEVICE_INFO

View File

@@ -290,7 +290,6 @@ def show_logs(config: ConfigType, args, devices: list[str]) -> bool:
address = ble_device.address
else:
return True
if is_mac_address(address):
asyncio.run(logger_connect(address))
return True

View File

@@ -3,6 +3,7 @@
#include "esphome/core/component.h"
#include "esphome/core/defines.h"
#include "esphome/components/ble_client/ble_client.h"
#include "esphome/components/display/display.h"
#include <cinttypes>
@@ -29,7 +30,7 @@ enum UNIT {
UNIT_DEG_E, ///< show "°E"
};
using pvvx_writer_t = std::function<void(PVVXDisplay &)>;
using pvvx_writer_t = display::DisplayWriter<PVVXDisplay>;
class PVVXDisplay : public ble_client::BLEClientNode, public PollingComponent {
public:
@@ -126,7 +127,7 @@ class PVVXDisplay : public ble_client::BLEClientNode, public PollingComponent {
esp32_ble_tracker::ESPBTUUID char_uuid_ =
esp32_ble_tracker::ESPBTUUID::from_raw("00001f1f-0000-1000-8000-00805f9b34fb");
optional<pvvx_writer_t> writer_{};
pvvx_writer_t writer_{};
};
} // namespace pvvx_mithermometer

View File

@@ -33,13 +33,19 @@ Message Format:
class ABBWelcomeData {
public:
// Make default
ABBWelcomeData() : data_{0x55, 0xff} {}
ABBWelcomeData() {
std::fill(std::begin(this->data_), std::end(this->data_), 0);
this->data_[0] = 0x55;
this->data_[1] = 0xff;
}
// Make from initializer_list
ABBWelcomeData(std::initializer_list<uint8_t> data) : data_{} {
ABBWelcomeData(std::initializer_list<uint8_t> data) {
std::fill(std::begin(this->data_), std::end(this->data_), 0);
std::copy_n(data.begin(), std::min(data.size(), this->data_.size()), this->data_.begin());
}
// Make from vector
ABBWelcomeData(const std::vector<uint8_t> &data) : data_{} {
ABBWelcomeData(const std::vector<uint8_t> &data) {
std::fill(std::begin(this->data_), std::end(this->data_), 0);
std::copy_n(data.begin(), std::min(data.size(), this->data_.size()), this->data_.begin());
}
// Default copy constructor

View File

@@ -2,7 +2,6 @@
#include <memory>
#include <tuple>
#include <forward_list>
#include "esphome/core/automation.h"
#include "esphome/core/component.h"
#include "esphome/core/helpers.h"
@@ -265,22 +264,10 @@ template<class C, typename... Ts> class IsRunningCondition : public Condition<Ts
C *parent_;
};
/** Wait for a script to finish before continuing.
*
* Uses queue-based storage to safely handle concurrent executions.
* While concurrent execution from the same trigger is uncommon, it's possible
* (e.g., rapid button presses, high-frequency sensor updates), so we use
* queue-based storage for correctness.
*/
template<class C, typename... Ts> class ScriptWaitAction : public Action<Ts...>, public Component {
public:
ScriptWaitAction(C *script) : script_(script) {}
void setup() override {
// Start with loop disabled - only enable when there's work to do
this->disable_loop();
}
void play_complex(Ts... x) override {
this->num_running_++;
// Check if we can continue immediately.
@@ -288,11 +275,7 @@ template<class C, typename... Ts> class ScriptWaitAction : public Action<Ts...>,
this->play_next_(x...);
return;
}
// Store parameters for later execution
this->param_queue_.emplace_front(x...);
// Enable loop now that we have work to do
this->enable_loop();
this->var_ = std::make_tuple(x...);
this->loop();
}
@@ -303,30 +286,15 @@ template<class C, typename... Ts> class ScriptWaitAction : public Action<Ts...>,
if (this->script_->is_running())
return;
while (!this->param_queue_.empty()) {
auto &params = this->param_queue_.front();
this->play_next_tuple_(params, typename gens<sizeof...(Ts)>::type());
this->param_queue_.pop_front();
}
// Queue is now empty - disable loop until next play_complex
this->disable_loop();
this->play_next_tuple_(this->var_);
}
void play(Ts... x) override { /* ignore - see play_complex */
}
void stop() override {
this->param_queue_.clear();
this->disable_loop();
}
protected:
template<int... S> void play_next_tuple_(const std::tuple<Ts...> &tuple, seq<S...> /*unused*/) {
this->play_next_(std::get<S>(tuple)...);
}
C *script_;
std::forward_list<std::tuple<Ts...>> param_queue_;
std::tuple<Ts...> var_{};
};
} // namespace script
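
The ScriptWaitAction change above is about where pending call arguments live while the script is still running: each call pushes its argument tuple onto a queue, and the loop replays the queued tuples once the script finishes. A minimal sketch of that idea, with std::apply standing in for the gens<>/seq<> index-expansion helper used in the diff:

#include <forward_list>
#include <tuple>

template<typename... Ts> struct WaitQueue {
  std::forward_list<std::tuple<Ts...>> pending;

  template<typename F> void push(F &&next, bool script_running, Ts... args) {
    if (!script_running) {
      next(args...);  // nothing to wait for, continue immediately
      return;
    }
    pending.emplace_front(args...);  // remember the arguments for later
  }

  // Called from loop() once the script has finished.
  template<typename F> void drain(F &&next) {
    while (!pending.empty()) {
      std::apply(next, pending.front());
      pending.pop_front();
    }
  }
};

Disabling the component's loop until the queue is non-empty (as the diff does with disable_loop()/enable_loop()) is an optimization layered on top of this; the queue itself is what makes overlapping invocations safe.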

View File

@@ -12,256 +12,241 @@ CODEOWNERS = ["@bdm310"]
STATE_ARG = "state"
SDL_KeyCode = cg.global_ns.enum("SDL_KeyCode")
SDL_KEYS = (
"SDLK_UNKNOWN",
"SDLK_RETURN",
"SDLK_ESCAPE",
"SDLK_BACKSPACE",
"SDLK_TAB",
"SDLK_SPACE",
"SDLK_EXCLAIM",
"SDLK_QUOTEDBL",
"SDLK_HASH",
"SDLK_PERCENT",
"SDLK_DOLLAR",
"SDLK_AMPERSAND",
"SDLK_QUOTE",
"SDLK_LEFTPAREN",
"SDLK_RIGHTPAREN",
"SDLK_ASTERISK",
"SDLK_PLUS",
"SDLK_COMMA",
"SDLK_MINUS",
"SDLK_PERIOD",
"SDLK_SLASH",
"SDLK_0",
"SDLK_1",
"SDLK_2",
"SDLK_3",
"SDLK_4",
"SDLK_5",
"SDLK_6",
"SDLK_7",
"SDLK_8",
"SDLK_9",
"SDLK_COLON",
"SDLK_SEMICOLON",
"SDLK_LESS",
"SDLK_EQUALS",
"SDLK_GREATER",
"SDLK_QUESTION",
"SDLK_AT",
"SDLK_LEFTBRACKET",
"SDLK_BACKSLASH",
"SDLK_RIGHTBRACKET",
"SDLK_CARET",
"SDLK_UNDERSCORE",
"SDLK_BACKQUOTE",
"SDLK_a",
"SDLK_b",
"SDLK_c",
"SDLK_d",
"SDLK_e",
"SDLK_f",
"SDLK_g",
"SDLK_h",
"SDLK_i",
"SDLK_j",
"SDLK_k",
"SDLK_l",
"SDLK_m",
"SDLK_n",
"SDLK_o",
"SDLK_p",
"SDLK_q",
"SDLK_r",
"SDLK_s",
"SDLK_t",
"SDLK_u",
"SDLK_v",
"SDLK_w",
"SDLK_x",
"SDLK_y",
"SDLK_z",
"SDLK_CAPSLOCK",
"SDLK_F1",
"SDLK_F2",
"SDLK_F3",
"SDLK_F4",
"SDLK_F5",
"SDLK_F6",
"SDLK_F7",
"SDLK_F8",
"SDLK_F9",
"SDLK_F10",
"SDLK_F11",
"SDLK_F12",
"SDLK_PRINTSCREEN",
"SDLK_SCROLLLOCK",
"SDLK_PAUSE",
"SDLK_INSERT",
"SDLK_HOME",
"SDLK_PAGEUP",
"SDLK_DELETE",
"SDLK_END",
"SDLK_PAGEDOWN",
"SDLK_RIGHT",
"SDLK_LEFT",
"SDLK_DOWN",
"SDLK_UP",
"SDLK_NUMLOCKCLEAR",
"SDLK_KP_DIVIDE",
"SDLK_KP_MULTIPLY",
"SDLK_KP_MINUS",
"SDLK_KP_PLUS",
"SDLK_KP_ENTER",
"SDLK_KP_1",
"SDLK_KP_2",
"SDLK_KP_3",
"SDLK_KP_4",
"SDLK_KP_5",
"SDLK_KP_6",
"SDLK_KP_7",
"SDLK_KP_8",
"SDLK_KP_9",
"SDLK_KP_0",
"SDLK_KP_PERIOD",
"SDLK_APPLICATION",
"SDLK_POWER",
"SDLK_KP_EQUALS",
"SDLK_F13",
"SDLK_F14",
"SDLK_F15",
"SDLK_F16",
"SDLK_F17",
"SDLK_F18",
"SDLK_F19",
"SDLK_F20",
"SDLK_F21",
"SDLK_F22",
"SDLK_F23",
"SDLK_F24",
"SDLK_EXECUTE",
"SDLK_HELP",
"SDLK_MENU",
"SDLK_SELECT",
"SDLK_STOP",
"SDLK_AGAIN",
"SDLK_UNDO",
"SDLK_CUT",
"SDLK_COPY",
"SDLK_PASTE",
"SDLK_FIND",
"SDLK_MUTE",
"SDLK_VOLUMEUP",
"SDLK_VOLUMEDOWN",
"SDLK_KP_COMMA",
"SDLK_KP_EQUALSAS400",
"SDLK_ALTERASE",
"SDLK_SYSREQ",
"SDLK_CANCEL",
"SDLK_CLEAR",
"SDLK_PRIOR",
"SDLK_RETURN2",
"SDLK_SEPARATOR",
"SDLK_OUT",
"SDLK_OPER",
"SDLK_CLEARAGAIN",
"SDLK_CRSEL",
"SDLK_EXSEL",
"SDLK_KP_00",
"SDLK_KP_000",
"SDLK_THOUSANDSSEPARATOR",
"SDLK_DECIMALSEPARATOR",
"SDLK_CURRENCYUNIT",
"SDLK_CURRENCYSUBUNIT",
"SDLK_KP_LEFTPAREN",
"SDLK_KP_RIGHTPAREN",
"SDLK_KP_LEFTBRACE",
"SDLK_KP_RIGHTBRACE",
"SDLK_KP_TAB",
"SDLK_KP_BACKSPACE",
"SDLK_KP_A",
"SDLK_KP_B",
"SDLK_KP_C",
"SDLK_KP_D",
"SDLK_KP_E",
"SDLK_KP_F",
"SDLK_KP_XOR",
"SDLK_KP_POWER",
"SDLK_KP_PERCENT",
"SDLK_KP_LESS",
"SDLK_KP_GREATER",
"SDLK_KP_AMPERSAND",
"SDLK_KP_DBLAMPERSAND",
"SDLK_KP_VERTICALBAR",
"SDLK_KP_DBLVERTICALBAR",
"SDLK_KP_COLON",
"SDLK_KP_HASH",
"SDLK_KP_SPACE",
"SDLK_KP_AT",
"SDLK_KP_EXCLAM",
"SDLK_KP_MEMSTORE",
"SDLK_KP_MEMRECALL",
"SDLK_KP_MEMCLEAR",
"SDLK_KP_MEMADD",
"SDLK_KP_MEMSUBTRACT",
"SDLK_KP_MEMMULTIPLY",
"SDLK_KP_MEMDIVIDE",
"SDLK_KP_PLUSMINUS",
"SDLK_KP_CLEAR",
"SDLK_KP_CLEARENTRY",
"SDLK_KP_BINARY",
"SDLK_KP_OCTAL",
"SDLK_KP_DECIMAL",
"SDLK_KP_HEXADECIMAL",
"SDLK_LCTRL",
"SDLK_LSHIFT",
"SDLK_LALT",
"SDLK_LGUI",
"SDLK_RCTRL",
"SDLK_RSHIFT",
"SDLK_RALT",
"SDLK_RGUI",
"SDLK_MODE",
"SDLK_AUDIONEXT",
"SDLK_AUDIOPREV",
"SDLK_AUDIOSTOP",
"SDLK_AUDIOPLAY",
"SDLK_AUDIOMUTE",
"SDLK_MEDIASELECT",
"SDLK_WWW",
"SDLK_MAIL",
"SDLK_CALCULATOR",
"SDLK_COMPUTER",
"SDLK_AC_SEARCH",
"SDLK_AC_HOME",
"SDLK_AC_BACK",
"SDLK_AC_FORWARD",
"SDLK_AC_STOP",
"SDLK_AC_REFRESH",
"SDLK_AC_BOOKMARKS",
"SDLK_BRIGHTNESSDOWN",
"SDLK_BRIGHTNESSUP",
"SDLK_DISPLAYSWITCH",
"SDLK_KBDILLUMTOGGLE",
"SDLK_KBDILLUMDOWN",
"SDLK_KBDILLUMUP",
"SDLK_EJECT",
"SDLK_SLEEP",
"SDLK_APP1",
"SDLK_APP2",
"SDLK_AUDIOREWIND",
"SDLK_AUDIOFASTFORWARD",
"SDLK_SOFTLEFT",
"SDLK_SOFTRIGHT",
"SDLK_CALL",
"SDLK_ENDCALL",
)
SDL_KEYMAP = {key: getattr(SDL_KeyCode, key) for key in SDL_KEYS}
SDL_KEYMAP = {
"SDLK_UNKNOWN": 0,
"SDLK_FIRST": 0,
"SDLK_BACKSPACE": 8,
"SDLK_TAB": 9,
"SDLK_CLEAR": 12,
"SDLK_RETURN": 13,
"SDLK_PAUSE": 19,
"SDLK_ESCAPE": 27,
"SDLK_SPACE": 32,
"SDLK_EXCLAIM": 33,
"SDLK_QUOTEDBL": 34,
"SDLK_HASH": 35,
"SDLK_DOLLAR": 36,
"SDLK_AMPERSAND": 38,
"SDLK_QUOTE": 39,
"SDLK_LEFTPAREN": 40,
"SDLK_RIGHTPAREN": 41,
"SDLK_ASTERISK": 42,
"SDLK_PLUS": 43,
"SDLK_COMMA": 44,
"SDLK_MINUS": 45,
"SDLK_PERIOD": 46,
"SDLK_SLASH": 47,
"SDLK_0": 48,
"SDLK_1": 49,
"SDLK_2": 50,
"SDLK_3": 51,
"SDLK_4": 52,
"SDLK_5": 53,
"SDLK_6": 54,
"SDLK_7": 55,
"SDLK_8": 56,
"SDLK_9": 57,
"SDLK_COLON": 58,
"SDLK_SEMICOLON": 59,
"SDLK_LESS": 60,
"SDLK_EQUALS": 61,
"SDLK_GREATER": 62,
"SDLK_QUESTION": 63,
"SDLK_AT": 64,
"SDLK_LEFTBRACKET": 91,
"SDLK_BACKSLASH": 92,
"SDLK_RIGHTBRACKET": 93,
"SDLK_CARET": 94,
"SDLK_UNDERSCORE": 95,
"SDLK_BACKQUOTE": 96,
"SDLK_a": 97,
"SDLK_b": 98,
"SDLK_c": 99,
"SDLK_d": 100,
"SDLK_e": 101,
"SDLK_f": 102,
"SDLK_g": 103,
"SDLK_h": 104,
"SDLK_i": 105,
"SDLK_j": 106,
"SDLK_k": 107,
"SDLK_l": 108,
"SDLK_m": 109,
"SDLK_n": 110,
"SDLK_o": 111,
"SDLK_p": 112,
"SDLK_q": 113,
"SDLK_r": 114,
"SDLK_s": 115,
"SDLK_t": 116,
"SDLK_u": 117,
"SDLK_v": 118,
"SDLK_w": 119,
"SDLK_x": 120,
"SDLK_y": 121,
"SDLK_z": 122,
"SDLK_DELETE": 127,
"SDLK_WORLD_0": 160,
"SDLK_WORLD_1": 161,
"SDLK_WORLD_2": 162,
"SDLK_WORLD_3": 163,
"SDLK_WORLD_4": 164,
"SDLK_WORLD_5": 165,
"SDLK_WORLD_6": 166,
"SDLK_WORLD_7": 167,
"SDLK_WORLD_8": 168,
"SDLK_WORLD_9": 169,
"SDLK_WORLD_10": 170,
"SDLK_WORLD_11": 171,
"SDLK_WORLD_12": 172,
"SDLK_WORLD_13": 173,
"SDLK_WORLD_14": 174,
"SDLK_WORLD_15": 175,
"SDLK_WORLD_16": 176,
"SDLK_WORLD_17": 177,
"SDLK_WORLD_18": 178,
"SDLK_WORLD_19": 179,
"SDLK_WORLD_20": 180,
"SDLK_WORLD_21": 181,
"SDLK_WORLD_22": 182,
"SDLK_WORLD_23": 183,
"SDLK_WORLD_24": 184,
"SDLK_WORLD_25": 185,
"SDLK_WORLD_26": 186,
"SDLK_WORLD_27": 187,
"SDLK_WORLD_28": 188,
"SDLK_WORLD_29": 189,
"SDLK_WORLD_30": 190,
"SDLK_WORLD_31": 191,
"SDLK_WORLD_32": 192,
"SDLK_WORLD_33": 193,
"SDLK_WORLD_34": 194,
"SDLK_WORLD_35": 195,
"SDLK_WORLD_36": 196,
"SDLK_WORLD_37": 197,
"SDLK_WORLD_38": 198,
"SDLK_WORLD_39": 199,
"SDLK_WORLD_40": 200,
"SDLK_WORLD_41": 201,
"SDLK_WORLD_42": 202,
"SDLK_WORLD_43": 203,
"SDLK_WORLD_44": 204,
"SDLK_WORLD_45": 205,
"SDLK_WORLD_46": 206,
"SDLK_WORLD_47": 207,
"SDLK_WORLD_48": 208,
"SDLK_WORLD_49": 209,
"SDLK_WORLD_50": 210,
"SDLK_WORLD_51": 211,
"SDLK_WORLD_52": 212,
"SDLK_WORLD_53": 213,
"SDLK_WORLD_54": 214,
"SDLK_WORLD_55": 215,
"SDLK_WORLD_56": 216,
"SDLK_WORLD_57": 217,
"SDLK_WORLD_58": 218,
"SDLK_WORLD_59": 219,
"SDLK_WORLD_60": 220,
"SDLK_WORLD_61": 221,
"SDLK_WORLD_62": 222,
"SDLK_WORLD_63": 223,
"SDLK_WORLD_64": 224,
"SDLK_WORLD_65": 225,
"SDLK_WORLD_66": 226,
"SDLK_WORLD_67": 227,
"SDLK_WORLD_68": 228,
"SDLK_WORLD_69": 229,
"SDLK_WORLD_70": 230,
"SDLK_WORLD_71": 231,
"SDLK_WORLD_72": 232,
"SDLK_WORLD_73": 233,
"SDLK_WORLD_74": 234,
"SDLK_WORLD_75": 235,
"SDLK_WORLD_76": 236,
"SDLK_WORLD_77": 237,
"SDLK_WORLD_78": 238,
"SDLK_WORLD_79": 239,
"SDLK_WORLD_80": 240,
"SDLK_WORLD_81": 241,
"SDLK_WORLD_82": 242,
"SDLK_WORLD_83": 243,
"SDLK_WORLD_84": 244,
"SDLK_WORLD_85": 245,
"SDLK_WORLD_86": 246,
"SDLK_WORLD_87": 247,
"SDLK_WORLD_88": 248,
"SDLK_WORLD_89": 249,
"SDLK_WORLD_90": 250,
"SDLK_WORLD_91": 251,
"SDLK_WORLD_92": 252,
"SDLK_WORLD_93": 253,
"SDLK_WORLD_94": 254,
"SDLK_WORLD_95": 255,
"SDLK_KP0": 256,
"SDLK_KP1": 257,
"SDLK_KP2": 258,
"SDLK_KP3": 259,
"SDLK_KP4": 260,
"SDLK_KP5": 261,
"SDLK_KP6": 262,
"SDLK_KP7": 263,
"SDLK_KP8": 264,
"SDLK_KP9": 265,
"SDLK_KP_PERIOD": 266,
"SDLK_KP_DIVIDE": 267,
"SDLK_KP_MULTIPLY": 268,
"SDLK_KP_MINUS": 269,
"SDLK_KP_PLUS": 270,
"SDLK_KP_ENTER": 271,
"SDLK_KP_EQUALS": 272,
"SDLK_UP": 273,
"SDLK_DOWN": 274,
"SDLK_RIGHT": 275,
"SDLK_LEFT": 276,
"SDLK_INSERT": 277,
"SDLK_HOME": 278,
"SDLK_END": 279,
"SDLK_PAGEUP": 280,
"SDLK_PAGEDOWN": 281,
"SDLK_F1": 282,
"SDLK_F2": 283,
"SDLK_F3": 284,
"SDLK_F4": 285,
"SDLK_F5": 286,
"SDLK_F6": 287,
"SDLK_F7": 288,
"SDLK_F8": 289,
"SDLK_F9": 290,
"SDLK_F10": 291,
"SDLK_F11": 292,
"SDLK_F12": 293,
"SDLK_F13": 294,
"SDLK_F14": 295,
"SDLK_F15": 296,
"SDLK_NUMLOCK": 300,
"SDLK_CAPSLOCK": 301,
"SDLK_SCROLLOCK": 302,
"SDLK_RSHIFT": 303,
"SDLK_LSHIFT": 304,
"SDLK_RCTRL": 305,
"SDLK_LCTRL": 306,
"SDLK_RALT": 307,
"SDLK_LALT": 308,
"SDLK_RMETA": 309,
"SDLK_LMETA": 310,
"SDLK_LSUPER": 311,
"SDLK_RSUPER": 312,
"SDLK_MODE": 313,
"SDLK_COMPOSE": 314,
"SDLK_HELP": 315,
"SDLK_PRINT": 316,
"SDLK_SYSREQ": 317,
"SDLK_BREAK": 318,
"SDLK_MENU": 319,
"SDLK_POWER": 320,
"SDLK_EURO": 321,
"SDLK_UNDO": 322,
}
CONFIG_SCHEMA = (
binary_sensor.binary_sensor_schema(BinarySensor)

View File

@@ -1,5 +1,7 @@
#pragma once
#include <set>
#include "esphome/core/component.h"
#include "esphome/components/output/binary_output.h"
#include "esphome/components/output/float_output.h"
@@ -16,7 +18,7 @@ class SpeedFan : public Component, public fan::Fan {
void set_output(output::FloatOutput *output) { this->output_ = output; }
void set_oscillating(output::BinaryOutput *oscillating) { this->oscillating_ = oscillating; }
void set_direction(output::BinaryOutput *direction) { this->direction_ = direction; }
void set_preset_modes(std::initializer_list<const char *> presets) { this->preset_modes_ = presets; }
void set_preset_modes(const std::set<std::string> &presets) { this->preset_modes_ = presets; }
fan::FanTraits get_traits() override { return this->traits_; }
protected:
@@ -28,7 +30,7 @@ class SpeedFan : public Component, public fan::Fan {
output::BinaryOutput *direction_{nullptr};
int speed_count_{};
fan::FanTraits traits_;
std::vector<const char *> preset_modes_{};
std::set<std::string> preset_modes_{};
};
} // namespace speed

View File

@@ -9,7 +9,7 @@ namespace st7920 {
class ST7920;
using st7920_writer_t = std::function<void(ST7920 &)>;
using st7920_writer_t = display::DisplayWriter<ST7920>;
class ST7920 : public display::DisplayBuffer,
public spi::SPIDevice<spi::BIT_ORDER_MSB_FIRST, spi::CLOCK_POLARITY_HIGH, spi::CLOCK_PHASE_TRAILING,
@@ -44,7 +44,7 @@ class ST7920 : public display::DisplayBuffer,
void end_transaction_();
int16_t width_ = 128, height_ = 64;
optional<st7920_writer_t> writer_local_{};
st7920_writer_t writer_local_{};
};
} // namespace st7920
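The writer typedefs in these hunks switch between std::function<void(T &)> and a display::DisplayWriter<T> type; per the commit message, the point of the function-pointer form is that stateless (captureless) lambdas convert to plain function pointers, avoiding std::function's type-erasure overhead. A minimal sketch of that general idea, not the actual esphome DisplayWriter:

#include <cstdio>

// Stores a raw function pointer; captureless lambdas convert to void(*)(T &)
// implicitly, so no type erasure or possible heap allocation is involved.
template<typename T> class TinyDisplayWriter {
 public:
  using fn_t = void (*)(T &);

  TinyDisplayWriter() = default;
  TinyDisplayWriter(fn_t fn) : fn_(fn) {}

  explicit operator bool() const { return this->fn_ != nullptr; }
  void operator()(T &display) const {
    if (this->fn_ != nullptr)
      this->fn_(display);
  }

 protected:
  fn_t fn_{nullptr};
};

struct FakeDisplay {
  void print(const char *msg) { std::printf("%s\n", msg); }
};

int main() {
  // A stateless lambda decays to a function pointer:
  TinyDisplayWriter<FakeDisplay> writer([](FakeDisplay &it) { it.print("hello"); });
  FakeDisplay d;
  if (writer)
    writer(d);
}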

View File

@@ -138,7 +138,6 @@ def _concat_nodes_override(values: Iterator[Any]) -> Any:
values = chain(head, values)
raw = "".join([str(v) for v in values])
result = None
try:
# Attempt to parse the concatenated string into a Python literal.
# This allows expressions like "1 + 2" to be evaluated to the integer 3.
@@ -146,16 +145,11 @@ def _concat_nodes_override(values: Iterator[Any]) -> Any:
# fall back to returning the raw string. This is consistent with
# Home Assistant's behavior when evaluating templates
result = literal_eval(raw)
except (ValueError, SyntaxError, MemoryError, TypeError):
pass
else:
if isinstance(result, set):
# Sets are not supported, return raw string
return raw
if not isinstance(result, str):
return result
except (ValueError, SyntaxError, MemoryError, TypeError):
pass
return raw

View File

@@ -1,5 +1,7 @@
#pragma once
#include <set>
#include "esphome/core/component.h"
#include "esphome/components/fan/fan.h"
@@ -14,7 +16,7 @@ class TemplateFan : public Component, public fan::Fan {
void set_has_direction(bool has_direction) { this->has_direction_ = has_direction; }
void set_has_oscillating(bool has_oscillating) { this->has_oscillating_ = has_oscillating; }
void set_speed_count(int count) { this->speed_count_ = count; }
void set_preset_modes(std::initializer_list<const char *> presets) { this->preset_modes_ = presets; }
void set_preset_modes(const std::set<std::string> &presets) { this->preset_modes_ = presets; }
fan::FanTraits get_traits() override { return this->traits_; }
protected:
@@ -24,7 +26,7 @@ class TemplateFan : public Component, public fan::Fan {
bool has_direction_{false};
int speed_count_{0};
fan::FanTraits traits_;
std::vector<const char *> preset_modes_{};
std::set<std::string> preset_modes_{};
};
} // namespace template_

View File

@@ -3,13 +3,14 @@
#include "esphome/core/component.h"
#include "esphome/core/defines.h"
#include "esphome/core/hal.h"
#include "esphome/components/display/display.h"
namespace esphome {
namespace tm1621 {
class TM1621Display;
using tm1621_writer_t = std::function<void(TM1621Display &)>;
using tm1621_writer_t = display::DisplayWriter<TM1621Display>;
class TM1621Display : public PollingComponent {
public:
@@ -59,7 +60,7 @@ class TM1621Display : public PollingComponent {
GPIOPin *cs_pin_;
GPIOPin *read_pin_;
GPIOPin *write_pin_;
optional<tm1621_writer_t> writer_{};
tm1621_writer_t writer_{};
char row_[2][12];
uint8_t state_;
uint8_t device_;

View File

@@ -4,6 +4,7 @@
#include "esphome/core/defines.h"
#include "esphome/core/hal.h"
#include "esphome/core/time.h"
#include "esphome/components/display/display.h"
#include <vector>
@@ -19,7 +20,7 @@ class TM1637Display;
class TM1637Key;
#endif
using tm1637_writer_t = std::function<void(TM1637Display &)>;
using tm1637_writer_t = display::DisplayWriter<TM1637Display>;
class TM1637Display : public PollingComponent {
public:
@@ -78,7 +79,7 @@ class TM1637Display : public PollingComponent {
uint8_t length_;
bool inverted_;
bool on_{true};
optional<tm1637_writer_t> writer_{};
tm1637_writer_t writer_{};
uint8_t buffer_[6] = {0};
#ifdef USE_BINARY_SENSOR
std::vector<TM1637Key *> tm1637_keys_{};

View File

@@ -5,6 +5,7 @@
#include "esphome/core/defines.h"
#include "esphome/core/hal.h"
#include "esphome/core/time.h"
#include "esphome/components/display/display.h"
#include <vector>
@@ -18,7 +19,7 @@ class KeyListener {
class TM1638Component;
using tm1638_writer_t = std::function<void(TM1638Component &)>;
using tm1638_writer_t = display::DisplayWriter<TM1638Component>;
class TM1638Component : public PollingComponent {
public:
@@ -70,7 +71,7 @@ class TM1638Component : public PollingComponent {
GPIOPin *stb_pin_;
GPIOPin *dio_pin_;
uint8_t *buffer_ = new uint8_t[8];
optional<tm1638_writer_t> writer_{};
tm1638_writer_t writer_{};
std::vector<KeyListener *> listeners_{};
};

View File

@@ -111,7 +111,8 @@ void DeferredUpdateEventSource::deq_push_back_with_dedup_(void *source, message_
// Use range-based for loop instead of std::find_if to reduce template instantiation overhead and binary size
for (auto &event : this->deferred_queue_) {
if (event == item) {
return; // Already in queue, no need to update since items are equal
event = item;
return;
}
}
this->deferred_queue_.push_back(item);
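The comment about avoiding std::find_if is a code-size concern: every distinct lambda type instantiates another copy of the algorithm template. A minimal sketch of the two equivalent lookups over a simplified event type:

#include <algorithm>
#include <deque>

struct QueuedEvent {
  void *source;
  int generator;
  bool operator==(const QueuedEvent &o) const { return source == o.source && generator == o.generator; }
};

// std::find_if: concise, but instantiates find_if<iterator, lambda> once per
// unique lambda type used anywhere in the binary.
bool contains_find_if(const std::deque<QueuedEvent> &q, const QueuedEvent &item) {
  return std::find_if(q.begin(), q.end(), [&](const QueuedEvent &e) { return e == item; }) != q.end();
}

// Range-based for: same behaviour, no extra algorithm instantiation.
bool contains_range_for(const std::deque<QueuedEvent> &q, const QueuedEvent &item) {
  for (const auto &e : q) {
    if (e == item)
      return true;
  }
  return false;
}

int main() {
  std::deque<QueuedEvent> q{{nullptr, 1}};
  QueuedEvent item{nullptr, 1};
  return contains_find_if(q, item) == contains_range_for(q, item) ? 0 : 1;
}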
@@ -219,51 +220,50 @@ void DeferredUpdateEventSourceList::add_new_client(WebServer *ws, AsyncWebServer
DeferredUpdateEventSource *es = new DeferredUpdateEventSource(ws, "/events");
this->push_back(es);
es->onConnect([this, es](AsyncEventSourceClient *client) { this->on_client_connect_(es); });
es->onConnect([this, ws, es](AsyncEventSourceClient *client) {
ws->defer([this, ws, es]() { this->on_client_connect_(ws, es); });
});
es->onDisconnect([this, es](AsyncEventSourceClient *client) { this->on_client_disconnect_(es); });
es->onDisconnect([this, ws, es](AsyncEventSourceClient *client) {
ws->defer([this, es]() { this->on_client_disconnect_((DeferredUpdateEventSource *) es); });
});
es->handleRequest(request);
}
void DeferredUpdateEventSourceList::on_client_connect_(DeferredUpdateEventSource *source) {
WebServer *ws = source->web_server_;
ws->defer([ws, source]() {
// Configure reconnect timeout and send config
// this should always go through since the AsyncEventSourceClient event queue is empty on connect
std::string message = ws->get_config_json();
source->try_send_nodefer(message.c_str(), "ping", millis(), 30000);
void DeferredUpdateEventSourceList::on_client_connect_(WebServer *ws, DeferredUpdateEventSource *source) {
// Configure reconnect timeout and send config
// this should always go through since the AsyncEventSourceClient event queue is empty on connect
std::string message = ws->get_config_json();
source->try_send_nodefer(message.c_str(), "ping", millis(), 30000);
#ifdef USE_WEBSERVER_SORTING
for (auto &group : ws->sorting_groups_) {
json::JsonBuilder builder;
JsonObject root = builder.root();
root["name"] = group.second.name;
root["sorting_weight"] = group.second.weight;
message = builder.serialize();
for (auto &group : ws->sorting_groups_) {
json::JsonBuilder builder;
JsonObject root = builder.root();
root["name"] = group.second.name;
root["sorting_weight"] = group.second.weight;
message = builder.serialize();
// up to 31 groups should be able to be queued initially without defer
source->try_send_nodefer(message.c_str(), "sorting_group");
}
// up to 31 groups should be able to be queued initially without defer
source->try_send_nodefer(message.c_str(), "sorting_group");
}
#endif
source->entities_iterator_.begin(ws->include_internal_);
source->entities_iterator_.begin(ws->include_internal_);
// just dump them all up-front and take advantage of the deferred queue
// on second thought that takes too long, but leaving the commented code here for debug purposes
// while(!source->entities_iterator_.completed()) {
// source->entities_iterator_.advance();
//}
});
// just dump them all up-front and take advantage of the deferred queue
// on second thought that takes too long, but leaving the commented code here for debug purposes
// while(!source->entities_iterator_.completed()) {
// source->entities_iterator_.advance();
//}
}
void DeferredUpdateEventSourceList::on_client_disconnect_(DeferredUpdateEventSource *source) {
source->web_server_->defer([this, source]() {
// This method was called via WebServer->defer() and is no longer executing in the
// context of the network callback. The object is now dead and can be safely deleted.
this->remove(source);
delete source; // NOLINT
});
// This method was called via WebServer->defer() and is no longer executing in the
// context of the network callback. The object is now dead and can be safely deleted.
this->remove(source);
delete source; // NOLINT
}
#endif
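Both versions of the disconnect path above unlink and delete the event source only once execution has left the network stack's callback. A generic sketch of that defer-then-delete pattern, using a hypothetical main-loop queue in place of WebServer::defer():

#include <cstdio>
#include <functional>
#include <list>
#include <vector>

// Hypothetical main-loop work queue standing in for WebServer::defer().
std::vector<std::function<void()>> g_main_loop_queue;
void defer(std::function<void()> fn) { g_main_loop_queue.push_back(std::move(fn)); }

struct EventSource {
  ~EventSource() { std::printf("deleted on the main loop\n"); }
};

std::list<EventSource *> g_sources;

// Runs inside the network callback: only schedule the deletion here.
void on_disconnect(EventSource *source) {
  defer([source]() {
    // Now executing on the main loop, outside the network callback,
    // so the object can be unlinked and freed safely.
    g_sources.remove(source);
    delete source;
  });
}

int main() {
  auto *src = new EventSource();
  g_sources.push_back(src);
  on_disconnect(src);                 // nothing is freed yet
  for (auto &fn : g_main_loop_queue)  // simulated main-loop iteration
    fn();
  g_main_loop_queue.clear();
}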
@@ -435,10 +435,9 @@ void WebServer::on_sensor_update(sensor::Sensor *obj, float state) {
}
void WebServer::handle_sensor_request(AsyncWebServerRequest *request, const UrlMatch &match) {
for (sensor::Sensor *obj : App.get_sensors()) {
if (!match.id_equals_entity(obj))
if (!match.id_equals(obj->get_object_id()))
continue;
// Note: request->method() is always HTTP_GET here (canHandle ensures this)
if (match.method_empty()) {
if (request->method() == HTTP_GET && match.method_empty()) {
auto detail = get_request_detail(request);
std::string data = this->sensor_json(obj, obj->state, detail);
request->send(200, "application/json", data.c_str());
@@ -478,10 +477,9 @@ void WebServer::on_text_sensor_update(text_sensor::TextSensor *obj, const std::s
}
void WebServer::handle_text_sensor_request(AsyncWebServerRequest *request, const UrlMatch &match) {
for (text_sensor::TextSensor *obj : App.get_text_sensors()) {
if (!match.id_equals_entity(obj))
if (!match.id_equals(obj->get_object_id()))
continue;
// Note: request->method() is always HTTP_GET here (canHandle ensures this)
if (match.method_empty()) {
if (request->method() == HTTP_GET && match.method_empty()) {
auto detail = get_request_detail(request);
std::string data = this->text_sensor_json(obj, obj->state, detail);
request->send(200, "application/json", data.c_str());
@@ -518,7 +516,7 @@ void WebServer::on_switch_update(switch_::Switch *obj, bool state) {
}
void WebServer::handle_switch_request(AsyncWebServerRequest *request, const UrlMatch &match) {
for (switch_::Switch *obj : App.get_switches()) {
if (!match.id_equals_entity(obj))
if (!match.id_equals(obj->get_object_id()))
continue;
if (request->method() == HTTP_GET && match.method_empty()) {
@@ -587,7 +585,7 @@ std::string WebServer::switch_json(switch_::Switch *obj, bool value, JsonDetail
#ifdef USE_BUTTON
void WebServer::handle_button_request(AsyncWebServerRequest *request, const UrlMatch &match) {
for (button::Button *obj : App.get_buttons()) {
if (!match.id_equals_entity(obj))
if (!match.id_equals(obj->get_object_id()))
continue;
if (request->method() == HTTP_GET && match.method_empty()) {
auto detail = get_request_detail(request);
@@ -629,10 +627,9 @@ void WebServer::on_binary_sensor_update(binary_sensor::BinarySensor *obj) {
}
void WebServer::handle_binary_sensor_request(AsyncWebServerRequest *request, const UrlMatch &match) {
for (binary_sensor::BinarySensor *obj : App.get_binary_sensors()) {
if (!match.id_equals_entity(obj))
if (!match.id_equals(obj->get_object_id()))
continue;
// Note: request->method() is always HTTP_GET here (canHandle ensures this)
if (match.method_empty()) {
if (request->method() == HTTP_GET && match.method_empty()) {
auto detail = get_request_detail(request);
std::string data = this->binary_sensor_json(obj, obj->state, detail);
request->send(200, "application/json", data.c_str());
@@ -668,7 +665,7 @@ void WebServer::on_fan_update(fan::Fan *obj) {
}
void WebServer::handle_fan_request(AsyncWebServerRequest *request, const UrlMatch &match) {
for (fan::Fan *obj : App.get_fans()) {
if (!match.id_equals_entity(obj))
if (!match.id_equals(obj->get_object_id()))
continue;
if (request->method() == HTTP_GET && match.method_empty()) {
@@ -742,7 +739,7 @@ void WebServer::on_light_update(light::LightState *obj) {
}
void WebServer::handle_light_request(AsyncWebServerRequest *request, const UrlMatch &match) {
for (light::LightState *obj : App.get_lights()) {
if (!match.id_equals_entity(obj))
if (!match.id_equals(obj->get_object_id()))
continue;
if (request->method() == HTTP_GET && match.method_empty()) {
@@ -815,7 +812,7 @@ void WebServer::on_cover_update(cover::Cover *obj) {
}
void WebServer::handle_cover_request(AsyncWebServerRequest *request, const UrlMatch &match) {
for (cover::Cover *obj : App.get_covers()) {
if (!match.id_equals_entity(obj))
if (!match.id_equals(obj->get_object_id()))
continue;
if (request->method() == HTTP_GET && match.method_empty()) {
@@ -900,7 +897,7 @@ void WebServer::on_number_update(number::Number *obj, float state) {
}
void WebServer::handle_number_request(AsyncWebServerRequest *request, const UrlMatch &match) {
for (auto *obj : App.get_numbers()) {
if (!match.id_equals_entity(obj))
if (!match.id_equals(obj->get_object_id()))
continue;
if (request->method() == HTTP_GET && match.method_empty()) {
@@ -965,7 +962,7 @@ void WebServer::on_date_update(datetime::DateEntity *obj) {
}
void WebServer::handle_date_request(AsyncWebServerRequest *request, const UrlMatch &match) {
for (auto *obj : App.get_dates()) {
if (!match.id_equals_entity(obj))
if (!match.id_equals(obj->get_object_id()))
continue;
if (request->method() == HTTP_GET && match.method_empty()) {
auto detail = get_request_detail(request);
@@ -1020,7 +1017,7 @@ void WebServer::on_time_update(datetime::TimeEntity *obj) {
}
void WebServer::handle_time_request(AsyncWebServerRequest *request, const UrlMatch &match) {
for (auto *obj : App.get_times()) {
if (!match.id_equals_entity(obj))
if (!match.id_equals(obj->get_object_id()))
continue;
if (request->method() == HTTP_GET && match.method_empty()) {
auto detail = get_request_detail(request);
@@ -1074,7 +1071,7 @@ void WebServer::on_datetime_update(datetime::DateTimeEntity *obj) {
}
void WebServer::handle_datetime_request(AsyncWebServerRequest *request, const UrlMatch &match) {
for (auto *obj : App.get_datetimes()) {
if (!match.id_equals_entity(obj))
if (!match.id_equals(obj->get_object_id()))
continue;
if (request->method() == HTTP_GET && match.method_empty()) {
auto detail = get_request_detail(request);
@@ -1129,7 +1126,7 @@ void WebServer::on_text_update(text::Text *obj, const std::string &state) {
}
void WebServer::handle_text_request(AsyncWebServerRequest *request, const UrlMatch &match) {
for (auto *obj : App.get_texts()) {
if (!match.id_equals_entity(obj))
if (!match.id_equals(obj->get_object_id()))
continue;
if (request->method() == HTTP_GET && match.method_empty()) {
@@ -1183,7 +1180,7 @@ void WebServer::on_select_update(select::Select *obj, const std::string &state,
}
void WebServer::handle_select_request(AsyncWebServerRequest *request, const UrlMatch &match) {
for (auto *obj : App.get_selects()) {
if (!match.id_equals_entity(obj))
if (!match.id_equals(obj->get_object_id()))
continue;
if (request->method() == HTTP_GET && match.method_empty()) {
@@ -1239,7 +1236,7 @@ void WebServer::on_climate_update(climate::Climate *obj) {
}
void WebServer::handle_climate_request(AsyncWebServerRequest *request, const UrlMatch &match) {
for (auto *obj : App.get_climates()) {
if (!match.id_equals_entity(obj))
if (!match.id_equals(obj->get_object_id()))
continue;
if (request->method() == HTTP_GET && match.method_empty()) {
@@ -1380,7 +1377,7 @@ void WebServer::on_lock_update(lock::Lock *obj) {
}
void WebServer::handle_lock_request(AsyncWebServerRequest *request, const UrlMatch &match) {
for (lock::Lock *obj : App.get_locks()) {
if (!match.id_equals_entity(obj))
if (!match.id_equals(obj->get_object_id()))
continue;
if (request->method() == HTTP_GET && match.method_empty()) {
@@ -1451,7 +1448,7 @@ void WebServer::on_valve_update(valve::Valve *obj) {
}
void WebServer::handle_valve_request(AsyncWebServerRequest *request, const UrlMatch &match) {
for (valve::Valve *obj : App.get_valves()) {
if (!match.id_equals_entity(obj))
if (!match.id_equals(obj->get_object_id()))
continue;
if (request->method() == HTTP_GET && match.method_empty()) {
@@ -1532,7 +1529,7 @@ void WebServer::on_alarm_control_panel_update(alarm_control_panel::AlarmControlP
}
void WebServer::handle_alarm_control_panel_request(AsyncWebServerRequest *request, const UrlMatch &match) {
for (alarm_control_panel::AlarmControlPanel *obj : App.get_alarm_control_panels()) {
if (!match.id_equals_entity(obj))
if (!match.id_equals(obj->get_object_id()))
continue;
if (request->method() == HTTP_GET && match.method_empty()) {
@@ -1611,11 +1608,10 @@ void WebServer::on_event(event::Event *obj, const std::string &event_type) {
void WebServer::handle_event_request(AsyncWebServerRequest *request, const UrlMatch &match) {
for (event::Event *obj : App.get_events()) {
if (!match.id_equals_entity(obj))
if (!match.id_equals(obj->get_object_id()))
continue;
// Note: request->method() is always HTTP_GET here (canHandle ensures this)
if (match.method_empty()) {
if (request->method() == HTTP_GET && match.method_empty()) {
auto detail = get_request_detail(request);
std::string data = this->event_json(obj, "", detail);
request->send(200, "application/json", data.c_str());
@@ -1677,7 +1673,7 @@ void WebServer::on_update(update::UpdateEntity *obj) {
}
void WebServer::handle_update_request(AsyncWebServerRequest *request, const UrlMatch &match) {
for (update::UpdateEntity *obj : App.get_updates()) {
if (!match.id_equals_entity(obj))
if (!match.id_equals(obj->get_object_id()))
continue;
if (request->method() == HTTP_GET && match.method_empty()) {

View File

@@ -48,15 +48,8 @@ struct UrlMatch {
return domain && domain_len == strlen(str) && memcmp(domain, str, domain_len) == 0;
}
bool id_equals_entity(EntityBase *entity) const {
// Zero-copy comparison using StringRef
StringRef static_ref = entity->get_object_id_ref_for_api_();
if (!static_ref.empty()) {
return id && id_len == static_ref.size() && memcmp(id, static_ref.c_str(), id_len) == 0;
}
// Fallback to allocation (rare)
const auto &obj_id = entity->get_object_id();
return id && id_len == obj_id.length() && memcmp(id, obj_id.c_str(), id_len) == 0;
bool id_equals(const std::string &str) const {
return id && id_len == str.length() && memcmp(id, str.c_str(), id_len) == 0;
}
bool method_equals(const char *str) const {
@@ -148,7 +141,7 @@ class DeferredUpdateEventSource : public AsyncEventSource {
class DeferredUpdateEventSourceList : public std::list<DeferredUpdateEventSource *> {
protected:
void on_client_connect_(DeferredUpdateEventSource *source);
void on_client_connect_(WebServer *ws, DeferredUpdateEventSource *source);
void on_client_disconnect_(DeferredUpdateEventSource *source);
public:

View File

@@ -4,7 +4,6 @@
#include <memory>
#include <cstring>
#include <cctype>
#include <cinttypes>
#include "esphome/core/helpers.h"
#include "esphome/core/log.h"
@@ -246,8 +245,8 @@ void AsyncWebServerRequest::redirect(const std::string &url) {
}
void AsyncWebServerRequest::init_response_(AsyncWebServerResponse *rsp, int code, const char *content_type) {
// Set status code - use constants for common codes, default to 500 for unknown codes
const char *status;
// Set status code - use constants for common codes to avoid string allocation
const char *status = nullptr;
switch (code) {
case 200:
status = HTTPD_200;
@@ -259,10 +258,9 @@ void AsyncWebServerRequest::init_response_(AsyncWebServerResponse *rsp, int code
status = HTTPD_409;
break;
default:
status = HTTPD_500;
break;
}
httpd_resp_set_status(*this, status);
httpd_resp_set_status(*this, status == nullptr ? to_string(code).c_str() : status);
if (content_type && *content_type) {
httpd_resp_set_type(*this, content_type);
@@ -350,13 +348,7 @@ void AsyncWebServerResponse::addHeader(const char *name, const char *value) {
httpd_resp_set_hdr(*this->req_, name, value);
}
void AsyncResponseStream::print(float value) {
// Use stack buffer to avoid temporary string allocation
// Size: sign (1) + digits (10) + decimal (1) + precision (6) + exponent (5) + null (1) = 24, use 32 for safety
char buf[32];
int len = snprintf(buf, sizeof(buf), "%f", value);
this->content_.append(buf, len);
}
void AsyncResponseStream::print(float value) { this->print(to_string(value)); }
void AsyncResponseStream::printf(const char *fmt, ...) {
va_list args;
@@ -502,7 +494,8 @@ void AsyncEventSourceResponse::deq_push_back_with_dedup_(void *source, message_g
// Use range-based for loop instead of std::find_if to reduce template instantiation overhead and binary size
for (auto &event : this->deferred_queue_) {
if (event == item) {
return; // Already in queue, no need to update since items are equal
event = item;
return;
}
}
this->deferred_queue_.push_back(item);
@@ -601,19 +594,16 @@ bool AsyncEventSourceResponse::try_send_nodefer(const char *message, const char
event_buffer_.append(chunk_len_header);
// Use stack buffer for formatting numeric fields to avoid temporary string allocations
// Size: "retry: " (7) + max uint32 (10 digits) + CRLF (2) + null (1) = 20 bytes, use 32 for safety
constexpr size_t num_buf_size = 32;
char num_buf[num_buf_size];
if (reconnect) {
int len = snprintf(num_buf, num_buf_size, "retry: %" PRIu32 CRLF_STR, reconnect);
event_buffer_.append(num_buf, len);
event_buffer_.append("retry: ", sizeof("retry: ") - 1);
event_buffer_.append(to_string(reconnect));
event_buffer_.append(CRLF_STR, CRLF_LEN);
}
if (id) {
int len = snprintf(num_buf, num_buf_size, "id: %" PRIu32 CRLF_STR, id);
event_buffer_.append(num_buf, len);
event_buffer_.append("id: ", sizeof("id: ") - 1);
event_buffer_.append(to_string(id));
event_buffer_.append(CRLF_STR, CRLF_LEN);
}
if (event && *event) {

View File

@@ -3,7 +3,7 @@
#include <zephyr/kernel.h>
#include <zephyr/drivers/watchdog.h>
#include <zephyr/sys/reboot.h>
#include <zephyr/random/random.h>
#include <zephyr/random/rand32.h>
#include "esphome/core/hal.h"
#include "esphome/core/helpers.h"

View File

@@ -8,8 +8,8 @@ namespace zephyr {
static const char *const TAG = "zephyr";
static gpio_flags_t flags_to_mode(gpio::Flags flags, bool inverted, bool value) {
gpio_flags_t ret = 0;
static int flags_to_mode(gpio::Flags flags, bool inverted, bool value) {
int ret = 0;
if (flags & gpio::FLAG_INPUT) {
ret |= GPIO_INPUT;
}
@@ -79,10 +79,7 @@ void ZephyrGPIOPin::pin_mode(gpio::Flags flags) {
if (nullptr == this->gpio_) {
return;
}
auto ret = gpio_pin_configure(this->gpio_, this->pin_ % 32, flags_to_mode(flags, this->inverted_, this->value_));
if (ret != 0) {
ESP_LOGE(TAG, "gpio %u cannot be configured %d.", this->pin_, ret);
}
gpio_pin_configure(this->gpio_, this->pin_ % 32, flags_to_mode(flags, this->inverted_, this->value_));
}
std::string ZephyrGPIOPin::dump_summary() const {

View File

@@ -2,7 +2,6 @@
from __future__ import annotations
from collections.abc import Callable
from contextlib import contextmanager, suppress
from dataclasses import dataclass
from datetime import datetime
@@ -19,7 +18,6 @@ import logging
from pathlib import Path
import re
from string import ascii_letters, digits
import typing
import uuid as uuid_
import voluptuous as vol
@@ -1765,37 +1763,16 @@ class SplitDefault(Optional):
class OnlyWith(Optional):
"""Set the default value only if the given component(s) is/are loaded.
"""Set the default value only if the given component is loaded."""
This validator allows configuration keys to have defaults that are only applied
when specific component(s) are loaded. Supports both single component names and
lists of components.
Args:
key: Configuration key
component: Single component name (str) or list of component names.
For lists, ALL components must be loaded for the default to apply.
default: Default value to use when condition is met
Example:
# Single component
cv.OnlyWith(CONF_MQTT_ID, "mqtt"): cv.declare_id(MQTTComponent)
# Multiple components (all must be loaded)
cv.OnlyWith(CONF_ZIGBEE_ID, ["zigbee", "nrf52"]): cv.use_id(Zigbee)
"""
def __init__(self, key, component: str | list[str], default=None) -> None:
def __init__(self, key, component, default=None):
super().__init__(key)
self._component = component
self._default = vol.default_factory(default)
@property
def default(self) -> Callable[[], typing.Any] | vol.Undefined:
if isinstance(self._component, list):
if all(c in CORE.loaded_integrations for c in self._component):
return self._default
elif self._component in CORE.loaded_integrations:
def default(self):
if self._component in CORE.loaded_integrations:
return self._default
return vol.UNDEFINED

View File

@@ -576,11 +576,10 @@ void Application::yield_with_select_(uint32_t delay_ms) {
// Update fd_set if socket list has changed
if (this->socket_fds_changed_) {
FD_ZERO(&this->base_read_fds_);
// fd bounds are already validated in register_socket_fd() or guaranteed by platform design:
// - ESP32: LwIP guarantees fd < FD_SETSIZE by design (LWIP_SOCKET_OFFSET = FD_SETSIZE - CONFIG_LWIP_MAX_SOCKETS)
// - Other platforms: register_socket_fd() validates fd < FD_SETSIZE
for (int fd : this->socket_fds_) {
FD_SET(fd, &this->base_read_fds_);
if (fd >= 0 && fd < FD_SETSIZE) {
FD_SET(fd, &this->base_read_fds_);
}
}
this->socket_fds_changed_ = false;
}
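One side of this hunk range-checks fd < FD_SETSIZE at every FD_SET, the other relies on validation at registration time. A small sketch of the register-once variant (hypothetical SocketRegistry, POSIX select() headers assumed):

#include <sys/select.h>
#include <cstdio>
#include <vector>

// Validate the fd once at registration so the hot loop can call FD_SET
// without re-checking bounds on every iteration.
class SocketRegistry {
 public:
  bool register_socket_fd(int fd) {
    if (fd < 0 || fd >= FD_SETSIZE) {
      std::fprintf(stderr, "fd %d out of range for select()\n", fd);
      return false;
    }
    this->fds_.push_back(fd);
    this->changed_ = true;
    return true;
  }

  void rebuild_fd_set() {
    if (!this->changed_)
      return;
    FD_ZERO(&this->read_fds_);
    for (int fd : this->fds_)
      FD_SET(fd, &this->read_fds_);  // safe: bounds were checked at registration
    this->changed_ = false;
  }

 protected:
  std::vector<int> fds_;
  fd_set read_fds_{};
  bool changed_{true};
};

int main() {
  SocketRegistry reg;
  reg.register_socket_fd(0);       // stdin, always < FD_SETSIZE
  reg.register_socket_fd(100000);  // rejected up front
  reg.rebuild_fd_set();
}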

View File

@@ -10,7 +10,6 @@
#include "esphome/core/helpers.h"
#include <vector>
#include <forward_list>
namespace esphome {
@@ -269,28 +268,32 @@ template<typename... Ts> class WhileAction : public Action<Ts...> {
void add_then(const std::initializer_list<Action<Ts...> *> &actions) {
this->then_.add_actions(actions);
this->then_.add_action(new LambdaAction<Ts...>([this](Ts... x) {
if (this->num_running_ > 0 && this->condition_->check(x...)) {
if (this->num_running_ > 0 && this->condition_->check_tuple(this->var_)) {
// play again
this->then_.play(x...);
if (this->num_running_ > 0) {
this->then_.play_tuple(this->var_);
}
} else {
// condition false, play next
this->play_next_(x...);
this->play_next_tuple_(this->var_);
}
}));
}
void play_complex(Ts... x) override {
this->num_running_++;
// Store loop parameters
this->var_ = std::make_tuple(x...);
// Initial condition check
if (!this->condition_->check(x...)) {
if (!this->condition_->check_tuple(this->var_)) {
// If new condition check failed, stop loop if running
this->then_.stop();
this->play_next_(x...);
this->play_next_tuple_(this->var_);
return;
}
if (this->num_running_ > 0) {
this->then_.play(x...);
this->then_.play_tuple(this->var_);
}
}
@@ -302,6 +305,7 @@ template<typename... Ts> class WhileAction : public Action<Ts...> {
protected:
Condition<Ts...> *condition_;
ActionList<Ts...> then_;
std::tuple<Ts...> var_{};
};
template<typename... Ts> class RepeatAction : public Action<Ts...> {
@@ -313,7 +317,7 @@ template<typename... Ts> class RepeatAction : public Action<Ts...> {
this->then_.add_action(new LambdaAction<uint32_t, Ts...>([this](uint32_t iteration, Ts... x) {
iteration++;
if (iteration >= this->count_.value(x...)) {
this->play_next_(x...);
this->play_next_tuple_(this->var_);
} else {
this->then_.play(iteration, x...);
}
@@ -322,10 +326,11 @@ template<typename... Ts> class RepeatAction : public Action<Ts...> {
void play_complex(Ts... x) override {
this->num_running_++;
this->var_ = std::make_tuple(x...);
if (this->count_.value(x...) > 0) {
this->then_.play(0, x...);
} else {
this->play_next_(x...);
this->play_next_tuple_(this->var_);
}
}
@@ -336,26 +341,15 @@ template<typename... Ts> class RepeatAction : public Action<Ts...> {
protected:
ActionList<uint32_t, Ts...> then_;
std::tuple<Ts...> var_;
};
/** Wait until a condition is true to continue execution.
*
* Uses queue-based storage to safely handle concurrent executions.
* While concurrent execution from the same trigger is uncommon, it's possible
* (e.g., rapid button presses, high-frequency sensor updates), so we use
* queue-based storage for correctness.
*/
template<typename... Ts> class WaitUntilAction : public Action<Ts...>, public Component {
public:
WaitUntilAction(Condition<Ts...> *condition) : condition_(condition) {}
TEMPLATABLE_VALUE(uint32_t, timeout_value)
void setup() override {
// Start with loop disabled - only enable when there's work to do
this->disable_loop();
}
void play_complex(Ts... x) override {
this->num_running_++;
// Check if we can continue immediately.
@@ -365,14 +359,13 @@ template<typename... Ts> class WaitUntilAction : public Action<Ts...>, public Co
}
return;
}
this->var_ = std::make_tuple(x...);
// Store for later processing
auto now = millis();
auto timeout = this->timeout_value_.optional_value(x...);
this->var_queue_.emplace_front(now, timeout, std::make_tuple(x...));
if (this->timeout_value_.has_value()) {
auto f = std::bind(&WaitUntilAction<Ts...>::play_next_, this, x...);
this->set_timeout("timeout", this->timeout_value_.value(x...), f);
}
// Enable loop now that we have work to do
this->enable_loop();
this->loop();
}
@@ -380,32 +373,13 @@ template<typename... Ts> class WaitUntilAction : public Action<Ts...>, public Co
if (this->num_running_ == 0)
return;
auto now = millis();
this->var_queue_.remove_if([&](auto &queued) {
auto start = std::get<uint32_t>(queued);
auto timeout = std::get<optional<uint32_t>>(queued);
auto &var = std::get<std::tuple<Ts...>>(queued);
auto expired = timeout && (now - start) >= *timeout;
if (!expired && !this->condition_->check_tuple(var)) {
return false;
}
this->play_next_tuple_(var);
return true;
});
// If queue is now empty, disable loop until next play_complex
if (this->var_queue_.empty()) {
this->disable_loop();
if (!this->condition_->check_tuple(this->var_)) {
return;
}
}
void stop() override {
this->var_queue_.clear();
this->disable_loop();
this->cancel_timeout("timeout");
this->play_next_tuple_(this->var_);
}
float get_setup_priority() const override { return setup_priority::DATA; }
@@ -413,9 +387,11 @@ template<typename... Ts> class WaitUntilAction : public Action<Ts...>, public Co
void play(Ts... x) override { /* ignore - see play_complex */
}
void stop() override { this->cancel_timeout("timeout"); }
protected:
Condition<Ts...> *condition_;
std::forward_list<std::tuple<uint32_t, optional<uint32_t>, std::tuple<Ts...>>> var_queue_{};
std::tuple<Ts...> var_{};
};
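The std::tuple<Ts...> var_ members in these hunks stash the trigger arguments so a later loop iteration can replay them into a variadic call. A standalone sketch of that expansion using std::index_sequence, where the ESPHome helpers use their own gens/seq utilities:

#include <cstddef>
#include <cstdio>
#include <initializer_list>
#include <tuple>
#include <utility>

template<typename... Ts> class StoredArgsAction {
 public:
  // Capture the trigger arguments so they can be replayed later.
  void capture(Ts... x) { this->var_ = std::make_tuple(x...); }

  // Replay them into a variadic callee, e.g. the next action in the list.
  void replay() { this->replay_impl_(std::index_sequence_for<Ts...>{}); }

 protected:
  template<std::size_t... S> void replay_impl_(std::index_sequence<S...> /*unused*/) {
    this->play_next_(std::get<S>(this->var_)...);
  }

  void play_next_(Ts... x) {
    // Stand-in for play_next_tuple_ / the downstream action list.
    (void) std::initializer_list<int>{(std::printf("arg: %d\n", (int) x), 0)...};
  }

  std::tuple<Ts...> var_{};
};

int main() {
  StoredArgsAction<int, int> action;
  action.capture(7, 42);
  action.replay();  // prints 7 then 42
}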
template<typename... Ts> class UpdateComponentAction : public Action<Ts...> {

View File

@@ -284,7 +284,6 @@ bool Component::is_ready() const {
(this->component_state_ & COMPONENT_STATE_MASK) == COMPONENT_STATE_LOOP_DONE ||
(this->component_state_ & COMPONENT_STATE_MASK) == COMPONENT_STATE_SETUP;
}
bool Component::is_idle() const { return (this->component_state_ & COMPONENT_STATE_MASK) == COMPONENT_STATE_LOOP_DONE; }
bool Component::can_proceed() { return true; }
bool Component::status_has_warning() const { return this->component_state_ & STATUS_LED_WARNING; }
bool Component::status_has_error() const { return this->component_state_ & STATUS_LED_ERROR; }

View File

@@ -141,14 +141,6 @@ class Component {
*/
bool is_in_loop_state() const;
/** Check if this component is idle.
* Being idle means being in LOOP_DONE state.
* This means the component has completed setup, is not failed, but its loop is currently disabled.
*
* @return True if the component is idle
*/
bool is_idle() const;
/** Mark this component as failed. Any future timeouts/intervals/setup/loop will no longer be called.
*
* This might be useful if a component wants to indicate that a connection to its peripheral failed.

View File

@@ -17,10 +17,6 @@ namespace api {
class APIConnection;
} // namespace api
namespace web_server {
struct UrlMatch;
} // namespace web_server
enum EntityCategory : uint8_t {
ENTITY_CATEGORY_NONE = 0,
ENTITY_CATEGORY_CONFIG = 1,
@@ -120,7 +116,6 @@ class EntityBase {
protected:
friend class api::APIConnection;
friend struct web_server::UrlMatch;
// Get object_id as StringRef when it's static (for API usage)
// Returns empty StringRef if object_id is dynamic (needs allocation)

View File

@@ -316,37 +316,59 @@ optional<uint32_t> HOT Scheduler::next_schedule_in(uint32_t now) {
return 0;
return next_exec - now_64;
}
void Scheduler::full_cleanup_removed_items_() {
// We hold the lock for the entire cleanup operation because:
// 1. We're rebuilding the entire items_ list, so we need exclusive access throughout
// 2. Other threads must see either the old state or the new state, not intermediate states
// 3. The operation is already expensive (O(n)), so lock overhead is negligible
// 4. No operations inside can block or take other locks, so no deadlock risk
LockGuard guard{this->lock_};
std::vector<std::unique_ptr<SchedulerItem>> valid_items;
// Move all non-removed items to valid_items, recycle removed ones
for (auto &item : this->items_) {
if (!is_item_removed_(item.get())) {
valid_items.push_back(std::move(item));
} else {
// Recycle removed items
this->recycle_item_(std::move(item));
}
}
// Replace items_ with the filtered list
this->items_ = std::move(valid_items);
// Rebuild the heap structure since items are no longer in heap order
std::make_heap(this->items_.begin(), this->items_.end(), SchedulerItem::cmp);
this->to_remove_ = 0;
}
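full_cleanup_removed_items_() has to call std::make_heap after filtering because dropping arbitrary elements breaks the heap invariant. A plain-int sketch of that filter-then-reheapify step (no locking or unique_ptr recycling shown):

#include <algorithm>
#include <cstdio>
#include <vector>

int main() {
  // A max-heap of values; pretend the odd ones have been logically removed.
  std::vector<int> heap{9, 7, 8, 1, 3, 5, 4};
  std::make_heap(heap.begin(), heap.end());

  // Filter: keep only the still-valid (even) items. The survivors are no
  // longer in heap order after this.
  std::vector<int> valid;
  for (int v : heap) {
    if (v % 2 == 0)
      valid.push_back(v);
  }
  heap = std::move(valid);

  // Reheapify so pop_heap/push_heap work again.
  std::make_heap(heap.begin(), heap.end());

  while (!heap.empty()) {
    std::pop_heap(heap.begin(), heap.end());
    std::printf("%d\n", heap.back());  // 8, then 4
    heap.pop_back();
  }
}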
void HOT Scheduler::call(uint32_t now) {
#ifndef ESPHOME_THREAD_SINGLE
this->process_defer_queue_(now);
// Process defer queue first to guarantee FIFO execution order for deferred items.
// Previously, defer() used the heap which gave undefined order for equal timestamps,
// causing race conditions on multi-core systems (ESP32, BK7200).
// With the defer queue:
// - Deferred items (delay=0) go directly to defer_queue_ in set_timer_common_
// - Items execute in exact order they were deferred (FIFO guarantee)
// - No deferred items exist in to_add_, so processing order doesn't affect correctness
// Single-core platforms don't use this queue and fall back to the heap-based approach.
//
// Note: Items cancelled via cancel_item_locked_() are marked with remove=true but still
// processed here. They are skipped during execution by should_skip_item_().
// This is intentional - no memory leak occurs.
//
// We use an index (defer_queue_front_) to track the read position instead of calling
// erase() on every pop, which would be O(n). The queue is processed once per loop -
// any items added during processing are left for the next loop iteration.
// Snapshot the queue end point - only process items that existed at loop start
// Items added during processing (by callbacks or other threads) run next loop
// No lock needed: single consumer (main loop), stale read just means we process less this iteration
size_t defer_queue_end = this->defer_queue_.size();
while (this->defer_queue_front_ < defer_queue_end) {
std::unique_ptr<SchedulerItem> item;
{
LockGuard lock(this->lock_);
// SAFETY: Moving out the unique_ptr leaves a nullptr in the vector at defer_queue_front_.
// This is intentional and safe because:
// 1. The vector is only cleaned up by cleanup_defer_queue_locked_() at the end of this function
// 2. Any code iterating defer_queue_ MUST check for nullptr items (see mark_matching_items_removed_
// and has_cancelled_timeout_in_container_ in scheduler.h)
// 3. The lock protects concurrent access, but the nullptr remains until cleanup
item = std::move(this->defer_queue_[this->defer_queue_front_]);
this->defer_queue_front_++;
}
// Execute callback without holding lock to prevent deadlocks
// if the callback tries to call defer() again
if (!this->should_skip_item_(item.get())) {
now = this->execute_item_(item.get(), now);
}
// Recycle the defer item after execution
this->recycle_item_(std::move(item));
}
// If we've consumed all items up to the snapshot point, clean up the dead space
// Single consumer (main loop), so no lock needed for this check
if (this->defer_queue_front_ >= defer_queue_end) {
LockGuard lock(this->lock_);
this->cleanup_defer_queue_locked_();
}
#endif /* not ESPHOME_THREAD_SINGLE */
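The defer-queue comments describe draining a vector with a read index instead of erase() so each pop stays O(1) and callbacks may defer more work mid-drain. A standalone sketch of that pattern (single consumer, locking omitted):

#include <cstddef>
#include <cstdio>
#include <functional>
#include <vector>

class DeferQueue {
 public:
  void defer(std::function<void()> fn) { this->queue_.push_back(std::move(fn)); }

  // Called once per loop: run everything that existed at loop start.
  // Items deferred by the callbacks themselves run on the next call.
  void process() {
    std::size_t end = this->queue_.size();  // snapshot the end point
    while (this->front_ < end) {
      auto fn = std::move(this->queue_[this->front_]);  // leaves an empty slot behind
      this->front_++;
      if (fn)
        fn();  // run without holding references into the container
    }
    // Everything consumed: reclaim the dead slots at the front in one go.
    if (this->front_ >= this->queue_.size()) {
      this->queue_.clear();
      this->front_ = 0;
    }
  }

 protected:
  std::vector<std::function<void()>> queue_;
  std::size_t front_{0};
};

int main() {
  DeferQueue q;
  q.defer([&q] {
    std::printf("first\n");
    q.defer([] { std::printf("runs next loop\n"); });
  });
  q.defer([] { std::printf("second\n"); });
  q.process();  // first, second
  q.process();  // runs next loop
}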
// Convert the fresh timestamp from main loop to 64-bit for scheduler operations
@@ -407,7 +429,30 @@ void HOT Scheduler::call(uint32_t now) {
// If we still have too many cancelled items, do a full cleanup
// This only happens if cancelled items are stuck in the middle/bottom of the heap
if (this->to_remove_ >= MAX_LOGICALLY_DELETED_ITEMS) {
this->full_cleanup_removed_items_();
// We hold the lock for the entire cleanup operation because:
// 1. We're rebuilding the entire items_ list, so we need exclusive access throughout
// 2. Other threads must see either the old state or the new state, not intermediate states
// 3. The operation is already expensive (O(n)), so lock overhead is negligible
// 4. No operations inside can block or take other locks, so no deadlock risk
LockGuard guard{this->lock_};
std::vector<std::unique_ptr<SchedulerItem>> valid_items;
// Move all non-removed items to valid_items, recycle removed ones
for (auto &item : this->items_) {
if (!is_item_removed_(item.get())) {
valid_items.push_back(std::move(item));
} else {
// Recycle removed items
this->recycle_item_(std::move(item));
}
}
// Replace items_ with the filtered list
this->items_ = std::move(valid_items);
// Rebuild the heap structure since items are no longer in heap order
std::make_heap(this->items_.begin(), this->items_.end(), SchedulerItem::cmp);
this->to_remove_ = 0;
}
while (!this->items_.empty()) {
// Don't copy-by value yet

View File

@@ -263,65 +263,7 @@ class Scheduler {
// Helper to recycle a SchedulerItem
void recycle_item_(std::unique_ptr<SchedulerItem> item);
// Helper to perform full cleanup when too many items are cancelled
void full_cleanup_removed_items_();
#ifndef ESPHOME_THREAD_SINGLE
// Helper to process defer queue - inline for performance in hot path
inline void process_defer_queue_(uint32_t &now) {
// Process defer queue first to guarantee FIFO execution order for deferred items.
// Previously, defer() used the heap which gave undefined order for equal timestamps,
// causing race conditions on multi-core systems (ESP32, BK7200).
// With the defer queue:
// - Deferred items (delay=0) go directly to defer_queue_ in set_timer_common_
// - Items execute in exact order they were deferred (FIFO guarantee)
// - No deferred items exist in to_add_, so processing order doesn't affect correctness
// Single-core platforms don't use this queue and fall back to the heap-based approach.
//
// Note: Items cancelled via cancel_item_locked_() are marked with remove=true but still
// processed here. They are skipped during execution by should_skip_item_().
// This is intentional - no memory leak occurs.
//
// We use an index (defer_queue_front_) to track the read position instead of calling
// erase() on every pop, which would be O(n). The queue is processed once per loop -
// any items added during processing are left for the next loop iteration.
// Snapshot the queue end point - only process items that existed at loop start
// Items added during processing (by callbacks or other threads) run next loop
// No lock needed: single consumer (main loop), stale read just means we process less this iteration
size_t defer_queue_end = this->defer_queue_.size();
while (this->defer_queue_front_ < defer_queue_end) {
std::unique_ptr<SchedulerItem> item;
{
LockGuard lock(this->lock_);
// SAFETY: Moving out the unique_ptr leaves a nullptr in the vector at defer_queue_front_.
// This is intentional and safe because:
// 1. The vector is only cleaned up by cleanup_defer_queue_locked_() at the end of this function
// 2. Any code iterating defer_queue_ MUST check for nullptr items (see mark_matching_items_removed_
// and has_cancelled_timeout_in_container_ in scheduler.h)
// 3. The lock protects concurrent access, but the nullptr remains until cleanup
item = std::move(this->defer_queue_[this->defer_queue_front_]);
this->defer_queue_front_++;
}
// Execute callback without holding lock to prevent deadlocks
// if the callback tries to call defer() again
if (!this->should_skip_item_(item.get())) {
now = this->execute_item_(item.get(), now);
}
// Recycle the defer item after execution
this->recycle_item_(std::move(item));
}
// If we've consumed all items up to the snapshot point, clean up the dead space
// Single consumer (main loop), so no lock needed for this check
if (this->defer_queue_front_ >= defer_queue_end) {
LockGuard lock(this->lock_);
this->cleanup_defer_queue_locked_();
}
}
// Helper to cleanup defer_queue_ after processing
// IMPORTANT: Caller must hold the scheduler lock before calling this function.
inline void cleanup_defer_queue_locked_() {

View File

@@ -350,7 +350,7 @@ def safe_exp(obj: SafeExpType) -> Expression:
return IntLiteral(int(obj.total_seconds))
if isinstance(obj, TimePeriodMinutes):
return IntLiteral(int(obj.total_minutes))
if isinstance(obj, (tuple, list)):
if isinstance(obj, tuple | list):
return ArrayInitializer(*[safe_exp(o) for o in obj])
if obj is bool:
return bool_

View File

@@ -133,6 +133,7 @@ ignore = [
"PLW1641", # Object does not implement `__hash__` method
"PLR2004", # Magic value used in comparison, consider replacing {value} with a constant variable
"PLW2901", # Outer {outer_kind} variable {name} overwritten by inner {inner_kind} target
"UP038", # https://github.com/astral-sh/ruff/issues/7871 https://github.com/astral-sh/ruff/pull/16681
]
[tool.ruff.lint.isort]

View File

@@ -1,6 +1,6 @@
pylint==4.0.2
flake8==7.3.0 # also change in .pre-commit-config.yaml when updating
ruff==0.14.3 # also change in .pre-commit-config.yaml when updating
ruff==0.14.2 # also change in .pre-commit-config.yaml when updating
pyupgrade==3.21.0 # also change in .pre-commit-config.yaml when updating
pre-commit

View File

@@ -2,7 +2,6 @@ from __future__ import annotations
from collections.abc import Callable
from functools import cache
import hashlib
import json
import os
import os.path
@@ -53,10 +52,6 @@ BASE_BUS_COMPONENTS = {
"remote_receiver",
}
# Cache version for components graph
# Increment this when the cache format or graph building logic changes
COMPONENTS_GRAPH_CACHE_VERSION = 1
def parse_list_components_output(output: str) -> list[str]:
"""Parse the output from list-components.py script.
@@ -757,71 +752,20 @@ def resolve_auto_load(
return auto_load()
@cache
def get_components_graph_cache_key() -> str:
"""Generate cache key based on all component Python file hashes.
Uses git ls-files with sha1 hashes to generate a stable cache key that works
across different machines and CI runs. This is faster and more reliable than
reading file contents or using modification times.
Returns:
SHA256 hex string uniquely identifying the current component state
"""
# Use git ls-files -s to get sha1 hashes of all component Python files
# Format: <mode> <sha1> <stage> <path>
# This is fast and works consistently across CI and local dev
# We hash all .py files because AUTO_LOAD, DEPENDENCIES, etc. can be defined
# in any Python file, not just __init__.py
cmd = ["git", "ls-files", "-s", "esphome/components/**/*.py"]
result = subprocess.run(
cmd, capture_output=True, text=True, check=True, cwd=root_path, close_fds=False
)
# Hash the git output (includes file paths and their sha1 hashes)
# This changes only when component Python files actually change
hasher = hashlib.sha256()
hasher.update(result.stdout.encode())
return hasher.hexdigest()
def create_components_graph() -> dict[str, list[str]]:
"""Create a graph of component dependencies (cached).
This function is expensive (5-6 seconds) because it imports all ESPHome components
to extract their DEPENDENCIES and AUTO_LOAD metadata. The result is cached based
on component file modification times, so unchanged components don't trigger a rebuild.
"""Create a graph of component dependencies.
Returns:
Dictionary mapping parent components to their children (dependencies)
"""
# Check cache first - use fixed filename since GitHub Actions cache doesn't support wildcards
cache_file = Path(temp_folder) / "components_graph.json"
if cache_file.exists():
try:
cached_data = json.loads(cache_file.read_text())
except (OSError, json.JSONDecodeError):
# Cache file corrupted or unreadable, rebuild
pass
else:
# Verify cache version matches
if cached_data.get("_version") == COMPONENTS_GRAPH_CACHE_VERSION:
# Verify cache is for current component state
cache_key = get_components_graph_cache_key()
if cached_data.get("_cache_key") == cache_key:
return cached_data.get("graph", {})
# Cache key mismatch - stale cache, rebuild
# Cache version mismatch - incompatible format, rebuild
from pathlib import Path
from esphome import const
from esphome.core import CORE
from esphome.loader import ComponentManifest, get_component, get_platform
# The root directory of the repo
root = Path(root_path)
root = Path(__file__).parent.parent
components_dir = root / ESPHOME_COMPONENTS_PATH
# Fake some directory so that get_component works
CORE.config_path = root
@@ -898,15 +842,6 @@ def create_components_graph() -> dict[str, list[str]]:
# restore config
CORE.data[KEY_CORE] = TARGET_CONFIGURATIONS[0]
# Save to cache with version and cache key for validation
cache_data = {
"_version": COMPONENTS_GRAPH_CACHE_VERSION,
"_cache_key": get_components_graph_cache_key(),
"graph": components_graph,
}
cache_file.parent.mkdir(exist_ok=True)
cache_file.write_text(json.dumps(cache_data))
return components_graph

View File

@@ -87,99 +87,3 @@ api:
- float_arr.size()
- string_arr[0].c_str()
- string_arr.size()
# Test ContinuationAction (IfAction with then/else branches)
- action: test_if_action
variables:
condition: bool
value: int
then:
- if:
condition:
lambda: 'return condition;'
then:
- logger.log:
format: "Condition true, value: %d"
args: ['value']
else:
- logger.log:
format: "Condition false, value: %d"
args: ['value']
- logger.log: "After if/else"
# Test nested IfAction (multiple ContinuationAction instances)
- action: test_nested_if
variables:
outer: bool
inner: bool
then:
- if:
condition:
lambda: 'return outer;'
then:
- if:
condition:
lambda: 'return inner;'
then:
- logger.log: "Both true"
else:
- logger.log: "Outer true, inner false"
else:
- logger.log: "Outer false"
- logger.log: "After nested if"
# Test WhileLoopContinuation (WhileAction)
- action: test_while_action
variables:
max_count: int
then:
- lambda: 'id(api_continuation_test_counter) = 0;'
- while:
condition:
lambda: 'return id(api_continuation_test_counter) < max_count;'
then:
- logger.log:
format: "While loop iteration: %d"
args: ['id(api_continuation_test_counter)']
- lambda: 'id(api_continuation_test_counter)++;'
- logger.log: "After while loop"
# Test RepeatLoopContinuation (RepeatAction)
- action: test_repeat_action
variables:
count: int
then:
- repeat:
count: !lambda 'return count;'
then:
- logger.log:
format: "Repeat iteration: %d"
args: ['iteration']
- logger.log: "After repeat"
# Test combined continuations (if + while + repeat)
- action: test_combined_continuations
variables:
do_loop: bool
loop_count: int
then:
- if:
condition:
lambda: 'return do_loop;'
then:
- repeat:
count: !lambda 'return loop_count;'
then:
- lambda: 'id(api_continuation_test_counter) = iteration;'
- while:
condition:
lambda: 'return id(api_continuation_test_counter) > 0;'
then:
- logger.log:
format: "Combined: repeat=%d, while=%d"
args: ['iteration', 'id(api_continuation_test_counter)']
- lambda: 'id(api_continuation_test_counter)--;'
else:
- logger.log: "Skipped loops"
- logger.log: "After combined test"
globals:
- id: api_continuation_test_counter
type: int
restore_value: false
initial_value: '0'

View File

@@ -10,11 +10,7 @@ esphome:
on_shutdown:
logger.log: on_shutdown
on_loop:
if:
condition:
component.is_idle: binary_sensor_id
then:
logger.log: on_loop - sensor idle
logger.log: on_loop
compile_process_limit: 1
min_version: "2025.1"
name_add_mac_suffix: true
@@ -38,6 +34,5 @@ esphome:
binary_sensor:
- platform: template
id: binary_sensor_id
name: Other device sensor
device_id: other_device

View File

@@ -21,12 +21,12 @@ font:
id: roboto_greek
size: 20
glyphs: ["\u0300", "\u00C5", "\U000000C7"]
- file: "https://media.esphome.io/tests/fonts/Monocraft.ttf"
- file: "https://github.com/IdreesInc/Monocraft/releases/download/v3.0/Monocraft.ttf"
id: monocraft
size: 20
- file:
type: web
url: "https://media.esphome.io/tests/fonts/Monocraft.ttf"
url: "https://github.com/IdreesInc/Monocraft/releases/download/v3.0/Monocraft.ttf"
id: monocraft2
size: 24
- file: $component_dir/Monocraft.ttf

View File

@@ -21,12 +21,12 @@ font:
id: roboto_greek
size: 20
glyphs: ["\u0300", "\u00C5", "\U000000C7"]
- file: "https://media.esphome.io/tests/fonts/Monocraft.ttf"
- file: "https://github.com/IdreesInc/Monocraft/releases/download/v3.0/Monocraft.ttf"
id: monocraft
size: 20
- file:
type: web
url: "https://media.esphome.io/tests/fonts/Monocraft.ttf"
url: "https://github.com/IdreesInc/Monocraft/releases/download/v3.0/Monocraft.ttf"
id: monocraft2
size: 24
- file: $component_dir/Monocraft.ttf

View File

@@ -50,16 +50,16 @@ image:
transparency: opaque
- id: web_svg_image
file: https://media.esphome.io/logo/logo.svg
file: https://raw.githubusercontent.com/esphome/esphome-docs/a62d7ab193c1a464ed791670170c7d518189109b/images/logo.svg
resize: 256x48
type: BINARY
transparency: chroma_key
- id: web_tiff_image
file: https://media.esphome.io/tests/images/SIPI_Jelly_Beans_4.1.07.tiff
file: https://upload.wikimedia.org/wikipedia/commons/b/b6/SIPI_Jelly_Beans_4.1.07.tiff
type: RGB
resize: 48x48
- id: web_redirect_image
file: https://media.esphome.io/logo/logo.png
file: https://avatars.githubusercontent.com/u/3060199?s=48&v=4
type: RGB
resize: 48x48
- id: mdi_alert

View File

@@ -1,5 +1,6 @@
display:
- platform: sdl
id: image_display
auto_clear_enabled: false
dimensions:
width: 480

View File

@@ -14,14 +14,12 @@ interval:
// Test parse_json
bool parse_ok = esphome::json::parse_json(json_str, [](JsonObject root) {
if (root["sensor"].is<const char*>() && root["value"].is<float>()) {
if (root.containsKey("sensor") && root.containsKey("value")) {
const char* sensor = root["sensor"];
float value = root["value"];
ESP_LOGD("test", "Parsed: sensor=%s, value=%.1f", sensor, value);
return true;
} else {
ESP_LOGD("test", "Parsed JSON missing required keys");
return false;
}
});
ESP_LOGD("test", "Parse result (JSON syntax only): %s", parse_ok ? "success" : "failed");

View File

@@ -68,13 +68,5 @@ lvgl:
enter_button: pushbutton
group: general
initial_focus: lv_roller
on_draw_start:
- logger.log: draw started
on_draw_end:
- logger.log: draw ended
- lvgl.pause:
- component.update: tft_display
- delay: 60s
- lvgl.resume:
<<: !include common.yaml

View File

@@ -1,2 +0,0 @@
network:
enable_ipv6: true

View File

@@ -1,3 +0,0 @@
nrf52:
# it is not correct bootloader for the board
bootloader: adafruit_nrf52_sd140_v6

View File

@@ -13,11 +13,14 @@ display:
binary_sensor:
- platform: sdl
sdl_id: sdl_display
id: key_up
key: SDLK_UP
key: SDLK_a
- platform: sdl
sdl_id: sdl_display
id: key_down
key: SDLK_DOWN
key: SDLK_d
- platform: sdl
sdl_id: sdl_display
id: key_enter
key: SDLK_RETURN
key: SDLK_s

View File

@@ -1,29 +0,0 @@
esphome:
name: test-web-server-idf
esp32:
board: esp32dev
framework:
type: esp-idf
network:
# Add some entities to test SSE event formatting
sensor:
- platform: template
name: "Test Sensor"
id: test_sensor
update_interval: 60s
lambda: "return 42.5;"
binary_sensor:
- platform: template
name: "Test Binary Sensor"
id: test_binary_sensor
lambda: "return true;"
switch:
- platform: template
name: "Test Switch"
id: test_switch
optimistic: true

View File

@@ -1,3 +0,0 @@
<<: !include common.yaml
web_server:

View File

@@ -1,105 +0,0 @@
esphome:
name: action-concurrent-reentry
on_boot:
- priority: -100
then:
- repeat:
count: 5
then:
- lambda: id(handler_wait_until)->execute(id(global_counter));
- lambda: id(handler_repeat)->execute(id(global_counter));
- lambda: id(handler_while)->execute(id(global_counter));
- lambda: id(handler_script_wait)->execute(id(global_counter));
- delay: 50ms
- lambda: id(global_counter)++;
- delay: 50ms
host:
api:
globals:
- id: global_counter
type: int
script:
- id: handler_wait_until
mode: parallel
parameters:
arg: int
then:
- wait_until:
condition:
lambda: return id(global_counter) == 5;
- logger.log:
format: "AFTER wait_until ARG %d"
args:
- arg
- id: handler_script_wait
mode: parallel
parameters:
arg: int
then:
- script.wait: handler_wait_until
- logger.log:
format: "AFTER script.wait ARG %d"
args:
- arg
- id: handler_repeat
mode: parallel
parameters:
arg: int
then:
- repeat:
count: 3
then:
- logger.log:
format: "IN repeat %d ARG %d"
args:
- iteration
- arg
- delay: 100ms
- logger.log:
format: "AFTER repeat ARG %d"
args:
- arg
- id: handler_while
mode: parallel
parameters:
arg: int
then:
- while:
condition:
lambda: return id(global_counter) != 5;
then:
- logger.log:
format: "IN while ARG %d"
args:
- arg
- delay: 100ms
- logger.log:
format: "AFTER while ARG %d"
args:
- arg
logger:
level: DEBUG

View File

@@ -1,130 +0,0 @@
esphome:
name: test-automation-wait-actions
host:
api:
actions:
# Test 1: Trigger wait_until automation 5 times rapidly
- action: test_wait_until
then:
- logger.log: "=== TEST 1: Triggering wait_until automation 5 times ==="
# Publish 5 different values to trigger the on_value automation 5 times
- sensor.template.publish:
id: wait_until_sensor
state: 1
- sensor.template.publish:
id: wait_until_sensor
state: 2
- sensor.template.publish:
id: wait_until_sensor
state: 3
- sensor.template.publish:
id: wait_until_sensor
state: 4
- sensor.template.publish:
id: wait_until_sensor
state: 5
# Wait then satisfy the condition so all 5 waiting actions complete
- delay: 100ms
- globals.set:
id: test_flag
value: 'true'
# Test 2: Trigger script.wait automation 5 times rapidly
- action: test_script_wait
then:
- logger.log: "=== TEST 2: Triggering script.wait automation 5 times ==="
# Start a long-running script
- script.execute: blocking_script
# Publish 5 different values to trigger the on_value automation 5 times
- sensor.template.publish:
id: script_wait_sensor
state: 1
- sensor.template.publish:
id: script_wait_sensor
state: 2
- sensor.template.publish:
id: script_wait_sensor
state: 3
- sensor.template.publish:
id: script_wait_sensor
state: 4
- sensor.template.publish:
id: script_wait_sensor
state: 5
# Test 3: Trigger wait_until timeout automation 5 times rapidly
- action: test_wait_timeout
then:
- logger.log: "=== TEST 3: Triggering timeout automation 5 times ==="
# Publish 5 different values (condition will never be true, all will timeout)
- sensor.template.publish:
id: timeout_sensor
state: 1
- sensor.template.publish:
id: timeout_sensor
state: 2
- sensor.template.publish:
id: timeout_sensor
state: 3
- sensor.template.publish:
id: timeout_sensor
state: 4
- sensor.template.publish:
id: timeout_sensor
state: 5
logger:
level: DEBUG
globals:
- id: test_flag
type: bool
restore_value: false
initial_value: 'false'
- id: timeout_flag
type: bool
restore_value: false
initial_value: 'false'
# Sensors with wait_until/script.wait in their on_value automations
sensor:
# Test 1: on_value automation with wait_until
- platform: template
id: wait_until_sensor
on_value:
# This wait_until will be hit 5 times before any complete
- wait_until:
condition:
lambda: return id(test_flag);
- logger.log: "wait_until automation completed"
# Test 2: on_value automation with script.wait
- platform: template
id: script_wait_sensor
on_value:
# This script.wait will be hit 5 times before any complete
- script.wait: blocking_script
- logger.log: "script.wait automation completed"
# Test 3: on_value automation with wait_until timeout
- platform: template
id: timeout_sensor
on_value:
# This wait_until will be hit 5 times, all will timeout
- wait_until:
condition:
lambda: return id(timeout_flag);
timeout: 200ms
- logger.log: "timeout automation completed"
script:
# Blocking script for script.wait test
- id: blocking_script
mode: single
then:
- logger.log: "Blocking script: START"
- delay: 200ms
- logger.log: "Blocking script: END"

View File

@@ -1,174 +0,0 @@
esphome:
name: test-continuation-actions
host:
api:
actions:
# Test 1: IfAction with ContinuationAction (then/else branches)
- action: test_if_action
variables:
condition: bool
value: int
then:
- logger.log:
format: "Test if: condition=%s, value=%d"
args: ['YESNO(condition)', 'value']
- if:
condition:
lambda: 'return condition;'
then:
- logger.log:
format: "if-then executed: value=%d"
args: ['value']
else:
- logger.log:
format: "if-else executed: value=%d"
args: ['value']
- logger.log: "if completed"
# Test 2: Nested IfAction (multiple ContinuationAction instances)
- action: test_nested_if
variables:
outer: bool
inner: bool
then:
- logger.log:
format: "Test nested if: outer=%s, inner=%s"
args: ['YESNO(outer)', 'YESNO(inner)']
- if:
condition:
lambda: 'return outer;'
then:
- if:
condition:
lambda: 'return inner;'
then:
- logger.log: "nested-both-true"
else:
- logger.log: "nested-outer-true-inner-false"
else:
- logger.log: "nested-outer-false"
- logger.log: "nested if completed"
# Test 3: WhileAction with WhileLoopContinuation
- action: test_while_action
variables:
max_count: int
then:
- logger.log:
format: "Test while: max_count=%d"
args: ['max_count']
- globals.set:
id: continuation_test_counter
value: !lambda 'return 0;'
- while:
condition:
lambda: 'return id(continuation_test_counter) < max_count;'
then:
- logger.log:
format: "while-iteration-%d"
args: ['id(continuation_test_counter)']
- globals.set:
id: continuation_test_counter
value: !lambda 'return id(continuation_test_counter) + 1;'
- logger.log: "while completed"
# Test 4: RepeatAction with RepeatLoopContinuation
- action: test_repeat_action
variables:
count: int
then:
- logger.log:
format: "Test repeat: count=%d"
args: ['count']
- repeat:
count: !lambda 'return count;'
then:
- logger.log:
format: "repeat-iteration-%d"
args: ['iteration']
- logger.log: "repeat completed"
# Test 5: Combined continuations (if + while + repeat)
- action: test_combined
variables:
do_loop: bool
loop_count: int
then:
- logger.log:
format: "Test combined: do_loop=%s, loop_count=%d"
args: ['YESNO(do_loop)', 'loop_count']
- if:
condition:
lambda: 'return do_loop;'
then:
- repeat:
count: !lambda 'return loop_count;'
then:
- globals.set:
id: continuation_test_counter
value: !lambda 'return iteration;'
- while:
condition:
lambda: 'return id(continuation_test_counter) > 0;'
then:
- logger.log:
format: "combined-repeat%d-while%d"
args: ['iteration', 'id(continuation_test_counter)']
- globals.set:
id: continuation_test_counter
value: !lambda 'return id(continuation_test_counter) - 1;'
else:
- logger.log: "combined-skipped"
- logger.log: "combined completed"
# Test 6: Rapid triggers to verify memory efficiency
- action: test_rapid_if
then:
- logger.log: "=== Rapid if test start ==="
- sensor.template.publish:
id: rapid_sensor
state: 1
- sensor.template.publish:
id: rapid_sensor
state: 2
- sensor.template.publish:
id: rapid_sensor
state: 3
- sensor.template.publish:
id: rapid_sensor
state: 4
- sensor.template.publish:
id: rapid_sensor
state: 5
- logger.log: "=== Rapid if test published 5 values ==="
logger:
level: DEBUG
globals:
- id: continuation_test_counter
type: int
restore_value: false
initial_value: '0'
# Sensor to test rapid automation triggers with if/else (ContinuationAction)
sensor:
- platform: template
id: rapid_sensor
on_value:
- if:
condition:
lambda: 'return x > 2;'
then:
- logger.log:
format: "rapid-if-then: value=%d"
args: ['(int)x']
else:
- logger.log:
format: "rapid-if-else: value=%d"
args: ['(int)x']
- logger.log:
format: "rapid-if-completed: value=%d"
args: ['(int)x']

View File

@@ -1,92 +0,0 @@
"""Integration test for API conditional memory optimization with triggers and services."""
from __future__ import annotations
import asyncio
import collections
import re
import pytest
from .types import APIClientConnectedFactory, RunCompiledFunction
@pytest.mark.asyncio
async def test_action_concurrent_reentry(
yaml_config: str,
run_compiled: RunCompiledFunction,
api_client_connected: APIClientConnectedFactory,
) -> None:
"""
This test runs a script in parallel with varying arguments and verifies that
each script keeps its original argument throughout its execution.
"""
test_complete = asyncio.Event()
expected = {0, 1, 2, 3, 4}
# Patterns to match in logs
after_wait_until_pattern = re.compile(r"AFTER wait_until ARG (\d+)")
after_script_wait_pattern = re.compile(r"AFTER script\.wait ARG (\d+)")
after_repeat_pattern = re.compile(r"AFTER repeat ARG (\d+)")
in_repeat_pattern = re.compile(r"IN repeat (\d+) ARG (\d+)")
after_while_pattern = re.compile(r"AFTER while ARG (\d+)")
in_while_pattern = re.compile(r"IN while ARG (\d+)")
after_wait_until_args = []
after_script_wait_args = []
after_while_args = []
in_while_args = []
after_repeat_args = []
in_repeat_args = collections.defaultdict(list)
def check_output(line: str) -> None:
"""Check log output for expected messages."""
if test_complete.is_set():
return
if mo := after_wait_until_pattern.search(line):
after_wait_until_args.append(int(mo.group(1)))
elif mo := after_script_wait_pattern.search(line):
after_script_wait_args.append(int(mo.group(1)))
elif mo := in_while_pattern.search(line):
in_while_args.append(int(mo.group(1)))
elif mo := after_while_pattern.search(line):
after_while_args.append(int(mo.group(1)))
elif mo := in_repeat_pattern.search(line):
in_repeat_args[int(mo.group(1))].append(int(mo.group(2)))
elif mo := after_repeat_pattern.search(line):
after_repeat_args.append(int(mo.group(1)))
if len(after_repeat_args) == len(expected):
test_complete.set()
# Run with log monitoring
async with (
run_compiled(yaml_config, line_callback=check_output),
api_client_connected() as client,
):
# Verify device info
device_info = await client.device_info()
assert device_info is not None
assert device_info.name == "action-concurrent-reentry"
# Wait for tests to complete with timeout
try:
await asyncio.wait_for(test_complete.wait(), timeout=8.0)
except TimeoutError:
pytest.fail("test timed out")
# order may change, but all args must be present
for args in in_repeat_args.values():
assert set(args) == expected
assert set(in_repeat_args.keys()) == {0, 1, 2}
assert set(after_wait_until_args) == expected, after_wait_until_args
assert set(after_script_wait_args) == expected, after_script_wait_args
assert set(after_repeat_args) == expected, after_repeat_args
assert set(after_while_args) == expected, after_while_args
assert dict(collections.Counter(in_while_args)) == {
0: 5,
1: 4,
2: 3,
3: 2,
4: 1,
}, in_while_args
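
The hard-coded histogram asserted above follows from the YAML earlier in this diff: the handler_while instance started with arg i begins when the global counter equals i and logs roughly once per 100 ms tick until the counter reaches 5, so it is expected to log about 5 - i times. A tiny illustrative check of that expectation (editor's sketch, not part of the deleted test file):

expected_in_while = {arg: 5 - arg for arg in range(5)}
assert expected_in_while == {0: 5, 1: 4, 2: 3, 3: 2, 4: 1}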

View File

@@ -1,104 +0,0 @@
"""Test concurrent execution of wait_until and script.wait in direct automation actions."""
from __future__ import annotations
import asyncio
import re
import pytest
from .types import APIClientConnectedFactory, RunCompiledFunction
@pytest.mark.asyncio
async def test_automation_wait_actions(
yaml_config: str,
run_compiled: RunCompiledFunction,
api_client_connected: APIClientConnectedFactory,
) -> None:
"""
Test that wait_until and script.wait correctly handle concurrent executions
when automation actions (not scripts) are triggered multiple times rapidly.
This tests sensor.on_value automations being triggered 5 times before any complete.
"""
loop = asyncio.get_running_loop()
# Track completion counts
test_results = {
"wait_until": 0,
"script_wait": 0,
"wait_until_timeout": 0,
}
# Patterns for log messages
wait_until_complete = re.compile(r"wait_until automation completed")
script_wait_complete = re.compile(r"script\.wait automation completed")
timeout_complete = re.compile(r"timeout automation completed")
# Test completion futures
test1_complete = loop.create_future()
test2_complete = loop.create_future()
test3_complete = loop.create_future()
def check_output(line: str) -> None:
"""Check log output for completion messages."""
# Test 1: wait_until concurrent execution
if wait_until_complete.search(line):
test_results["wait_until"] += 1
if test_results["wait_until"] == 5 and not test1_complete.done():
test1_complete.set_result(True)
# Test 2: script.wait concurrent execution
if script_wait_complete.search(line):
test_results["script_wait"] += 1
if test_results["script_wait"] == 5 and not test2_complete.done():
test2_complete.set_result(True)
# Test 3: wait_until with timeout
if timeout_complete.search(line):
test_results["wait_until_timeout"] += 1
if test_results["wait_until_timeout"] == 5 and not test3_complete.done():
test3_complete.set_result(True)
async with (
run_compiled(yaml_config, line_callback=check_output),
api_client_connected() as client,
):
# Get services
_, services = await client.list_entities_services()
# Test 1: wait_until in automation - trigger 5 times rapidly
test_service = next((s for s in services if s.name == "test_wait_until"), None)
assert test_service is not None, "test_wait_until service not found"
client.execute_service(test_service, {})
await asyncio.wait_for(test1_complete, timeout=3.0)
# Verify Test 1: All 5 triggers should complete
assert test_results["wait_until"] == 5, (
f"Test 1: Expected 5 wait_until completions, got {test_results['wait_until']}"
)
# Test 2: script.wait in automation - trigger 5 times rapidly
test_service = next((s for s in services if s.name == "test_script_wait"), None)
assert test_service is not None, "test_script_wait service not found"
client.execute_service(test_service, {})
await asyncio.wait_for(test2_complete, timeout=3.0)
# Verify Test 2: All 5 triggers should complete
assert test_results["script_wait"] == 5, (
f"Test 2: Expected 5 script.wait completions, got {test_results['script_wait']}"
)
# Test 3: wait_until with timeout in automation - trigger 5 times rapidly
test_service = next(
(s for s in services if s.name == "test_wait_timeout"), None
)
assert test_service is not None, "test_wait_timeout service not found"
client.execute_service(test_service, {})
await asyncio.wait_for(test3_complete, timeout=3.0)
# Verify Test 3: All 5 triggers should timeout and complete
assert test_results["wait_until_timeout"] == 5, (
f"Test 3: Expected 5 timeout completions, got {test_results['wait_until_timeout']}"
)

View File

@@ -1,235 +0,0 @@
"""Test continuation actions (ContinuationAction, WhileLoopContinuation, RepeatLoopContinuation)."""
from __future__ import annotations
import asyncio
import re
import pytest
from .types import APIClientConnectedFactory, RunCompiledFunction
@pytest.mark.asyncio
async def test_continuation_actions(
yaml_config: str,
run_compiled: RunCompiledFunction,
api_client_connected: APIClientConnectedFactory,
) -> None:
"""
Test that continuation actions work correctly for if/while/repeat.
These continuation classes replace LambdaAction with simple parent pointers,
saving 32-36 bytes per instance and eliminating std::function overhead.
"""
loop = asyncio.get_running_loop()
# Track test completions
test_results = {
"if_then": False,
"if_else": False,
"if_complete": False,
"nested_both_true": False,
"nested_outer_true_inner_false": False,
"nested_outer_false": False,
"nested_complete": False,
"while_iterations": 0,
"while_complete": False,
"repeat_iterations": 0,
"repeat_complete": False,
"combined_iterations": 0,
"combined_complete": False,
"rapid_then": 0,
"rapid_else": 0,
"rapid_complete": 0,
}
# Patterns for log messages
if_then_pattern = re.compile(r"if-then executed: value=(\d+)")
if_else_pattern = re.compile(r"if-else executed: value=(\d+)")
if_complete_pattern = re.compile(r"if completed")
nested_both_true_pattern = re.compile(r"nested-both-true")
nested_outer_true_inner_false_pattern = re.compile(r"nested-outer-true-inner-false")
nested_outer_false_pattern = re.compile(r"nested-outer-false")
nested_complete_pattern = re.compile(r"nested if completed")
while_iteration_pattern = re.compile(r"while-iteration-(\d+)")
while_complete_pattern = re.compile(r"while completed")
repeat_iteration_pattern = re.compile(r"repeat-iteration-(\d+)")
repeat_complete_pattern = re.compile(r"repeat completed")
combined_pattern = re.compile(r"combined-repeat(\d+)-while(\d+)")
combined_complete_pattern = re.compile(r"combined completed")
rapid_then_pattern = re.compile(r"rapid-if-then: value=(\d+)")
rapid_else_pattern = re.compile(r"rapid-if-else: value=(\d+)")
rapid_complete_pattern = re.compile(r"rapid-if-completed: value=(\d+)")
# Test completion futures
test1_complete = loop.create_future() # if action
test2_complete = loop.create_future() # nested if
test3_complete = loop.create_future() # while
test4_complete = loop.create_future() # repeat
test5_complete = loop.create_future() # combined
test6_complete = loop.create_future() # rapid
def check_output(line: str) -> None:
"""Check log output for test messages."""
# Test 1: IfAction
if if_then_pattern.search(line):
test_results["if_then"] = True
if if_else_pattern.search(line):
test_results["if_else"] = True
if if_complete_pattern.search(line):
test_results["if_complete"] = True
if not test1_complete.done():
test1_complete.set_result(True)
# Test 2: Nested IfAction
if nested_both_true_pattern.search(line):
test_results["nested_both_true"] = True
if nested_outer_true_inner_false_pattern.search(line):
test_results["nested_outer_true_inner_false"] = True
if nested_outer_false_pattern.search(line):
test_results["nested_outer_false"] = True
if nested_complete_pattern.search(line):
test_results["nested_complete"] = True
if not test2_complete.done():
test2_complete.set_result(True)
# Test 3: WhileAction
if match := while_iteration_pattern.search(line):
test_results["while_iterations"] = max(
test_results["while_iterations"], int(match.group(1)) + 1
)
if while_complete_pattern.search(line):
test_results["while_complete"] = True
if not test3_complete.done():
test3_complete.set_result(True)
# Test 4: RepeatAction
if match := repeat_iteration_pattern.search(line):
test_results["repeat_iterations"] = max(
test_results["repeat_iterations"], int(match.group(1)) + 1
)
if repeat_complete_pattern.search(line):
test_results["repeat_complete"] = True
if not test4_complete.done():
test4_complete.set_result(True)
# Test 5: Combined
if combined_pattern.search(line):
test_results["combined_iterations"] += 1
if combined_complete_pattern.search(line):
test_results["combined_complete"] = True
if not test5_complete.done():
test5_complete.set_result(True)
# Test 6: Rapid triggers
if rapid_then_pattern.search(line):
test_results["rapid_then"] += 1
if rapid_else_pattern.search(line):
test_results["rapid_else"] += 1
if rapid_complete_pattern.search(line):
test_results["rapid_complete"] += 1
if test_results["rapid_complete"] == 5 and not test6_complete.done():
test6_complete.set_result(True)
async with (
run_compiled(yaml_config, line_callback=check_output),
api_client_connected() as client,
):
# Get services
_, services = await client.list_entities_services()
# Test 1: IfAction with then branch
test_service = next((s for s in services if s.name == "test_if_action"), None)
assert test_service is not None, "test_if_action service not found"
client.execute_service(test_service, {"condition": True, "value": 42})
await asyncio.wait_for(test1_complete, timeout=2.0)
assert test_results["if_then"], "IfAction then branch not executed"
assert test_results["if_complete"], "IfAction did not complete"
# Test 1b: IfAction with else branch
test1_complete = loop.create_future()
test_results["if_complete"] = False
client.execute_service(test_service, {"condition": False, "value": 99})
await asyncio.wait_for(test1_complete, timeout=2.0)
assert test_results["if_else"], "IfAction else branch not executed"
assert test_results["if_complete"], "IfAction did not complete"
# Test 2: Nested IfAction - test all branches
test_service = next((s for s in services if s.name == "test_nested_if"), None)
assert test_service is not None, "test_nested_if service not found"
# Both true
client.execute_service(test_service, {"outer": True, "inner": True})
await asyncio.wait_for(test2_complete, timeout=2.0)
assert test_results["nested_both_true"], "Nested both true not executed"
# Outer true, inner false
test2_complete = loop.create_future()
test_results["nested_complete"] = False
client.execute_service(test_service, {"outer": True, "inner": False})
await asyncio.wait_for(test2_complete, timeout=2.0)
assert test_results["nested_outer_true_inner_false"], (
"Nested outer true inner false not executed"
)
# Outer false
test2_complete = loop.create_future()
test_results["nested_complete"] = False
client.execute_service(test_service, {"outer": False, "inner": True})
await asyncio.wait_for(test2_complete, timeout=2.0)
assert test_results["nested_outer_false"], "Nested outer false not executed"
# Test 3: WhileAction
test_service = next(
(s for s in services if s.name == "test_while_action"), None
)
assert test_service is not None, "test_while_action service not found"
client.execute_service(test_service, {"max_count": 3})
await asyncio.wait_for(test3_complete, timeout=2.0)
assert test_results["while_iterations"] == 3, (
f"WhileAction expected 3 iterations, got {test_results['while_iterations']}"
)
assert test_results["while_complete"], "WhileAction did not complete"
# Test 4: RepeatAction
test_service = next(
(s for s in services if s.name == "test_repeat_action"), None
)
assert test_service is not None, "test_repeat_action service not found"
client.execute_service(test_service, {"count": 5})
await asyncio.wait_for(test4_complete, timeout=2.0)
assert test_results["repeat_iterations"] == 5, (
f"RepeatAction expected 5 iterations, got {test_results['repeat_iterations']}"
)
assert test_results["repeat_complete"], "RepeatAction did not complete"
# Test 5: Combined (if + repeat + while)
test_service = next((s for s in services if s.name == "test_combined"), None)
assert test_service is not None, "test_combined service not found"
client.execute_service(test_service, {"do_loop": True, "loop_count": 2})
await asyncio.wait_for(test5_complete, timeout=2.0)
# Should execute: repeat runs 2 times; each iteration counts the while loop down from iteration to 0
# iteration 0: while 0 times = 0
# iteration 1: while 1 time = 1
# Total: 1 combined log
assert test_results["combined_iterations"] >= 1, (
f"Combined expected >=1 iterations, got {test_results['combined_iterations']}"
)
assert test_results["combined_complete"], "Combined did not complete"
# Test 6: Rapid triggers (tests memory efficiency of ContinuationAction)
test_service = next((s for s in services if s.name == "test_rapid_if"), None)
assert test_service is not None, "test_rapid_if service not found"
client.execute_service(test_service, {})
await asyncio.wait_for(test6_complete, timeout=2.0)
# Values 1, 2 should hit else (<=2), values 3, 4, 5 should hit then (>2)
assert test_results["rapid_else"] == 2, (
f"Rapid test expected 2 else, got {test_results['rapid_else']}"
)
assert test_results["rapid_then"] == 3, (
f"Rapid test expected 3 then, got {test_results['rapid_then']}"
)
assert test_results["rapid_complete"] == 5, (
f"Rapid test expected 5 completions, got {test_results['rapid_complete']}"
)
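
The docstring of this deleted test describes the firmware-side change it exercised: if/while/repeat continuations hold a plain parent pointer instead of a captured std::function (what LambdaAction provided). The real classes are C++, but the structural difference can be sketched language-agnostically in Python; ActionList, Continuation, and make_closure_continuation below are hypothetical illustrations, not ESPHome types:

class ActionList:
    """Stand-in for a sequence of actions that can be resumed at an index."""

    def __init__(self, actions):
        self.actions = actions

    def resume_from(self, index):
        for action in self.actions[index:]:
            action()


class Continuation:
    """Parent-pointer continuation: stores only a reference and an index."""

    def __init__(self, parent, next_index):
        self.parent = parent
        self.next_index = next_index

    def play(self):
        self.parent.resume_from(self.next_index)


def make_closure_continuation(parent, next_index):
    # Closure-based equivalent of a LambdaAction: the callable itself captures
    # the resume target, which in C++ means a std::function plus captured state
    # per instance rather than a single pointer and index.
    return lambda: parent.resume_from(next_index)

Either form resumes the parent once the branch finishes; the deleted tests above only verify the observable behaviour (branches run, loops iterate, automations complete), not the memory layout.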

View File

@@ -543,7 +543,6 @@ def test_main_filters_components_without_tests(
with (
patch.object(determine_jobs, "root_path", str(tmp_path)),
patch.object(helpers, "root_path", str(tmp_path)),
patch.object(helpers, "create_components_graph", return_value={}),
patch("sys.argv", ["determine-jobs.py"]),
patch.object(
determine_jobs,
@@ -641,7 +640,6 @@ def test_main_detects_components_with_variant_tests(
with (
patch.object(determine_jobs, "root_path", str(tmp_path)),
patch.object(helpers, "root_path", str(tmp_path)),
patch.object(helpers, "create_components_graph", return_value={}),
patch("sys.argv", ["determine-jobs.py"]),
patch.object(
determine_jobs,

View File

@@ -1,6 +1,5 @@
"""Unit tests for script/helpers.py module."""
from collections.abc import Generator
import json
import os
from pathlib import Path
@@ -1102,262 +1101,3 @@ def test_get_component_from_path(
"""Test extraction of component names from file paths."""
result = helpers.get_component_from_path(file_path)
assert result == expected_component
# Components graph cache tests
@pytest.fixture
def mock_git_output() -> str:
"""Fixture for mock git ls-files output with realistic component files.
Includes examples of AUTO_LOAD in sensor.py and binary_sensor.py files,
which is why we need to hash all .py files, not just __init__.py.
"""
return (
"100644 abc123... 0 esphome/components/wifi/__init__.py\n"
"100644 def456... 0 esphome/components/api/__init__.py\n"
"100644 ghi789... 0 esphome/components/xiaomi_lywsd03mmc/__init__.py\n"
"100644 jkl012... 0 esphome/components/xiaomi_lywsd03mmc/sensor.py\n"
"100644 mno345... 0 esphome/components/xiaomi_cgpr1/__init__.py\n"
"100644 pqr678... 0 esphome/components/xiaomi_cgpr1/binary_sensor.py\n"
)
@pytest.fixture
def mock_cache_file(tmp_path: Path) -> Path:
"""Fixture for a temporary cache file path."""
return tmp_path / "components_graph.json"
@pytest.fixture(autouse=True)
def clear_cache_key_cache() -> None:
"""Clear the components graph cache key cache before each test."""
helpers.get_components_graph_cache_key.cache_clear()
@pytest.fixture
def mock_subprocess_run() -> Generator[Mock, None, None]:
"""Fixture to mock subprocess.run for git commands."""
with patch("subprocess.run") as mock_run:
yield mock_run
def test_cache_key_generation(mock_git_output: str, mock_subprocess_run: Mock) -> None:
"""Test that cache key is generated based on git file hashes."""
mock_result = Mock()
mock_result.stdout = mock_git_output
mock_subprocess_run.return_value = mock_result
key = helpers.get_components_graph_cache_key()
# Should be a 64-character hex string (SHA256)
assert len(key) == 64
assert all(c in "0123456789abcdef" for c in key)
def test_cache_key_consistent_for_same_files(
mock_git_output: str, mock_subprocess_run: Mock
) -> None:
"""Test that same git output produces same cache key."""
mock_result = Mock()
mock_result.stdout = mock_git_output
mock_subprocess_run.return_value = mock_result
key1 = helpers.get_components_graph_cache_key()
key2 = helpers.get_components_graph_cache_key()
assert key1 == key2
def test_cache_key_different_for_changed_files(mock_subprocess_run: Mock) -> None:
"""Test that different git output produces different cache key.
This test demonstrates that changes to any .py file (not just __init__.py)
will invalidate the cache, which is important because AUTO_LOAD can be
defined in sensor.py, binary_sensor.py, etc.
"""
mock_result1 = Mock()
mock_result1.stdout = (
"100644 abc123... 0 esphome/components/xiaomi_lywsd03mmc/sensor.py\n"
)
mock_result2 = Mock()
# Same file, different hash - simulates a change to AUTO_LOAD
mock_result2.stdout = (
"100644 xyz789... 0 esphome/components/xiaomi_lywsd03mmc/sensor.py\n"
)
mock_subprocess_run.return_value = mock_result1
key1 = helpers.get_components_graph_cache_key()
helpers.get_components_graph_cache_key.cache_clear()
mock_subprocess_run.return_value = mock_result2
key2 = helpers.get_components_graph_cache_key()
assert key1 != key2
def test_cache_key_uses_git_ls_files(
mock_git_output: str, mock_subprocess_run: Mock
) -> None:
"""Test that git ls-files command is called correctly."""
mock_result = Mock()
mock_result.stdout = mock_git_output
mock_subprocess_run.return_value = mock_result
helpers.get_components_graph_cache_key()
# Verify git ls-files was called with correct arguments
mock_subprocess_run.assert_called_once()
call_args = mock_subprocess_run.call_args
assert call_args[0][0] == [
"git",
"ls-files",
"-s",
"esphome/components/**/*.py",
]
assert call_args[1]["capture_output"] is True
assert call_args[1]["text"] is True
assert call_args[1]["check"] is True
assert call_args[1]["close_fds"] is False
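
The command asserted in this test, combined with the fixture note that AUTO_LOAD can live in sensor.py or binary_sensor.py, explains why the key covers every component .py file. A minimal standalone sketch of that key derivation, assuming the same git invocation (editor's sketch, not the actual helpers implementation):

import hashlib
import subprocess


def components_graph_cache_key() -> str:
    # Hash the staged blob hashes of all component .py files; a change to any
    # of them (not just __init__.py) produces a different 64-char SHA256 key.
    listing = subprocess.run(
        ["git", "ls-files", "-s", "esphome/components/**/*.py"],
        capture_output=True,
        text=True,
        check=True,
    ).stdout
    return hashlib.sha256(listing.encode()).hexdigest()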
def test_cache_hit_returns_cached_graph(
tmp_path: Path, mock_git_output: str, mock_subprocess_run: Mock
) -> None:
"""Test that cache hit returns cached data without rebuilding."""
mock_graph = {"wifi": ["network"], "api": ["socket"]}
cache_key = "a" * 64
cache_data = {
"_version": helpers.COMPONENTS_GRAPH_CACHE_VERSION,
"_cache_key": cache_key,
"graph": mock_graph,
}
# Write cache file
cache_file = tmp_path / "components_graph.json"
cache_file.write_text(json.dumps(cache_data))
mock_result = Mock()
mock_result.stdout = mock_git_output
mock_subprocess_run.return_value = mock_result
with (
patch("helpers.get_components_graph_cache_key", return_value=cache_key),
patch("helpers.temp_folder", str(tmp_path)),
):
result = helpers.create_components_graph()
assert result == mock_graph
def test_cache_miss_no_cache_file(
tmp_path: Path, mock_git_output: str, mock_subprocess_run: Mock
) -> None:
"""Test that cache miss rebuilds graph when no cache file exists."""
mock_result = Mock()
mock_result.stdout = mock_git_output
mock_subprocess_run.return_value = mock_result
# Create minimal components directory structure
components_dir = tmp_path / "esphome" / "components"
components_dir.mkdir(parents=True)
with (
patch("helpers.root_path", str(tmp_path)),
patch("helpers.temp_folder", str(tmp_path / ".temp")),
patch("helpers.get_components_graph_cache_key", return_value="test_key"),
):
result = helpers.create_components_graph()
# Should return empty graph for empty components directory
assert result == {}
def test_cache_miss_version_mismatch(
tmp_path: Path, mock_git_output: str, mock_subprocess_run: Mock
) -> None:
"""Test that cache miss rebuilds graph when version doesn't match."""
cache_data = {
"_version": 999, # Wrong version
"_cache_key": "test_key",
"graph": {"old": ["data"]},
}
cache_file = tmp_path / ".temp" / "components_graph.json"
cache_file.parent.mkdir(parents=True)
cache_file.write_text(json.dumps(cache_data))
mock_result = Mock()
mock_result.stdout = mock_git_output
mock_subprocess_run.return_value = mock_result
# Create minimal components directory structure
components_dir = tmp_path / "esphome" / "components"
components_dir.mkdir(parents=True)
with (
patch("helpers.root_path", str(tmp_path)),
patch("helpers.temp_folder", str(tmp_path / ".temp")),
patch("helpers.get_components_graph_cache_key", return_value="test_key"),
):
result = helpers.create_components_graph()
# Should rebuild and return empty graph, not use cached data
assert result == {}
def test_cache_miss_key_mismatch(
tmp_path: Path, mock_git_output: str, mock_subprocess_run: Mock
) -> None:
"""Test that cache miss rebuilds graph when cache key doesn't match."""
cache_data = {
"_version": helpers.COMPONENTS_GRAPH_CACHE_VERSION,
"_cache_key": "old_key",
"graph": {"old": ["data"]},
}
cache_file = tmp_path / ".temp" / "components_graph.json"
cache_file.parent.mkdir(parents=True)
cache_file.write_text(json.dumps(cache_data))
mock_result = Mock()
mock_result.stdout = mock_git_output
mock_subprocess_run.return_value = mock_result
# Create minimal components directory structure
components_dir = tmp_path / "esphome" / "components"
components_dir.mkdir(parents=True)
with (
patch("helpers.root_path", str(tmp_path)),
patch("helpers.temp_folder", str(tmp_path / ".temp")),
patch("helpers.get_components_graph_cache_key", return_value="new_key"),
):
result = helpers.create_components_graph()
# Should rebuild and return empty graph, not use cached data with old key
assert result == {}
def test_cache_miss_corrupted_json(
tmp_path: Path, mock_git_output: str, mock_subprocess_run: Mock
) -> None:
"""Test that cache miss rebuilds graph when cache file has invalid JSON."""
cache_file = tmp_path / ".temp" / "components_graph.json"
cache_file.parent.mkdir(parents=True)
cache_file.write_text("{invalid json")
mock_result = Mock()
mock_result.stdout = mock_git_output
mock_subprocess_run.return_value = mock_result
# Create minimal components directory structure
components_dir = tmp_path / "esphome" / "components"
components_dir.mkdir(parents=True)
with (
patch("helpers.root_path", str(tmp_path)),
patch("helpers.temp_folder", str(tmp_path / ".temp")),
patch("helpers.get_components_graph_cache_key", return_value="test_key"),
):
result = helpers.create_components_graph()
# Should handle corruption gracefully and rebuild
assert result == {}

View File

@@ -33,4 +33,3 @@ test_list:
{{{ "x", "79"}, { "y", "82"}}}
- '{{{"AA"}}}'
- '"HELLO"'
- '{ 79, 82 }'

View File

@@ -34,4 +34,3 @@ test_list:
{{{ "x", "${ position.x }"}, { "y", "${ position.y }"}}}
- ${ '{{{"AA"}}}' }
- ${ '"HELLO"' }
- '{ ${position.x}, ${position.y} }'

View File

@@ -3,7 +3,6 @@ import string
from hypothesis import example, given
from hypothesis.strategies import builds, integers, ip_addresses, one_of, text
import pytest
import voluptuous as vol
from esphome import config_validation
from esphome.components.esp32.const import (
@@ -302,6 +301,8 @@ def test_split_default(framework, platform, variant, full, idf, arduino, simple)
],
)
def test_require_framework_version(framework, platform, message):
import voluptuous as vol
from esphome.const import (
KEY_CORE,
KEY_FRAMEWORK_VERSION,
@@ -376,129 +377,3 @@ def test_require_framework_version(framework, platform, message):
config_validation.require_framework_version(
extra_message="test 5",
)("test")
def test_only_with_single_component_loaded() -> None:
"""Test OnlyWith with single component when component is loaded."""
CORE.loaded_integrations = {"mqtt"}
schema = config_validation.Schema(
{
config_validation.OnlyWith("mqtt_id", "mqtt", default="test_mqtt"): str,
}
)
result = schema({})
assert result.get("mqtt_id") == "test_mqtt"
def test_only_with_single_component_not_loaded() -> None:
"""Test OnlyWith with single component when component is not loaded."""
CORE.loaded_integrations = set()
schema = config_validation.Schema(
{
config_validation.OnlyWith("mqtt_id", "mqtt", default="test_mqtt"): str,
}
)
result = schema({})
assert "mqtt_id" not in result
def test_only_with_list_all_components_loaded() -> None:
"""Test OnlyWith with list when all components are loaded."""
CORE.loaded_integrations = {"zigbee", "nrf52"}
schema = config_validation.Schema(
{
config_validation.OnlyWith(
"zigbee_id", ["zigbee", "nrf52"], default="test_zigbee"
): str,
}
)
result = schema({})
assert result.get("zigbee_id") == "test_zigbee"
def test_only_with_list_partial_components_loaded() -> None:
"""Test OnlyWith with list when only some components are loaded."""
CORE.loaded_integrations = {"zigbee"} # Only zigbee, not nrf52
schema = config_validation.Schema(
{
config_validation.OnlyWith(
"zigbee_id", ["zigbee", "nrf52"], default="test_zigbee"
): str,
}
)
result = schema({})
assert "zigbee_id" not in result
def test_only_with_list_no_components_loaded() -> None:
"""Test OnlyWith with list when no components are loaded."""
CORE.loaded_integrations = set()
schema = config_validation.Schema(
{
config_validation.OnlyWith(
"zigbee_id", ["zigbee", "nrf52"], default="test_zigbee"
): str,
}
)
result = schema({})
assert "zigbee_id" not in result
def test_only_with_list_multiple_components() -> None:
"""Test OnlyWith with list requiring three components."""
CORE.loaded_integrations = {"comp1", "comp2", "comp3"}
schema = config_validation.Schema(
{
config_validation.OnlyWith(
"test_id", ["comp1", "comp2", "comp3"], default="test_value"
): str,
}
)
result = schema({})
assert result.get("test_id") == "test_value"
# Test with one missing
CORE.loaded_integrations = {"comp1", "comp2"}
result = schema({})
assert "test_id" not in result
def test_only_with_empty_list() -> None:
"""Test OnlyWith with empty list (edge case)."""
CORE.loaded_integrations = set()
schema = config_validation.Schema(
{
config_validation.OnlyWith("test_id", [], default="test_value"): str,
}
)
# all([]) returns True, so default should be applied
result = schema({})
assert result.get("test_id") == "test_value"
def test_only_with_user_value_overrides_default() -> None:
"""Test OnlyWith respects user-provided values over defaults."""
CORE.loaded_integrations = {"mqtt"}
schema = config_validation.Schema(
{
config_validation.OnlyWith("mqtt_id", "mqtt", default="default_id"): str,
}
)
result = schema({"mqtt_id": "custom_id"})
assert result.get("mqtt_id") == "custom_id"