mirror of https://github.com/esphome/esphome.git synced 2025-10-29 22:24:26 +00:00

Merge remote-tracking branch 'upstream/dev' into multi_device

J. Nick Koston
2025-06-21 12:13:54 +02:00
748 changed files with 14531 additions and 7513 deletions

View File

@@ -66,5 +66,5 @@ def test_text_config_lamda_is_set(generate_main):
main_cpp = generate_main("tests/component_tests/text/test_text.yaml")
# Then
assert "it_4->set_template([=]() -> optional<std::string> {" in main_cpp
assert "it_4->set_template([=]() -> esphome::optional<std::string> {" in main_cpp
assert 'return std::string{"Hello"};' in main_cpp

View File

@@ -6,7 +6,6 @@ light:
rgb_order: GRB
num_leds: 256
pin: ${pin}
rmt_channel: 0
display:
- platform: addressable_light

View File

@@ -0,0 +1,15 @@
binary_sensor:
- platform: template
trigger_on_initial_state: true
id: some_binary_sensor
name: "Random binary"
lambda: return (random_uint32() & 1) == 0;
on_state_change:
then:
- logger.log:
format: "Old state was %s"
args: ['x_previous.has_value() ? ONOFF(x_previous) : "Unknown"']
- logger.log:
format: "New state is %s"
args: ['x.has_value() ? ONOFF(x) : "Unknown"']
- binary_sensor.invalidate_state: some_binary_sensor

View File

@@ -0,0 +1,2 @@
packages:
common: !include common.yaml

View File

@@ -0,0 +1,2 @@
packages:
common: !include common.yaml

View File

@@ -0,0 +1,2 @@
packages:
common: !include common.yaml

View File

@@ -0,0 +1,2 @@
packages:
common: !include common.yaml

View File

@@ -0,0 +1,2 @@
packages:
common: !include common.yaml

View File

@@ -0,0 +1,2 @@
packages:
common: !include common.yaml

View File

@@ -0,0 +1,2 @@
packages:
common: !include common.yaml

View File

@@ -0,0 +1,2 @@
packages:
common: !include common.yaml

View File

@@ -1,8 +0,0 @@
<<: !include common.yaml
esp32_ble_tracker:
max_connections: 3
bluetooth_proxy:
active: true
connection_slots: 2

View File

@@ -8,7 +8,6 @@ light:
rgb_order: GRB
num_leds: 256
pin: ${pin}
rmt_channel: 0
effects:
- e131:
universe: 1

View File

@@ -0,0 +1,12 @@
esp32:
board: esp32dev
framework:
type: esp-idf
advanced:
enable_lwip_mdns_queries: true
enable_lwip_bridge_interface: true
wifi:
ssid: MySSID
password: password1

View File

@@ -1 +1,5 @@
<<: !include common.yaml
esp32_ble:
io_capability: keyboard_only
disable_bt_logs: false

View File

@@ -1 +1,5 @@
<<: !include common.yaml
esp32_ble:
io_capability: keyboard_only
disable_bt_logs: false

View File

@@ -32,3 +32,7 @@ esp32_camera_web_server:
mode: stream
- port: 8081
mode: snapshot
wifi:
ssid: MySSID
password: password1

View File

@@ -1,3 +0,0 @@
sensor:
- platform: esp32_hall
name: ESP32 Hall Sensor

View File

@@ -1,18 +0,0 @@
light:
- platform: esp32_rmt_led_strip
id: led_strip1
pin: ${pin1}
num_leds: 60
rmt_channel: 0
rgb_order: GRB
chipset: ws2812
- platform: esp32_rmt_led_strip
id: led_strip2
pin: ${pin2}
num_leds: 60
rmt_channel: 1
rgb_order: RGB
bit0_high: 100us
bit0_low: 100us
bit1_high: 100us
bit1_low: 100us

View File

@@ -2,4 +2,5 @@ substitutions:
pin1: GPIO13
pin2: GPIO14
<<: !include common-ard.yaml
packages:
common: !include common.yaml

View File

@@ -2,4 +2,5 @@ substitutions:
pin1: GPIO3
pin2: GPIO4
<<: !include common-ard.yaml
packages:
common: !include common.yaml

View File

@@ -2,4 +2,5 @@ substitutions:
pin1: GPIO3
pin2: GPIO4
<<: !include common-idf.yaml
packages:
common: !include common.yaml

View File

@@ -2,4 +2,5 @@ substitutions:
pin1: GPIO13
pin2: GPIO14
<<: !include common-idf.yaml
packages:
common: !include common.yaml

View File

@@ -0,0 +1,12 @@
substitutions:
pin1: GPIO3
pin2: GPIO4
packages:
common: !include common.yaml
light:
- id: !extend led_strip1
use_dma: "true"
- id: !extend led_strip2
use_dma: "false"

View File

@@ -0,0 +1,15 @@
esp_ldo:
- id: ldo_id
channel: 3
voltage: 2.5V
adjustable: true
- id: ldo_4
channel: 4
voltage: 2.0V
esphome:
on_boot:
then:
- esp_ldo.voltage.adjust:
id: ldo_id
voltage: !lambda return 2.5;

View File

@@ -0,0 +1 @@
<<: !include common.yaml

View File

@@ -1,5 +1 @@
<<: !include common.yaml
esp32:
framework:
version: 2.0.9

View File

@@ -0,0 +1,16 @@
i2c:
- id: i2c_lc709203f
scl: ${scl_pin}
sda: ${sda_pin}
sensor:
- platform: lc709203f
size: 2000
voltage: 3.7
battery_voltage:
name: "Battery Voltage"
battery_level:
name: "Battery"
temperature:
name: "Pack Temperature"
b_constant: 0xA5A5

View File

@@ -0,0 +1,5 @@
substitutions:
scl_pin: GPIO16
sda_pin: GPIO17
<<: !include common.yaml

View File

@@ -0,0 +1,5 @@
substitutions:
scl_pin: GPIO5
sda_pin: GPIO4
<<: !include common.yaml

View File

@@ -0,0 +1,5 @@
substitutions:
scl_pin: GPIO5
sda_pin: GPIO4
<<: !include common.yaml

View File

@@ -0,0 +1,5 @@
substitutions:
scl_pin: GPIO16
sda_pin: GPIO17
<<: !include common.yaml

View File

@@ -0,0 +1,5 @@
substitutions:
scl_pin: GPIO5
sda_pin: GPIO4
<<: !include common.yaml

View File

@@ -0,0 +1,5 @@
substitutions:
scl_pin: GPIO5
sda_pin: GPIO4
<<: !include common.yaml

View File

@@ -63,7 +63,7 @@ binary_sensor:
id: lvgl_pressbutton
name: Pressbutton
widget: spin_up
publish_initial_state: true
trigger_on_initial_state: true
- platform: lvgl
name: ButtonMatrix button
widget: button_a

View File

@@ -170,6 +170,12 @@ lvgl:
lvgl.page.is_showing: page1
then:
logger.log: "Yes, page1 showing"
- if:
condition:
lvgl.is_idle:
timeout: !lambda return 5000;
then:
logger.log: LVGL is idle
on_unload:
- logger.log: page unloaded
- lvgl.widget.focus: mark

View File

@@ -281,6 +281,8 @@ display:
id: main_lcd
update_interval: 5s
command_spacing: 5ms
max_commands_per_loop: 20
max_queue_size: 50
on_sleep:
then:
lambda: 'ESP_LOGD("display","Display went to sleep");'

View File

@@ -11,6 +11,9 @@ online_image:
format: PNG
type: BINARY
resize: 50x50
request_headers:
X-Test1: 'Test1'
X-Test2: !lambda 'static int x; return to_string(x++);'
on_download_finished:
lambda: |-
if (cached) {

View File

@@ -0,0 +1,11 @@
network:
enable_ipv6: true
openthread:
channel: 13
network_name: OpenThread-8f28
network_key: 0xdfd34f0f05cad978ec4e32b0413038ff
pan_id: 0x8f28
ext_pan_id: 0xd63e8e3e495ebbc3
pskc: 0xc23a76e98f1a6483639b1ac1271e2e27
force_dataset: true

View File

@@ -0,0 +1,30 @@
network:
enable_ipv6: true
openthread:
channel: 13
network_key: 0xdfd34f0f05cad978ec4e32b0413038ff
pan_id: 0x8f28
text_sensor:
- platform: openthread_info
ip_address:
name: "Off-mesh routable IP Address"
channel:
name: "Channel"
role:
name: "Device Role"
rloc16:
name: "RLOC16"
ext_addr:
name: "Extended Address"
eui64:
name: "EUI64"
network_name:
name: "Network Name"
network_key:
name: "Network Key"
pan_id:
name: "PAN ID"
ext_pan_id:
name: "Extended PAN ID"

View File

@@ -5,7 +5,6 @@ light:
chipset: ws2812
num_leds: 256
rgb_order: GRB
rmt_channel: 1
pin: ${pin}
- platform: partition
name: Partition Light

View File

@@ -0,0 +1,9 @@
esp32:
cpu_frequency: 360MHz
framework:
type: esp-idf
advanced:
enable_idf_experimental_features: yes
psram:
speed: 200MHz

View File

@@ -1,14 +0,0 @@
remote_receiver:
- id: rcvr
pin: ${pin}
rmt_channel: ${rmt_channel}
dump: all
tolerance: 25%
<<: !include common-actions.yaml
binary_sensor:
- platform: remote_receiver
name: Panasonic Remote Input
panasonic:
address: 0x4004
command: 0x100BCBD

View File

@@ -7,7 +7,6 @@ remote_receiver:
filter_symbols: ${filter_symbols}
receive_symbols: ${receive_symbols}
rmt_symbols: ${rmt_symbols}
use_dma: ${use_dma}
<<: !include common-actions.yaml
binary_sensor:

View File

@@ -1,6 +1,9 @@
substitutions:
pin: GPIO2
rmt_channel: "2"
clock_resolution: "2000000"
filter_symbols: "2"
receive_symbols: "4"
rmt_symbols: "64"
packages:
common: !include esp32-common-ard.yaml
common: !include esp32-common.yaml

View File

@@ -1,6 +1,9 @@
substitutions:
pin: GPIO2
rmt_channel: "2"
clock_resolution: "2000000"
filter_symbols: "2"
receive_symbols: "4"
rmt_symbols: "64"
packages:
common: !include esp32-common-ard.yaml
common: !include esp32-common.yaml

View File

@@ -4,7 +4,6 @@ substitutions:
filter_symbols: "2"
receive_symbols: "4"
rmt_symbols: "64"
use_dma: "true"
packages:
common: !include esp32-common-idf.yaml
common: !include esp32-common.yaml

View File

@@ -4,7 +4,6 @@ substitutions:
filter_symbols: "2"
receive_symbols: "4"
rmt_symbols: "64"
use_dma: "true"
packages:
common: !include esp32-common-idf.yaml
common: !include esp32-common.yaml

View File

@@ -4,7 +4,10 @@ substitutions:
filter_symbols: "2"
receive_symbols: "4"
rmt_symbols: "64"
use_dma: "true"
packages:
common: !include esp32-common-idf.yaml
common: !include esp32-common.yaml
remote_receiver:
- id: !extend rcvr
use_dma: "true"

View File

@@ -1,8 +0,0 @@
remote_transmitter:
- id: xmitr
pin: ${pin}
rmt_channel: ${rmt_channel}
carrier_duty_percent: 50%
packages:
buttons: !include common-buttons.yaml

View File

@@ -4,7 +4,6 @@ remote_transmitter:
carrier_duty_percent: 50%
clock_resolution: ${clock_resolution}
rmt_symbols: ${rmt_symbols}
use_dma: ${use_dma}
packages:
buttons: !include common-buttons.yaml

View File

@@ -1,6 +1,7 @@
substitutions:
pin: GPIO2
rmt_channel: "2"
clock_resolution: "2000000"
rmt_symbols: "64"
packages:
common: !include esp32-common-ard.yaml
common: !include esp32-common.yaml

View File

@@ -1,6 +1,7 @@
substitutions:
pin: GPIO2
rmt_channel: "1"
clock_resolution: "2000000"
rmt_symbols: "64"
packages:
common: !include esp32-common-ard.yaml
common: !include esp32-common.yaml

View File

@@ -2,7 +2,6 @@ substitutions:
pin: GPIO2
clock_resolution: "2000000"
rmt_symbols: "64"
use_dma: "true"
packages:
common: !include esp32-common-idf.yaml
common: !include esp32-common.yaml

View File

@@ -2,7 +2,6 @@ substitutions:
pin: GPIO2
clock_resolution: "2000000"
rmt_symbols: "64"
use_dma: "true"
packages:
common: !include esp32-common-idf.yaml
common: !include esp32-common.yaml

View File

@@ -2,7 +2,10 @@ substitutions:
pin: GPIO38
clock_resolution: "2000000"
rmt_symbols: "64"
use_dma: "true"
packages:
common: !include esp32-common-idf.yaml
common: !include esp32-common.yaml
remote_transmitter:
- id: !extend xmitr
use_dma: "true"

View File

@@ -0,0 +1,38 @@
spi:
- id: quad_spi
type: quad
interface: spi3
clk_pin:
number: 47
data_pins:
- allow_other_uses: true
number: 40
- allow_other_uses: true
number: 41
- allow_other_uses: true
number: 42
- allow_other_uses: true
number: 43
- id: octal_spi
type: octal
interface: hardware
clk_pin:
number: 0
data_pins:
- 36
- 37
- 38
- 39
- allow_other_uses: true
number: 40
- allow_other_uses: true
number: 41
- allow_other_uses: true
number: 42
- allow_other_uses: true
number: 43
- id: spi_id_3
interface: any
clk_pin: 8
mosi_pin: 9

View File

@@ -5,7 +5,7 @@ spi:
miso_pin: ${miso_pin}
spi_device:
id: spi_device_test
data_rate: 2MHz
spi_mode: 3
bit_order: lsb_first
- id: spi_device_test
data_rate: 2MHz
spi_mode: 3
bit_order: lsb_first

View File

@@ -4,3 +4,8 @@ substitutions:
miso_pin: GPIO15
<<: !include common.yaml
spi_device:
- id: spi_device_test
release_device: true
data_rate: 1MHz
spi_mode: 0

View File

@@ -12,6 +12,5 @@ light:
rgb_order: GRB
num_leds: 256
pin: 2
rmt_channel: 0
effects:
- wled:

View File

@@ -12,6 +12,5 @@ light:
rgb_order: GRB
num_leds: 256
pin: 2
rmt_channel: 0
effects:
- wled:

View File

@@ -0,0 +1,12 @@
esp32_ble_tracker:
sensor:
- platform: xiaomi_xmwsdj04mmc
mac_address: 84:B4:DB:5D:A3:8F
bindkey: d8ca2ed09bb5541dc8f045ca360b00ea
temperature:
name: Xiaomi XMWSDJ04MMC Temperature
humidity:
name: Xiaomi XMWSDJ04MMC Humidity
battery_level:
name: Xiaomi XMWSDJ04MMC Battery Level

View File

@@ -0,0 +1 @@
<<: !include common.yaml

View File

@@ -0,0 +1 @@
<<: !include common.yaml

View File

@@ -0,0 +1 @@
<<: !include common.yaml

View File

@@ -0,0 +1 @@
<<: !include common.yaml

View File

@@ -3,7 +3,7 @@
from __future__ import annotations
import asyncio
from collections.abc import AsyncGenerator, Generator
from collections.abc import AsyncGenerator, Callable, Generator
from contextlib import AbstractAsyncContextManager, asynccontextmanager
import logging
import os
@@ -15,7 +15,7 @@ import sys
import tempfile
from typing import TextIO
from aioesphomeapi import APIClient, APIConnectionError, ReconnectLogic
from aioesphomeapi import APIClient, APIConnectionError, LogParser, ReconnectLogic
import pytest
import pytest_asyncio
@@ -46,6 +46,7 @@ if platform.system() == "Windows":
"Integration tests are not supported on Windows", allow_module_level=True
)
import pty # not available on Windows
@@ -119,6 +120,21 @@ async def yaml_config(request: pytest.FixtureRequest, unused_tcp_port: int) -> s
# Add port configuration after api:
content = content.replace("api:", f"api:\n port: {unused_tcp_port}")
# Add debug build flags for integration tests to enable assertions
if "esphome:" in content:
# Check if platformio_options already exists
if "platformio_options:" not in content:
# Add platformio_options with debug flags after esphome:
content = content.replace(
"esphome:",
"esphome:\n"
" # Enable assertions for integration tests\n"
" platformio_options:\n"
" build_flags:\n"
' - "-DDEBUG" # Enable assert() statements\n'
' - "-g" # Add debug symbols',
)
return content
@@ -347,14 +363,30 @@ async def api_client_connected(
async def _read_stream_lines(
stream: asyncio.StreamReader, lines: list[str], output_stream: TextIO
stream: asyncio.StreamReader,
lines: list[str],
output_stream: TextIO,
line_callback: Callable[[str], None] | None = None,
) -> None:
"""Read lines from a stream, append to list, and echo to output stream."""
log_parser = LogParser()
while line := await stream.readline():
decoded_line = line.decode("utf-8", errors="replace")
decoded_line = (
line.replace(b"\r", b"")
.replace(b"\n", b"")
.decode("utf8", "backslashreplace")
)
lines.append(decoded_line.rstrip())
# Echo to stdout/stderr in real-time
print(decoded_line.rstrip(), file=output_stream, flush=True)
# Print without newline to avoid double newlines
print(
log_parser.parse_line(decoded_line, timestamp=""),
file=output_stream,
flush=True,
)
# Call the callback if provided
if line_callback:
line_callback(decoded_line.rstrip())
@asynccontextmanager
@@ -363,6 +395,7 @@ async def run_binary_and_wait_for_port(
host: str,
port: int,
timeout: float = PORT_WAIT_TIMEOUT,
line_callback: Callable[[str], None] | None = None,
) -> AsyncGenerator[None]:
"""Run a binary, wait for it to open a port, and clean up on exit."""
# Create a pseudo-terminal to make the binary think it's running interactively
@@ -410,7 +443,9 @@ async def run_binary_and_wait_for_port(
# Read from output stream
output_tasks = [
asyncio.create_task(
_read_stream_lines(output_reader, stdout_lines, sys.stdout)
_read_stream_lines(
output_reader, stdout_lines, sys.stdout, line_callback
)
)
]
@@ -490,6 +525,7 @@ async def run_compiled_context(
compile_esphome: CompileFunction,
port: int,
port_socket: socket.socket | None = None,
line_callback: Callable[[str], None] | None = None,
) -> AsyncGenerator[None]:
"""Context manager to write, compile and run an ESPHome configuration."""
# Write the YAML config
@@ -503,7 +539,9 @@ async def run_compiled_context(
port_socket.close()
# Run the binary and wait for the API server to start
async with run_binary_and_wait_for_port(binary_path, LOCALHOST, port):
async with run_binary_and_wait_for_port(
binary_path, LOCALHOST, port, line_callback=line_callback
):
yield
@@ -517,7 +555,9 @@ async def run_compiled(
port, port_socket = reserved_tcp_port
def _run_compiled(
yaml_content: str, filename: str | None = None
yaml_content: str,
filename: str | None = None,
line_callback: Callable[[str], None] | None = None,
) -> AbstractAsyncContextManager[asyncio.subprocess.Process]:
return run_compiled_context(
yaml_content,
@@ -526,6 +566,7 @@ async def run_compiled(
compile_esphome,
port,
port_socket,
line_callback=line_callback,
)
yield _run_compiled
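
The conftest changes above route an optional line_callback from run_compiled down through run_binary_and_wait_for_port into _read_stream_lines, so an integration test can observe each parsed log line while the compiled binary is running. A hypothetical usage sketch, assuming the fixtures behave as this diff shows (the test name, log marker, and timeout below are illustrative only, not from the commit):

import asyncio

import pytest


@pytest.mark.asyncio
async def test_observes_log_lines(run_compiled, yaml_config) -> None:
    # Collect every line the new callback hands us while the binary runs.
    seen: list[str] = []
    marker_seen = asyncio.Event()

    def on_line(line: str) -> None:
        seen.append(line)
        if "Setup called" in line:  # illustrative marker, not from this commit
            marker_seen.set()

    async with run_compiled(yaml_config, line_callback=on_line):
        await asyncio.wait_for(marker_seen.wait(), timeout=30)

    assert any("Setup called" in line for line in seen)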

View File

@@ -0,0 +1,161 @@
esphome:
name: message-size-batching-test
host:
api:
# Default batch_delay to test batching
logger:
# Create entities that will produce different protobuf header sizes
# Header size depends on: 1 byte indicator + varint(payload_size) + varint(message_type)
# 4-byte header: type < 128, payload < 128
# 5-byte header: type < 128, payload 128-16383 OR type 128+, payload < 128
# 6-byte header: type 128+, payload 128-16383
# Small select with few options - produces small message
select:
- platform: template
name: "Small Select"
id: small_select
optimistic: true
options:
- "Option A"
- "Option B"
initial_option: "Option A"
update_interval: 5.0s
# Medium select with more options - produces medium message
- platform: template
name: "Medium Select"
id: medium_select
optimistic: true
options:
- "Option 001"
- "Option 002"
- "Option 003"
- "Option 004"
- "Option 005"
- "Option 006"
- "Option 007"
- "Option 008"
- "Option 009"
- "Option 010"
- "Option 011"
- "Option 012"
- "Option 013"
- "Option 014"
- "Option 015"
- "Option 016"
- "Option 017"
- "Option 018"
- "Option 019"
- "Option 020"
initial_option: "Option 001"
update_interval: 5.0s
# Large select with many options - produces larger message
- platform: template
name: "Large Select with Many Options to Create Larger Payload"
id: large_select
optimistic: true
options:
- "Long Option Name 001 - This is a longer option name to increase message size"
- "Long Option Name 002 - This is a longer option name to increase message size"
- "Long Option Name 003 - This is a longer option name to increase message size"
- "Long Option Name 004 - This is a longer option name to increase message size"
- "Long Option Name 005 - This is a longer option name to increase message size"
- "Long Option Name 006 - This is a longer option name to increase message size"
- "Long Option Name 007 - This is a longer option name to increase message size"
- "Long Option Name 008 - This is a longer option name to increase message size"
- "Long Option Name 009 - This is a longer option name to increase message size"
- "Long Option Name 010 - This is a longer option name to increase message size"
- "Long Option Name 011 - This is a longer option name to increase message size"
- "Long Option Name 012 - This is a longer option name to increase message size"
- "Long Option Name 013 - This is a longer option name to increase message size"
- "Long Option Name 014 - This is a longer option name to increase message size"
- "Long Option Name 015 - This is a longer option name to increase message size"
- "Long Option Name 016 - This is a longer option name to increase message size"
- "Long Option Name 017 - This is a longer option name to increase message size"
- "Long Option Name 018 - This is a longer option name to increase message size"
- "Long Option Name 019 - This is a longer option name to increase message size"
- "Long Option Name 020 - This is a longer option name to increase message size"
- "Long Option Name 021 - This is a longer option name to increase message size"
- "Long Option Name 022 - This is a longer option name to increase message size"
- "Long Option Name 023 - This is a longer option name to increase message size"
- "Long Option Name 024 - This is a longer option name to increase message size"
- "Long Option Name 025 - This is a longer option name to increase message size"
- "Long Option Name 026 - This is a longer option name to increase message size"
- "Long Option Name 027 - This is a longer option name to increase message size"
- "Long Option Name 028 - This is a longer option name to increase message size"
- "Long Option Name 029 - This is a longer option name to increase message size"
- "Long Option Name 030 - This is a longer option name to increase message size"
- "Long Option Name 031 - This is a longer option name to increase message size"
- "Long Option Name 032 - This is a longer option name to increase message size"
- "Long Option Name 033 - This is a longer option name to increase message size"
- "Long Option Name 034 - This is a longer option name to increase message size"
- "Long Option Name 035 - This is a longer option name to increase message size"
- "Long Option Name 036 - This is a longer option name to increase message size"
- "Long Option Name 037 - This is a longer option name to increase message size"
- "Long Option Name 038 - This is a longer option name to increase message size"
- "Long Option Name 039 - This is a longer option name to increase message size"
- "Long Option Name 040 - This is a longer option name to increase message size"
- "Long Option Name 041 - This is a longer option name to increase message size"
- "Long Option Name 042 - This is a longer option name to increase message size"
- "Long Option Name 043 - This is a longer option name to increase message size"
- "Long Option Name 044 - This is a longer option name to increase message size"
- "Long Option Name 045 - This is a longer option name to increase message size"
- "Long Option Name 046 - This is a longer option name to increase message size"
- "Long Option Name 047 - This is a longer option name to increase message size"
- "Long Option Name 048 - This is a longer option name to increase message size"
- "Long Option Name 049 - This is a longer option name to increase message size"
- "Long Option Name 050 - This is a longer option name to increase message size"
initial_option: "Long Option Name 001 - This is a longer option name to increase message size"
update_interval: 5.0s
# Text sensors with different value lengths
text_sensor:
- platform: template
name: "Short Text Sensor"
id: short_text_sensor
lambda: |-
return {"OK"};
update_interval: 5.0s
- platform: template
name: "Medium Text Sensor"
id: medium_text_sensor
lambda: |-
return {"This is a medium length text sensor value that should produce a medium sized message"};
update_interval: 5.0s
- platform: template
name: "Long Text Sensor with Very Long Value"
id: long_text_sensor
lambda: |-
return {"This is a very long text sensor value that contains a lot of text to ensure we get a larger protobuf message. The message should be long enough to require a 2-byte varint for the payload size, which happens when the payload exceeds 127 bytes. Let's add even more text here to make sure we exceed that threshold and test the batching of messages with different header sizes properly."};
update_interval: 5.0s
# Text input which can have various lengths
text:
- platform: template
name: "Test Text Input"
id: test_text_input
optimistic: true
mode: text
min_length: 0
max_length: 255
initial_value: "Initial value"
update_interval: 5.0s
# Number entity to add variety (different message type number)
# The ListEntitiesNumberResponse has message type 49
# The NumberStateResponse has message type 50
number:
- platform: template
name: "Test Number with Long Name to Increase Message Size"
id: test_number
optimistic: true
min_value: 0
max_value: 1000
step: 0.1
initial_value: 42.0
update_interval: 5.0s
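
The header-size comments at the top of this batching test rest on protobuf varint arithmetic: a varint uses one byte per 7 bits, so both the payload size and the message type need a second byte once they reach 128, and a third at 16384. A minimal Python sketch of just that length calculation (the exact header layout is the test's own assumption and is not reproduced here):

def varint_len(value: int) -> int:
    # Bytes needed to encode a non-negative integer as a protobuf varint (7 bits per byte).
    length = 1
    while value >= 0x80:
        value >>= 7
        length += 1
    return length


# Boundaries the comments above rely on:
assert varint_len(127) == 1 and varint_len(128) == 2      # payload size: 1 -> 2 bytes at 128
assert varint_len(16383) == 2 and varint_len(16384) == 3  # and 2 -> 3 bytes at 16384
assert varint_len(49) == 1                                # small message types stay at 1 byte
assert varint_len(200) == 2                               # types of 128 and above need 2 bytes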

View File

@@ -0,0 +1,96 @@
from esphome import automation
import esphome.codegen as cg
import esphome.config_validation as cv
from esphome.const import CONF_COMPONENTS, CONF_ID, CONF_NAME
CODEOWNERS = ["@esphome/tests"]
loop_test_component_ns = cg.esphome_ns.namespace("loop_test_component")
LoopTestComponent = loop_test_component_ns.class_("LoopTestComponent", cg.Component)
LoopTestISRComponent = loop_test_component_ns.class_(
"LoopTestISRComponent", cg.Component
)
CONF_DISABLE_AFTER = "disable_after"
CONF_TEST_REDUNDANT_OPERATIONS = "test_redundant_operations"
CONF_ISR_COMPONENTS = "isr_components"
COMPONENT_CONFIG_SCHEMA = cv.Schema(
{
cv.GenerateID(): cv.declare_id(LoopTestComponent),
cv.Required(CONF_NAME): cv.string,
cv.Optional(CONF_DISABLE_AFTER, default=0): cv.int_,
cv.Optional(CONF_TEST_REDUNDANT_OPERATIONS, default=False): cv.boolean,
}
)
ISR_COMPONENT_CONFIG_SCHEMA = cv.Schema(
{
cv.GenerateID(): cv.declare_id(LoopTestISRComponent),
cv.Required(CONF_NAME): cv.string,
}
)
CONFIG_SCHEMA = cv.Schema(
{
cv.GenerateID(): cv.declare_id(LoopTestComponent),
cv.Required(CONF_COMPONENTS): cv.ensure_list(COMPONENT_CONFIG_SCHEMA),
cv.Optional(CONF_ISR_COMPONENTS): cv.ensure_list(ISR_COMPONENT_CONFIG_SCHEMA),
}
).extend(cv.COMPONENT_SCHEMA)
# Define actions
EnableAction = loop_test_component_ns.class_("EnableAction", automation.Action)
DisableAction = loop_test_component_ns.class_("DisableAction", automation.Action)
@automation.register_action(
"loop_test_component.enable",
EnableAction,
cv.Schema(
{
cv.Required(CONF_ID): cv.use_id(LoopTestComponent),
}
),
)
async def enable_to_code(config, action_id, template_arg, args):
parent = await cg.get_variable(config[CONF_ID])
var = cg.new_Pvariable(action_id, template_arg, parent)
return var
@automation.register_action(
"loop_test_component.disable",
DisableAction,
cv.Schema(
{
cv.Required(CONF_ID): cv.use_id(LoopTestComponent),
}
),
)
async def disable_to_code(config, action_id, template_arg, args):
parent = await cg.get_variable(config[CONF_ID])
var = cg.new_Pvariable(action_id, template_arg, parent)
return var
async def to_code(config):
# The parent config doesn't actually create a component
# We just create each sub-component
for comp_config in config[CONF_COMPONENTS]:
var = cg.new_Pvariable(comp_config[CONF_ID])
await cg.register_component(var, comp_config)
cg.add(var.set_name(comp_config[CONF_NAME]))
cg.add(var.set_disable_after(comp_config[CONF_DISABLE_AFTER]))
cg.add(
var.set_test_redundant_operations(
comp_config[CONF_TEST_REDUNDANT_OPERATIONS]
)
)
# Create ISR test components
for isr_config in config.get(CONF_ISR_COMPONENTS, []):
var = cg.new_Pvariable(isr_config[CONF_ID])
await cg.register_component(var, isr_config)
cg.add(var.set_name(isr_config[CONF_NAME]))

View File

@@ -0,0 +1,43 @@
#include "loop_test_component.h"
namespace esphome {
namespace loop_test_component {
void LoopTestComponent::setup() { ESP_LOGI(TAG, "[%s] Setup called", this->name_.c_str()); }
void LoopTestComponent::loop() {
this->loop_count_++;
ESP_LOGI(TAG, "[%s] Loop count: %d", this->name_.c_str(), this->loop_count_);
// Test self-disable after specified count
if (this->disable_after_ > 0 && this->loop_count_ == this->disable_after_) {
ESP_LOGI(TAG, "[%s] Disabling self after %d loops", this->name_.c_str(), this->disable_after_);
this->disable_loop();
}
// Test redundant operations
if (this->test_redundant_operations_ && this->loop_count_ == 5) {
if (this->name_ == "redundant_enable") {
ESP_LOGI(TAG, "[%s] Testing enable when already enabled", this->name_.c_str());
this->enable_loop();
} else if (this->name_ == "redundant_disable") {
ESP_LOGI(TAG, "[%s] Testing disable when will be disabled", this->name_.c_str());
// We'll disable at count 10, but try to disable again at 5
this->disable_loop();
ESP_LOGI(TAG, "[%s] First disable complete", this->name_.c_str());
}
}
}
void LoopTestComponent::service_enable() {
ESP_LOGI(TAG, "[%s] Service enable called", this->name_.c_str());
this->enable_loop();
}
void LoopTestComponent::service_disable() {
ESP_LOGI(TAG, "[%s] Service disable called", this->name_.c_str());
this->disable_loop();
}
} // namespace loop_test_component
} // namespace esphome

View File

@@ -0,0 +1,58 @@
#pragma once
#include "esphome/core/component.h"
#include "esphome/core/log.h"
#include "esphome/core/application.h"
#include "esphome/core/automation.h"
namespace esphome {
namespace loop_test_component {
static const char *const TAG = "loop_test_component";
class LoopTestComponent : public Component {
public:
void set_name(const std::string &name) { this->name_ = name; }
void set_disable_after(int count) { this->disable_after_ = count; }
void set_test_redundant_operations(bool test) { this->test_redundant_operations_ = test; }
void setup() override;
void loop() override;
// Service methods for external control
void service_enable();
void service_disable();
int get_loop_count() const { return this->loop_count_; }
float get_setup_priority() const override { return setup_priority::DATA; }
protected:
std::string name_;
int loop_count_{0};
int disable_after_{0};
bool test_redundant_operations_{false};
};
template<typename... Ts> class EnableAction : public Action<Ts...> {
public:
EnableAction(LoopTestComponent *parent) : parent_(parent) {}
void play(Ts... x) override { this->parent_->service_enable(); }
protected:
LoopTestComponent *parent_;
};
template<typename... Ts> class DisableAction : public Action<Ts...> {
public:
DisableAction(LoopTestComponent *parent) : parent_(parent) {}
void play(Ts... x) override { this->parent_->service_disable(); }
protected:
LoopTestComponent *parent_;
};
} // namespace loop_test_component
} // namespace esphome

View File

@@ -0,0 +1,80 @@
#include "loop_test_isr_component.h"
#include "esphome/core/hal.h"
#include "esphome/core/application.h"
namespace esphome {
namespace loop_test_component {
static const char *const ISR_TAG = "loop_test_isr_component";
void LoopTestISRComponent::setup() {
ESP_LOGI(ISR_TAG, "[%s] ISR component setup called", this->name_.c_str());
this->last_check_time_ = millis();
}
void LoopTestISRComponent::loop() {
this->loop_count_++;
ESP_LOGI(ISR_TAG, "[%s] ISR component loop count: %d", this->name_.c_str(), this->loop_count_);
// Disable after 5 loops
if (this->loop_count_ == 5) {
ESP_LOGI(ISR_TAG, "[%s] Disabling after 5 loops", this->name_.c_str());
this->disable_loop();
this->last_disable_time_ = millis();
// Simulate ISR after disabling
this->set_timeout("simulate_isr_1", 50, [this]() {
ESP_LOGI(ISR_TAG, "[%s] Simulating ISR enable", this->name_.c_str());
this->simulate_isr_enable();
// Test reentrancy - call enable_loop() directly after ISR
// This simulates another thread calling enable_loop while processing ISR enables
this->set_timeout("test_reentrant", 10, [this]() {
ESP_LOGI(ISR_TAG, "[%s] Testing reentrancy - calling enable_loop() directly", this->name_.c_str());
this->enable_loop();
});
});
}
// If we get here after being disabled, it means ISR re-enabled us
if (this->loop_count_ > 5 && this->loop_count_ < 10) {
ESP_LOGI(ISR_TAG, "[%s] Running after ISR re-enable! ISR was called %d times", this->name_.c_str(),
this->isr_call_count_);
}
// Disable again after 10 loops to test multiple ISR enables
if (this->loop_count_ == 10) {
ESP_LOGI(ISR_TAG, "[%s] Disabling again after 10 loops", this->name_.c_str());
this->disable_loop();
this->last_disable_time_ = millis();
// Test pure ISR enable without any main loop enable
this->set_timeout("simulate_isr_2", 50, [this]() {
ESP_LOGI(ISR_TAG, "[%s] Testing pure ISR enable (no main loop enable)", this->name_.c_str());
this->simulate_isr_enable();
// DO NOT call enable_loop() - test that ISR alone works
});
}
// Log when we're running after second ISR enable
if (this->loop_count_ > 10) {
ESP_LOGI(ISR_TAG, "[%s] Running after pure ISR re-enable! ISR was called %d times total", this->name_.c_str(),
this->isr_call_count_);
}
}
void IRAM_ATTR LoopTestISRComponent::simulate_isr_enable() {
// This simulates what would happen in a real ISR
// In a real scenario, this would be called from an actual interrupt handler
this->isr_call_count_++;
// Call enable_loop_soon_any_context multiple times to test that it's safe
this->enable_loop_soon_any_context();
this->enable_loop_soon_any_context(); // Test multiple calls
this->enable_loop_soon_any_context(); // Should be idempotent
// Note: In a real ISR, we cannot use ESP_LOG* macros as they're not ISR-safe
// For testing, we'll track the call count and log it from the main loop
}
} // namespace loop_test_component
} // namespace esphome

View File

@@ -0,0 +1,32 @@
#pragma once
#include "esphome/core/component.h"
#include "esphome/core/log.h"
#include "esphome/core/hal.h"
namespace esphome {
namespace loop_test_component {
class LoopTestISRComponent : public Component {
public:
void set_name(const std::string &name) { this->name_ = name; }
void setup() override;
void loop() override;
// Simulates an ISR calling enable_loop_soon_any_context
void simulate_isr_enable();
float get_setup_priority() const override { return setup_priority::DATA; }
protected:
std::string name_;
int loop_count_{0};
uint32_t last_disable_time_{0};
uint32_t last_check_time_{0};
bool isr_enable_pending_{false};
int isr_call_count_{0};
};
} // namespace loop_test_component
} // namespace esphome

View File

@@ -0,0 +1,55 @@
esphome:
name: host-batch-delay-test
host:
api:
batch_delay: 0ms
logger:
# Add multiple sensors to test batching
sensor:
- platform: template
name: "Test Sensor 1"
id: test_sensor1
lambda: |-
return 1.0;
update_interval: 0.1s
- platform: template
name: "Test Sensor 2"
id: test_sensor2
lambda: |-
return 2.0;
update_interval: 0.1s
- platform: template
name: "Test Sensor 3"
id: test_sensor3
lambda: |-
return 3.0;
update_interval: 0.1s
binary_sensor:
- platform: template
name: "Test Binary Sensor 1"
id: test_binary_sensor1
lambda: |-
return millis() % 1000 < 500;
- platform: template
name: "Test Binary Sensor 2"
id: test_binary_sensor2
lambda: |-
return millis() % 2000 < 1000;
switch:
- platform: template
name: "Test Switch 1"
id: test_switch1
turn_on_action:
- logger.log: "Switch 1 turned on"
turn_off_action:
- logger.log: "Switch 1 turned off"
- platform: template
name: "Test Switch 2"
id: test_switch2
turn_on_action:
- logger.log: "Switch 2 turned on"
turn_off_action:
- logger.log: "Switch 2 turned off"

View File

@@ -0,0 +1,58 @@
esphome:
name: host-empty-string-test
host:
api:
batch_delay: 50ms
select:
- platform: template
name: "Select Empty First"
id: select_empty_first
optimistic: true
options:
- "" # Empty string at the beginning
- "Option A"
- "Option B"
- "Option C"
initial_option: "Option A"
- platform: template
name: "Select Empty Middle"
id: select_empty_middle
optimistic: true
options:
- "Option 1"
- "Option 2"
- "" # Empty string in the middle
- "Option 3"
- "Option 4"
initial_option: "Option 1"
- platform: template
name: "Select Empty Last"
id: select_empty_last
optimistic: true
options:
- "Choice X"
- "Choice Y"
- "Choice Z"
- "" # Empty string at the end
initial_option: "Choice X"
# Add a sensor to ensure we have other entities in the list
sensor:
- platform: template
name: "Test Sensor"
id: test_sensor
lambda: |-
return 42.0;
update_interval: 60s
binary_sensor:
- platform: template
name: "Test Binary Sensor"
id: test_binary_sensor
lambda: |-
return true;

View File

@@ -0,0 +1,108 @@
esphome:
name: host-test
host:
api:
logger:
# Test various entity types with different flag combinations
sensor:
- platform: template
name: "Test Normal Sensor"
id: normal_sensor
update_interval: 1s
lambda: |-
return 42.0;
- platform: template
name: "Test Internal Sensor"
id: internal_sensor
internal: true
update_interval: 1s
lambda: |-
return 43.0;
- platform: template
name: "Test Disabled Sensor"
id: disabled_sensor
disabled_by_default: true
update_interval: 1s
lambda: |-
return 44.0;
- platform: template
name: "Test Mixed Flags Sensor"
id: mixed_flags_sensor
internal: true
entity_category: diagnostic
update_interval: 1s
lambda: |-
return 45.0;
- platform: template
name: "Test Diagnostic Sensor"
id: diagnostic_sensor
entity_category: diagnostic
update_interval: 1s
lambda: |-
return 46.0;
- platform: template
name: "Test All Flags Sensor"
id: all_flags_sensor
internal: true
disabled_by_default: true
entity_category: diagnostic
update_interval: 1s
lambda: |-
return 47.0;
# Also test other entity types to ensure bit-packing works across all
binary_sensor:
- platform: template
name: "Test Binary Sensor"
entity_category: config
lambda: |-
return true;
text_sensor:
- platform: template
name: "Test Text Sensor"
disabled_by_default: true
lambda: |-
return {"Hello"};
number:
- platform: template
name: "Test Number"
initial_value: 50
min_value: 0
max_value: 100
step: 1
optimistic: true
entity_category: diagnostic
select:
- platform: template
name: "Test Select"
options:
- "Option 1"
- "Option 2"
initial_option: "Option 1"
optimistic: true
internal: true
switch:
- platform: template
name: "Test Switch"
optimistic: true
disabled_by_default: true
entity_category: config
button:
- platform: template
name: "Test Button"
on_press:
- logger.log: "Button pressed"
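
The bit-packing mentioned in the comment above concerns the per-entity flags this test exercises (internal, disabled_by_default, entity_category), which the core stores compactly rather than as separate booleans. Purely as an illustration, assuming nothing about ESPHome's actual field layout, packing those flags into one byte could look like:

# Illustrative flag packing; bit positions and category encoding are assumed, not ESPHome's.
FLAG_INTERNAL = 1 << 0
FLAG_DISABLED_BY_DEFAULT = 1 << 1
CATEGORY_SHIFT = 2  # two bits: 0 = none, 1 = config, 2 = diagnostic


def pack_entity_flags(internal: bool, disabled_by_default: bool, category: int) -> int:
    value = 0
    if internal:
        value |= FLAG_INTERNAL
    if disabled_by_default:
        value |= FLAG_DISABLED_BY_DEFAULT
    value |= (category & 0b11) << CATEGORY_SHIFT
    return value


# The "Test All Flags Sensor" above sets every flag plus a diagnostic category:
assert pack_entity_flags(True, True, category=2) == 0b1011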

View File

@@ -0,0 +1,34 @@
esphome:
name: host-test
host:
api:
logger:
# Test fan with preset modes and speed settings
fan:
- platform: template
name: "Test Fan with Presets"
id: test_fan_presets
speed_count: 5
preset_modes:
- "Eco"
- "Sleep"
- "Turbo"
has_oscillating: true
has_direction: true
- platform: template
name: "Test Fan Simple"
id: test_fan_simple
speed_count: 3
has_oscillating: false
has_direction: false
- platform: template
name: "Test Fan No Speed"
id: test_fan_no_speed
has_oscillating: true
has_direction: false

View File

@@ -0,0 +1,322 @@
esphome:
name: host-mode-many-entities
friendly_name: "Host Mode Many Entities Test"
logger:
host:
api:
sensor:
# 50 test sensors with predictable values for batching test
- platform: template
name: "Test Sensor 1"
lambda: return 1.0;
update_interval: 0.1s
- platform: template
name: "Test Sensor 2"
lambda: return 2.0;
update_interval: 0.1s
- platform: template
name: "Test Sensor 3"
lambda: return 3.0;
update_interval: 0.1s
- platform: template
name: "Test Sensor 4"
lambda: return 4.0;
update_interval: 0.1s
- platform: template
name: "Test Sensor 5"
lambda: return 5.0;
update_interval: 0.1s
- platform: template
name: "Test Sensor 6"
lambda: return 6.0;
update_interval: 0.1s
- platform: template
name: "Test Sensor 7"
lambda: return 7.0;
update_interval: 0.1s
- platform: template
name: "Test Sensor 8"
lambda: return 8.0;
update_interval: 0.1s
- platform: template
name: "Test Sensor 9"
lambda: return 9.0;
update_interval: 0.1s
- platform: template
name: "Test Sensor 10"
lambda: return 10.0;
update_interval: 0.1s
- platform: template
name: "Test Sensor 11"
lambda: return 11.0;
update_interval: 0.1s
- platform: template
name: "Test Sensor 12"
lambda: return 12.0;
update_interval: 0.1s
- platform: template
name: "Test Sensor 13"
lambda: return 13.0;
update_interval: 0.1s
- platform: template
name: "Test Sensor 14"
lambda: return 14.0;
update_interval: 0.1s
- platform: template
name: "Test Sensor 15"
lambda: return 15.0;
update_interval: 0.1s
- platform: template
name: "Test Sensor 16"
lambda: return 16.0;
update_interval: 0.1s
- platform: template
name: "Test Sensor 17"
lambda: return 17.0;
update_interval: 0.1s
- platform: template
name: "Test Sensor 18"
lambda: return 18.0;
update_interval: 0.1s
- platform: template
name: "Test Sensor 19"
lambda: return 19.0;
update_interval: 0.1s
- platform: template
name: "Test Sensor 20"
lambda: return 20.0;
update_interval: 0.1s
- platform: template
name: "Test Sensor 21"
lambda: return 21.0;
update_interval: 0.1s
- platform: template
name: "Test Sensor 22"
lambda: return 22.0;
update_interval: 0.1s
- platform: template
name: "Test Sensor 23"
lambda: return 23.0;
update_interval: 0.1s
- platform: template
name: "Test Sensor 24"
lambda: return 24.0;
update_interval: 0.1s
- platform: template
name: "Test Sensor 25"
lambda: return 25.0;
update_interval: 0.1s
- platform: template
name: "Test Sensor 26"
lambda: return 26.0;
update_interval: 0.1s
- platform: template
name: "Test Sensor 27"
lambda: return 27.0;
update_interval: 0.1s
- platform: template
name: "Test Sensor 28"
lambda: return 28.0;
update_interval: 0.1s
- platform: template
name: "Test Sensor 29"
lambda: return 29.0;
update_interval: 0.1s
- platform: template
name: "Test Sensor 30"
lambda: return 30.0;
update_interval: 0.1s
- platform: template
name: "Test Sensor 31"
lambda: return 31.0;
update_interval: 0.1s
- platform: template
name: "Test Sensor 32"
lambda: return 32.0;
update_interval: 0.1s
- platform: template
name: "Test Sensor 33"
lambda: return 33.0;
update_interval: 0.1s
- platform: template
name: "Test Sensor 34"
lambda: return 34.0;
update_interval: 0.1s
- platform: template
name: "Test Sensor 35"
lambda: return 35.0;
update_interval: 0.1s
- platform: template
name: "Test Sensor 36"
lambda: return 36.0;
update_interval: 0.1s
- platform: template
name: "Test Sensor 37"
lambda: return 37.0;
update_interval: 0.1s
- platform: template
name: "Test Sensor 38"
lambda: return 38.0;
update_interval: 0.1s
- platform: template
name: "Test Sensor 39"
lambda: return 39.0;
update_interval: 0.1s
- platform: template
name: "Test Sensor 40"
lambda: return 40.0;
update_interval: 0.1s
- platform: template
name: "Test Sensor 41"
lambda: return 41.0;
update_interval: 0.1s
- platform: template
name: "Test Sensor 42"
lambda: return 42.0;
update_interval: 0.1s
- platform: template
name: "Test Sensor 43"
lambda: return 43.0;
update_interval: 0.1s
- platform: template
name: "Test Sensor 44"
lambda: return 44.0;
update_interval: 0.1s
- platform: template
name: "Test Sensor 45"
lambda: return 45.0;
update_interval: 0.1s
- platform: template
name: "Test Sensor 46"
lambda: return 46.0;
update_interval: 0.1s
- platform: template
name: "Test Sensor 47"
lambda: return 47.0;
update_interval: 0.1s
- platform: template
name: "Test Sensor 48"
lambda: return 48.0;
update_interval: 0.1s
- platform: template
name: "Test Sensor 49"
lambda: return 49.0;
update_interval: 0.1s
- platform: template
name: "Test Sensor 50"
lambda: return 50.0;
update_interval: 0.1s
# Mixed entity types for comprehensive batching test
binary_sensor:
- platform: template
name: "Test Binary Sensor 1"
lambda: return millis() % 1000 < 500;
- platform: template
name: "Test Binary Sensor 2"
lambda: return millis() % 2000 < 1000;
switch:
- platform: template
name: "Test Switch 1"
lambda: return true;
turn_on_action:
- logger.log: "Switch 1 ON"
turn_off_action:
- logger.log: "Switch 1 OFF"
- platform: template
name: "Test Switch 2"
lambda: return false;
turn_on_action:
- logger.log: "Switch 2 ON"
turn_off_action:
- logger.log: "Switch 2 OFF"
text_sensor:
- platform: template
name: "Test Text Sensor 1"
lambda: return std::string("Test Value 1");
- platform: template
name: "Test Text Sensor 2"
lambda: return std::string("Test Value 2");
- platform: version
name: "ESPHome Version"
number:
- platform: template
name: "Test Number"
min_value: 0
max_value: 100
step: 1
lambda: return 50.0;
set_action:
- logger.log: "Number set"
select:
- platform: template
name: "Test Select"
options:
- "Option 1"
- "Option 2"
initial_option: "Option 1"
optimistic: true
set_action:
- logger.log: "Select changed"
text:
- platform: template
name: "Test Text"
mode: text
initial_value: "Hello"
set_action:
- logger.log: "Text changed"
valve:
- platform: template
name: "Test Valve"
open_action:
- logger.log: "Valve opening"
close_action:
- logger.log: "Valve closing"
stop_action:
- logger.log: "Valve stopping"
alarm_control_panel:
- platform: template
name: "Test Alarm"
codes:
- "1234"
arming_away_time: 0s
arming_home_time: 0s
pending_time: 0s
trigger_time: 300s
restore_mode: ALWAYS_DISARMED
on_disarmed:
- logger.log: "Alarm disarmed"
on_arming:
- logger.log: "Alarm arming"
on_armed_away:
- logger.log: "Alarm armed away"
on_armed_home:
- logger.log: "Alarm armed home"
on_pending:
- logger.log: "Alarm pending"
on_triggered:
- logger.log: "Alarm triggered"
event:
- platform: template
name: "Test Event"
event_types:
- first_event
- second_event
button:
- platform: template
name: "Test Button"
on_press:
- logger.log: "Button pressed"

View File

@@ -0,0 +1,136 @@
esphome:
name: host-mode-many-entities-multi
friendly_name: "Host Mode Many Entities Multiple Connections Test"
logger:
host:
api:
sensor:
# 20 test sensors for faster testing with multiple connections
- platform: template
name: "Test Sensor 1"
lambda: return 1.0;
update_interval: 0.1s
- platform: template
name: "Test Sensor 2"
lambda: return 2.0;
update_interval: 0.1s
- platform: template
name: "Test Sensor 3"
lambda: return 3.0;
update_interval: 0.1s
- platform: template
name: "Test Sensor 4"
lambda: return 4.0;
update_interval: 0.1s
- platform: template
name: "Test Sensor 5"
lambda: return 5.0;
update_interval: 0.1s
- platform: template
name: "Test Sensor 6"
lambda: return 6.0;
update_interval: 0.1s
- platform: template
name: "Test Sensor 7"
lambda: return 7.0;
update_interval: 0.1s
- platform: template
name: "Test Sensor 8"
lambda: return 8.0;
update_interval: 0.1s
- platform: template
name: "Test Sensor 9"
lambda: return 9.0;
update_interval: 0.1s
- platform: template
name: "Test Sensor 10"
lambda: return 10.0;
update_interval: 0.1s
- platform: template
name: "Test Sensor 11"
lambda: return 11.0;
update_interval: 0.1s
- platform: template
name: "Test Sensor 12"
lambda: return 12.0;
update_interval: 0.1s
- platform: template
name: "Test Sensor 13"
lambda: return 13.0;
update_interval: 0.1s
- platform: template
name: "Test Sensor 14"
lambda: return 14.0;
update_interval: 0.1s
- platform: template
name: "Test Sensor 15"
lambda: return 15.0;
update_interval: 0.1s
- platform: template
name: "Test Sensor 16"
lambda: return 16.0;
update_interval: 0.1s
- platform: template
name: "Test Sensor 17"
lambda: return 17.0;
update_interval: 0.1s
- platform: template
name: "Test Sensor 18"
lambda: return 18.0;
update_interval: 0.1s
- platform: template
name: "Test Sensor 19"
lambda: return 19.0;
update_interval: 0.1s
- platform: template
name: "Test Sensor 20"
lambda: return 20.0;
update_interval: 0.1s
# Mixed entity types for comprehensive batching test
binary_sensor:
- platform: template
name: "Test Binary Sensor 1"
lambda: return millis() % 1000 < 500;
- platform: template
name: "Test Binary Sensor 2"
lambda: return millis() % 2000 < 1000;
text_sensor:
- platform: template
name: "Test Text Sensor 1"
lambda: return std::string("Test Value 1");
- platform: template
name: "Test Text Sensor 2"
lambda: return std::string("Test Value 2");
- platform: version
name: "ESPHome Version"
switch:
- platform: template
name: "Test Switch 1"
lambda: return true;
turn_on_action:
- logger.log: "Switch 1 ON"
turn_off_action:
- logger.log: "Switch 1 OFF"
button:
- platform: template
name: "Test Button"
on_press:
- logger.log: "Button pressed"
number:
- platform: template
name: "Test Number"
min_value: 0
max_value: 100
step: 1
lambda: return 50.0;
set_action:
- logger.log: "Number set"

View File

@@ -5,3 +5,46 @@ api:
encryption:
key: N4Yle5YirwZhPiHHsdZLdOA73ndj/84veVaLhTvxCuU=
logger:
# Test sensors to verify batching works with noise encryption
sensor:
- platform: template
name: "Noise Test Sensor 1"
lambda: return 1.0;
update_interval: 2s
- platform: template
name: "Noise Test Sensor 2"
lambda: return 2.0;
update_interval: 2s
- platform: template
name: "Noise Test Sensor 3"
lambda: return 3.0;
update_interval: 2s
- platform: template
name: "Noise Test Sensor 4"
lambda: return 4.0;
update_interval: 2s
- platform: template
name: "Noise Test Sensor 5"
lambda: return 5.0;
update_interval: 2s
- platform: template
name: "Noise Test Sensor 6"
lambda: return 6.0;
update_interval: 2s
- platform: template
name: "Noise Test Sensor 7"
lambda: return 7.0;
update_interval: 2s
- platform: template
name: "Noise Test Sensor 8"
lambda: return 8.0;
update_interval: 2s
- platform: template
name: "Noise Test Sensor 9"
lambda: return 9.0;
update_interval: 2s
- platform: template
name: "Noise Test Sensor 10"
lambda: return 10.0;
update_interval: 2s

View File

@@ -0,0 +1,137 @@
esphome:
name: large-message-test
host:
api:
logger:
# Create a select entity with many options to exceed 1390 bytes
select:
- platform: template
name: "Large Select"
id: large_select
optimistic: true
options:
- "Option 000 - This is a very long option name to make the message larger than the typical batch size of 1390 bytes"
- "Option 001 - This is a very long option name to make the message larger than the typical batch size of 1390 bytes"
- "Option 002 - This is a very long option name to make the message larger than the typical batch size of 1390 bytes"
- "Option 003 - This is a very long option name to make the message larger than the typical batch size of 1390 bytes"
- "Option 004 - This is a very long option name to make the message larger than the typical batch size of 1390 bytes"
- "Option 005 - This is a very long option name to make the message larger than the typical batch size of 1390 bytes"
- "Option 006 - This is a very long option name to make the message larger than the typical batch size of 1390 bytes"
- "Option 007 - This is a very long option name to make the message larger than the typical batch size of 1390 bytes"
- "Option 008 - This is a very long option name to make the message larger than the typical batch size of 1390 bytes"
- "Option 009 - This is a very long option name to make the message larger than the typical batch size of 1390 bytes"
- "Option 010 - This is a very long option name to make the message larger than the typical batch size of 1390 bytes"
- "Option 011 - This is a very long option name to make the message larger than the typical batch size of 1390 bytes"
- "Option 012 - This is a very long option name to make the message larger than the typical batch size of 1390 bytes"
- "Option 013 - This is a very long option name to make the message larger than the typical batch size of 1390 bytes"
- "Option 014 - This is a very long option name to make the message larger than the typical batch size of 1390 bytes"
- "Option 015 - This is a very long option name to make the message larger than the typical batch size of 1390 bytes"
- "Option 016 - This is a very long option name to make the message larger than the typical batch size of 1390 bytes"
- "Option 017 - This is a very long option name to make the message larger than the typical batch size of 1390 bytes"
- "Option 018 - This is a very long option name to make the message larger than the typical batch size of 1390 bytes"
- "Option 019 - This is a very long option name to make the message larger than the typical batch size of 1390 bytes"
- "Option 020 - This is a very long option name to make the message larger than the typical batch size of 1390 bytes"
- "Option 021 - This is a very long option name to make the message larger than the typical batch size of 1390 bytes"
- "Option 022 - This is a very long option name to make the message larger than the typical batch size of 1390 bytes"
- "Option 023 - This is a very long option name to make the message larger than the typical batch size of 1390 bytes"
- "Option 024 - This is a very long option name to make the message larger than the typical batch size of 1390 bytes"
- "Option 025 - This is a very long option name to make the message larger than the typical batch size of 1390 bytes"
- "Option 026 - This is a very long option name to make the message larger than the typical batch size of 1390 bytes"
- "Option 027 - This is a very long option name to make the message larger than the typical batch size of 1390 bytes"
- "Option 028 - This is a very long option name to make the message larger than the typical batch size of 1390 bytes"
- "Option 029 - This is a very long option name to make the message larger than the typical batch size of 1390 bytes"
- "Option 030 - This is a very long option name to make the message larger than the typical batch size of 1390 bytes"
- "Option 031 - This is a very long option name to make the message larger than the typical batch size of 1390 bytes"
- "Option 032 - This is a very long option name to make the message larger than the typical batch size of 1390 bytes"
- "Option 033 - This is a very long option name to make the message larger than the typical batch size of 1390 bytes"
- "Option 034 - This is a very long option name to make the message larger than the typical batch size of 1390 bytes"
- "Option 035 - This is a very long option name to make the message larger than the typical batch size of 1390 bytes"
- "Option 036 - This is a very long option name to make the message larger than the typical batch size of 1390 bytes"
- "Option 037 - This is a very long option name to make the message larger than the typical batch size of 1390 bytes"
- "Option 038 - This is a very long option name to make the message larger than the typical batch size of 1390 bytes"
- "Option 039 - This is a very long option name to make the message larger than the typical batch size of 1390 bytes"
- "Option 040 - This is a very long option name to make the message larger than the typical batch size of 1390 bytes"
- "Option 041 - This is a very long option name to make the message larger than the typical batch size of 1390 bytes"
- "Option 042 - This is a very long option name to make the message larger than the typical batch size of 1390 bytes"
- "Option 043 - This is a very long option name to make the message larger than the typical batch size of 1390 bytes"
- "Option 044 - This is a very long option name to make the message larger than the typical batch size of 1390 bytes"
- "Option 045 - This is a very long option name to make the message larger than the typical batch size of 1390 bytes"
- "Option 046 - This is a very long option name to make the message larger than the typical batch size of 1390 bytes"
- "Option 047 - This is a very long option name to make the message larger than the typical batch size of 1390 bytes"
- "Option 048 - This is a very long option name to make the message larger than the typical batch size of 1390 bytes"
- "Option 049 - This is a very long option name to make the message larger than the typical batch size of 1390 bytes"
- "Option 050 - This is a very long option name to make the message larger than the typical batch size of 1390 bytes"
- "Option 051 - This is a very long option name to make the message larger than the typical batch size of 1390 bytes"
- "Option 052 - This is a very long option name to make the message larger than the typical batch size of 1390 bytes"
- "Option 053 - This is a very long option name to make the message larger than the typical batch size of 1390 bytes"
- "Option 054 - This is a very long option name to make the message larger than the typical batch size of 1390 bytes"
- "Option 055 - This is a very long option name to make the message larger than the typical batch size of 1390 bytes"
- "Option 056 - This is a very long option name to make the message larger than the typical batch size of 1390 bytes"
- "Option 057 - This is a very long option name to make the message larger than the typical batch size of 1390 bytes"
- "Option 058 - This is a very long option name to make the message larger than the typical batch size of 1390 bytes"
- "Option 059 - This is a very long option name to make the message larger than the typical batch size of 1390 bytes"
- "Option 060 - This is a very long option name to make the message larger than the typical batch size of 1390 bytes"
- "Option 061 - This is a very long option name to make the message larger than the typical batch size of 1390 bytes"
- "Option 062 - This is a very long option name to make the message larger than the typical batch size of 1390 bytes"
- "Option 063 - This is a very long option name to make the message larger than the typical batch size of 1390 bytes"
- "Option 064 - This is a very long option name to make the message larger than the typical batch size of 1390 bytes"
- "Option 065 - This is a very long option name to make the message larger than the typical batch size of 1390 bytes"
- "Option 066 - This is a very long option name to make the message larger than the typical batch size of 1390 bytes"
- "Option 067 - This is a very long option name to make the message larger than the typical batch size of 1390 bytes"
- "Option 068 - This is a very long option name to make the message larger than the typical batch size of 1390 bytes"
- "Option 069 - This is a very long option name to make the message larger than the typical batch size of 1390 bytes"
- "Option 070 - This is a very long option name to make the message larger than the typical batch size of 1390 bytes"
- "Option 071 - This is a very long option name to make the message larger than the typical batch size of 1390 bytes"
- "Option 072 - This is a very long option name to make the message larger than the typical batch size of 1390 bytes"
- "Option 073 - This is a very long option name to make the message larger than the typical batch size of 1390 bytes"
- "Option 074 - This is a very long option name to make the message larger than the typical batch size of 1390 bytes"
- "Option 075 - This is a very long option name to make the message larger than the typical batch size of 1390 bytes"
- "Option 076 - This is a very long option name to make the message larger than the typical batch size of 1390 bytes"
- "Option 077 - This is a very long option name to make the message larger than the typical batch size of 1390 bytes"
- "Option 078 - This is a very long option name to make the message larger than the typical batch size of 1390 bytes"
- "Option 079 - This is a very long option name to make the message larger than the typical batch size of 1390 bytes"
- "Option 080 - This is a very long option name to make the message larger than the typical batch size of 1390 bytes"
- "Option 081 - This is a very long option name to make the message larger than the typical batch size of 1390 bytes"
- "Option 082 - This is a very long option name to make the message larger than the typical batch size of 1390 bytes"
- "Option 083 - This is a very long option name to make the message larger than the typical batch size of 1390 bytes"
- "Option 084 - This is a very long option name to make the message larger than the typical batch size of 1390 bytes"
- "Option 085 - This is a very long option name to make the message larger than the typical batch size of 1390 bytes"
- "Option 086 - This is a very long option name to make the message larger than the typical batch size of 1390 bytes"
- "Option 087 - This is a very long option name to make the message larger than the typical batch size of 1390 bytes"
- "Option 088 - This is a very long option name to make the message larger than the typical batch size of 1390 bytes"
- "Option 089 - This is a very long option name to make the message larger than the typical batch size of 1390 bytes"
- "Option 090 - This is a very long option name to make the message larger than the typical batch size of 1390 bytes"
- "Option 091 - This is a very long option name to make the message larger than the typical batch size of 1390 bytes"
- "Option 092 - This is a very long option name to make the message larger than the typical batch size of 1390 bytes"
- "Option 093 - This is a very long option name to make the message larger than the typical batch size of 1390 bytes"
- "Option 094 - This is a very long option name to make the message larger than the typical batch size of 1390 bytes"
- "Option 095 - This is a very long option name to make the message larger than the typical batch size of 1390 bytes"
- "Option 096 - This is a very long option name to make the message larger than the typical batch size of 1390 bytes"
- "Option 097 - This is a very long option name to make the message larger than the typical batch size of 1390 bytes"
- "Option 098 - This is a very long option name to make the message larger than the typical batch size of 1390 bytes"
- "Option 099 - This is a very long option name to make the message larger than the typical batch size of 1390 bytes"
initial_option: "Option 000 - This is a very long option name to make the message larger than the typical batch size of 1390 bytes"
# Add some other entities to test batching with the large select
sensor:
- platform: template
name: "Test Sensor"
id: test_sensor
lambda: |-
return 42.0;
update_interval: 1s
binary_sensor:
- platform: template
name: "Test Binary Sensor"
id: test_binary_sensor
lambda: |-
return true;
switch:
- platform: template
name: "Test Switch"
id: test_switch
optimistic: true

View File

@@ -0,0 +1,53 @@
esphome:
name: loop-test
host:
api:
logger:
level: DEBUG
external_components:
- source:
type: local
path: EXTERNAL_COMPONENT_PATH
loop_test_component:
components:
# Component that disables itself after 10 loops
- id: self_disable_10
name: "self_disable_10"
disable_after: 10
# Component that never disables itself (for re-enable test)
- id: normal_component
name: "normal_component"
disable_after: 0
# Component that tests enable when already enabled
- id: redundant_enable
name: "redundant_enable"
test_redundant_operations: true
disable_after: 0
# Component that tests disable when already disabled
- id: redundant_disable
name: "redundant_disable"
test_redundant_operations: true
disable_after: 10
# ISR test component that uses enable_loop_soon_any_context
isr_components:
- id: isr_test
name: "isr_test"
# Interval to re-enable the self_disable_10 component after some time
interval:
- interval: 0.5s
then:
- if:
condition:
lambda: 'return id(self_disable_10).get_loop_count() == 10;'
then:
- logger.log: "Re-enabling self_disable_10 via service"
- loop_test_component.enable:
id: self_disable_10

View File

@@ -0,0 +1,194 @@
"""Integration test for API batching with various message sizes."""
from __future__ import annotations
import asyncio
from aioesphomeapi import EntityState, NumberInfo, SelectInfo, TextInfo, TextSensorInfo
import pytest
from .types import APIClientConnectedFactory, RunCompiledFunction
@pytest.mark.asyncio
async def test_api_message_size_batching(
yaml_config: str,
run_compiled: RunCompiledFunction,
api_client_connected: APIClientConnectedFactory,
) -> None:
"""Test API can batch messages of various sizes correctly."""
# Write, compile and run the ESPHome device, then connect to API
loop = asyncio.get_running_loop()
async with run_compiled(yaml_config), api_client_connected() as client:
# Verify we can get device info
device_info = await client.device_info()
assert device_info is not None
assert device_info.name == "message-size-batching-test"
# List entities - this will batch various sized messages together
entity_info, services = await asyncio.wait_for(
client.list_entities_services(), timeout=5.0
)
# Count different entity types
selects = []
text_sensors = []
text_inputs = []
numbers = []
other_entities = []
for entity in entity_info:
if isinstance(entity, SelectInfo):
selects.append(entity)
elif isinstance(entity, TextSensorInfo):
text_sensors.append(entity)
elif isinstance(entity, TextInfo):
text_inputs.append(entity)
elif isinstance(entity, NumberInfo):
numbers.append(entity)
else:
other_entities.append(entity)
# Verify we have our test entities - exact counts
assert len(selects) == 3, (
f"Expected exactly 3 select entities, got {len(selects)}"
)
assert len(text_sensors) == 3, (
f"Expected exactly 3 text sensor entities, got {len(text_sensors)}"
)
assert len(text_inputs) == 1, (
f"Expected exactly 1 text input entity, got {len(text_inputs)}"
)
# Collect all select entity object_ids for error messages
select_ids = [s.object_id for s in selects]
# Find our specific test entities
small_select = None
medium_select = None
large_select = None
for select in selects:
if select.object_id == "small_select":
small_select = select
elif select.object_id == "medium_select":
medium_select = select
elif (
select.object_id
== "large_select_with_many_options_to_create_larger_payload"
):
large_select = select
assert small_select is not None, (
f"Could not find small_select entity. Found: {select_ids}"
)
assert medium_select is not None, (
f"Could not find medium_select entity. Found: {select_ids}"
)
assert large_select is not None, (
f"Could not find large_select entity. Found: {select_ids}"
)
# Verify the selects have the expected number of options
assert len(small_select.options) == 2, (
f"Expected 2 options for small_select, got {len(small_select.options)}"
)
assert len(medium_select.options) == 20, (
f"Expected 20 options for medium_select, got {len(medium_select.options)}"
)
assert len(large_select.options) == 50, (
f"Expected 50 options for large_select, got {len(large_select.options)}"
)
# Collect all text sensor object_ids for error messages
text_sensor_ids = [t.object_id for t in text_sensors]
# Verify text sensors with different value lengths
short_text_sensor = None
medium_text_sensor = None
long_text_sensor = None
for text_sensor in text_sensors:
if text_sensor.object_id == "short_text_sensor":
short_text_sensor = text_sensor
elif text_sensor.object_id == "medium_text_sensor":
medium_text_sensor = text_sensor
elif text_sensor.object_id == "long_text_sensor_with_very_long_value":
long_text_sensor = text_sensor
assert short_text_sensor is not None, (
f"Could not find short_text_sensor. Found: {text_sensor_ids}"
)
assert medium_text_sensor is not None, (
f"Could not find medium_text_sensor. Found: {text_sensor_ids}"
)
assert long_text_sensor is not None, (
f"Could not find long_text_sensor. Found: {text_sensor_ids}"
)
# Check text input which can have a long max_length
text_input = None
text_input_ids = [t.object_id for t in text_inputs]
for ti in text_inputs:
if ti.object_id == "test_text_input":
text_input = ti
break
assert text_input is not None, (
f"Could not find test_text_input. Found: {text_input_ids}"
)
assert text_input.max_length == 255, (
f"Expected max_length 255, got {text_input.max_length}"
)
# Verify total entity count - messages of various sizes were batched successfully
# We have: 3 selects + 3 text sensors + 1 text input + 1 number = 8 total
total_entities = len(entity_info)
assert total_entities == 8, f"Expected exactly 8 entities, got {total_entities}"
# Check we have the expected entity types
assert len(numbers) == 1, (
f"Expected exactly 1 number entity, got {len(numbers)}"
)
assert len(other_entities) == 0, (
f"Unexpected entity types found: {[type(e).__name__ for e in other_entities]}"
)
# Subscribe to state changes to verify batching works
# Collect keys from entity info to know what states to expect
expected_keys = {entity.key for entity in entity_info}
assert len(expected_keys) == 8, (
f"Expected 8 unique entity keys, got {len(expected_keys)}"
)
received_keys: set[int] = set()
states_future: asyncio.Future[None] = loop.create_future()
def on_state(state: EntityState) -> None:
"""Track when states are received."""
received_keys.add(state.key)
# Check if we've received states from all expected entities
if expected_keys.issubset(received_keys) and not states_future.done():
states_future.set_result(None)
client.subscribe_states(on_state)
# Wait for states with timeout
try:
await asyncio.wait_for(states_future, timeout=5.0)
except asyncio.TimeoutError:
missing_keys = expected_keys - received_keys
pytest.fail(
f"Did not receive states from all entities within 5 seconds. "
f"Missing keys: {missing_keys}, "
f"Received {len(received_keys)} of {len(expected_keys)} expected states"
)
# Verify we received states from all entities
assert expected_keys.issubset(received_keys)
# Check that various message sizes were handled correctly
# Small messages (4-byte header): type < 128, payload < 128
# Medium messages (5-byte header): type < 128, payload 128-16383 OR type 128+, payload < 128
# Large messages (6-byte header): type 128+, payload 128-16383
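# A rough sketch of where the 4/5/6-byte header sizes in the comments above come
# from, assuming the frame carries two fixed framing bytes followed by two
# protobuf-style varints (payload size, then message type). The fixed-byte count
# is an assumption chosen to reproduce those figures, not a statement of the
# exact ESPHome wire format.
def varint_len(value: int) -> int:
    """Bytes needed to encode a non-negative integer as a varint."""
    length = 1
    while value >= 0x80:
        value >>= 7
        length += 1
    return length


def estimated_header_len(msg_type: int, payload_len: int) -> int:
    """Estimate the frame header size under the assumptions above."""
    return 2 + varint_len(payload_len) + varint_len(msg_type)


assert estimated_header_len(10, 50) == 4  # small: type < 128, payload < 128
assert estimated_header_len(10, 1390) == 5  # medium: payload needs a 2-byte varint
assert estimated_header_len(200, 1390) == 6  # large: both values need 2-byte varints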

View File

@@ -0,0 +1,80 @@
"""Integration test for API batch_delay setting."""
from __future__ import annotations
import asyncio
import time
from aioesphomeapi import EntityState
import pytest
from .types import APIClientConnectedFactory, RunCompiledFunction
@pytest.mark.asyncio
async def test_host_mode_batch_delay(
yaml_config: str,
run_compiled: RunCompiledFunction,
api_client_connected: APIClientConnectedFactory,
) -> None:
"""Test API with batch_delay set to 0ms - messages should be sent immediately without batching."""
# Write, compile and run the ESPHome device, then connect to API
loop = asyncio.get_running_loop()
async with run_compiled(yaml_config), api_client_connected() as client:
# Verify we can get device info
device_info = await client.device_info()
assert device_info is not None
assert device_info.name == "host-batch-delay-test"
# Subscribe to state changes
states: dict[int, EntityState] = {}
state_timestamps: dict[int, float] = {}
entity_count_future: asyncio.Future[int] = loop.create_future()
def on_state(state: EntityState) -> None:
"""Track when states are received."""
states[state.key] = state
state_timestamps[state.key] = time.monotonic()
# When we have received all expected entities, resolve the future
if len(states) >= 7 and not entity_count_future.done():
entity_count_future.set_result(len(states))
client.subscribe_states(on_state)
# Wait for states from all entities with timeout
try:
entity_count = await asyncio.wait_for(entity_count_future, timeout=5.0)
except asyncio.TimeoutError:
pytest.fail(
f"Did not receive states from at least 7 entities within 5 seconds. "
f"Received {len(states)} states"
)
# Verify we received all states
assert entity_count >= 7, f"Expected at least 7 entities, got {entity_count}"
assert len(states) >= 7 # 3 sensors + 2 binary sensors + 2 switches
# When batch_delay is 0ms, states are sent immediately without batching
# This means each state arrives in its own packet, which may actually be slower
# than batching due to network overhead
if state_timestamps:
first_timestamp = min(state_timestamps.values())
last_timestamp = max(state_timestamps.values())
time_spread = last_timestamp - first_timestamp
# With batch_delay=0ms, states arrive individually which may take longer
# We just verify they all arrive within a reasonable time
assert time_spread < 1.0, f"States took {time_spread:.3f}s to arrive"
# Also test list_entities - with batch_delay=0ms each entity is sent separately
start_time = time.monotonic()
entity_info, services = await client.list_entities_services()
end_time = time.monotonic()
list_time = end_time - start_time
# Verify we got all expected entities
assert len(entity_info) >= 7 # 3 sensors + 2 binary sensors + 2 switches
# With batch_delay=0ms, listing sends each entity separately which may be slower
assert list_time < 1.0, f"list_entities took {list_time:.3f}s"

View File

@@ -0,0 +1,110 @@
"""Integration test for protobuf encoding of empty string options in select entities."""
from __future__ import annotations
import asyncio
from aioesphomeapi import EntityState, SelectInfo
import pytest
from .types import APIClientConnectedFactory, RunCompiledFunction
@pytest.mark.asyncio
async def test_host_mode_empty_string_options(
yaml_config: str,
run_compiled: RunCompiledFunction,
api_client_connected: APIClientConnectedFactory,
) -> None:
"""Test that select entities with empty string options are correctly encoded in protobuf messages.
This tests the fix for the bug where the force parameter was not passed in encode_string,
causing empty strings in repeated fields to be skipped during encoding but included in
size calculation, leading to protobuf decoding errors.
"""
# Write, compile and run the ESPHome device, then connect to API
loop = asyncio.get_running_loop()
async with run_compiled(yaml_config), api_client_connected() as client:
# Verify we can get device info
device_info = await client.device_info()
assert device_info is not None
assert device_info.name == "host-empty-string-test"
# Get list of entities - this will encode ListEntitiesSelectResponse messages
# with empty string options that would trigger the bug
entity_info, services = await client.list_entities_services()
# Find our select entities
select_entities = [e for e in entity_info if isinstance(e, SelectInfo)]
assert len(select_entities) == 3, (
f"Expected 3 select entities, got {len(select_entities)}"
)
# Verify each select entity by name and check their options
selects_by_name = {e.name: e for e in select_entities}
# Check "Select Empty First" - empty string at beginning
assert "Select Empty First" in selects_by_name
empty_first = selects_by_name["Select Empty First"]
assert len(empty_first.options) == 4
assert empty_first.options[0] == "" # Empty string at beginning
assert empty_first.options[1] == "Option A"
assert empty_first.options[2] == "Option B"
assert empty_first.options[3] == "Option C"
# Check "Select Empty Middle" - empty string in middle
assert "Select Empty Middle" in selects_by_name
empty_middle = selects_by_name["Select Empty Middle"]
assert len(empty_middle.options) == 5
assert empty_middle.options[0] == "Option 1"
assert empty_middle.options[1] == "Option 2"
assert empty_middle.options[2] == "" # Empty string in middle
assert empty_middle.options[3] == "Option 3"
assert empty_middle.options[4] == "Option 4"
# Check "Select Empty Last" - empty string at end
assert "Select Empty Last" in selects_by_name
empty_last = selects_by_name["Select Empty Last"]
assert len(empty_last.options) == 4
assert empty_last.options[0] == "Choice X"
assert empty_last.options[1] == "Choice Y"
assert empty_last.options[2] == "Choice Z"
assert empty_last.options[3] == "" # Empty string at end
# If we got here without protobuf decoding errors, the fix is working
# The bug would have caused "Invalid protobuf message" errors with trailing bytes
# Also verify we can interact with the select entities
# Subscribe to state changes
states: dict[int, EntityState] = {}
state_change_future: asyncio.Future[None] = loop.create_future()
def on_state(state: EntityState) -> None:
"""Track state changes."""
states[state.key] = state
# When we receive the state change for our select, resolve the future
if state.key == empty_first.key and not state_change_future.done():
state_change_future.set_result(None)
client.subscribe_states(on_state)
# Try setting a select to an empty string option
# This further tests that empty strings are handled correctly
client.select_command(empty_first.key, "")
# Wait for state update with timeout
try:
await asyncio.wait_for(state_change_future, timeout=5.0)
except asyncio.TimeoutError:
pytest.fail(
"Did not receive state update after setting select to empty string"
)
# Verify the state was set to empty string
assert empty_first.key in states
select_state = states[empty_first.key]
assert hasattr(select_state, "state")
assert select_state.state == ""
# The test passes if no protobuf decoding errors occurred
# With the bug, we would have gotten "Invalid protobuf message" errors
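# A minimal illustration of the encoding bug described in this test's docstring,
# using hypothetical helpers rather than the real ESPHome encoder API: when the
# size calculation counts every option but the encoder skips empty strings, the
# declared message length no longer matches the bytes actually written.
def sized_string_field(value: str) -> int:
    """Bytes a length-delimited string field occupies: tag + length + data."""
    return 2 + len(value.encode())


def encode_string_field(out: bytearray, value: str, force: bool) -> None:
    """Append a string field; the buggy path skips empty strings unless forced."""
    if not value and not force:
        return  # buggy: the field was counted in the size but is never written
    out += bytes([0x0A, len(value.encode())]) + value.encode()


options = ["", "Option A", "Option B"]
declared_size = sum(sized_string_field(o) for o in options)
buggy, fixed = bytearray(), bytearray()
for option in options:
    encode_string_field(buggy, option, force=False)
    encode_string_field(fixed, option, force=True)
assert len(fixed) == declared_size  # force=True keeps size and payload in sync
assert len(buggy) == declared_size - 2  # the skipped empty option leaves a gap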

View File

@@ -0,0 +1,93 @@
"""Integration test for entity bit-packed fields."""
from __future__ import annotations
import asyncio
from aioesphomeapi import EntityCategory, EntityState
import pytest
from .types import APIClientConnectedFactory, RunCompiledFunction
@pytest.mark.asyncio
async def test_host_mode_entity_fields(
yaml_config: str,
run_compiled: RunCompiledFunction,
api_client_connected: APIClientConnectedFactory,
) -> None:
"""Test entity bit-packed fields work correctly with all possible values."""
# Write, compile and run the ESPHome device, then connect to API
async with run_compiled(yaml_config), api_client_connected() as client:
# Get all entities
entities = await client.list_entities_services()
# Create a map of entity names to entity info
entity_map = {}
for entity in entities[0]:
if hasattr(entity, "name"):
entity_map[entity.name] = entity
# Test entities that should be visible via API (non-internal)
visible_test_cases = [
# (entity_name, expected_disabled_by_default, expected_entity_category)
("Test Normal Sensor", False, EntityCategory.NONE),
("Test Disabled Sensor", True, EntityCategory.NONE),
("Test Diagnostic Sensor", False, EntityCategory.DIAGNOSTIC),
("Test Switch", True, EntityCategory.CONFIG),
("Test Binary Sensor", False, EntityCategory.CONFIG),
("Test Number", False, EntityCategory.DIAGNOSTIC),
]
# Test entities that should NOT be visible via API (internal)
internal_entities = [
"Test Internal Sensor",
"Test Mixed Flags Sensor",
"Test All Flags Sensor",
"Test Select",
]
# Verify visible entities
for entity_name, expected_disabled, expected_category in visible_test_cases:
assert entity_name in entity_map, (
f"Entity '{entity_name}' not found - it should be visible via API"
)
entity = entity_map[entity_name]
# Check disabled_by_default flag
assert entity.disabled_by_default == expected_disabled, (
f"{entity_name}: disabled_by_default flag mismatch - "
f"expected {expected_disabled}, got {entity.disabled_by_default}"
)
# Check entity_category
assert entity.entity_category == expected_category, (
f"{entity_name}: entity_category mismatch - "
f"expected {expected_category}, got {entity.entity_category}"
)
# Verify internal entities are NOT visible
for entity_name in internal_entities:
assert entity_name not in entity_map, (
f"Entity '{entity_name}' found in API response - "
f"internal entities should not be exposed via API"
)
# Subscribe to states to verify has_state flag works
states: dict[int, EntityState] = {}
state_received = asyncio.Event()
def on_state(state: EntityState) -> None:
states[state.key] = state
state_received.set()
client.subscribe_states(on_state)
# Wait for at least one state
try:
await asyncio.wait_for(state_received.wait(), timeout=5.0)
except asyncio.TimeoutError:
pytest.fail("No states received within 5 seconds")
# Verify we received states (which means has_state flag is working)
assert len(states) > 0, "No states received - has_state flag may not be working"
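# The entities above exercise flags that the firmware stores bit-packed in a
# single small field (disabled_by_default, internal, entity category). The
# packing below is one possible layout chosen purely for illustration; it is
# not the actual ESPHome bit layout.
def pack_entity_flags(
    disabled_by_default: bool, internal: bool, entity_category: int
) -> int:
    """Pack two booleans and a 2-bit category value into one byte."""
    return (
        (1 if disabled_by_default else 0)
        | ((1 if internal else 0) << 1)
        | ((entity_category & 0b11) << 2)
    )


def unpack_entity_flags(packed: int) -> tuple[bool, bool, int]:
    """Reverse of pack_entity_flags for the illustrative layout above."""
    return bool(packed & 0b01), bool(packed & 0b10), (packed >> 2) & 0b11


assert unpack_entity_flags(pack_entity_flags(True, False, 2)) == (True, False, 2)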

View File

@@ -0,0 +1,152 @@
"""Integration test for fan preset mode behavior."""
from __future__ import annotations
import asyncio
from aioesphomeapi import FanInfo, FanState
import pytest
from .types import APIClientConnectedFactory, RunCompiledFunction
@pytest.mark.asyncio
async def test_host_mode_fan_preset(
yaml_config: str,
run_compiled: RunCompiledFunction,
api_client_connected: APIClientConnectedFactory,
) -> None:
"""Test fan preset mode behavior according to Home Assistant guidelines."""
# Write, compile and run the ESPHome device, then connect to API
async with run_compiled(yaml_config), api_client_connected() as client:
# Get all fan entities
entities = await client.list_entities_services()
fans: list[FanInfo] = []
for entity_list in entities:
for entity in entity_list:
if isinstance(entity, FanInfo):
fans.append(entity)
# Create a map of fan names to entity info
fan_map = {fan.name: fan for fan in fans}
# Verify we have our test fans
assert "Test Fan with Presets" in fan_map
assert "Test Fan Simple" in fan_map
assert "Test Fan No Speed" in fan_map
# Get fan with presets
fan_presets = fan_map["Test Fan with Presets"]
assert fan_presets.supports_speed is True
assert fan_presets.supported_speed_count == 5
assert fan_presets.supports_oscillation is True
assert fan_presets.supports_direction is True
assert set(fan_presets.supported_preset_modes) == {"Eco", "Sleep", "Turbo"}
# Subscribe to states
states: dict[int, FanState] = {}
state_event = asyncio.Event()
def on_state(state: FanState) -> None:
if isinstance(state, FanState):
states[state.key] = state
state_event.set()
client.subscribe_states(on_state)
# Test 1: Turn on fan without speed or preset - should set speed to 100%
state_event.clear()
client.fan_command(
key=fan_presets.key,
state=True,
)
await asyncio.wait_for(state_event.wait(), timeout=2.0)
fan_state = states[fan_presets.key]
assert fan_state.state is True
assert fan_state.speed_level == 5 # Should be max speed (100%)
assert fan_state.preset_mode == ""
# Turn off
state_event.clear()
client.fan_command(
key=fan_presets.key,
state=False,
)
await asyncio.wait_for(state_event.wait(), timeout=2.0)
# Test 2: Turn on fan with preset mode - should NOT set speed to 100%
state_event.clear()
client.fan_command(
key=fan_presets.key,
state=True,
preset_mode="Eco",
)
await asyncio.wait_for(state_event.wait(), timeout=2.0)
fan_state = states[fan_presets.key]
assert fan_state.state is True
assert fan_state.preset_mode == "Eco"
# Speed should be whatever the preset sets, not forced to 100%
# Test 3: Setting speed should clear preset mode
state_event.clear()
client.fan_command(
key=fan_presets.key,
speed_level=3,
)
await asyncio.wait_for(state_event.wait(), timeout=2.0)
fan_state = states[fan_presets.key]
assert fan_state.state is True
assert fan_state.speed_level == 3
assert fan_state.preset_mode == "" # Preset mode should be cleared
# Test 4: Setting preset mode should work when fan is already on
state_event.clear()
client.fan_command(
key=fan_presets.key,
preset_mode="Sleep",
)
await asyncio.wait_for(state_event.wait(), timeout=2.0)
fan_state = states[fan_presets.key]
assert fan_state.state is True
assert fan_state.preset_mode == "Sleep"
# Turn off
state_event.clear()
client.fan_command(
key=fan_presets.key,
state=False,
)
await asyncio.wait_for(state_event.wait(), timeout=2.0)
# Test 5: Turn on fan with specific speed
state_event.clear()
client.fan_command(
key=fan_presets.key,
state=True,
speed_level=2,
)
await asyncio.wait_for(state_event.wait(), timeout=2.0)
fan_state = states[fan_presets.key]
assert fan_state.state is True
assert fan_state.speed_level == 2
assert fan_state.preset_mode == ""
# Test 6: Test fan with no speed support
fan_no_speed = fan_map["Test Fan No Speed"]
assert fan_no_speed.supports_speed is False
state_event.clear()
client.fan_command(
key=fan_no_speed.key,
state=True,
)
await asyncio.wait_for(state_event.wait(), timeout=2.0)
fan_state = states[fan_no_speed.key]
assert fan_state.state is True
# No speed should be set for fans that don't support speed
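# The fan above reports speed as an integer level out of supported_speed_count
# (5 here), which is why turning it on without a speed shows up as
# speed_level == 5 (100%). The conversion below is a common percentage<->level
# mapping shown only as a sketch; it is not necessarily the exact rounding that
# ESPHome or Home Assistant apply.
import math


def percentage_to_speed_level(percentage: float, speed_count: int) -> int:
    """Map 0-100% onto 0..speed_count, where 0 means off."""
    if percentage <= 0:
        return 0
    return min(speed_count, math.ceil(percentage / 100 * speed_count))


def speed_level_to_percentage(level: int, speed_count: int) -> float:
    """Inverse mapping from a discrete speed level back to a percentage."""
    return 100 * level / speed_count


assert percentage_to_speed_level(100, 5) == 5
assert percentage_to_speed_level(40, 5) == 2
assert speed_level_to_percentage(3, 5) == 60.0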

View File

@@ -0,0 +1,57 @@
"""Integration test for many entities to test API batching."""
from __future__ import annotations
import asyncio
from aioesphomeapi import EntityState
import pytest
from .types import APIClientConnectedFactory, RunCompiledFunction
@pytest.mark.asyncio
async def test_host_mode_many_entities(
yaml_config: str,
run_compiled: RunCompiledFunction,
api_client_connected: APIClientConnectedFactory,
) -> None:
"""Test API batching with many entities of different types."""
# Write, compile and run the ESPHome device, then connect to API
loop = asyncio.get_running_loop()
async with run_compiled(yaml_config), api_client_connected() as client:
# Subscribe to state changes
states: dict[int, EntityState] = {}
entity_count_future: asyncio.Future[int] = loop.create_future()
def on_state(state: EntityState) -> None:
states[state.key] = state
# When we have received states from at least 50 entities, resolve the future
if len(states) >= 50 and not entity_count_future.done():
entity_count_future.set_result(len(states))
client.subscribe_states(on_state)
# Wait for states from at least 50 entities with timeout
try:
entity_count = await asyncio.wait_for(entity_count_future, timeout=10.0)
except asyncio.TimeoutError:
pytest.fail(
f"Did not receive states from at least 50 entities within 10 seconds. "
f"Received {len(states)} states: {list(states.keys())}"
)
# Verify we received states from at least 50 entities
assert entity_count >= 50, f"Expected at least 50 entities, got {entity_count}"
assert len(states) >= 50, f"Expected at least 50 states, got {len(states)}"
# Verify we have different entity types by checking some expected values
sensor_states = [
s
for s in states.values()
if hasattr(s, "state") and isinstance(s.state, float)
]
assert len(sensor_states) >= 50, (
f"Expected at least 50 sensor states, got {len(sensor_states)}"
)

View File

@@ -0,0 +1,71 @@
"""Integration test for shared buffer optimization with multiple API connections."""
from __future__ import annotations
import asyncio
from aioesphomeapi import EntityState
import pytest
from .types import APIClientConnectedFactory, RunCompiledFunction
@pytest.mark.asyncio
async def test_host_mode_many_entities_multiple_connections(
yaml_config: str,
run_compiled: RunCompiledFunction,
api_client_connected: APIClientConnectedFactory,
) -> None:
"""Test shared buffer optimization with multiple API connections."""
# Write, compile and run the ESPHome device
loop = asyncio.get_running_loop()
async with (
run_compiled(yaml_config),
api_client_connected() as client1,
api_client_connected() as client2,
):
# Subscribe both clients to state changes
states1: dict[int, EntityState] = {}
states2: dict[int, EntityState] = {}
client1_ready = loop.create_future()
client2_ready = loop.create_future()
def on_state1(state: EntityState) -> None:
states1[state.key] = state
if len(states1) >= 20 and not client1_ready.done():
client1_ready.set_result(len(states1))
def on_state2(state: EntityState) -> None:
states2[state.key] = state
if len(states2) >= 20 and not client2_ready.done():
client2_ready.set_result(len(states2))
client1.subscribe_states(on_state1)
client2.subscribe_states(on_state2)
# Wait for both clients to receive states
try:
count1, count2 = await asyncio.gather(
asyncio.wait_for(client1_ready, timeout=10.0),
asyncio.wait_for(client2_ready, timeout=10.0),
)
except asyncio.TimeoutError:
pytest.fail(
f"One or both clients did not receive enough states within 10 seconds. "
f"Client1: {len(states1)}, Client2: {len(states2)}"
)
# Verify both clients received states successfully
assert count1 >= 20, (
f"Client 1 should have received at least 20 states, got {count1}"
)
assert count2 >= 20, (
f"Client 2 should have received at least 20 states, got {count2}"
)
# Verify both clients received the same entity keys (same device state)
common_keys = set(states1.keys()) & set(states2.keys())
assert len(common_keys) >= 20, (
f"Expected at least 20 common entity keys, got {len(common_keys)}"
)

View File

@@ -0,0 +1,59 @@
"""Integration test for API handling of large messages exceeding batch size."""
from __future__ import annotations
from aioesphomeapi import SelectInfo
import pytest
from .types import APIClientConnectedFactory, RunCompiledFunction
@pytest.mark.asyncio
async def test_large_message_batching(
yaml_config: str,
run_compiled: RunCompiledFunction,
api_client_connected: APIClientConnectedFactory,
) -> None:
"""Test API can handle large messages (>1390 bytes) in batches."""
# Write, compile and run the ESPHome device, then connect to API
async with run_compiled(yaml_config), api_client_connected() as client:
# Verify we can get device info
device_info = await client.device_info()
assert device_info is not None
assert device_info.name == "large-message-test"
# List entities - this will include our select with many options
entity_info, services = await client.list_entities_services()
# Find our large select entity
large_select = None
for entity in entity_info:
if isinstance(entity, SelectInfo) and entity.object_id == "large_select":
large_select = entity
break
assert large_select is not None, "Could not find large_select entity"
# Verify the select has all its options
# We created 100 options with long names
assert len(large_select.options) == 100, (
f"Expected 100 options, got {len(large_select.options)}"
)
# Verify all options are present and correct
for i in range(100):
expected_option = f"Option {i:03d} - This is a very long option name to make the message larger than the typical batch size of 1390 bytes"
assert expected_option in large_select.options, (
f"Missing option: {expected_option}"
)
# Also verify we can still receive other entities in the same batch
# Count total entities - should have at least our select plus some sensors
entity_count = len(entity_info)
assert entity_count >= 4, f"Expected at least 4 entities, got {entity_count}"
# Verify we have different entity types (not just selects)
entity_types = {type(entity).__name__ for entity in entity_info}
assert len(entity_types) >= 2, (
f"Expected multiple entity types, got {entity_types}"
)
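# The 100 long option strings in this test's YAML follow exactly the format
# asserted above. A snippet like this could generate that part of the
# configuration; it is illustrative only and not part of the test suite.
def generate_large_select_options(count: int = 100) -> list[str]:
    """Return the YAML list items for the large select's options."""
    return [
        f'- "Option {i:03d} - This is a very long option name to make the '
        f'message larger than the typical batch size of 1390 bytes"'
        for i in range(count)
    ]


options_yaml = generate_large_select_options()
assert len(options_yaml) == 100
assert options_yaml[0].startswith('- "Option 000')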

View File

@@ -0,0 +1,207 @@
"""Integration test for loop disable/enable functionality."""
from __future__ import annotations
import asyncio
from pathlib import Path
import re
import pytest
from .types import APIClientConnectedFactory, RunCompiledFunction
@pytest.mark.asyncio
async def test_loop_disable_enable(
yaml_config: str,
run_compiled: RunCompiledFunction,
api_client_connected: APIClientConnectedFactory,
) -> None:
"""Test that components can disable and enable their loop() method."""
# Get the absolute path to the external components directory
external_components_path = str(
Path(__file__).parent / "fixtures" / "external_components"
)
# Replace the placeholder in the YAML config with the actual path
yaml_config = yaml_config.replace(
"EXTERNAL_COMPONENT_PATH", external_components_path
)
# Track log messages and events
log_messages: list[str] = []
# Event fired when self_disable_10 component disables itself after 10 loops
self_disable_10_disabled = asyncio.Event()
# Event fired when normal_component reaches 10 loops
normal_component_10_loops = asyncio.Event()
# Event fired when redundant_enable component tests enabling when already enabled
redundant_enable_tested = asyncio.Event()
# Event fired when redundant_disable component tests disabling when already disabled
redundant_disable_tested = asyncio.Event()
# Event fired when self_disable_10 component is re-enabled and runs again (count > 10)
self_disable_10_re_enabled = asyncio.Event()
# Events for ISR component testing
isr_component_disabled = asyncio.Event()
isr_component_re_enabled = asyncio.Event()
isr_component_pure_re_enabled = asyncio.Event()
# Track loop counts for components
self_disable_10_counts: list[int] = []
normal_component_counts: list[int] = []
isr_component_counts: list[int] = []
def on_log_line(line: str) -> None:
"""Process each log line from the process output."""
# Strip ANSI color codes
clean_line = re.sub(r"\x1b\[[0-9;]*m", "", line)
if (
"loop_test_component" not in clean_line
and "loop_test_isr_component" not in clean_line
):
return
log_messages.append(clean_line)
# Track specific events using the cleaned line
if "[self_disable_10]" in clean_line:
if "Loop count:" in clean_line:
# Extract loop count
try:
count = int(clean_line.split("Loop count: ")[1])
self_disable_10_counts.append(count)
# Check if component was re-enabled (count > 10)
if count > 10:
self_disable_10_re_enabled.set()
except (IndexError, ValueError):
pass
elif "Disabling self after 10 loops" in clean_line:
self_disable_10_disabled.set()
elif "[normal_component]" in clean_line and "Loop count:" in clean_line:
try:
count = int(clean_line.split("Loop count: ")[1])
normal_component_counts.append(count)
if count >= 10:
normal_component_10_loops.set()
except (IndexError, ValueError):
pass
elif (
"[redundant_enable]" in clean_line
and "Testing enable when already enabled" in clean_line
):
redundant_enable_tested.set()
elif (
"[redundant_disable]" in clean_line
and "Testing disable when will be disabled" in clean_line
):
redundant_disable_tested.set()
# ISR component events
elif "[isr_test]" in clean_line:
if "ISR component loop count:" in clean_line:
count = int(clean_line.split("ISR component loop count: ")[1])
isr_component_counts.append(count)
elif "Disabling after 5 loops" in clean_line:
isr_component_disabled.set()
elif "Running after ISR re-enable!" in clean_line:
isr_component_re_enabled.set()
elif "Running after pure ISR re-enable!" in clean_line:
isr_component_pure_re_enabled.set()
# Write, compile and run the ESPHome device with log callback
async with (
run_compiled(yaml_config, line_callback=on_log_line),
api_client_connected() as client,
):
# Verify we can connect and get device info
device_info = await client.device_info()
assert device_info is not None
assert device_info.name == "loop-test"
# Wait for self_disable_10 to disable itself
try:
await asyncio.wait_for(self_disable_10_disabled.wait(), timeout=10.0)
except asyncio.TimeoutError:
pytest.fail("self_disable_10 did not disable itself within 10 seconds")
# Verify it ran exactly 10 times before disabling itself
assert len([c for c in self_disable_10_counts if c <= 10]) == 10, (
f"Expected exactly 10 loops before disable, got {[c for c in self_disable_10_counts if c <= 10]}"
)
assert self_disable_10_counts[:10] == list(range(1, 11)), (
f"Expected first 10 counts to be 1-10, got {self_disable_10_counts[:10]}"
)
# Wait for normal_component to run at least 10 times
try:
await asyncio.wait_for(normal_component_10_loops.wait(), timeout=10.0)
except asyncio.TimeoutError:
pytest.fail(
f"normal_component did not reach 10 loops within timeout, got {len(normal_component_counts)}"
)
# Wait for redundant operation tests
try:
await asyncio.wait_for(redundant_enable_tested.wait(), timeout=10.0)
except asyncio.TimeoutError:
pytest.fail("redundant_enable did not test enabling when already enabled")
try:
await asyncio.wait_for(redundant_disable_tested.wait(), timeout=10.0)
except asyncio.TimeoutError:
pytest.fail(
"redundant_disable did not test disabling when will be disabled"
)
# Wait to see if self_disable_10 gets re-enabled
try:
await asyncio.wait_for(self_disable_10_re_enabled.wait(), timeout=5.0)
except asyncio.TimeoutError:
pytest.fail("self_disable_10 was not re-enabled within 5 seconds")
# Component was re-enabled - verify it ran more times
later_self_disable_counts = [c for c in self_disable_10_counts if c > 10]
assert later_self_disable_counts, (
"self_disable_10 was re-enabled but did not run additional times"
)
# Test ISR component functionality
# Wait for ISR component to disable itself after 5 loops
try:
await asyncio.wait_for(isr_component_disabled.wait(), timeout=3.0)
except asyncio.TimeoutError:
pytest.fail("ISR component did not disable itself within 3 seconds")
# Verify it ran exactly 5 times before disabling
first_run_counts = [c for c in isr_component_counts if c <= 5]
assert len(first_run_counts) == 5, (
f"Expected 5 loops before disable, got {first_run_counts}"
)
# Wait for component to be re-enabled by periodic ISR simulation and run again
try:
await asyncio.wait_for(isr_component_re_enabled.wait(), timeout=2.0)
except asyncio.TimeoutError:
pytest.fail("ISR component was not re-enabled after ISR call")
# Verify it's running again after ISR enable
count_after_isr = len(isr_component_counts)
assert count_after_isr > 5, (
f"Component didn't run after ISR enable: got {count_after_isr} counts total"
)
# Wait for pure ISR enable (no main loop enable) to work
try:
await asyncio.wait_for(isr_component_pure_re_enabled.wait(), timeout=2.0)
except asyncio.TimeoutError:
pytest.fail("ISR component was not re-enabled by pure ISR call")
# Verify it ran after pure ISR enable
final_count = len(isr_component_counts)
assert final_count > 10, (
f"Component didn't run after pure ISR enable: got {final_count} counts total"
)

View File

@@ -13,7 +13,19 @@ from aioesphomeapi import APIClient
ConfigWriter = Callable[[str, str | None], Awaitable[Path]]
CompileFunction = Callable[[Path], Awaitable[Path]]
RunFunction = Callable[[Path], Awaitable[asyncio.subprocess.Process]]
RunCompiledFunction = Callable[[str, str | None], AbstractAsyncContextManager[None]]
class RunCompiledFunction(Protocol):
"""Protocol for run_compiled function with optional line callback."""
def __call__( # noqa: E704
self,
yaml_content: str,
filename: str | None = None,
line_callback: Callable[[str], None] | None = None,
) -> AbstractAsyncContextManager[None]: ...
WaitFunction = Callable[[APIClient, float], Awaitable[bool]]
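# A minimal object satisfying the RunCompiledFunction protocol above, shown only
# to illustrate the call shape the tests rely on; the real fixture in the test
# suite does considerably more (write the config, compile it, spawn the process
# and feed its output to line_callback).
from collections.abc import AsyncGenerator, Callable
from contextlib import AbstractAsyncContextManager, asynccontextmanager


class FakeRunCompiled:
    def __call__(
        self,
        yaml_content: str,
        filename: str | None = None,
        line_callback: Callable[[str], None] | None = None,
    ) -> AbstractAsyncContextManager[None]:
        @asynccontextmanager
        async def _run() -> AsyncGenerator[None, None]:
            if line_callback is not None:
                line_callback("[fake] device started")
            yield

        return _run()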

View File

@@ -0,0 +1,18 @@
esphome:
name: componenttestesp32p4idf
friendly_name: $component_name
esp32:
board: esp32-p4-evboard
framework:
type: esp-idf
logger:
level: VERY_VERBOSE
packages:
component_under_test: !include
file: $component_test_file
vars:
component_test_file: $component_test_file