Mirror of https://github.com/esphome/esphome.git, synced 2025-11-19 00:05:43 +00:00

Compare commits: reduce_log...bump-2025. (24 commits)
| SHA1 |
|---|
| 2681a14d05 |
| f436f6ee2e |
| f18bc62690 |
| 6db73df649 |
| 93215f1737 |
| 70aa94b8a4 |
| e8998a79c7 |
| 3b25fdbc5f |
| 6c8577678c |
| 70366d2124 |
| a38c4e0c6e |
| 6c6b03bda0 |
| 9e02e31917 |
| 3fd58f1a91 |
| 9151489481 |
| f19296ac7f |
| 36868ee7b1 |
| d559f9f52e |
| 6440b5fbf5 |
| 97c4914573 |
| 7ce94c27fe |
| eb54c0026d |
| fe00e209ff |
| aed80732f9 |
.github/workflows/codeql.yml (4 changes, vendored)

@@ -58,7 +58,7 @@ jobs:
# Initializes the CodeQL tools for scanning.
- name: Initialize CodeQL
uses: github/codeql-action/init@014f16e7ab1402f30e7c3329d33797e7948572db # v4.31.3
uses: github/codeql-action/init@0499de31b99561a6d14a36a5f662c2a54f91beee # v4.31.2
with:
languages: ${{ matrix.language }}
build-mode: ${{ matrix.build-mode }}
@@ -86,6 +86,6 @@ jobs:
exit 1

- name: Perform CodeQL Analysis
uses: github/codeql-action/analyze@014f16e7ab1402f30e7c3329d33797e7948572db # v4.31.3
uses: github/codeql-action/analyze@0499de31b99561a6d14a36a5f662c2a54f91beee # v4.31.2
with:
category: "/language:${{matrix.language}}"
@@ -11,7 +11,7 @@ ci:
repos:
- repo: https://github.com/astral-sh/ruff-pre-commit
# Ruff version.
rev: v0.14.5
rev: v0.14.4
hooks:
# Run the linter.
- id: ruff
Doxyfile (2 changes)

@@ -48,7 +48,7 @@ PROJECT_NAME = ESPHome
# could be handy for archiving the generated documentation or if some version
# control system is used.

PROJECT_NUMBER = 2025.12.0-dev
PROJECT_NUMBER = 2025.11.0b4

# Using the PROJECT_BRIEF tag one can provide an optional one line description
# for a project that appears at the top of each page and should give viewer a
@@ -15,6 +15,11 @@ from . import (
class MemoryAnalyzerCLI(MemoryAnalyzer):
"""Memory analyzer with CLI-specific report generation."""

# Symbol size threshold for detailed analysis
SYMBOL_SIZE_THRESHOLD: int = (
100 # Show symbols larger than this in detailed analysis
)

# Column width constants
COL_COMPONENT: int = 29
COL_FLASH_TEXT: int = 14
@@ -191,14 +196,21 @@ class MemoryAnalyzerCLI(MemoryAnalyzer):
f"{len(symbols):>{self.COL_CORE_COUNT}} | {percentage:>{self.COL_CORE_PERCENT - 1}.1f}%"
)

# Top 15 largest core symbols
# All core symbols above threshold
lines.append("")
lines.append(f"Top 15 Largest {_COMPONENT_CORE} Symbols:")
sorted_core_symbols = sorted(
self._esphome_core_symbols, key=lambda x: x[2], reverse=True
)
large_core_symbols = [
(symbol, demangled, size)
for symbol, demangled, size in sorted_core_symbols
if size > self.SYMBOL_SIZE_THRESHOLD
]

for i, (symbol, demangled, size) in enumerate(sorted_core_symbols[:15]):
lines.append(
f"{_COMPONENT_CORE} Symbols > {self.SYMBOL_SIZE_THRESHOLD} B ({len(large_core_symbols)} symbols):"
)
for i, (symbol, demangled, size) in enumerate(large_core_symbols):
lines.append(f"{i + 1}. {demangled} ({size:,} B)")

lines.append("=" * self.TABLE_WIDTH)
@@ -268,13 +280,15 @@ class MemoryAnalyzerCLI(MemoryAnalyzer):
lines.append(f"Total size: {comp_mem.flash_total:,} B")
lines.append("")

# Show all symbols > 100 bytes for better visibility
# Show all symbols above threshold for better visibility
large_symbols = [
(sym, dem, size) for sym, dem, size in sorted_symbols if size > 100
(sym, dem, size)
for sym, dem, size in sorted_symbols
if size > self.SYMBOL_SIZE_THRESHOLD
]

lines.append(
f"{comp_name} Symbols > 100 B ({len(large_symbols)} symbols):"
f"{comp_name} Symbols > {self.SYMBOL_SIZE_THRESHOLD} B ({len(large_symbols)} symbols):"
)
for i, (symbol, demangled, size) in enumerate(large_symbols):
lines.append(f"{i + 1}. {demangled} ({size:,} B)")
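The hunk above replaces the fixed "top 15" listing with a filter on a shared size threshold. A minimal, self-contained sketch of that filtering step (standalone names, not the actual MemoryAnalyzerCLI class):

```python
# Sketch of the threshold-based symbol listing shown above; standalone names,
# not the actual MemoryAnalyzerCLI implementation.
SYMBOL_SIZE_THRESHOLD = 100  # show symbols larger than this, in bytes


def format_large_symbols(section: str, symbols: list[tuple[str, str, int]]) -> list[str]:
    # symbols are (mangled, demangled, size) tuples, as in the analyzer.
    ranked = sorted(symbols, key=lambda s: s[2], reverse=True)
    large = [s for s in ranked if s[2] > SYMBOL_SIZE_THRESHOLD]
    lines = [f"{section} Symbols > {SYMBOL_SIZE_THRESHOLD} B ({len(large)} symbols):"]
    lines += [f"{i + 1}. {dem} ({size:,} B)" for i, (_, dem, size) in enumerate(large)]
    return lines
```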
@@ -72,6 +72,16 @@ def _final_validate(config: ConfigType) -> ConfigType:
"Add 'ap:' to your WiFi configuration to enable the captive portal."
)

# Register socket needs for DNS server and additional HTTP connections
# - 1 UDP socket for DNS server
# - 3 additional TCP sockets for captive portal detection probes + configuration requests
# OS captive portal detection makes multiple probe requests that stay in TIME_WAIT.
# Need headroom for actual user configuration requests.
# LRU purging will reclaim idle sockets to prevent exhaustion from repeated attempts.
from esphome.components import socket

socket.consume_sockets(4, "captive_portal")(config)

return config

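The comments above lean on LRU purging to keep the HTTP server from running out of sockets during detection-probe bursts. A toy sketch of that eviction idea in plain Python (not the esp-idf httpd implementation, just the concept):

```python
from collections import OrderedDict


# Toy model of LRU socket purging: when a new connection arrives and the pool
# is full, the least recently used idle connection is closed to make room.
class LruConnectionPool:
    def __init__(self, max_sockets: int) -> None:
        self.max_sockets = max_sockets
        self.connections: OrderedDict[int, object] = OrderedDict()

    def touch(self, fd: int) -> None:
        # Mark a connection as recently used.
        self.connections.move_to_end(fd)

    def accept(self, fd: int, conn: object) -> None:
        if len(self.connections) >= self.max_sockets:
            old_fd, _ = self.connections.popitem(last=False)  # evict the oldest idle one
            print(f"purging idle connection {old_fd}")
        self.connections[fd] = conn
```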
@@ -50,8 +50,8 @@ void CaptivePortal::handle_wifisave(AsyncWebServerRequest *request) {
ESP_LOGI(TAG, "Requested WiFi Settings Change:");
ESP_LOGI(TAG, " SSID='%s'", ssid.c_str());
ESP_LOGI(TAG, " Password=" LOG_SECRET("'%s'"), psk.c_str());
wifi::global_wifi_component->save_wifi_sta(ssid, psk);
wifi::global_wifi_component->start_scanning();
// Defer save to main loop thread to avoid NVS operations from HTTP thread
this->defer([ssid, psk]() { wifi::global_wifi_component->save_wifi_sta(ssid, psk); });
request->redirect(ESPHOME_F("/?save"));
}

@@ -63,6 +63,12 @@ void CaptivePortal::start() {
this->base_->init();
if (!this->initialized_) {
this->base_->add_handler(this);
#ifdef USE_ESP32
// Enable LRU socket purging to handle captive portal detection probe bursts
// OS captive portal detection makes many simultaneous HTTP requests which can
// exhaust sockets. LRU purging automatically closes oldest idle connections.
this->base_->get_server()->set_lru_purge_enable(true);
#endif
}

network::IPAddress ip = wifi::global_wifi_component->wifi_soft_ap_ip();

@@ -40,6 +40,10 @@ class CaptivePortal : public AsyncWebHandler, public Component {
void end() {
this->active_ = false;
this->disable_loop(); // Stop processing DNS requests
#ifdef USE_ESP32
// Disable LRU socket purging now that captive portal is done
this->base_->get_server()->set_lru_purge_enable(false);
#endif
this->base_->deinit();
if (this->dns_server_ != nullptr) {
this->dns_server_->stop();
@@ -931,6 +931,12 @@ async def to_code(config):
add_idf_sdkconfig_option("CONFIG_MBEDTLS_CERTIFICATE_BUNDLE", True)
add_idf_sdkconfig_option("CONFIG_ESP_PHY_REDUCE_TX_POWER", True)

# ESP32-S2 Arduino: Disable USB Serial on boot to avoid TinyUSB dependency
if get_esp32_variant() == VARIANT_ESP32S2:
cg.add_build_unflag("-DARDUINO_USB_CDC_ON_BOOT=1")
cg.add_build_unflag("-DARDUINO_USB_CDC_ON_BOOT=0")
cg.add_build_flag("-DARDUINO_USB_CDC_ON_BOOT=0")

cg.add_build_flag("-Wno-nonnull-compare")

add_idf_sdkconfig_option(f"CONFIG_IDF_TARGET_{variant}", True)
@@ -634,13 +634,11 @@ void ESP32BLE::dump_config() {
io_capability_s = "invalid";
break;
}
char mac_s[18];
format_mac_addr_upper(mac_address, mac_s);
ESP_LOGCONFIG(TAG,
"BLE:\n"
" MAC address: %s\n"
" IO Capability: %s",
mac_s, io_capability_s);
format_mac_address_pretty(mac_address).c_str(), io_capability_s);
} else {
ESP_LOGCONFIG(TAG, "Bluetooth stack is not enabled");
}
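Several hunks in this range drop a stack `char mac_s[18]` buffer plus `format_mac_addr_upper` in favour of `format_mac_address_pretty`. A small Python stand-in showing the kind of string such a formatter produces (the exact separator and case of the C++ helper are an assumption here):

```python
def format_mac_address_pretty(mac: bytes) -> str:
    # Render a 6-byte MAC as colon-separated upper-case hex, e.g. "A4:CF:12:00:11:22".
    return ":".join(f"{b:02X}" for b in mac)


assert format_mac_address_pretty(bytes([0xA4, 0xCF, 0x12, 0x00, 0x11, 0x22])) == "A4:CF:12:00:11:22"
```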
@@ -20,6 +20,10 @@ CONF_ON_STOP = "on_stop"
CONF_STATUS_INDICATOR = "status_indicator"
CONF_WIFI_TIMEOUT = "wifi_timeout"

# Default WiFi timeout - aligned with WiFi component ap_timeout
# Allows sufficient time to try all BSSIDs before starting provisioning mode
DEFAULT_WIFI_TIMEOUT = "90s"


improv_ns = cg.esphome_ns.namespace("improv")
Error = improv_ns.enum("Error")
@@ -59,7 +63,7 @@ CONFIG_SCHEMA = (
CONF_AUTHORIZED_DURATION, default="1min"
): cv.positive_time_period_milliseconds,
cv.Optional(
CONF_WIFI_TIMEOUT, default="1min"
CONF_WIFI_TIMEOUT, default=DEFAULT_WIFI_TIMEOUT
): cv.positive_time_period_milliseconds,
cv.Optional(CONF_ON_PROVISIONED): automation.validate_automation(
{

@@ -127,6 +127,7 @@ void ESP32ImprovComponent::loop() {
// Set initial state based on whether we have an authorizer
this->set_state_(this->get_initial_state_(), false);
this->set_error_(improv::ERROR_NONE);
this->should_start_ = false; // Clear flag after starting
ESP_LOGD(TAG, "Service started!");
}
}

@@ -45,6 +45,7 @@ class ESP32ImprovComponent : public Component, public improv_base::ImprovBase {
void start();
void stop();
bool is_active() const { return this->state_ != improv::STATE_STOPPED; }
bool should_start() const { return this->should_start_; }

#ifdef USE_ESP32_IMPROV_STATE_CALLBACK
void add_on_state_callback(std::function<void(improv::State, improv::Error)> &&callback) {
@@ -31,35 +31,83 @@ CONFIG_SCHEMA = cv.Schema(
|
||||
cv.GenerateID(CONF_LD2410_ID): cv.use_id(LD2410Component),
|
||||
cv.Optional(CONF_MOVING_DISTANCE): sensor.sensor_schema(
|
||||
device_class=DEVICE_CLASS_DISTANCE,
|
||||
filters=[{"throttle_with_priority": cv.TimePeriod(milliseconds=1000)}],
|
||||
filters=[
|
||||
{
|
||||
"timeout": {
|
||||
"timeout": cv.TimePeriod(milliseconds=1000),
|
||||
"value": "last",
|
||||
}
|
||||
},
|
||||
{"throttle_with_priority": cv.TimePeriod(milliseconds=1000)},
|
||||
],
|
||||
icon=ICON_SIGNAL,
|
||||
unit_of_measurement=UNIT_CENTIMETER,
|
||||
),
|
||||
cv.Optional(CONF_STILL_DISTANCE): sensor.sensor_schema(
|
||||
device_class=DEVICE_CLASS_DISTANCE,
|
||||
filters=[{"throttle_with_priority": cv.TimePeriod(milliseconds=1000)}],
|
||||
filters=[
|
||||
{
|
||||
"timeout": {
|
||||
"timeout": cv.TimePeriod(milliseconds=1000),
|
||||
"value": "last",
|
||||
}
|
||||
},
|
||||
{"throttle_with_priority": cv.TimePeriod(milliseconds=1000)},
|
||||
],
|
||||
icon=ICON_SIGNAL,
|
||||
unit_of_measurement=UNIT_CENTIMETER,
|
||||
),
|
||||
cv.Optional(CONF_MOVING_ENERGY): sensor.sensor_schema(
|
||||
filters=[{"throttle_with_priority": cv.TimePeriod(milliseconds=1000)}],
|
||||
filters=[
|
||||
{
|
||||
"timeout": {
|
||||
"timeout": cv.TimePeriod(milliseconds=1000),
|
||||
"value": "last",
|
||||
}
|
||||
},
|
||||
{"throttle_with_priority": cv.TimePeriod(milliseconds=1000)},
|
||||
],
|
||||
icon=ICON_MOTION_SENSOR,
|
||||
unit_of_measurement=UNIT_PERCENT,
|
||||
),
|
||||
cv.Optional(CONF_STILL_ENERGY): sensor.sensor_schema(
|
||||
filters=[{"throttle_with_priority": cv.TimePeriod(milliseconds=1000)}],
|
||||
filters=[
|
||||
{
|
||||
"timeout": {
|
||||
"timeout": cv.TimePeriod(milliseconds=1000),
|
||||
"value": "last",
|
||||
}
|
||||
},
|
||||
{"throttle_with_priority": cv.TimePeriod(milliseconds=1000)},
|
||||
],
|
||||
icon=ICON_FLASH,
|
||||
unit_of_measurement=UNIT_PERCENT,
|
||||
),
|
||||
cv.Optional(CONF_LIGHT): sensor.sensor_schema(
|
||||
device_class=DEVICE_CLASS_ILLUMINANCE,
|
||||
entity_category=ENTITY_CATEGORY_DIAGNOSTIC,
|
||||
filters=[{"throttle_with_priority": cv.TimePeriod(milliseconds=1000)}],
|
||||
filters=[
|
||||
{
|
||||
"timeout": {
|
||||
"timeout": cv.TimePeriod(milliseconds=1000),
|
||||
"value": "last",
|
||||
}
|
||||
},
|
||||
{"throttle_with_priority": cv.TimePeriod(milliseconds=1000)},
|
||||
],
|
||||
icon=ICON_LIGHTBULB,
|
||||
),
|
||||
cv.Optional(CONF_DETECTION_DISTANCE): sensor.sensor_schema(
|
||||
device_class=DEVICE_CLASS_DISTANCE,
|
||||
filters=[{"throttle_with_priority": cv.TimePeriod(milliseconds=1000)}],
|
||||
filters=[
|
||||
{
|
||||
"timeout": {
|
||||
"timeout": cv.TimePeriod(milliseconds=1000),
|
||||
"value": "last",
|
||||
}
|
||||
},
|
||||
{"throttle_with_priority": cv.TimePeriod(milliseconds=1000)},
|
||||
],
|
||||
icon=ICON_SIGNAL,
|
||||
unit_of_measurement=UNIT_CENTIMETER,
|
||||
),
|
||||
@@ -73,7 +121,13 @@ CONFIG_SCHEMA = CONFIG_SCHEMA.extend(
|
||||
cv.Optional(CONF_MOVE_ENERGY): sensor.sensor_schema(
|
||||
entity_category=ENTITY_CATEGORY_DIAGNOSTIC,
|
||||
filters=[
|
||||
{"throttle_with_priority": cv.TimePeriod(milliseconds=1000)}
|
||||
{
|
||||
"timeout": {
|
||||
"timeout": cv.TimePeriod(milliseconds=1000),
|
||||
"value": "last",
|
||||
}
|
||||
},
|
||||
{"throttle_with_priority": cv.TimePeriod(milliseconds=1000)},
|
||||
],
|
||||
icon=ICON_MOTION_SENSOR,
|
||||
unit_of_measurement=UNIT_PERCENT,
|
||||
@@ -81,7 +135,13 @@ CONFIG_SCHEMA = CONFIG_SCHEMA.extend(
|
||||
cv.Optional(CONF_STILL_ENERGY): sensor.sensor_schema(
|
||||
entity_category=ENTITY_CATEGORY_DIAGNOSTIC,
|
||||
filters=[
|
||||
{"throttle_with_priority": cv.TimePeriod(milliseconds=1000)}
|
||||
{
|
||||
"timeout": {
|
||||
"timeout": cv.TimePeriod(milliseconds=1000),
|
||||
"value": "last",
|
||||
}
|
||||
},
|
||||
{"throttle_with_priority": cv.TimePeriod(milliseconds=1000)},
|
||||
],
|
||||
icon=ICON_FLASH,
|
||||
unit_of_measurement=UNIT_PERCENT,
|
||||
|
||||
@@ -31,36 +31,84 @@ CONFIG_SCHEMA = cv.Schema(
|
||||
cv.GenerateID(CONF_LD2412_ID): cv.use_id(LD2412Component),
|
||||
cv.Optional(CONF_DETECTION_DISTANCE): sensor.sensor_schema(
|
||||
device_class=DEVICE_CLASS_DISTANCE,
|
||||
filters=[{"throttle_with_priority": cv.TimePeriod(milliseconds=1000)}],
|
||||
filters=[
|
||||
{
|
||||
"timeout": {
|
||||
"timeout": cv.TimePeriod(milliseconds=1000),
|
||||
"value": "last",
|
||||
}
|
||||
},
|
||||
{"throttle_with_priority": cv.TimePeriod(milliseconds=1000)},
|
||||
],
|
||||
icon=ICON_SIGNAL,
|
||||
unit_of_measurement=UNIT_CENTIMETER,
|
||||
),
|
||||
cv.Optional(CONF_LIGHT): sensor.sensor_schema(
|
||||
device_class=DEVICE_CLASS_ILLUMINANCE,
|
||||
entity_category=ENTITY_CATEGORY_DIAGNOSTIC,
|
||||
filters=[{"throttle_with_priority": cv.TimePeriod(milliseconds=1000)}],
|
||||
filters=[
|
||||
{
|
||||
"timeout": {
|
||||
"timeout": cv.TimePeriod(milliseconds=1000),
|
||||
"value": "last",
|
||||
}
|
||||
},
|
||||
{"throttle_with_priority": cv.TimePeriod(milliseconds=1000)},
|
||||
],
|
||||
icon=ICON_LIGHTBULB,
|
||||
unit_of_measurement=UNIT_EMPTY, # No standard unit for this light sensor
|
||||
),
|
||||
cv.Optional(CONF_MOVING_DISTANCE): sensor.sensor_schema(
|
||||
device_class=DEVICE_CLASS_DISTANCE,
|
||||
filters=[{"throttle_with_priority": cv.TimePeriod(milliseconds=1000)}],
|
||||
filters=[
|
||||
{
|
||||
"timeout": {
|
||||
"timeout": cv.TimePeriod(milliseconds=1000),
|
||||
"value": "last",
|
||||
}
|
||||
},
|
||||
{"throttle_with_priority": cv.TimePeriod(milliseconds=1000)},
|
||||
],
|
||||
icon=ICON_SIGNAL,
|
||||
unit_of_measurement=UNIT_CENTIMETER,
|
||||
),
|
||||
cv.Optional(CONF_MOVING_ENERGY): sensor.sensor_schema(
|
||||
filters=[{"throttle_with_priority": cv.TimePeriod(milliseconds=1000)}],
|
||||
filters=[
|
||||
{
|
||||
"timeout": {
|
||||
"timeout": cv.TimePeriod(milliseconds=1000),
|
||||
"value": "last",
|
||||
}
|
||||
},
|
||||
{"throttle_with_priority": cv.TimePeriod(milliseconds=1000)},
|
||||
],
|
||||
icon=ICON_MOTION_SENSOR,
|
||||
unit_of_measurement=UNIT_PERCENT,
|
||||
),
|
||||
cv.Optional(CONF_STILL_DISTANCE): sensor.sensor_schema(
|
||||
device_class=DEVICE_CLASS_DISTANCE,
|
||||
filters=[{"throttle_with_priority": cv.TimePeriod(milliseconds=1000)}],
|
||||
filters=[
|
||||
{
|
||||
"timeout": {
|
||||
"timeout": cv.TimePeriod(milliseconds=1000),
|
||||
"value": "last",
|
||||
}
|
||||
},
|
||||
{"throttle_with_priority": cv.TimePeriod(milliseconds=1000)},
|
||||
],
|
||||
icon=ICON_SIGNAL,
|
||||
unit_of_measurement=UNIT_CENTIMETER,
|
||||
),
|
||||
cv.Optional(CONF_STILL_ENERGY): sensor.sensor_schema(
|
||||
filters=[{"throttle_with_priority": cv.TimePeriod(milliseconds=1000)}],
|
||||
filters=[
|
||||
{
|
||||
"timeout": {
|
||||
"timeout": cv.TimePeriod(milliseconds=1000),
|
||||
"value": "last",
|
||||
}
|
||||
},
|
||||
{"throttle_with_priority": cv.TimePeriod(milliseconds=1000)},
|
||||
],
|
||||
icon=ICON_FLASH,
|
||||
unit_of_measurement=UNIT_PERCENT,
|
||||
),
|
||||
@@ -74,7 +122,13 @@ CONFIG_SCHEMA = CONFIG_SCHEMA.extend(
|
||||
cv.Optional(CONF_MOVE_ENERGY): sensor.sensor_schema(
|
||||
entity_category=ENTITY_CATEGORY_DIAGNOSTIC,
|
||||
filters=[
|
||||
{"throttle_with_priority": cv.TimePeriod(milliseconds=1000)}
|
||||
{
|
||||
"timeout": {
|
||||
"timeout": cv.TimePeriod(milliseconds=1000),
|
||||
"value": "last",
|
||||
}
|
||||
},
|
||||
{"throttle_with_priority": cv.TimePeriod(milliseconds=1000)},
|
||||
],
|
||||
icon=ICON_MOTION_SENSOR,
|
||||
unit_of_measurement=UNIT_PERCENT,
|
||||
@@ -82,7 +136,13 @@ CONFIG_SCHEMA = CONFIG_SCHEMA.extend(
|
||||
cv.Optional(CONF_STILL_ENERGY): sensor.sensor_schema(
|
||||
entity_category=ENTITY_CATEGORY_DIAGNOSTIC,
|
||||
filters=[
|
||||
{"throttle_with_priority": cv.TimePeriod(milliseconds=1000)}
|
||||
{
|
||||
"timeout": {
|
||||
"timeout": cv.TimePeriod(milliseconds=1000),
|
||||
"value": "last",
|
||||
}
|
||||
},
|
||||
{"throttle_with_priority": cv.TimePeriod(milliseconds=1000)},
|
||||
],
|
||||
icon=ICON_FLASH,
|
||||
unit_of_measurement=UNIT_PERCENT,
|
||||
|
||||
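The LD2410 and LD2412 sensor schemas above add a `timeout` filter in front of the existing `throttle_with_priority` filter on every numeric sensor. A minimal sketch of what that filter list looks like as plain Python data, using `cv.TimePeriod` as the schemas do (the behavioural comment is a paraphrase of the filters' names, not taken from the diff):

```python
import esphome.config_validation as cv

# Filter chain used for each LD2410/LD2412 numeric sensor in the hunks above:
# a 1 s "timeout" filter re-emitting the last value, ahead of the existing
# 1 s "throttle_with_priority" filter.
FILTERS = [
    {
        "timeout": {
            "timeout": cv.TimePeriod(milliseconds=1000),
            "value": "last",
        }
    },
    {"throttle_with_priority": cv.TimePeriod(milliseconds=1000)},
]
```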
@@ -365,10 +365,8 @@ async def to_code(config):
if CORE.is_esp32:
if config[CONF_HARDWARE_UART] == USB_CDC:
add_idf_sdkconfig_option("CONFIG_ESP_CONSOLE_USB_CDC", True)
cg.add_define("USE_LOGGER_UART_SELECTION_USB_CDC")
elif config[CONF_HARDWARE_UART] == USB_SERIAL_JTAG:
add_idf_sdkconfig_option("CONFIG_ESP_CONSOLE_USB_SERIAL_JTAG", True)
cg.add_define("USE_LOGGER_UART_SELECTION_USB_SERIAL_JTAG")
try:
uart_selection(USB_SERIAL_JTAG)
cg.add_define("USE_LOGGER_USB_SERIAL_JTAG")
@@ -65,9 +65,7 @@ void HOT Logger::log_vprintf_(uint8_t level, const char *tag, int line, const ch
uint16_t buffer_at = 0; // Initialize buffer position
this->format_log_to_buffer_with_terminator_(level, tag, line, format, args, console_buffer, &buffer_at,
MAX_CONSOLE_LOG_MSG_SIZE);
// Add newline if platform needs it (ESP32 doesn't add via write_msg_)
this->add_newline_to_buffer_if_needed_(console_buffer, &buffer_at, MAX_CONSOLE_LOG_MSG_SIZE);
this->write_msg_(console_buffer, buffer_at);
this->write_msg_(console_buffer);
}

// Reset the recursion guard for this task
@@ -133,19 +131,18 @@ void Logger::log_vprintf_(uint8_t level, const char *tag, int line, const __Flas

// Save the offset before calling format_log_to_buffer_with_terminator_
// since it will increment tx_buffer_at_ to the end of the formatted string
uint16_t msg_start = this->tx_buffer_at_;
uint32_t msg_start = this->tx_buffer_at_;
this->format_log_to_buffer_with_terminator_(level, tag, line, this->tx_buffer_, args, this->tx_buffer_,
&this->tx_buffer_at_, this->tx_buffer_size_);

uint16_t msg_length =
// Write to console and send callback starting at the msg_start
if (this->baud_rate_ > 0) {
this->write_msg_(this->tx_buffer_ + msg_start);
}
size_t msg_length =
this->tx_buffer_at_ - msg_start; // Don't subtract 1 - tx_buffer_at_ is already at the null terminator position

// Callbacks get message first (before console write)
this->log_callback_.call(level, tag, this->tx_buffer_ + msg_start, msg_length);

// Write to console starting at the msg_start
this->write_tx_buffer_to_console_(msg_start, &msg_length);

global_recursion_guard_ = false;
}
#endif // USE_STORE_LOG_STR_IN_FLASH
@@ -212,7 +209,9 @@ void Logger::process_messages_() {
// This ensures all log messages appear on the console in a clean, serialized manner
// Note: Messages may appear slightly out of order due to async processing, but
// this is preferred over corrupted/interleaved console output
this->write_tx_buffer_to_console_();
if (this->baud_rate_ > 0) {
this->write_msg_(this->tx_buffer_);
}
}
} else {
// No messages to process, disable loop if appropriate

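The reordered flow above formats the message once into the shared TX buffer, hands the newline-free message to the log callbacks, and only then appends the console newline and writes. A small Python model of that ordering (names invented for the sketch, not the Logger API):

```python
def emit_log(tx_buffer: bytearray, msg_start: int, callbacks, write_console, needs_newline: bool) -> None:
    # Length of the freshly formatted message (buffer ends right at the terminator position).
    msg_length = len(tx_buffer) - msg_start
    message = bytes(tx_buffer[msg_start:msg_start + msg_length])

    # Callbacks (API/MQTT/syslog) get the message first, without a trailing newline.
    for callback in callbacks:
        callback(message)

    # The console write may need an explicit newline appended to the buffer.
    if needs_newline:
        tx_buffer.append(ord("\n"))
        msg_length += 1
    write_console(bytes(tx_buffer[msg_start:msg_start + msg_length]))
```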
@@ -71,17 +71,6 @@ static constexpr uint16_t MAX_HEADER_SIZE = 128;
|
||||
// "0x" + 2 hex digits per byte + '\0'
|
||||
static constexpr size_t MAX_POINTER_REPRESENTATION = 2 + sizeof(void *) * 2 + 1;
|
||||
|
||||
// Platform-specific: does write_msg_ add its own newline?
|
||||
// false: Caller must add newline to buffer before calling write_msg_ (ESP32, ESP8266)
|
||||
// Allows single write call with newline included for efficiency
|
||||
// true: write_msg_ adds newline itself via puts()/println() (other platforms)
|
||||
// Newline should NOT be added to buffer
|
||||
#if defined(USE_ESP32) || defined(USE_ESP8266)
|
||||
static constexpr bool WRITE_MSG_ADDS_NEWLINE = false;
|
||||
#else
|
||||
static constexpr bool WRITE_MSG_ADDS_NEWLINE = true;
|
||||
#endif
|
||||
|
||||
#if defined(USE_ESP32) || defined(USE_ESP8266) || defined(USE_RP2040) || defined(USE_LIBRETINY) || defined(USE_ZEPHYR)
|
||||
/** Enum for logging UART selection
|
||||
*
|
||||
@@ -184,7 +173,7 @@ class Logger : public Component {
|
||||
|
||||
protected:
|
||||
void process_messages_();
|
||||
void write_msg_(const char *msg, size_t len);
|
||||
void write_msg_(const char *msg);
|
||||
|
||||
// Format a log message with printf-style arguments and write it to a buffer with header, footer, and null terminator
|
||||
// It's the caller's responsibility to initialize buffer_at (typically to 0)
|
||||
@@ -211,35 +200,6 @@ class Logger : public Component {
|
||||
}
|
||||
}
|
||||
|
||||
// Helper to add newline to buffer for platforms that need it
|
||||
// Modifies buffer_at to include the newline
|
||||
inline void HOT add_newline_to_buffer_if_needed_(char *buffer, uint16_t *buffer_at, uint16_t buffer_size) {
|
||||
if constexpr (!WRITE_MSG_ADDS_NEWLINE) {
|
||||
// Add newline - don't need to maintain null termination
|
||||
// write_msg_ now always receives explicit length, so we can safely overwrite the null terminator
|
||||
// This is safe because:
|
||||
// 1. Callbacks already received the message (before we add newline)
|
||||
// 2. write_msg_ receives the length explicitly (doesn't need null terminator)
|
||||
if (*buffer_at < buffer_size) {
|
||||
buffer[(*buffer_at)++] = '\n';
|
||||
} else if (buffer_size > 0) {
|
||||
// Buffer was full - replace last char with newline to ensure it's visible
|
||||
buffer[buffer_size - 1] = '\n';
|
||||
*buffer_at = buffer_size;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Helper to write tx_buffer_ to console if logging is enabled
|
||||
// INTERNAL USE ONLY - offset > 0 requires length parameter to be non-null
|
||||
inline void HOT write_tx_buffer_to_console_(uint16_t offset = 0, uint16_t *length = nullptr) {
|
||||
if (this->baud_rate_ > 0) {
|
||||
uint16_t *len_ptr = length ? length : &this->tx_buffer_at_;
|
||||
this->add_newline_to_buffer_if_needed_(this->tx_buffer_ + offset, len_ptr, this->tx_buffer_size_ - offset);
|
||||
this->write_msg_(this->tx_buffer_ + offset, *len_ptr);
|
||||
}
|
||||
}
|
||||
|
||||
// Helper to format and send a log message to both console and callbacks
|
||||
inline void HOT log_message_to_buffer_and_send_(uint8_t level, const char *tag, int line, const char *format,
|
||||
va_list args) {
|
||||
@@ -248,11 +208,10 @@ class Logger : public Component {
|
||||
this->format_log_to_buffer_with_terminator_(level, tag, line, format, args, this->tx_buffer_, &this->tx_buffer_at_,
|
||||
this->tx_buffer_size_);
|
||||
|
||||
// Callbacks get message WITHOUT newline (for API/MQTT/syslog)
|
||||
if (this->baud_rate_ > 0) {
|
||||
this->write_msg_(this->tx_buffer_); // If logging is enabled, write to console
|
||||
}
|
||||
this->log_callback_.call(level, tag, this->tx_buffer_, this->tx_buffer_at_);
|
||||
|
||||
// Console gets message WITH newline (if platform needs it)
|
||||
this->write_tx_buffer_to_console_();
|
||||
}
|
||||
|
||||
// Write the body of the log message to the buffer
|
||||
@@ -466,9 +425,7 @@ class Logger : public Component {
|
||||
}
|
||||
|
||||
// Update buffer_at with the formatted length (handle truncation)
|
||||
// When vsnprintf truncates (ret >= remaining), it writes (remaining - 1) chars + null terminator
|
||||
// When it doesn't truncate (ret < remaining), it writes ret chars + null terminator
|
||||
uint16_t formatted_len = (ret >= remaining) ? (remaining - 1) : ret;
|
||||
uint16_t formatted_len = (ret >= remaining) ? remaining : ret;
|
||||
*buffer_at += formatted_len;
|
||||
|
||||
// Remove all trailing newlines right after formatting
|
||||
|
||||
@@ -121,23 +121,25 @@ void Logger::pre_setup() {
ESP_LOGI(TAG, "Log initialized");
}

void HOT Logger::write_msg_(const char *msg, size_t len) {
// Length is now always passed explicitly - no strlen() fallback needed

#if defined(USE_LOGGER_UART_SELECTION_USB_CDC) || defined(USE_LOGGER_UART_SELECTION_USB_SERIAL_JTAG)
// USB CDC/JTAG - single write including newline (already in buffer)
// Use fwrite to stdout which goes through VFS to USB console
//
// Note: These defines indicate the user's YAML configuration choice (hardware_uart: USB_CDC/USB_SERIAL_JTAG).
// They are ONLY defined when the user explicitly selects USB as the logger output in their config.
// This is compile-time selection, not runtime detection - if USB is configured, it's always used.
// There is no fallback to regular UART if "USB isn't connected" - that's the user's responsibility
// to configure correctly for their hardware. This approach eliminates runtime overhead.
fwrite(msg, 1, len, stdout);
void HOT Logger::write_msg_(const char *msg) {
if (
#if defined(USE_LOGGER_USB_CDC) && !defined(USE_LOGGER_USB_SERIAL_JTAG)
this->uart_ == UART_SELECTION_USB_CDC
#elif defined(USE_LOGGER_USB_SERIAL_JTAG) && !defined(USE_LOGGER_USB_CDC)
this->uart_ == UART_SELECTION_USB_SERIAL_JTAG
#elif defined(USE_LOGGER_USB_CDC) && defined(USE_LOGGER_USB_SERIAL_JTAG)
this->uart_ == UART_SELECTION_USB_CDC || this->uart_ == UART_SELECTION_USB_SERIAL_JTAG
#else
// Regular UART - single write including newline (already in buffer)
uart_write_bytes(this->uart_num_, msg, len);
/* DISABLES CODE */ (false) // NOLINT
#endif
) {
puts(msg);
} else {
// Use tx_buffer_at_ if msg points to tx_buffer_, otherwise fall back to strlen
size_t len = (msg == this->tx_buffer_) ? this->tx_buffer_at_ : strlen(msg);
uart_write_bytes(this->uart_num_, msg, len);
uart_write_bytes(this->uart_num_, "\n", 1);
}
}

const LogString *Logger::get_uart_selection_() {

@@ -33,10 +33,7 @@ void Logger::pre_setup() {
|
||||
ESP_LOGI(TAG, "Log initialized");
|
||||
}
|
||||
|
||||
void HOT Logger::write_msg_(const char *msg, size_t len) {
|
||||
// Single write with newline already in buffer (added by caller)
|
||||
this->hw_serial_->write(msg, len);
|
||||
}
|
||||
void HOT Logger::write_msg_(const char *msg) { this->hw_serial_->println(msg); }
|
||||
|
||||
const LogString *Logger::get_uart_selection_() {
|
||||
switch (this->uart_) {
|
||||
|
||||
@@ -3,7 +3,7 @@
|
||||
|
||||
namespace esphome::logger {
|
||||
|
||||
void HOT Logger::write_msg_(const char *msg, size_t) {
|
||||
void HOT Logger::write_msg_(const char *msg) {
|
||||
time_t rawtime;
|
||||
struct tm *timeinfo;
|
||||
char buffer[80];
|
||||
|
||||
@@ -49,7 +49,7 @@ void Logger::pre_setup() {
|
||||
ESP_LOGI(TAG, "Log initialized");
|
||||
}
|
||||
|
||||
void HOT Logger::write_msg_(const char *msg, size_t) { this->hw_serial_->println(msg); }
|
||||
void HOT Logger::write_msg_(const char *msg) { this->hw_serial_->println(msg); }
|
||||
|
||||
const LogString *Logger::get_uart_selection_() {
|
||||
switch (this->uart_) {
|
||||
|
||||
@@ -27,7 +27,7 @@ void Logger::pre_setup() {
|
||||
ESP_LOGI(TAG, "Log initialized");
|
||||
}
|
||||
|
||||
void HOT Logger::write_msg_(const char *msg, size_t) { this->hw_serial_->println(msg); }
|
||||
void HOT Logger::write_msg_(const char *msg) { this->hw_serial_->println(msg); }
|
||||
|
||||
const LogString *Logger::get_uart_selection_() {
|
||||
switch (this->uart_) {
|
||||
|
||||
@@ -62,7 +62,7 @@ void Logger::pre_setup() {
|
||||
ESP_LOGI(TAG, "Log initialized");
|
||||
}
|
||||
|
||||
void HOT Logger::write_msg_(const char *msg, size_t) {
|
||||
void HOT Logger::write_msg_(const char *msg) {
|
||||
#ifdef CONFIG_PRINTK
|
||||
printk("%s\n", msg);
|
||||
#endif
|
||||
|
||||
@@ -1,6 +1,7 @@
from esphome import automation
import esphome.config_validation as cv
from esphome.const import CONF_ID, CONF_RANGE_FROM, CONF_RANGE_TO, CONF_STEP, CONF_VALUE
from esphome.cpp_generator import MockObj

from ..automation import action_to_code
from ..defines import (
@@ -114,7 +115,9 @@ class SpinboxType(WidgetType):
w.obj, digits, digits - config[CONF_DECIMAL_PLACES]
)
if (value := config.get(CONF_VALUE)) is not None:
lv.spinbox_set_value(w.obj, await lv_float.process(value))
lv.spinbox_set_value(
w.obj, MockObj(await lv_float.process(value)) * w.get_scale()
)

def get_scale(self, config):
return 10 ** config[CONF_DECIMAL_PLACES]

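The spinbox fix multiplies the user-supplied value by the widget's scale before handing it to LVGL, since LVGL spinboxes store a raw integer and derive the displayed decimals from it. A tiny worked example of that scaling (plain Python, independent of the LVGL bindings):

```python
def scaled_spinbox_value(value: float, decimal_places: int) -> int:
    # The LVGL spinbox holds an integer; the displayed value is raw / 10**decimal_places.
    scale = 10 ** decimal_places
    return round(value * scale)


# With 2 decimal places, setting 3.14 should store the raw value 314.
assert scaled_spinbox_value(3.14, 2) == 314
```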
@@ -350,6 +350,7 @@ void MipiRgb::dump_config() {
|
||||
"\n Width: %u"
|
||||
"\n Height: %u"
|
||||
"\n Rotation: %d degrees"
|
||||
"\n PCLK Inverted: %s"
|
||||
"\n HSync Pulse Width: %u"
|
||||
"\n HSync Back Porch: %u"
|
||||
"\n HSync Front Porch: %u"
|
||||
@@ -357,18 +358,18 @@ void MipiRgb::dump_config() {
|
||||
"\n VSync Back Porch: %u"
|
||||
"\n VSync Front Porch: %u"
|
||||
"\n Invert Colors: %s"
|
||||
"\n Pixel Clock: %dMHz"
|
||||
"\n Pixel Clock: %uMHz"
|
||||
"\n Reset Pin: %s"
|
||||
"\n DE Pin: %s"
|
||||
"\n PCLK Pin: %s"
|
||||
"\n HSYNC Pin: %s"
|
||||
"\n VSYNC Pin: %s",
|
||||
this->model_, this->width_, this->height_, this->rotation_, this->hsync_pulse_width_,
|
||||
this->hsync_back_porch_, this->hsync_front_porch_, this->vsync_pulse_width_, this->vsync_back_porch_,
|
||||
this->vsync_front_porch_, YESNO(this->invert_colors_), this->pclk_frequency_ / 1000000,
|
||||
get_pin_name(this->reset_pin_).c_str(), get_pin_name(this->de_pin_).c_str(),
|
||||
get_pin_name(this->pclk_pin_).c_str(), get_pin_name(this->hsync_pin_).c_str(),
|
||||
get_pin_name(this->vsync_pin_).c_str());
|
||||
this->model_, this->width_, this->height_, this->rotation_, YESNO(this->pclk_inverted_),
|
||||
this->hsync_pulse_width_, this->hsync_back_porch_, this->hsync_front_porch_, this->vsync_pulse_width_,
|
||||
this->vsync_back_porch_, this->vsync_front_porch_, YESNO(this->invert_colors_),
|
||||
(unsigned) (this->pclk_frequency_ / 1000000), get_pin_name(this->reset_pin_).c_str(),
|
||||
get_pin_name(this->de_pin_).c_str(), get_pin_name(this->pclk_pin_).c_str(),
|
||||
get_pin_name(this->hsync_pin_).c_str(), get_pin_name(this->vsync_pin_).c_str());
|
||||
|
||||
if (this->madctl_ & MADCTL_BGR) {
|
||||
this->dump_pins_(8, 13, "Blue", 0);
|
||||
|
||||
@@ -11,6 +11,7 @@ st7701s.extend(
vsync_pin=17,
pclk_pin=21,
pclk_frequency="12MHz",
pclk_inverted=False,
pixel_mode="18bit",
mirror_x=True,
mirror_y=True,

@@ -118,10 +118,10 @@ struct IPAddress {
operator arduino_ns::IPAddress() const { return ip_addr_get_ip4_u32(&ip_addr_); }
#endif

bool is_set() const { return !ip_addr_isany(&ip_addr_); } // NOLINT(readability-simplify-boolean-expr)
bool is_ip4() const { return IP_IS_V4(&ip_addr_); }
bool is_ip6() const { return IP_IS_V6(&ip_addr_); }
bool is_multicast() const { return ip_addr_ismulticast(&ip_addr_); }
bool is_set() { return !ip_addr_isany(&ip_addr_); } // NOLINT(readability-simplify-boolean-expr)
bool is_ip4() { return IP_IS_V4(&ip_addr_); }
bool is_ip6() { return IP_IS_V6(&ip_addr_); }
bool is_multicast() { return ip_addr_ismulticast(&ip_addr_); }
std::string str() const { return str_lower_case(ipaddr_ntoa(&ip_addr_)); }
bool operator==(const IPAddress &other) const { return ip_addr_cmp(&ip_addr_, &other.ip_addr_); }
bool operator!=(const IPAddress &other) const { return !ip_addr_cmp(&ip_addr_, &other.ip_addr_); }
@@ -103,7 +103,6 @@ nrf52_ns = cg.esphome_ns.namespace("nrf52")
|
||||
DeviceFirmwareUpdate = nrf52_ns.class_("DeviceFirmwareUpdate", cg.Component)
|
||||
|
||||
CONF_DFU = "dfu"
|
||||
CONF_DCDC = "dcdc"
|
||||
CONF_REG0 = "reg0"
|
||||
CONF_UICR_ERASE = "uicr_erase"
|
||||
|
||||
@@ -122,7 +121,6 @@ CONFIG_SCHEMA = cv.All(
|
||||
cv.Required(CONF_RESET_PIN): pins.gpio_output_pin_schema,
|
||||
}
|
||||
),
|
||||
cv.Optional(CONF_DCDC, default=True): cv.boolean,
|
||||
cv.Optional(CONF_REG0): cv.Schema(
|
||||
{
|
||||
cv.Required(CONF_VOLTAGE): cv.All(
|
||||
@@ -198,7 +196,6 @@ async def to_code(config: ConfigType) -> None:
|
||||
|
||||
if dfu_config := config.get(CONF_DFU):
|
||||
CORE.add_job(_dfu_to_code, dfu_config)
|
||||
zephyr_add_prj_conf("BOARD_ENABLE_DCDC", config[CONF_DCDC])
|
||||
|
||||
if reg0_config := config.get(CONF_REG0):
|
||||
value = VOLTAGE_LEVELS.index(reg0_config[CONF_VOLTAGE])
|
||||
|
||||
@@ -73,17 +73,17 @@ void SFA30Component::update() {
}

if (this->formaldehyde_sensor_ != nullptr) {
const float formaldehyde = raw_data[0] / 5.0f;
const float formaldehyde = static_cast<int16_t>(raw_data[0]) / 5.0f;
this->formaldehyde_sensor_->publish_state(formaldehyde);
}

if (this->humidity_sensor_ != nullptr) {
const float humidity = raw_data[1] / 100.0f;
const float humidity = static_cast<int16_t>(raw_data[1]) / 100.0f;
this->humidity_sensor_->publish_state(humidity);
}

if (this->temperature_sensor_ != nullptr) {
const float temperature = raw_data[2] / 200.0f;
const float temperature = static_cast<int16_t>(raw_data[2]) / 200.0f;
this->temperature_sensor_->publish_state(temperature);
}

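The SFA30 change reinterprets each raw 16-bit word as signed before scaling, so readings below zero (for example sub-zero temperatures) no longer come out as huge positive values. A short Python illustration of that two's-complement reinterpretation, using the 200.0 temperature divisor from the hunk above:

```python
import struct


def raw_word_to_temperature(raw: int) -> float:
    # Reinterpret an unsigned 16-bit register value as signed, then scale.
    signed = struct.unpack("<h", struct.pack("<H", raw))[0]
    return signed / 200.0


# 0xFF38 is -200 as int16: -1.0 degC, instead of the bogus +326.68 degC the unsigned value gives.
assert raw_word_to_temperature(0xFF38) == -1.0
```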
@@ -1,9 +1,14 @@
import logging

import esphome.codegen as cg
from esphome.components import time as time_
from esphome.config_helpers import merge_config
import esphome.config_validation as cv
from esphome.const import (
CONF_ID,
CONF_PLATFORM,
CONF_SERVERS,
CONF_TIME,
PLATFORM_BK72XX,
PLATFORM_ESP32,
PLATFORM_ESP8266,
@@ -12,13 +17,74 @@ from esphome.const import (
PLATFORM_RTL87XX,
)
from esphome.core import CORE
import esphome.final_validate as fv
from esphome.types import ConfigType

_LOGGER = logging.getLogger(__name__)

DEPENDENCIES = ["network"]

CONF_SNTP = "sntp"

sntp_ns = cg.esphome_ns.namespace("sntp")
SNTPComponent = sntp_ns.class_("SNTPComponent", time_.RealTimeClock)

DEFAULT_SERVERS = ["0.pool.ntp.org", "1.pool.ntp.org", "2.pool.ntp.org"]


def _sntp_final_validate(config: ConfigType) -> None:
"""Merge multiple SNTP instances into one, similar to OTA merging behavior."""
full_conf = fv.full_config.get()
time_confs = full_conf.get(CONF_TIME, [])

sntp_configs: list[ConfigType] = []
other_time_configs: list[ConfigType] = []

for time_conf in time_confs:
if time_conf.get(CONF_PLATFORM) == CONF_SNTP:
sntp_configs.append(time_conf)
else:
other_time_configs.append(time_conf)

if len(sntp_configs) <= 1:
return

# Merge all SNTP configs into the first one
merged = sntp_configs[0]
for sntp_conf in sntp_configs[1:]:
# Validate that IDs are consistent if manually specified
if merged[CONF_ID].is_manual and sntp_conf[CONF_ID].is_manual:
raise cv.Invalid(
f"Found multiple SNTP configurations but {CONF_ID} is inconsistent"
)
merged = merge_config(merged, sntp_conf)

# Deduplicate servers while preserving order
servers = merged[CONF_SERVERS]
unique_servers = list(dict.fromkeys(servers))

# Warn if we're dropping servers due to 3-server limit
if len(unique_servers) > 3:
dropped = unique_servers[3:]
unique_servers = unique_servers[:3]
_LOGGER.warning(
"SNTP supports maximum 3 servers. Dropped excess server(s): %s",
dropped,
)

merged[CONF_SERVERS] = unique_servers

_LOGGER.warning(
"Found and merged %d SNTP time configurations into one instance",
len(sntp_configs),
)

# Replace time configs with merged SNTP + other time platforms
other_time_configs.append(merged)
full_conf[CONF_TIME] = other_time_configs
fv.full_config.set(full_conf)


CONFIG_SCHEMA = cv.All(
time_.TIME_SCHEMA.extend(
{
@@ -40,6 +106,8 @@ CONFIG_SCHEMA = cv.All(
),
)

FINAL_VALIDATE_SCHEMA = _sntp_final_validate


async def to_code(config):
servers = config[CONF_SERVERS]

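The merge step above relies on `dict.fromkeys` to deduplicate the combined server list while preserving the order of first appearance, then truncates to the three servers SNTP supports. A compact standalone sketch of just that part:

```python
def dedupe_and_cap_servers(servers: list[str], limit: int = 3) -> tuple[list[str], list[str]]:
    # dict.fromkeys keeps the first occurrence of each server and preserves order.
    unique = list(dict.fromkeys(servers))
    kept, dropped = unique[:limit], unique[limit:]
    return kept, dropped


kept, dropped = dedupe_and_cap_servers(
    ["0.pool.ntp.org", "1.pool.ntp.org", "0.pool.ntp.org", "2.pool.ntp.org", "3.pool.ntp.org"]
)
assert kept == ["0.pool.ntp.org", "1.pool.ntp.org", "2.pool.ntp.org"]
assert dropped == ["3.pool.ntp.org"]
```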
@@ -56,11 +56,19 @@ uint32_t ESP8266UartComponent::get_config() {
|
||||
}
|
||||
|
||||
void ESP8266UartComponent::setup() {
|
||||
if (this->rx_pin_) {
|
||||
this->rx_pin_->setup();
|
||||
}
|
||||
if (this->tx_pin_ && this->rx_pin_ != this->tx_pin_) {
|
||||
this->tx_pin_->setup();
|
||||
auto setup_pin_if_needed = [](InternalGPIOPin *pin) {
|
||||
if (!pin) {
|
||||
return;
|
||||
}
|
||||
const auto mask = gpio::Flags::FLAG_OPEN_DRAIN | gpio::Flags::FLAG_PULLUP | gpio::Flags::FLAG_PULLDOWN;
|
||||
if ((pin->get_flags() & mask) != gpio::Flags::FLAG_NONE) {
|
||||
pin->setup();
|
||||
}
|
||||
};
|
||||
|
||||
setup_pin_if_needed(this->rx_pin_);
|
||||
if (this->rx_pin_ != this->tx_pin_) {
|
||||
setup_pin_if_needed(this->tx_pin_);
|
||||
}
|
||||
|
||||
// Use Arduino HardwareSerial UARTs if all used pins match the ones
|
||||
|
||||
@@ -133,11 +133,19 @@ void IDFUARTComponent::load_settings(bool dump_config) {
|
||||
return;
|
||||
}
|
||||
|
||||
if (this->rx_pin_) {
|
||||
this->rx_pin_->setup();
|
||||
}
|
||||
if (this->tx_pin_ && this->rx_pin_ != this->tx_pin_) {
|
||||
this->tx_pin_->setup();
|
||||
auto setup_pin_if_needed = [](InternalGPIOPin *pin) {
|
||||
if (!pin) {
|
||||
return;
|
||||
}
|
||||
const auto mask = gpio::Flags::FLAG_OPEN_DRAIN | gpio::Flags::FLAG_PULLUP | gpio::Flags::FLAG_PULLDOWN;
|
||||
if ((pin->get_flags() & mask) != gpio::Flags::FLAG_NONE) {
|
||||
pin->setup();
|
||||
}
|
||||
};
|
||||
|
||||
setup_pin_if_needed(this->rx_pin_);
|
||||
if (this->rx_pin_ != this->tx_pin_) {
|
||||
setup_pin_if_needed(this->tx_pin_);
|
||||
}
|
||||
|
||||
int8_t tx = this->tx_pin_ != nullptr ? this->tx_pin_->get_pin() : -1;
|
||||
|
||||
@@ -53,7 +53,7 @@ void LibreTinyUARTComponent::setup() {

auto shouldFallbackToSoftwareSerial = [&]() -> bool {
auto hasFlags = [](InternalGPIOPin *pin, const gpio::Flags mask) -> bool {
return pin && pin->get_flags() & mask != gpio::Flags::FLAG_NONE;
return pin && (pin->get_flags() & mask) != gpio::Flags::FLAG_NONE;
};
if (hasFlags(this->tx_pin_, gpio::Flags::FLAG_OPEN_DRAIN | gpio::Flags::FLAG_PULLUP | gpio::Flags::FLAG_PULLDOWN) ||
hasFlags(this->rx_pin_, gpio::Flags::FLAG_OPEN_DRAIN | gpio::Flags::FLAG_PULLUP | gpio::Flags::FLAG_PULLDOWN)) {

@@ -52,11 +52,19 @@ uint16_t RP2040UartComponent::get_config() {
|
||||
}
|
||||
|
||||
void RP2040UartComponent::setup() {
|
||||
if (this->rx_pin_) {
|
||||
this->rx_pin_->setup();
|
||||
}
|
||||
if (this->tx_pin_ && this->rx_pin_ != this->tx_pin_) {
|
||||
this->tx_pin_->setup();
|
||||
auto setup_pin_if_needed = [](InternalGPIOPin *pin) {
|
||||
if (!pin) {
|
||||
return;
|
||||
}
|
||||
const auto mask = gpio::Flags::FLAG_OPEN_DRAIN | gpio::Flags::FLAG_PULLUP | gpio::Flags::FLAG_PULLDOWN;
|
||||
if ((pin->get_flags() & mask) != gpio::Flags::FLAG_NONE) {
|
||||
pin->setup();
|
||||
}
|
||||
};
|
||||
|
||||
setup_pin_if_needed(this->rx_pin_);
|
||||
if (this->rx_pin_ != this->tx_pin_) {
|
||||
setup_pin_if_needed(this->tx_pin_);
|
||||
}
|
||||
|
||||
uint16_t config = get_config();
|
||||
|
||||
@@ -1,10 +1,17 @@
|
||||
import logging
|
||||
|
||||
import esphome.codegen as cg
|
||||
from esphome.components.esp32 import add_idf_component
|
||||
from esphome.components.ota import BASE_OTA_SCHEMA, OTAComponent, ota_to_code
|
||||
from esphome.config_helpers import merge_config
|
||||
import esphome.config_validation as cv
|
||||
from esphome.const import CONF_ID
|
||||
from esphome.const import CONF_ID, CONF_OTA, CONF_PLATFORM, CONF_WEB_SERVER
|
||||
from esphome.core import CORE, coroutine_with_priority
|
||||
from esphome.coroutine import CoroPriority
|
||||
import esphome.final_validate as fv
|
||||
from esphome.types import ConfigType
|
||||
|
||||
_LOGGER = logging.getLogger(__name__)
|
||||
|
||||
CODEOWNERS = ["@esphome/core"]
|
||||
DEPENDENCIES = ["network", "web_server_base"]
|
||||
@@ -12,6 +19,53 @@ DEPENDENCIES = ["network", "web_server_base"]
|
||||
web_server_ns = cg.esphome_ns.namespace("web_server")
|
||||
WebServerOTAComponent = web_server_ns.class_("WebServerOTAComponent", OTAComponent)
|
||||
|
||||
|
||||
def _web_server_ota_final_validate(config: ConfigType) -> None:
|
||||
"""Merge multiple web_server OTA instances into one.
|
||||
|
||||
Multiple web_server OTA instances register duplicate HTTP handlers for /update,
|
||||
causing undefined behavior. Merge them into a single instance.
|
||||
"""
|
||||
full_conf = fv.full_config.get()
|
||||
ota_confs = full_conf.get(CONF_OTA, [])
|
||||
|
||||
web_server_ota_configs: list[ConfigType] = []
|
||||
other_ota_configs: list[ConfigType] = []
|
||||
|
||||
for ota_conf in ota_confs:
|
||||
if ota_conf.get(CONF_PLATFORM) == CONF_WEB_SERVER:
|
||||
web_server_ota_configs.append(ota_conf)
|
||||
else:
|
||||
other_ota_configs.append(ota_conf)
|
||||
|
||||
if len(web_server_ota_configs) <= 1:
|
||||
return
|
||||
|
||||
# Merge all web_server OTA configs into the first one
|
||||
merged = web_server_ota_configs[0]
|
||||
for ota_conf in web_server_ota_configs[1:]:
|
||||
# Validate that IDs are consistent if manually specified
|
||||
if (
|
||||
merged[CONF_ID].is_manual
|
||||
and ota_conf[CONF_ID].is_manual
|
||||
and merged[CONF_ID] != ota_conf[CONF_ID]
|
||||
):
|
||||
raise cv.Invalid(
|
||||
f"Found multiple web_server OTA configurations but {CONF_ID} is inconsistent"
|
||||
)
|
||||
merged = merge_config(merged, ota_conf)
|
||||
|
||||
_LOGGER.warning(
|
||||
"Found and merged %d web_server OTA configurations into one instance",
|
||||
len(web_server_ota_configs),
|
||||
)
|
||||
|
||||
# Replace OTA configs with merged web_server + other OTA platforms
|
||||
other_ota_configs.append(merged)
|
||||
full_conf[CONF_OTA] = other_ota_configs
|
||||
fv.full_config.set(full_conf)
|
||||
|
||||
|
||||
CONFIG_SCHEMA = (
|
||||
cv.Schema(
|
||||
{
|
||||
@@ -22,6 +76,8 @@ CONFIG_SCHEMA = (
|
||||
.extend(cv.COMPONENT_SCHEMA)
|
||||
)
|
||||
|
||||
FINAL_VALIDATE_SCHEMA = _web_server_ota_final_validate
|
||||
|
||||
|
||||
@coroutine_with_priority(CoroPriority.WEB_SERVER_OTA)
|
||||
async def to_code(config):
|
||||
|
||||
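The web_server OTA merge above folds duplicate `web_server` OTA blocks into one and refuses to merge when two blocks carry different manually chosen IDs. A standalone sketch of that ID-consistency rule, with simplified stand-ins for the config objects (not the esphome validation API):

```python
from dataclasses import dataclass


@dataclass
class ConfigId:
    name: str
    is_manual: bool  # True when the user wrote an explicit id: in YAML


def check_merge_ids(first: ConfigId, second: ConfigId) -> None:
    # Only complain when both IDs were hand-written and they disagree;
    # auto-generated IDs are free to be collapsed into one instance.
    if first.is_manual and second.is_manual and first.name != second.name:
        raise ValueError("Found multiple web_server OTA configurations but id is inconsistent")


check_merge_ids(ConfigId("my_ota", True), ConfigId("auto_gen_1", False))  # fine: second ID is auto-generated
```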
@@ -94,6 +94,18 @@ void AsyncWebServer::end() {
|
||||
}
|
||||
}
|
||||
|
||||
void AsyncWebServer::set_lru_purge_enable(bool enable) {
|
||||
if (this->lru_purge_enable_ == enable) {
|
||||
return; // No change needed
|
||||
}
|
||||
this->lru_purge_enable_ = enable;
|
||||
// If server is already running, restart it with new config
|
||||
if (this->server_) {
|
||||
this->end();
|
||||
this->begin();
|
||||
}
|
||||
}
|
||||
|
||||
void AsyncWebServer::begin() {
|
||||
if (this->server_) {
|
||||
this->end();
|
||||
@@ -101,6 +113,8 @@ void AsyncWebServer::begin() {
|
||||
httpd_config_t config = HTTPD_DEFAULT_CONFIG();
|
||||
config.server_port = this->port_;
|
||||
config.uri_match_fn = [](const char * /*unused*/, const char * /*unused*/, size_t /*unused*/) { return true; };
|
||||
// Enable LRU purging if requested (e.g., by captive portal to handle probe bursts)
|
||||
config.lru_purge_enable = this->lru_purge_enable_;
|
||||
if (httpd_start(&this->server_, &config) == ESP_OK) {
|
||||
const httpd_uri_t handler_get = {
|
||||
.uri = "",
|
||||
@@ -242,6 +256,7 @@ void AsyncWebServerRequest::send(int code, const char *content_type, const char
|
||||
void AsyncWebServerRequest::redirect(const std::string &url) {
|
||||
httpd_resp_set_status(*this, "302 Found");
|
||||
httpd_resp_set_hdr(*this, "Location", url.c_str());
|
||||
httpd_resp_set_hdr(*this, "Connection", "close");
|
||||
httpd_resp_send(*this, nullptr, 0);
|
||||
}
|
||||
|
||||
@@ -489,10 +504,18 @@ AsyncEventSourceResponse::AsyncEventSourceResponse(const AsyncWebServerRequest *
|
||||
|
||||
void AsyncEventSourceResponse::destroy(void *ptr) {
|
||||
auto *rsp = static_cast<AsyncEventSourceResponse *>(ptr);
|
||||
ESP_LOGD(TAG, "Event source connection closed (fd: %d)", rsp->fd_.load());
|
||||
// Mark as dead by setting fd to 0 - will be cleaned up in the main loop
|
||||
rsp->fd_.store(0);
|
||||
// Note: We don't delete or remove from set here to avoid race conditions
|
||||
int fd = rsp->fd_.exchange(0); // Atomically get and clear fd
|
||||
|
||||
if (fd > 0) {
|
||||
ESP_LOGD(TAG, "Event source connection closed (fd: %d)", fd);
|
||||
// Immediately shut down the socket to prevent lwIP from delivering more data
|
||||
// This prevents "recv_tcp: recv for wrong pcb!" assertions when the TCP stack
|
||||
// tries to deliver queued data after the session is marked as dead
|
||||
// See: https://github.com/esphome/esphome/issues/11936
|
||||
shutdown(fd, SHUT_RDWR);
|
||||
// Note: We don't close() the socket - httpd owns it and will close it
|
||||
}
|
||||
// Session will be cleaned up in the main loop to avoid race conditions
|
||||
}
|
||||
|
||||
// helper for allowing only unique entries in the queue
|
||||
|
||||
@@ -199,9 +199,13 @@ class AsyncWebServer {
|
||||
return *handler;
|
||||
}
|
||||
|
||||
void set_lru_purge_enable(bool enable);
|
||||
httpd_handle_t get_server() { return this->server_; }
|
||||
|
||||
protected:
|
||||
uint16_t port_{};
|
||||
httpd_handle_t server_{};
|
||||
bool lru_purge_enable_{false};
|
||||
static esp_err_t request_handler(httpd_req_t *r);
|
||||
static esp_err_t request_post_handler(httpd_req_t *r);
|
||||
esp_err_t request_handler_(AsyncWebServerRequest *request) const;
|
||||
|
||||
@@ -69,6 +69,12 @@ CONF_MIN_AUTH_MODE = "min_auth_mode"
# Limited to 127 because selected_sta_index_ is int8_t in C++
MAX_WIFI_NETWORKS = 127

# Default AP timeout - allows sufficient time to try all BSSIDs during initial connection
# After AP starts, WiFi scanning is skipped to avoid disrupting the AP, so we only
# get best-effort connection attempts. Longer timeout ensures we exhaust all options
# before falling back to AP mode. Aligned with improv wifi_timeout default.
DEFAULT_AP_TIMEOUT = "90s"

wifi_ns = cg.esphome_ns.namespace("wifi")
EAPAuth = wifi_ns.struct("EAPAuth")
ManualIP = wifi_ns.struct("ManualIP")
@@ -177,7 +183,7 @@ CONF_AP_TIMEOUT = "ap_timeout"
WIFI_NETWORK_AP = WIFI_NETWORK_BASE.extend(
{
cv.Optional(
CONF_AP_TIMEOUT, default="1min"
CONF_AP_TIMEOUT, default=DEFAULT_AP_TIMEOUT
): cv.positive_time_period_milliseconds,
}
)

@@ -199,7 +199,12 @@ static constexpr uint8_t WIFI_RETRY_COUNT_PER_AP = 1;
|
||||
|
||||
/// Cooldown duration in milliseconds after adapter restart or repeated failures
|
||||
/// Allows WiFi hardware to stabilize before next connection attempt
|
||||
static constexpr uint32_t WIFI_COOLDOWN_DURATION_MS = 1000;
|
||||
static constexpr uint32_t WIFI_COOLDOWN_DURATION_MS = 500;
|
||||
|
||||
/// Cooldown duration when fallback AP is active and captive portal may be running
|
||||
/// Longer interval gives users time to configure WiFi without constant connection attempts
|
||||
/// While connecting, WiFi can't beacon the AP properly, so needs longer cooldown
|
||||
static constexpr uint32_t WIFI_COOLDOWN_WITH_AP_ACTIVE_MS = 30000;
|
||||
|
||||
static constexpr uint8_t get_max_retries_for_phase(WiFiRetryPhase phase) {
|
||||
switch (phase) {
|
||||
@@ -275,7 +280,9 @@ int8_t WiFiComponent::find_next_hidden_sta_(int8_t start_index) {
|
||||
}
|
||||
}
|
||||
|
||||
if (!this->ssid_was_seen_in_scan_(sta.get_ssid())) {
|
||||
// If we didn't scan this cycle, treat all networks as potentially hidden
|
||||
// Otherwise, only retry networks that weren't seen in the scan
|
||||
if (!this->did_scan_this_cycle_ || !this->ssid_was_seen_in_scan_(sta.get_ssid())) {
|
||||
ESP_LOGD(TAG, "Hidden candidate " LOG_SECRET("'%s'") " at index %d", sta.get_ssid().c_str(), static_cast<int>(i));
|
||||
return static_cast<int8_t>(i);
|
||||
}
|
||||
@@ -417,10 +424,6 @@ void WiFiComponent::start() {
|
||||
void WiFiComponent::restart_adapter() {
|
||||
ESP_LOGW(TAG, "Restarting adapter");
|
||||
this->wifi_mode_(false, {});
|
||||
// Enter cooldown state to allow WiFi hardware to stabilize after restart
|
||||
// Don't set retry_phase_ or num_retried_ here - state machine handles transitions
|
||||
this->state_ = WIFI_COMPONENT_STATE_COOLDOWN;
|
||||
this->action_started_ = millis();
|
||||
this->error_from_callback_ = false;
|
||||
}
|
||||
|
||||
@@ -441,7 +444,16 @@ void WiFiComponent::loop() {
|
||||
switch (this->state_) {
|
||||
case WIFI_COMPONENT_STATE_COOLDOWN: {
|
||||
this->status_set_warning(LOG_STR("waiting to reconnect"));
|
||||
if (now - this->action_started_ > WIFI_COOLDOWN_DURATION_MS) {
|
||||
// Skip cooldown if new credentials were provided while connecting
|
||||
if (this->skip_cooldown_next_cycle_) {
|
||||
this->skip_cooldown_next_cycle_ = false;
|
||||
this->check_connecting_finished();
|
||||
break;
|
||||
}
|
||||
// Use longer cooldown when captive portal/improv is active to avoid disrupting user config
|
||||
bool portal_active = this->is_captive_portal_active_() || this->is_esp32_improv_active_();
|
||||
uint32_t cooldown_duration = portal_active ? WIFI_COOLDOWN_WITH_AP_ACTIVE_MS : WIFI_COOLDOWN_DURATION_MS;
|
||||
if (now - this->action_started_ > cooldown_duration) {
|
||||
// After cooldown we either restarted the adapter because of
|
||||
// a failure, or something tried to connect over and over
|
||||
// so we entered cooldown. In both cases we call
|
||||
@@ -495,7 +507,8 @@ void WiFiComponent::loop() {
|
||||
#endif // USE_WIFI_AP
|
||||
|
||||
#ifdef USE_IMPROV
|
||||
if (esp32_improv::global_improv_component != nullptr && !esp32_improv::global_improv_component->is_active()) {
|
||||
if (esp32_improv::global_improv_component != nullptr && !esp32_improv::global_improv_component->is_active() &&
|
||||
!esp32_improv::global_improv_component->should_start()) {
|
||||
if (now - this->last_connected_ > esp32_improv::global_improv_component->get_wifi_timeout()) {
|
||||
if (this->wifi_mode_(true, {}))
|
||||
esp32_improv::global_improv_component->start();
|
||||
@@ -605,6 +618,8 @@ void WiFiComponent::set_sta(const WiFiAP &ap) {
|
||||
this->init_sta(1);
|
||||
this->add_sta(ap);
|
||||
this->selected_sta_index_ = 0;
|
||||
// When new credentials are set (e.g., from improv), skip cooldown to retry immediately
|
||||
this->skip_cooldown_next_cycle_ = true;
|
||||
}
|
||||
|
||||
WiFiAP WiFiComponent::build_params_for_current_phase_() {
|
||||
@@ -666,29 +681,40 @@ void WiFiComponent::save_wifi_sta(const std::string &ssid, const std::string &pa
|
||||
sta.set_ssid(ssid);
|
||||
sta.set_password(password);
|
||||
this->set_sta(sta);
|
||||
|
||||
// Trigger connection attempt (exits cooldown if needed, no-op if already connecting/connected)
|
||||
this->connect_soon_();
|
||||
}
|
||||
|
||||
void WiFiComponent::connect_soon_() {
|
||||
// Only trigger retry if we're in cooldown - if already connecting/connected, do nothing
|
||||
if (this->state_ == WIFI_COMPONENT_STATE_COOLDOWN) {
|
||||
ESP_LOGD(TAG, "Exiting cooldown early due to new WiFi credentials");
|
||||
this->retry_connect();
|
||||
}
|
||||
}
|
||||
|
||||
void WiFiComponent::start_connecting(const WiFiAP &ap) {
|
||||
// Log connection attempt at INFO level with priority
|
||||
char bssid_s[18];
|
||||
std::string bssid_formatted;
|
||||
int8_t priority = 0;
|
||||
|
||||
if (ap.get_bssid().has_value()) {
|
||||
format_mac_addr_upper(ap.get_bssid().value().data(), bssid_s);
|
||||
bssid_formatted = format_mac_address_pretty(ap.get_bssid().value().data());
|
||||
priority = this->get_sta_priority(ap.get_bssid().value());
|
||||
}
|
||||
|
||||
ESP_LOGI(TAG,
|
||||
"Connecting to " LOG_SECRET("'%s'") " " LOG_SECRET("(%s)") " (priority %d, attempt %u/%u in phase %s)...",
|
||||
ap.get_ssid().c_str(), ap.get_bssid().has_value() ? bssid_s : LOG_STR_LITERAL("any"), priority,
|
||||
this->num_retried_ + 1, get_max_retries_for_phase(this->retry_phase_),
|
||||
ap.get_ssid().c_str(), ap.get_bssid().has_value() ? bssid_formatted.c_str() : LOG_STR_LITERAL("any"),
|
||||
priority, this->num_retried_ + 1, get_max_retries_for_phase(this->retry_phase_),
|
||||
LOG_STR_ARG(retry_phase_to_log_string(this->retry_phase_)));
|
||||
|
||||
#ifdef ESPHOME_LOG_HAS_VERBOSE
|
||||
ESP_LOGV(TAG, "Connection Params:");
|
||||
ESP_LOGV(TAG, " SSID: '%s'", ap.get_ssid().c_str());
|
||||
if (ap.get_bssid().has_value()) {
|
||||
ESP_LOGV(TAG, " BSSID: %s", bssid_s);
|
||||
ESP_LOGV(TAG, " BSSID: %s", format_mac_address_pretty(ap.get_bssid()->data()).c_str());
|
||||
} else {
|
||||
ESP_LOGV(TAG, " BSSID: Not Set");
|
||||
}
|
||||
@@ -797,8 +823,6 @@ const LogString *get_signal_bars(int8_t rssi) {
|
||||
|
||||
void WiFiComponent::print_connect_params_() {
|
||||
bssid_t bssid = wifi_bssid();
|
||||
char bssid_s[18];
|
||||
format_mac_addr_upper(bssid.data(), bssid_s);
|
||||
|
||||
ESP_LOGCONFIG(TAG, " Local MAC: %s", get_mac_address_pretty().c_str());
|
||||
if (this->is_disabled()) {
|
||||
@@ -821,9 +845,9 @@ void WiFiComponent::print_connect_params_() {
|
||||
" Gateway: %s\n"
|
||||
" DNS1: %s\n"
|
||||
" DNS2: %s",
|
||||
wifi_ssid().c_str(), bssid_s, App.get_name().c_str(), rssi, LOG_STR_ARG(get_signal_bars(rssi)),
|
||||
get_wifi_channel(), wifi_subnet_mask_().str().c_str(), wifi_gateway_ip_().str().c_str(),
|
||||
wifi_dns_ip_(0).str().c_str(), wifi_dns_ip_(1).str().c_str());
|
||||
wifi_ssid().c_str(), format_mac_address_pretty(bssid.data()).c_str(), App.get_name().c_str(), rssi,
|
||||
LOG_STR_ARG(get_signal_bars(rssi)), get_wifi_channel(), wifi_subnet_mask_().str().c_str(),
|
||||
wifi_gateway_ip_().str().c_str(), wifi_dns_ip_(0).str().c_str(), wifi_dns_ip_(1).str().c_str());
|
||||
#ifdef ESPHOME_LOG_HAS_VERBOSE
|
||||
if (const WiFiAP *config = this->get_selected_sta_(); config && config->get_bssid().has_value()) {
|
||||
ESP_LOGV(TAG, " Priority: %d", this->get_sta_priority(*config->get_bssid()));
|
||||
@@ -963,6 +987,7 @@ void WiFiComponent::check_scanning_finished() {
|
||||
return;
|
||||
}
|
||||
this->scan_done_ = false;
|
||||
this->did_scan_this_cycle_ = true;
|
||||
|
||||
if (this->scan_result_.empty()) {
|
||||
ESP_LOGW(TAG, "No networks found");
|
||||
@@ -1229,9 +1254,16 @@ WiFiRetryPhase WiFiComponent::determine_next_phase_() {
|
||||
return WiFiRetryPhase::RESTARTING_ADAPTER;
|
||||
|
||||
case WiFiRetryPhase::RESTARTING_ADAPTER:
|
||||
// After restart, go back to explicit hidden if we went through it initially, otherwise scan
|
||||
return this->went_through_explicit_hidden_phase_() ? WiFiRetryPhase::EXPLICIT_HIDDEN
|
||||
: WiFiRetryPhase::SCAN_CONNECTING;
|
||||
// After restart, go back to explicit hidden if we went through it initially
|
||||
if (this->went_through_explicit_hidden_phase_()) {
|
||||
return WiFiRetryPhase::EXPLICIT_HIDDEN;
|
||||
}
|
||||
// Skip scanning when captive portal/improv is active to avoid disrupting AP
|
||||
// Even passive scans can cause brief AP disconnections on ESP32
|
||||
if (this->is_captive_portal_active_() || this->is_esp32_improv_active_()) {
|
||||
return WiFiRetryPhase::RETRY_HIDDEN;
|
||||
}
|
||||
return WiFiRetryPhase::SCAN_CONNECTING;
|
||||
}
|
||||
|
||||
// Should never reach here
|
||||
@@ -1319,6 +1351,12 @@ bool WiFiComponent::transition_to_phase_(WiFiRetryPhase new_phase) {
|
||||
if (!this->is_captive_portal_active_() && !this->is_esp32_improv_active_()) {
|
||||
this->restart_adapter();
|
||||
}
|
||||
// Clear scan flag - we're starting a new retry cycle
|
||||
this->did_scan_this_cycle_ = false;
|
||||
// Always enter cooldown after restart (or skip-restart) to allow stabilization
|
||||
// Use extended cooldown when AP is active to avoid constant scanning that blocks DNS
|
||||
this->state_ = WIFI_COMPONENT_STATE_COOLDOWN;
|
||||
this->action_started_ = millis();
|
||||
// Return true to indicate we should wait (go to COOLDOWN) instead of immediately connecting
|
||||
return true;
|
||||
|
||||
@@ -1414,10 +1452,8 @@ void WiFiComponent::log_and_adjust_priority_for_failed_connect_() {
|
||||
(old_priority > std::numeric_limits<int8_t>::min()) ? (old_priority - 1) : std::numeric_limits<int8_t>::min();
|
||||
this->set_sta_priority(failed_bssid.value(), new_priority);
|
||||
}
|
||||
char bssid_s[18];
|
||||
format_mac_addr_upper(failed_bssid.value().data(), bssid_s);
|
||||
ESP_LOGD(TAG, "Failed " LOG_SECRET("'%s'") " " LOG_SECRET("(%s)") ", priority %d → %d", ssid.c_str(), bssid_s,
|
||||
old_priority, new_priority);
|
||||
ESP_LOGD(TAG, "Failed " LOG_SECRET("'%s'") " " LOG_SECRET("(%s)") ", priority %d → %d", ssid.c_str(),
|
||||
format_mac_address_pretty(failed_bssid.value().data()).c_str(), old_priority, new_priority);
|
||||
|
||||
// After adjusting priority, check if all priorities are now at minimum
|
||||
// If so, clear the vector to save memory and reset for fresh start
|
||||
|
||||
@@ -291,6 +291,7 @@ class WiFiComponent : public Component {
  void set_passive_scan(bool passive);

  void save_wifi_sta(const std::string &ssid, const std::string &password);

  // ========== INTERNAL METHODS ==========
  // (In most use cases you won't need these)
  /// Setup WiFi interface.
@@ -424,12 +425,14 @@ class WiFiComponent : public Component {
    return true;
  }

  void connect_soon_();

  void wifi_loop_();
  bool wifi_mode_(optional<bool> sta, optional<bool> ap);
  bool wifi_sta_pre_setup_();
  bool wifi_apply_output_power_(float output_power);
  bool wifi_apply_power_save_();
  bool wifi_sta_ip_config_(const optional<ManualIP> &manual_ip);
  bool wifi_sta_ip_config_(optional<ManualIP> manual_ip);
  bool wifi_apply_hostname_();
  bool wifi_sta_connect_(const WiFiAP &ap);
  void wifi_pre_setup_();
@@ -437,7 +440,7 @@ class WiFiComponent : public Component {
  bool wifi_scan_start_(bool passive);

#ifdef USE_WIFI_AP
  bool wifi_ap_ip_config_(const optional<ManualIP> &manual_ip);
  bool wifi_ap_ip_config_(optional<ManualIP> manual_ip);
  bool wifi_start_ap_(const WiFiAP &ap);
#endif // USE_WIFI_AP

@@ -529,6 +532,8 @@ class WiFiComponent : public Component {
  bool enable_on_boot_;
  bool got_ipv4_address_{false};
  bool keep_scan_results_{false};
  bool did_scan_this_cycle_{false};
  bool skip_cooldown_next_cycle_{false};

  // Pointers at the end (naturally aligned)
  Trigger<> *connect_trigger_{new Trigger<>()};

@@ -117,7 +117,7 @@ void netif_set_addr(struct netif *netif, const ip4_addr_t *ip, const ip4_addr_t
};
#endif

bool WiFiComponent::wifi_sta_ip_config_(const optional<ManualIP> &manual_ip) {
bool WiFiComponent::wifi_sta_ip_config_(optional<ManualIP> manual_ip) {
  // enable STA
  if (!this->wifi_mode_(true, {}))
    return false;
@@ -525,10 +525,8 @@ void WiFiComponent::wifi_event_callback(System_Event_t *event) {
        ESP_LOGW(TAG, "Disconnected ssid='%s' reason='Probe Request Unsuccessful'", buf);
        s_sta_connect_not_found = true;
      } else {
        char bssid_s[18];
        format_mac_addr_upper(it.bssid, bssid_s);
        ESP_LOGW(TAG, "Disconnected ssid='%s' bssid=" LOG_SECRET("%s") " reason='%s'", buf, bssid_s,
                 LOG_STR_ARG(get_disconnect_reason_str(it.reason)));
        ESP_LOGW(TAG, "Disconnected ssid='%s' bssid=" LOG_SECRET("%s") " reason='%s'", buf,
                 format_mac_address_pretty(it.bssid).c_str(), LOG_STR_ARG(get_disconnect_reason_str(it.reason)));
        s_sta_connect_error = true;
      }
      s_sta_connected = false;
@@ -732,7 +730,7 @@ void WiFiComponent::wifi_scan_done_callback_(void *arg, STATUS status) {
}

#ifdef USE_WIFI_AP
bool WiFiComponent::wifi_ap_ip_config_(const optional<ManualIP> &manual_ip) {
bool WiFiComponent::wifi_ap_ip_config_(optional<ManualIP> manual_ip) {
  // enable AP
  if (!this->wifi_mode_({}, true))
    return false;

@@ -487,7 +487,7 @@ bool WiFiComponent::wifi_sta_connect_(const WiFiAP &ap) {
  return true;
}

bool WiFiComponent::wifi_sta_ip_config_(const optional<ManualIP> &manual_ip) {
bool WiFiComponent::wifi_sta_ip_config_(optional<ManualIP> manual_ip) {
  // enable STA
  if (!this->wifi_mode_(true, {}))
    return false;
@@ -746,10 +746,8 @@ void WiFiComponent::wifi_process_event_(IDFWiFiEvent *data) {
      ESP_LOGI(TAG, "Disconnected ssid='%s' reason='Station Roaming'", buf);
      return;
    } else {
      char bssid_s[18];
      format_mac_addr_upper(it.bssid, bssid_s);
      ESP_LOGW(TAG, "Disconnected ssid='%s' bssid=" LOG_SECRET("%s") " reason='%s'", buf, bssid_s,
               get_disconnect_reason_str(it.reason));
      ESP_LOGW(TAG, "Disconnected ssid='%s' bssid=" LOG_SECRET("%s") " reason='%s'", buf,
               format_mac_address_pretty(it.bssid).c_str(), get_disconnect_reason_str(it.reason));
      s_sta_connect_error = true;
    }
    s_sta_connected = false;
@@ -886,7 +884,7 @@ bool WiFiComponent::wifi_scan_start_(bool passive) {
}

#ifdef USE_WIFI_AP
bool WiFiComponent::wifi_ap_ip_config_(const optional<ManualIP> &manual_ip) {
bool WiFiComponent::wifi_ap_ip_config_(optional<ManualIP> manual_ip) {
  esp_err_t err;

  // enable AP

@@ -68,7 +68,7 @@ bool WiFiComponent::wifi_sta_pre_setup_() {
  return true;
}
bool WiFiComponent::wifi_apply_power_save_() { return WiFi.setSleep(this->power_save_ != WIFI_POWER_SAVE_NONE); }
bool WiFiComponent::wifi_sta_ip_config_(const optional<ManualIP> &manual_ip) {
bool WiFiComponent::wifi_sta_ip_config_(optional<ManualIP> manual_ip) {
  // enable STA
  if (!this->wifi_mode_(true, {}))
    return false;
@@ -299,10 +299,8 @@ void WiFiComponent::wifi_event_callback_(esphome_wifi_event_id_t event, esphome_
      if (it.reason == WIFI_REASON_NO_AP_FOUND) {
        ESP_LOGW(TAG, "Disconnected ssid='%s' reason='Probe Request Unsuccessful'", buf);
      } else {
        char bssid_s[18];
        format_mac_addr_upper(it.bssid, bssid_s);
        ESP_LOGW(TAG, "Disconnected ssid='%s' bssid=" LOG_SECRET("%s") " reason='%s'", buf, bssid_s,
                 get_disconnect_reason_str(it.reason));
        ESP_LOGW(TAG, "Disconnected ssid='%s' bssid=" LOG_SECRET("%s") " reason='%s'", buf,
                 format_mac_address_pretty(it.bssid).c_str(), get_disconnect_reason_str(it.reason));
      }

      uint8_t reason = it.reason;
@@ -436,7 +434,7 @@ void WiFiComponent::wifi_scan_done_callback_() {
}

#ifdef USE_WIFI_AP
bool WiFiComponent::wifi_ap_ip_config_(const optional<ManualIP> &manual_ip) {
bool WiFiComponent::wifi_ap_ip_config_(optional<ManualIP> manual_ip) {
  // enable AP
  if (!this->wifi_mode_({}, true))
    return false;

@@ -72,7 +72,7 @@ bool WiFiComponent::wifi_sta_connect_(const WiFiAP &ap) {

bool WiFiComponent::wifi_sta_pre_setup_() { return this->wifi_mode_(true, {}); }

bool WiFiComponent::wifi_sta_ip_config_(const optional<ManualIP> &manual_ip) {
bool WiFiComponent::wifi_sta_ip_config_(optional<ManualIP> manual_ip) {
  if (!manual_ip.has_value()) {
    return true;
  }
@@ -146,7 +146,7 @@ bool WiFiComponent::wifi_scan_start_(bool passive) {
}

#ifdef USE_WIFI_AP
bool WiFiComponent::wifi_ap_ip_config_(const optional<ManualIP> &manual_ip) {
bool WiFiComponent::wifi_ap_ip_config_(optional<ManualIP> manual_ip) {
  esphome::network::IPAddress ip_address, gateway, subnet, dns;
  if (manual_ip.has_value()) {
    ip_address = manual_ip->static_ip;

@@ -4,7 +4,7 @@ from enum import Enum

from esphome.enum import StrEnum

__version__ = "2025.12.0-dev"
__version__ = "2025.11.0b4"

ALLOWED_NAME_CHARS = "abcdefghijklmnopqrstuvwxyz0123456789-_"
VALID_SUBSTITUTIONS_CHARACTERS = (
@@ -336,6 +336,7 @@ CONF_ENERGY = "energy"
CONF_ENTITY_CATEGORY = "entity_category"
CONF_ENTITY_ID = "entity_id"
CONF_ENUM_DATAPOINT = "enum_datapoint"
CONF_ENVIRONMENT_VARIABLES = "environment_variables"
CONF_EQUATION = "equation"
CONF_ESP8266_DISABLE_SSL_SUPPORT = "esp8266_disable_ssl_support"
CONF_ESPHOME = "esphome"

@@ -17,6 +17,7 @@ from esphome.const import (
    CONF_COMPILE_PROCESS_LIMIT,
    CONF_DEBUG_SCHEDULER,
    CONF_DEVICES,
    CONF_ENVIRONMENT_VARIABLES,
    CONF_ESPHOME,
    CONF_FRIENDLY_NAME,
    CONF_ID,
@@ -215,6 +216,11 @@ CONFIG_SCHEMA = cv.All(
                cv.string_strict: cv.Any([cv.string], cv.string),
            }
        ),
        cv.Optional(CONF_ENVIRONMENT_VARIABLES, default={}): cv.Schema(
            {
                cv.string_strict: cv.string,
            }
        ),
        cv.Optional(CONF_ON_BOOT): automation.validate_automation(
            {
                cv.GenerateID(CONF_TRIGGER_ID): cv.declare_id(StartupTrigger),
@@ -426,6 +432,12 @@ async def _add_platformio_options(pio_options):
        cg.add_platformio_option(key, val)


@coroutine_with_priority(CoroPriority.FINAL)
async def _add_environment_variables(env_vars: dict[str, str]) -> None:
    # Set environment variables for the build process
    os.environ.update(env_vars)


@coroutine_with_priority(CoroPriority.AUTOMATION)
async def _add_automations(config):
    for conf in config.get(CONF_ON_BOOT, []):
@@ -563,6 +575,9 @@ async def to_code(config: ConfigType) -> None:
    if config[CONF_PLATFORMIO_OPTIONS]:
        CORE.add_job(_add_platformio_options, config[CONF_PLATFORMIO_OPTIONS])

    if config[CONF_ENVIRONMENT_VARIABLES]:
        CORE.add_job(_add_environment_variables, config[CONF_ENVIRONMENT_VARIABLES])

    # Process areas
    all_areas: list[dict[str, str | core.ID]] = []
    if CONF_AREA in config:

@@ -154,8 +154,8 @@ void HOT Scheduler::set_timer_common_(Component *component, SchedulerItem::Type

  // For retries, check if there's a cancelled timeout first
  if (is_retry && name_cstr != nullptr && type == SchedulerItem::TIMEOUT &&
      (has_cancelled_timeout_in_container_(this->items_, component, name_cstr, /* match_retry= */ true) ||
       has_cancelled_timeout_in_container_(this->to_add_, component, name_cstr, /* match_retry= */ true))) {
      (has_cancelled_timeout_in_container_locked_(this->items_, component, name_cstr, /* match_retry= */ true) ||
       has_cancelled_timeout_in_container_locked_(this->to_add_, component, name_cstr, /* match_retry= */ true))) {
    // Skip scheduling - the retry was cancelled
#ifdef ESPHOME_DEBUG_SCHEDULER
    ESP_LOGD(TAG, "Skipping retry '%s' - found cancelled item", name_cstr);
@@ -556,7 +556,8 @@ bool HOT Scheduler::cancel_item_locked_(Component *component, const char *name_c
#ifndef ESPHOME_THREAD_SINGLE
  // Mark items in defer queue as cancelled (they'll be skipped when processed)
  if (type == SchedulerItem::TIMEOUT) {
    total_cancelled += this->mark_matching_items_removed_(this->defer_queue_, component, name_cstr, type, match_retry);
    total_cancelled +=
        this->mark_matching_items_removed_locked_(this->defer_queue_, component, name_cstr, type, match_retry);
  }
#endif /* not ESPHOME_THREAD_SINGLE */

@@ -565,19 +566,20 @@ bool HOT Scheduler::cancel_item_locked_(Component *component, const char *name_c
  // (removing the last element doesn't break heap structure)
  if (!this->items_.empty()) {
    auto &last_item = this->items_.back();
    if (this->matches_item_(last_item, component, name_cstr, type, match_retry)) {
    if (this->matches_item_locked_(last_item, component, name_cstr, type, match_retry)) {
      this->recycle_item_(std::move(this->items_.back()));
      this->items_.pop_back();
      total_cancelled++;
    }
    // For other items in heap, we can only mark for removal (can't remove from middle of heap)
    size_t heap_cancelled = this->mark_matching_items_removed_(this->items_, component, name_cstr, type, match_retry);
    size_t heap_cancelled =
        this->mark_matching_items_removed_locked_(this->items_, component, name_cstr, type, match_retry);
    total_cancelled += heap_cancelled;
    this->to_remove_ += heap_cancelled; // Track removals for heap items
  }

  // Cancel items in to_add_
  total_cancelled += this->mark_matching_items_removed_(this->to_add_, component, name_cstr, type, match_retry);
  total_cancelled += this->mark_matching_items_removed_locked_(this->to_add_, component, name_cstr, type, match_retry);

  return total_cancelled > 0;
}
@@ -609,13 +611,12 @@ uint64_t Scheduler::millis_64_(uint32_t now) {
    if (now < last && (last - now) > HALF_MAX_UINT32) {
      this->millis_major_++;
      major++;
      this->last_millis_ = now;
#ifdef ESPHOME_DEBUG_SCHEDULER
      ESP_LOGD(TAG, "Detected true 32-bit rollover at %" PRIu32 "ms (was %" PRIu32 ")", now, last);
#endif /* ESPHOME_DEBUG_SCHEDULER */
    }

    // Only update if time moved forward
    if (now > last) {
    } else if (now > last) {
      // Only update if time moved forward
      this->last_millis_ = now;
    }

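Editor's note: the millis_64_ hunk above collapses the rollover branch and the forward-update branch into one if/else chain. For readers unfamiliar with the pattern, here is a minimal standalone sketch of the underlying idea (illustrative only, not the ESPHome implementation; the struct and field names are hypothetical):

#include <cstdint>

// Extend a wrapping 32-bit millisecond counter to 64 bits by counting rollovers.
struct Millis64 {
  uint32_t last_millis = 0;  // last raw 32-bit reading
  uint16_t major = 0;        // number of observed 32-bit rollovers

  uint64_t update(uint32_t now) {
    // A large backwards jump means the 32-bit counter wrapped around.
    if (now < last_millis && (last_millis - now) > (UINT32_MAX / 2)) {
      major++;
      last_millis = now;
    } else if (now > last_millis) {
      // Only move forward; small backwards jitter is ignored.
      last_millis = now;
    }
    return (static_cast<uint64_t>(major) << 32) | now;
  }
};
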
@@ -243,8 +243,18 @@ class Scheduler {
  }

  // Helper function to check if item matches criteria for cancellation
  inline bool HOT matches_item_(const std::unique_ptr<SchedulerItem> &item, Component *component, const char *name_cstr,
                                SchedulerItem::Type type, bool match_retry, bool skip_removed = true) const {
  // IMPORTANT: Must be called with scheduler lock held
  inline bool HOT matches_item_locked_(const std::unique_ptr<SchedulerItem> &item, Component *component,
                                       const char *name_cstr, SchedulerItem::Type type, bool match_retry,
                                       bool skip_removed = true) const {
    // THREAD SAFETY: Check for nullptr first to prevent LoadProhibited crashes. On multi-threaded
    // platforms, items can be moved out of defer_queue_ during processing, leaving nullptr entries.
    // PR #11305 added nullptr checks in callers (mark_matching_items_removed_locked_() and
    // has_cancelled_timeout_in_container_locked_()), but this check provides defense-in-depth: helper
    // functions should be safe regardless of caller behavior.
    // Fixes: https://github.com/esphome/esphome/issues/11940
    if (!item)
      return false;
    if (item->component != component || item->type != type || (skip_removed && item->remove) ||
        (match_retry && !item->is_retry)) {
      return false;
@@ -304,8 +314,8 @@ class Scheduler {
      // SAFETY: Moving out the unique_ptr leaves a nullptr in the vector at defer_queue_front_.
      // This is intentional and safe because:
      // 1. The vector is only cleaned up by cleanup_defer_queue_locked_() at the end of this function
      // 2. Any code iterating defer_queue_ MUST check for nullptr items (see mark_matching_items_removed_
      //    and has_cancelled_timeout_in_container_ in scheduler.h)
      // 2. Any code iterating defer_queue_ MUST check for nullptr items (see mark_matching_items_removed_locked_
      //    and has_cancelled_timeout_in_container_locked_ in scheduler.h)
      // 3. The lock protects concurrent access, but the nullptr remains until cleanup
      item = std::move(this->defer_queue_[this->defer_queue_front_]);
      this->defer_queue_front_++;
@@ -393,10 +403,10 @@ class Scheduler {

  // Helper to mark matching items in a container as removed
  // Returns the number of items marked for removal
  // IMPORTANT: Caller must hold the scheduler lock before calling this function.
  // IMPORTANT: Must be called with scheduler lock held
  template<typename Container>
  size_t mark_matching_items_removed_(Container &container, Component *component, const char *name_cstr,
                                      SchedulerItem::Type type, bool match_retry) {
  size_t mark_matching_items_removed_locked_(Container &container, Component *component, const char *name_cstr,
                                             SchedulerItem::Type type, bool match_retry) {
    size_t count = 0;
    for (auto &item : container) {
      // Skip nullptr items (can happen in defer_queue_ when items are being processed)
@@ -405,7 +415,7 @@ class Scheduler {
      // the vector can still contain nullptr items from the processing loop. This check prevents crashes.
      if (!item)
        continue;
      if (this->matches_item_(item, component, name_cstr, type, match_retry)) {
      if (this->matches_item_locked_(item, component, name_cstr, type, match_retry)) {
        // Mark item for removal (platform-specific)
        this->set_item_removed_(item.get(), true);
        count++;
@@ -415,9 +425,10 @@ class Scheduler {
  }

  // Template helper to check if any item in a container matches our criteria
  // IMPORTANT: Must be called with scheduler lock held
  template<typename Container>
  bool has_cancelled_timeout_in_container_(const Container &container, Component *component, const char *name_cstr,
                                           bool match_retry) const {
  bool has_cancelled_timeout_in_container_locked_(const Container &container, Component *component,
                                                  const char *name_cstr, bool match_retry) const {
    for (const auto &item : container) {
      // Skip nullptr items (can happen in defer_queue_ when items are being processed)
      // The defer_queue_ uses index-based processing: items are std::moved out but left in the
@@ -426,8 +437,8 @@ class Scheduler {
      if (!item)
        continue;
      if (is_item_removed_(item.get()) &&
          this->matches_item_(item, component, name_cstr, SchedulerItem::TIMEOUT, match_retry,
                              /* skip_removed= */ false)) {
          this->matches_item_locked_(item, component, name_cstr, SchedulerItem::TIMEOUT, match_retry,
                                     /* skip_removed= */ false)) {
        return true;
      }
    }

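Editor's note: the "_locked_" renames above mainly document a lock-held precondition; the recurring nullptr checks exist because the defer queue is drained by std::move-ing entries out of a vector whose slots are only cleaned up later. A small self-contained sketch of that iteration pattern (illustrative only; the type and function names here are hypothetical, not ESPHome APIs):

#include <memory>
#include <vector>

struct Item {
  int id;
};

// Count live entries; slots whose contents were already std::move'd out hold nullptr
// and must be skipped, otherwise dereferencing them would crash.
size_t count_live(const std::vector<std::unique_ptr<Item>> &queue) {
  size_t n = 0;
  for (const auto &item : queue) {
    if (!item)
      continue;  // moved-out slot, not yet cleaned up
    n++;
  }
  return n;
}
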
@@ -1,11 +1,11 @@
pylint==4.0.3
pylint==4.0.2
flake8==7.3.0 # also change in .pre-commit-config.yaml when updating
ruff==0.14.5 # also change in .pre-commit-config.yaml when updating
ruff==0.14.4 # also change in .pre-commit-config.yaml when updating
pyupgrade==3.21.1 # also change in .pre-commit-config.yaml when updating
pre-commit

# Unit tests
pytest==9.0.1
pytest==9.0.0
pytest-cov==7.0.0
pytest-mock==3.15.1
pytest-asyncio==1.3.0

@@ -1,6 +1,18 @@
"""Tests for the web_server OTA platform."""

from __future__ import annotations

from collections.abc import Callable
import logging
from typing import Any

import pytest

from esphome import config_validation as cv
from esphome.components.web_server.ota import _web_server_ota_final_validate
from esphome.const import CONF_ID, CONF_OTA, CONF_PLATFORM, CONF_WEB_SERVER
from esphome.core import ID
import esphome.final_validate as fv


def test_web_server_ota_generated(generate_main: Callable[[str], str]) -> None:
@@ -100,3 +112,144 @@ def test_web_server_ota_esp8266(generate_main: Callable[[str], str]) -> None:
    # Check web server OTA component is present
    assert "WebServerOTAComponent" in main_cpp
    assert "web_server::WebServerOTAComponent" in main_cpp


@pytest.mark.parametrize(
    ("ota_configs", "expected_count", "warning_expected"),
    [
        pytest.param(
            [
                {
                    CONF_PLATFORM: CONF_WEB_SERVER,
                    CONF_ID: ID("ota_web", is_manual=False),
                }
            ],
            1,
            False,
            id="single_instance_no_merge",
        ),
        pytest.param(
            [
                {
                    CONF_PLATFORM: CONF_WEB_SERVER,
                    CONF_ID: ID("ota_web_1", is_manual=False),
                },
                {
                    CONF_PLATFORM: CONF_WEB_SERVER,
                    CONF_ID: ID("ota_web_2", is_manual=False),
                },
            ],
            1,
            True,
            id="two_instances_merged",
        ),
        pytest.param(
            [
                {
                    CONF_PLATFORM: CONF_WEB_SERVER,
                    CONF_ID: ID("ota_web_1", is_manual=False),
                },
                {
                    CONF_PLATFORM: "esphome",
                    CONF_ID: ID("ota_esphome", is_manual=False),
                },
                {
                    CONF_PLATFORM: CONF_WEB_SERVER,
                    CONF_ID: ID("ota_web_2", is_manual=False),
                },
            ],
            2,
            True,
            id="mixed_platforms_web_server_merged",
        ),
    ],
)
def test_web_server_ota_instance_merging(
    ota_configs: list[dict[str, Any]],
    expected_count: int,
    warning_expected: bool,
    caplog: pytest.LogCaptureFixture,
) -> None:
    """Test web_server OTA instance merging behavior."""
    full_conf = {CONF_OTA: ota_configs.copy()}

    token = fv.full_config.set(full_conf)
    try:
        with caplog.at_level(logging.WARNING):
            _web_server_ota_final_validate({})

        updated_conf = fv.full_config.get()

        # Verify total number of OTA platforms
        assert len(updated_conf[CONF_OTA]) == expected_count

        # Verify warning
        if warning_expected:
            assert any(
                "Found and merged" in record.message
                and "web_server OTA" in record.message
                for record in caplog.records
            ), "Expected merge warning not found in log"
        else:
            assert len(caplog.records) == 0, "Unexpected warnings logged"
    finally:
        fv.full_config.reset(token)


def test_web_server_ota_consistent_manual_ids(
    caplog: pytest.LogCaptureFixture,
) -> None:
    """Test that consistent manual IDs can be merged successfully."""
    ota_configs = [
        {
            CONF_PLATFORM: CONF_WEB_SERVER,
            CONF_ID: ID("ota_web", is_manual=True),
        },
        {
            CONF_PLATFORM: CONF_WEB_SERVER,
            CONF_ID: ID("ota_web", is_manual=True),
        },
    ]

    full_conf = {CONF_OTA: ota_configs}

    token = fv.full_config.set(full_conf)
    try:
        with caplog.at_level(logging.WARNING):
            _web_server_ota_final_validate({})

        updated_conf = fv.full_config.get()
        assert len(updated_conf[CONF_OTA]) == 1
        assert updated_conf[CONF_OTA][0][CONF_ID].id == "ota_web"
        assert any(
            "Found and merged" in record.message and "web_server OTA" in record.message
            for record in caplog.records
        )
    finally:
        fv.full_config.reset(token)


def test_web_server_ota_inconsistent_manual_ids() -> None:
    """Test that inconsistent manual IDs raise an error."""
    ota_configs = [
        {
            CONF_PLATFORM: CONF_WEB_SERVER,
            CONF_ID: ID("ota_web_1", is_manual=True),
        },
        {
            CONF_PLATFORM: CONF_WEB_SERVER,
            CONF_ID: ID("ota_web_2", is_manual=True),
        },
    ]

    full_conf = {CONF_OTA: ota_configs}

    token = fv.full_config.set(full_conf)
    try:
        with pytest.raises(
            cv.Invalid,
            match="Found multiple web_server OTA configurations but id is inconsistent",
        ):
            _web_server_ota_final_validate({})
    finally:
        fv.full_config.reset(token)

1  tests/component_tests/sntp/__init__.py  Normal file
@@ -0,0 +1 @@
"""Tests for SNTP component."""

22  tests/component_tests/sntp/config/sntp_test.yaml  Normal file
@@ -0,0 +1,22 @@
esphome:
  name: sntp-test

esp32:
  board: esp32dev
  framework:
    type: esp-idf

wifi:
  ssid: "testssid"
  password: "testpassword"

# Test multiple SNTP instances that should be merged
time:
  - platform: sntp
    servers:
      - 192.168.1.1
      - pool.ntp.org
  - platform: sntp
    servers:
      - pool.ntp.org
      - 192.168.1.2

238  tests/component_tests/sntp/test_init.py  Normal file
@@ -0,0 +1,238 @@
"""Tests for SNTP time configuration validation."""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
import logging
|
||||
from typing import Any
|
||||
|
||||
import pytest
|
||||
|
||||
from esphome import config_validation as cv
|
||||
from esphome.components.sntp.time import CONF_SNTP, _sntp_final_validate
|
||||
from esphome.const import CONF_ID, CONF_PLATFORM, CONF_SERVERS, CONF_TIME
|
||||
from esphome.core import ID
|
||||
import esphome.final_validate as fv
|
||||
|
||||
|
||||
@pytest.mark.parametrize(
|
||||
("time_configs", "expected_count", "expected_servers", "warning_messages"),
|
||||
[
|
||||
pytest.param(
|
||||
[
|
||||
{
|
||||
CONF_PLATFORM: CONF_SNTP,
|
||||
CONF_ID: ID("sntp_time", is_manual=False),
|
||||
CONF_SERVERS: ["192.168.1.1", "pool.ntp.org"],
|
||||
}
|
||||
],
|
||||
1,
|
||||
["192.168.1.1", "pool.ntp.org"],
|
||||
[],
|
||||
id="single_instance_no_merge",
|
||||
),
|
||||
pytest.param(
|
||||
[
|
||||
{
|
||||
CONF_PLATFORM: CONF_SNTP,
|
||||
CONF_ID: ID("sntp_time_1", is_manual=False),
|
||||
CONF_SERVERS: ["192.168.1.1", "pool.ntp.org"],
|
||||
},
|
||||
{
|
||||
CONF_PLATFORM: CONF_SNTP,
|
||||
CONF_ID: ID("sntp_time_2", is_manual=False),
|
||||
CONF_SERVERS: ["192.168.1.2"],
|
||||
},
|
||||
],
|
||||
1,
|
||||
["192.168.1.1", "pool.ntp.org", "192.168.1.2"],
|
||||
["Found and merged 2 SNTP time configurations into one instance"],
|
||||
id="two_instances_merged",
|
||||
),
|
||||
pytest.param(
|
||||
[
|
||||
{
|
||||
CONF_PLATFORM: CONF_SNTP,
|
||||
CONF_ID: ID("sntp_time_1", is_manual=False),
|
||||
CONF_SERVERS: ["192.168.1.1", "pool.ntp.org"],
|
||||
},
|
||||
{
|
||||
CONF_PLATFORM: CONF_SNTP,
|
||||
CONF_ID: ID("sntp_time_2", is_manual=False),
|
||||
CONF_SERVERS: ["pool.ntp.org", "192.168.1.2"],
|
||||
},
|
||||
],
|
||||
1,
|
||||
["192.168.1.1", "pool.ntp.org", "192.168.1.2"],
|
||||
["Found and merged 2 SNTP time configurations into one instance"],
|
||||
id="deduplication_preserves_order",
|
||||
),
|
||||
pytest.param(
|
||||
[
|
||||
{
|
||||
CONF_PLATFORM: CONF_SNTP,
|
||||
CONF_ID: ID("sntp_time_1", is_manual=False),
|
||||
CONF_SERVERS: ["192.168.1.1", "pool.ntp.org"],
|
||||
},
|
||||
{
|
||||
CONF_PLATFORM: CONF_SNTP,
|
||||
CONF_ID: ID("sntp_time_2", is_manual=False),
|
||||
CONF_SERVERS: ["192.168.1.2", "pool2.ntp.org"],
|
||||
},
|
||||
{
|
||||
CONF_PLATFORM: CONF_SNTP,
|
||||
CONF_ID: ID("sntp_time_3", is_manual=False),
|
||||
CONF_SERVERS: ["pool3.ntp.org"],
|
||||
},
|
||||
],
|
||||
1,
|
||||
["192.168.1.1", "pool.ntp.org", "192.168.1.2"],
|
||||
[
|
||||
"SNTP supports maximum 3 servers. Dropped excess server(s): ['pool2.ntp.org', 'pool3.ntp.org']",
|
||||
"Found and merged 3 SNTP time configurations into one instance",
|
||||
],
|
||||
id="three_instances_drops_excess_servers",
|
||||
),
|
||||
pytest.param(
|
||||
[
|
||||
{
|
||||
CONF_PLATFORM: CONF_SNTP,
|
||||
CONF_ID: ID("sntp_time_1", is_manual=False),
|
||||
CONF_SERVERS: [
|
||||
"192.168.1.1",
|
||||
"pool.ntp.org",
|
||||
"pool.ntp.org",
|
||||
"192.168.1.1",
|
||||
],
|
||||
},
|
||||
{
|
||||
CONF_PLATFORM: CONF_SNTP,
|
||||
CONF_ID: ID("sntp_time_2", is_manual=False),
|
||||
CONF_SERVERS: ["pool.ntp.org", "192.168.1.2"],
|
||||
},
|
||||
],
|
||||
1,
|
||||
["192.168.1.1", "pool.ntp.org", "192.168.1.2"],
|
||||
["Found and merged 2 SNTP time configurations into one instance"],
|
||||
id="deduplication_multiple_duplicates",
|
||||
),
|
||||
],
|
||||
)
|
||||
def test_sntp_instance_merging(
|
||||
time_configs: list[dict[str, Any]],
|
||||
expected_count: int,
|
||||
expected_servers: list[str],
|
||||
warning_messages: list[str],
|
||||
caplog: pytest.LogCaptureFixture,
|
||||
) -> None:
|
||||
"""Test SNTP instance merging behavior."""
|
||||
# Create a mock full config with time configs
|
||||
full_conf = {CONF_TIME: time_configs.copy()}
|
||||
|
||||
# Set the context var
|
||||
token = fv.full_config.set(full_conf)
|
||||
try:
|
||||
with caplog.at_level(logging.WARNING):
|
||||
_sntp_final_validate({})
|
||||
|
||||
# Get the updated config
|
||||
updated_conf = fv.full_config.get()
|
||||
|
||||
# Check if merging occurred
|
||||
if len(time_configs) > 1:
|
||||
# Verify only one SNTP instance remains
|
||||
sntp_instances = [
|
||||
tc
|
||||
for tc in updated_conf[CONF_TIME]
|
||||
if tc.get(CONF_PLATFORM) == CONF_SNTP
|
||||
]
|
||||
assert len(sntp_instances) == expected_count
|
||||
|
||||
# Verify server list
|
||||
assert sntp_instances[0][CONF_SERVERS] == expected_servers
|
||||
|
||||
# Verify warnings
|
||||
for expected_msg in warning_messages:
|
||||
assert any(
|
||||
expected_msg in record.message for record in caplog.records
|
||||
), f"Expected warning message '{expected_msg}' not found in log"
|
||||
else:
|
||||
# Single instance should not trigger merging or warnings
|
||||
assert len(caplog.records) == 0
|
||||
# Config should be unchanged
|
||||
assert updated_conf[CONF_TIME] == time_configs
|
||||
finally:
|
||||
fv.full_config.reset(token)
|
||||
|
||||
|
||||
def test_sntp_inconsistent_manual_ids() -> None:
|
||||
"""Test that inconsistent manual IDs raise an error."""
|
||||
# Create configs with manual IDs that are inconsistent
|
||||
time_configs = [
|
||||
{
|
||||
CONF_PLATFORM: CONF_SNTP,
|
||||
CONF_ID: ID("sntp_time_1", is_manual=True),
|
||||
CONF_SERVERS: ["192.168.1.1"],
|
||||
},
|
||||
{
|
||||
CONF_PLATFORM: CONF_SNTP,
|
||||
CONF_ID: ID("sntp_time_2", is_manual=True),
|
||||
CONF_SERVERS: ["192.168.1.2"],
|
||||
},
|
||||
]
|
||||
|
||||
full_conf = {CONF_TIME: time_configs}
|
||||
|
||||
token = fv.full_config.set(full_conf)
|
||||
try:
|
||||
with pytest.raises(
|
||||
cv.Invalid,
|
||||
match="Found multiple SNTP configurations but id is inconsistent",
|
||||
):
|
||||
_sntp_final_validate({})
|
||||
finally:
|
||||
fv.full_config.reset(token)
|
||||
|
||||
|
||||
def test_sntp_with_other_time_platforms(caplog: pytest.LogCaptureFixture) -> None:
|
||||
"""Test that SNTP merging doesn't affect other time platforms."""
|
||||
time_configs = [
|
||||
{
|
||||
CONF_PLATFORM: CONF_SNTP,
|
||||
CONF_ID: ID("sntp_time_1", is_manual=False),
|
||||
CONF_SERVERS: ["192.168.1.1"],
|
||||
},
|
||||
{
|
||||
CONF_PLATFORM: "homeassistant",
|
||||
CONF_ID: ID("homeassistant_time", is_manual=False),
|
||||
},
|
||||
{
|
||||
CONF_PLATFORM: CONF_SNTP,
|
||||
CONF_ID: ID("sntp_time_2", is_manual=False),
|
||||
CONF_SERVERS: ["192.168.1.2"],
|
||||
},
|
||||
]
|
||||
|
||||
full_conf = {CONF_TIME: time_configs.copy()}
|
||||
|
||||
token = fv.full_config.set(full_conf)
|
||||
try:
|
||||
with caplog.at_level(logging.WARNING):
|
||||
_sntp_final_validate({})
|
||||
|
||||
updated_conf = fv.full_config.get()
|
||||
|
||||
# Should have 2 time platforms: 1 merged SNTP + 1 homeassistant
|
||||
assert len(updated_conf[CONF_TIME]) == 2
|
||||
|
||||
# Find the platforms
|
||||
platforms = {tc[CONF_PLATFORM] for tc in updated_conf[CONF_TIME]}
|
||||
assert platforms == {CONF_SNTP, "homeassistant"}
|
||||
|
||||
# Verify SNTP was merged
|
||||
sntp_instances = [
|
||||
tc for tc in updated_conf[CONF_TIME] if tc[CONF_PLATFORM] == CONF_SNTP
|
||||
]
|
||||
assert len(sntp_instances) == 1
|
||||
assert sntp_instances[0][CONF_SERVERS] == ["192.168.1.1", "192.168.1.2"]
|
||||
finally:
|
||||
fv.full_config.reset(token)
|
||||
@@ -2,6 +2,9 @@ esphome:
  debug_scheduler: true
  platformio_options:
    board_build.flash_mode: dio
  environment_variables:
    TEST_ENV_VAR: "test_value"
    BUILD_NUMBER: "12345"
  area:
    id: testing_area
    name: Testing Area

@@ -703,7 +703,9 @@ lvgl:
      on_value:
        - lvgl.spinbox.update:
            id: spinbox_id
            value: !lambda return x;
            value: !lambda |-
              static float yyy = 83.0;
              return yyy + .8;
      - button:
          styles: spin_button
          id: spin_up

@@ -15,7 +15,6 @@ nrf52:
    inverted: true
    mode:
      output: true
  dcdc: False
  reg0:
    voltage: 2.1V
  uicr_erase: true