mirror of https://github.com/esphome/esphome.git synced 2025-10-18 09:43:47 +01:00

Merge pull request #11199 from esphome/bump-2025.10.0b2

2025.10.0b2
Jesse Hills, 2025-10-13 10:55:34 +13:00 (committed by GitHub)
2905 changed files with 8944 additions and 11072 deletions

View File

@@ -186,6 +186,11 @@ This document provides essential context for AI models interacting with this pro
 └── components/[component]/  # Component-specific tests
 ```
 Run them using `script/test_build_components`. Use `-c <component>` to test specific components and `-t <target>` for specific platforms.
+* **Testing All Components Together:** To verify that all components can be tested together without ID conflicts or configuration issues, use:
+  ```bash
+  ./script/test_component_grouping.py -e config --all
+  ```
+  This tests all components in a single build to catch conflicts that might not appear when testing components individually. Use `-e config` for fast configuration validation, or `-e compile` for full compilation testing.
 * **Debugging and Troubleshooting:**
   * **Debug Tools:**
     - `esphome config <file>.yaml` to validate configuration.
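For local runs, the flags documented in the hunk above compose as follows; a hedged sketch (the component and target names are illustrative, and only the `-e`/`--all` options of `test_component_grouping.py` are taken from the text above):

```bash
# Single component on a single platform (component/target names illustrative)
./script/test_build_components -e config -c mdns -t esp32-idf

# All components grouped into shared builds, config validation only
./script/test_component_grouping.py -e config --all
```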

View File

@@ -177,6 +177,7 @@ jobs:
       clang-tidy: ${{ steps.determine.outputs.clang-tidy }}
       python-linters: ${{ steps.determine.outputs.python-linters }}
       changed-components: ${{ steps.determine.outputs.changed-components }}
+      changed-components-with-tests: ${{ steps.determine.outputs.changed-components-with-tests }}
       component-test-count: ${{ steps.determine.outputs.component-test-count }}
     steps:
       - name: Check out code from GitHub
@@ -204,6 +205,7 @@
           echo "clang-tidy=$(echo "$output" | jq -r '.clang_tidy')" >> $GITHUB_OUTPUT
           echo "python-linters=$(echo "$output" | jq -r '.python_linters')" >> $GITHUB_OUTPUT
           echo "changed-components=$(echo "$output" | jq -c '.changed_components')" >> $GITHUB_OUTPUT
+          echo "changed-components-with-tests=$(echo "$output" | jq -c '.changed_components_with_tests')" >> $GITHUB_OUTPUT
           echo "component-test-count=$(echo "$output" | jq -r '.component_test_count')" >> $GITHUB_OUTPUT

   integration-tests:
@@ -367,12 +369,13 @@
       fail-fast: false
       max-parallel: 2
       matrix:
-        file: ${{ fromJson(needs.determine-jobs.outputs.changed-components) }}
+        file: ${{ fromJson(needs.determine-jobs.outputs.changed-components-with-tests) }}
     steps:
-      - name: Install dependencies
-        run: |
-          sudo apt-get update
-          sudo apt-get install libsdl2-dev
+      - name: Cache apt packages
+        uses: awalsh128/cache-apt-pkgs-action@acb598e5ddbc6f68a970c5da0688d2f3a9f04d05 # v1.5.3
+        with:
+          packages: libsdl2-dev
+          version: 1.0
       - name: Check out code from GitHub
         uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
@@ -381,17 +384,17 @@
         with:
           python-version: ${{ env.DEFAULT_PYTHON }}
           cache-key: ${{ needs.common.outputs.cache-key }}
-      - name: test_build_components -e config -c ${{ matrix.file }}
+      - name: Validate config for ${{ matrix.file }}
         run: |
           . venv/bin/activate
-          ./script/test_build_components -e config -c ${{ matrix.file }}
+          python3 script/test_build_components.py -e config -c ${{ matrix.file }}
-      - name: test_build_components -e compile -c ${{ matrix.file }}
+      - name: Compile config for ${{ matrix.file }}
         run: |
           . venv/bin/activate
-          ./script/test_build_components -e compile -c ${{ matrix.file }}
+          python3 script/test_build_components.py -e compile -c ${{ matrix.file }}

   test-build-components-splitter:
-    name: Split components for testing into 10 components per group
+    name: Split components for intelligent grouping (40 weighted per batch)
     runs-on: ubuntu-24.04
     needs:
       - common
@@ -402,14 +405,26 @@
     steps:
       - name: Check out code from GitHub
         uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
-      - name: Split components into groups of 10
+      - name: Restore Python
+        uses: ./.github/actions/restore-python
+        with:
+          python-version: ${{ env.DEFAULT_PYTHON }}
+          cache-key: ${{ needs.common.outputs.cache-key }}
+      - name: Split components intelligently based on bus configurations
         id: split
         run: |
-          components=$(echo '${{ needs.determine-jobs.outputs.changed-components }}' | jq -c '.[]' | shuf | jq -s -c '[_nwise(10) | join(" ")]')
-          echo "components=$components" >> $GITHUB_OUTPUT
+          . venv/bin/activate
+          # Use intelligent splitter that groups components with same bus configs
+          components='${{ needs.determine-jobs.outputs.changed-components-with-tests }}'
+          echo "Splitting components intelligently..."
+          output=$(python3 script/split_components_for_ci.py --components "$components" --batch-size 40 --output github)
+          echo "$output" >> $GITHUB_OUTPUT

   test-build-components-split:
-    name: Test split components
+    name: Test components batch (${{ matrix.components }})
     runs-on: ubuntu-24.04
     needs:
       - common
@@ -418,17 +433,23 @@
     if: github.event_name == 'pull_request' && fromJSON(needs.determine-jobs.outputs.component-test-count) >= 100
     strategy:
       fail-fast: false
-      max-parallel: 4
+      max-parallel: ${{ (github.base_ref == 'beta' || github.base_ref == 'release') && 8 || 4 }}
       matrix:
         components: ${{ fromJson(needs.test-build-components-splitter.outputs.matrix) }}
     steps:
+      - name: Show disk space
+        run: |
+          echo "Available disk space:"
+          df -h
       - name: List components
         run: echo ${{ matrix.components }}
-      - name: Install dependencies
-        run: |
-          sudo apt-get update
-          sudo apt-get install libsdl2-dev
+      - name: Cache apt packages
+        uses: awalsh128/cache-apt-pkgs-action@acb598e5ddbc6f68a970c5da0688d2f3a9f04d05 # v1.5.3
+        with:
+          packages: libsdl2-dev
+          version: 1.0
       - name: Check out code from GitHub
         uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
@@ -437,20 +458,37 @@
         with:
           python-version: ${{ env.DEFAULT_PYTHON }}
           cache-key: ${{ needs.common.outputs.cache-key }}
-      - name: Validate config
+      - name: Validate and compile components with intelligent grouping
         run: |
           . venv/bin/activate
-          for component in ${{ matrix.components }}; do
-            ./script/test_build_components -e config -c $component
-          done
-      - name: Compile config
-        run: |
-          . venv/bin/activate
-          mkdir build_cache
-          export PLATFORMIO_BUILD_CACHE_DIR=$PWD/build_cache
-          for component in ${{ matrix.components }}; do
-            ./script/test_build_components -e compile -c $component
-          done
+          # Use /mnt for build files (70GB available vs ~29GB on /)
+          # Bind mount PlatformIO directory to /mnt (tools, packages, build cache all go there)
+          sudo mkdir -p /mnt/platformio
+          sudo chown $USER:$USER /mnt/platformio
+          mkdir -p ~/.platformio
+          sudo mount --bind /mnt/platformio ~/.platformio
+          # Bind mount test build directory to /mnt
+          sudo mkdir -p /mnt/test_build_components_build
+          sudo chown $USER:$USER /mnt/test_build_components_build
+          mkdir -p tests/test_build_components/build
+          sudo mount --bind /mnt/test_build_components_build tests/test_build_components/build
+          # Convert space-separated components to comma-separated for Python script
+          components_csv=$(echo "${{ matrix.components }}" | tr ' ' ',')
+          echo "Testing components: $components_csv"
+          echo ""
+          # Run config validation with grouping
+          python3 script/test_build_components.py -e config -c "$components_csv" -f
+          echo ""
+          echo "Config validation passed! Starting compilation..."
+          echo ""
+          # Run compilation with grouping
+          python3 script/test_build_components.py -e compile -c "$components_csv" -f

   pre-commit-ci-lite:
     name: pre-commit.ci lite

View File

@@ -48,7 +48,7 @@ PROJECT_NAME = ESPHome
 # could be handy for archiving the generated documentation or if some version
 # control system is used.

-PROJECT_NUMBER = 2025.10.0b1
+PROJECT_NUMBER = 2025.10.0b2

 # Using the PROJECT_BRIEF tag one can provide an optional one line description
 # for a project that appears at the top of each page and should give viewer a

View File

@@ -1002,6 +1002,12 @@ def parse_args(argv):
action="append", action="append",
default=[], default=[],
) )
options_parser.add_argument(
"--testing-mode",
help="Enable testing mode (disables validation checks for grouped component testing)",
action="store_true",
default=False,
)
parser = argparse.ArgumentParser( parser = argparse.ArgumentParser(
description=f"ESPHome {const.__version__}", parents=[options_parser] description=f"ESPHome {const.__version__}", parents=[options_parser]
@@ -1260,6 +1266,7 @@ def run_esphome(argv):
args = parse_args(argv) args = parse_args(argv)
CORE.dashboard = args.dashboard CORE.dashboard = args.dashboard
CORE.testing_mode = args.testing_mode
# Create address cache from command-line arguments # Create address cache from command-line arguments
CORE.address_cache = AddressCache.from_cli_args( CORE.address_cache = AddressCache.from_cli_args(
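Since `--testing-mode` is registered on the shared options parser, it precedes the subcommand on the command line. A minimal sketch of how a grouped-test invocation might look (the YAML filename is hypothetical):

```bash
# Global option, so it goes before the subcommand; validation checks that would
# reject a grouped config (duplicate UART pins, BLE slot limits) are skipped.
esphome --testing-mode config grouped_components.yaml
```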

View File

@@ -105,9 +105,9 @@ class Canbus : public Component {
   CallbackManager<void(uint32_t can_id, bool extended_id, bool rtr, const std::vector<uint8_t> &data)>
       callback_manager_{};

-  virtual bool setup_internal();
-  virtual Error send_message(struct CanFrame *frame);
-  virtual Error read_message(struct CanFrame *frame);
+  virtual bool setup_internal() = 0;
+  virtual Error send_message(struct CanFrame *frame) = 0;
+  virtual Error read_message(struct CanFrame *frame) = 0;
 };

 template<typename... Ts> class CanbusSendAction : public Action<Ts...>, public Parented<Canbus> {

View File

@@ -5,7 +5,7 @@ namespace dashboard_import {
 static std::string g_package_import_url;  // NOLINT

-std::string get_package_import_url() { return g_package_import_url; }
+const std::string &get_package_import_url() { return g_package_import_url; }
 void set_package_import_url(std::string url) { g_package_import_url = std::move(url); }

 }  // namespace dashboard_import

View File

@@ -5,7 +5,7 @@
 namespace esphome {
 namespace dashboard_import {

-std::string get_package_import_url();
+const std::string &get_package_import_url();
 void set_package_import_url(std::string url);

 }  // namespace dashboard_import

View File

@@ -314,11 +314,12 @@ def _format_framework_espidf_version(ver: cv.Version, release: str) -> str:
 # - https://github.com/espressif/arduino-esp32/releases
 ARDUINO_FRAMEWORK_VERSION_LOOKUP = {
     "recommended": cv.Version(3, 2, 1),
-    "latest": cv.Version(3, 3, 1),
+    "latest": cv.Version(3, 3, 2),
-    "dev": cv.Version(3, 3, 1),
+    "dev": cv.Version(3, 3, 2),
 }
 ARDUINO_PLATFORM_VERSION_LOOKUP = {
-    cv.Version(3, 3, 1): cv.Version(55, 3, 31),
+    cv.Version(3, 3, 2): cv.Version(55, 3, 31, "1"),
+    cv.Version(3, 3, 1): cv.Version(55, 3, 31, "1"),
     cv.Version(3, 3, 0): cv.Version(55, 3, 30, "2"),
     cv.Version(3, 2, 1): cv.Version(54, 3, 21, "2"),
     cv.Version(3, 2, 0): cv.Version(54, 3, 20),
@@ -336,8 +337,8 @@ ESP_IDF_FRAMEWORK_VERSION_LOOKUP = {
     "dev": cv.Version(5, 5, 1),
 }
 ESP_IDF_PLATFORM_VERSION_LOOKUP = {
-    cv.Version(5, 5, 1): cv.Version(55, 3, 31),
+    cv.Version(5, 5, 1): cv.Version(55, 3, 31, "1"),
-    cv.Version(5, 5, 0): cv.Version(55, 3, 31),
+    cv.Version(5, 5, 0): cv.Version(55, 3, 31, "1"),
     cv.Version(5, 4, 2): cv.Version(54, 3, 21, "2"),
     cv.Version(5, 4, 1): cv.Version(54, 3, 21, "2"),
     cv.Version(5, 4, 0): cv.Version(54, 3, 21, "2"),
@@ -352,8 +353,8 @@ ESP_IDF_PLATFORM_VERSION_LOOKUP = {
 # - https://github.com/pioarduino/platform-espressif32/releases
 PLATFORM_VERSION_LOOKUP = {
     "recommended": cv.Version(54, 3, 21, "2"),
-    "latest": cv.Version(55, 3, 31),
+    "latest": cv.Version(55, 3, 31, "1"),
-    "dev": "https://github.com/pioarduino/platform-espressif32.git#develop",
+    "dev": cv.Version(55, 3, 31, "1"),
 }
@@ -645,6 +646,7 @@ def _show_framework_migration_message(name: str, variant: str) -> None:
         + "Why change? ESP-IDF offers:\n"
         + color(AnsiFore.GREEN, "   ✨ Up to 40% smaller binaries\n")
         + color(AnsiFore.GREEN, "   🚀 Better performance and optimization\n")
+        + color(AnsiFore.GREEN, "   ⚡ 2-3x faster compile times\n")
         + color(AnsiFore.GREEN, "   📦 Custom-built firmware for your exact needs\n")
         + color(
             AnsiFore.GREEN,
@@ -652,7 +654,6 @@ def _show_framework_migration_message(name: str, variant: str) -> None:
         )
         + "\n"
         + "Trade-offs:\n"
-        + color(AnsiFore.YELLOW, "   ⏱️ Compile times are ~25% longer\n")
         + color(AnsiFore.YELLOW, "   🔄 Some components need migration\n")
         + "\n"
         + "What should I do?\n"

View File

@@ -285,6 +285,10 @@ def consume_connection_slots(
 def validate_connection_slots(max_connections: int) -> None:
     """Validate that BLE connection slots don't exceed the configured maximum."""
+    # Skip validation in testing mode to allow component grouping
+    if CORE.testing_mode:
+        return
     ble_data = CORE.data.get(KEY_ESP32_BLE, {})
     used_slots = ble_data.get(KEY_USED_CONNECTION_SLOTS, [])
     num_used = len(used_slots)
@@ -332,12 +336,16 @@ def final_validation(config):
     # Check if BLE Server is needed
     has_ble_server = "esp32_ble_server" in full_config
-    add_idf_sdkconfig_option("CONFIG_BT_GATTS_ENABLE", has_ble_server)

     # Check if BLE Client is needed (via esp32_ble_tracker or esp32_ble_client)
     has_ble_client = (
         "esp32_ble_tracker" in full_config or "esp32_ble_client" in full_config
     )
+    # ESP-IDF BLE stack requires GATT Server to be enabled when GATT Client is enabled
+    # This is an internal dependency in the Bluedroid stack (tested ESP-IDF 5.4.2-5.5.1)
+    # See: https://github.com/espressif/esp-idf/issues/17724
+    add_idf_sdkconfig_option("CONFIG_BT_GATTS_ENABLE", has_ble_server or has_ble_client)
     add_idf_sdkconfig_option("CONFIG_BT_GATTC_ENABLE", has_ble_client)

     # Handle max_connections: check for deprecated location in esp32_ble_tracker

View File

@@ -14,10 +14,6 @@
#include "esphome/core/hal.h" #include "esphome/core/hal.h"
#include "esphome/core/helpers.h" #include "esphome/core/helpers.h"
#ifdef USE_ARDUINO
#include <esp32-hal-bt.h>
#endif
namespace esphome { namespace esphome {
namespace esp32_ble_beacon { namespace esp32_ble_beacon {

View File

@@ -25,10 +25,6 @@
 #include <esp_coexist.h>
 #endif

-#ifdef USE_ARDUINO
-#include <esp32-hal-bt.h>
-#endif
-
 #define MBEDTLS_AES_ALT
 #include <aes_alt.h>

View File

@@ -143,6 +143,7 @@ void ESP32ImprovComponent::loop() {
 #else
       this->set_state_(improv::STATE_AUTHORIZED);
 #endif
+      this->check_wifi_connection_();
       break;
     }
     case improv::STATE_AUTHORIZED: {
@@ -156,31 +157,12 @@
       if (!this->check_identify_()) {
         this->set_status_indicator_state_((now % 1000) < 500);
       }
+      this->check_wifi_connection_();
       break;
     }
     case improv::STATE_PROVISIONING: {
       this->set_status_indicator_state_((now % 200) < 100);
-      if (wifi::global_wifi_component->is_connected()) {
-        wifi::global_wifi_component->save_wifi_sta(this->connecting_sta_.get_ssid(),
-                                                   this->connecting_sta_.get_password());
-        this->connecting_sta_ = {};
-        this->cancel_timeout("wifi-connect-timeout");
-        this->set_state_(improv::STATE_PROVISIONED);
-
-        std::vector<std::string> urls = {ESPHOME_MY_LINK};
-#ifdef USE_WEBSERVER
-        for (auto &ip : wifi::global_wifi_component->wifi_sta_ip_addresses()) {
-          if (ip.is_ip4()) {
-            std::string webserver_url = "http://" + ip.str() + ":" + to_string(USE_WEBSERVER_PORT);
-            urls.push_back(webserver_url);
-            break;
-          }
-        }
-#endif
-        std::vector<uint8_t> data = improv::build_rpc_response(improv::WIFI_SETTINGS, urls);
-        this->send_response_(data);
-        this->stop();
-      }
+      this->check_wifi_connection_();
       break;
     }
     case improv::STATE_PROVISIONED: {
@@ -392,6 +374,36 @@ void ESP32ImprovComponent::on_wifi_connect_timeout_() {
   wifi::global_wifi_component->clear_sta();
 }

+void ESP32ImprovComponent::check_wifi_connection_() {
+  if (!wifi::global_wifi_component->is_connected()) {
+    return;
+  }
+
+  if (this->state_ == improv::STATE_PROVISIONING) {
+    wifi::global_wifi_component->save_wifi_sta(this->connecting_sta_.get_ssid(), this->connecting_sta_.get_password());
+    this->connecting_sta_ = {};
+    this->cancel_timeout("wifi-connect-timeout");
+
+    std::vector<std::string> urls = {ESPHOME_MY_LINK};
+#ifdef USE_WEBSERVER
+    for (auto &ip : wifi::global_wifi_component->wifi_sta_ip_addresses()) {
+      if (ip.is_ip4()) {
+        std::string webserver_url = "http://" + ip.str() + ":" + to_string(USE_WEBSERVER_PORT);
+        urls.push_back(webserver_url);
+        break;
+      }
+    }
+#endif
+    std::vector<uint8_t> data = improv::build_rpc_response(improv::WIFI_SETTINGS, urls);
+    this->send_response_(data);
+  } else if (this->is_active() && this->state_ != improv::STATE_PROVISIONED) {
+    ESP_LOGD(TAG, "WiFi provisioned externally");
+  }
+
+  this->set_state_(improv::STATE_PROVISIONED);
+  this->stop();
+}
+
 void ESP32ImprovComponent::advertise_service_data_() {
   uint8_t service_data[IMPROV_SERVICE_DATA_SIZE] = {};
   service_data[0] = IMPROV_PROTOCOL_ID_1;  // PR

View File

@@ -111,6 +111,7 @@ class ESP32ImprovComponent : public Component {
   void send_response_(std::vector<uint8_t> &response);
   void process_incoming_data_();
   void on_wifi_connect_timeout_();
+  void check_wifi_connection_();
   bool check_identify_();
   void advertise_service_data_();
 #if ESPHOME_LOG_LEVEL >= ESPHOME_LOG_LEVEL_DEBUG

View File

@@ -29,7 +29,7 @@ namespace esphome {
 static const char *const TAG = "esphome.ota";
 static constexpr uint16_t OTA_BLOCK_SIZE = 8192;
 static constexpr size_t OTA_BUFFER_SIZE = 1024;  // buffer size for OTA data transfer
-static constexpr uint32_t OTA_SOCKET_TIMEOUT_HANDSHAKE = 10000;  // milliseconds for initial handshake
+static constexpr uint32_t OTA_SOCKET_TIMEOUT_HANDSHAKE = 20000;  // milliseconds for initial handshake
 static constexpr uint32_t OTA_SOCKET_TIMEOUT_DATA = 90000;       // milliseconds for data transfer

 #ifdef USE_OTA_PASSWORD

View File

@@ -8,6 +8,13 @@ namespace json {
 static const char *const TAG = "json";

+#ifdef USE_PSRAM
+// Global allocator that outlives all JsonDocuments returned by parse_json()
+// This prevents dangling pointer issues when JsonDocuments are returned from functions
+// NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables) - Must be mutable for ArduinoJson::Allocator
+static SpiRamAllocator global_json_allocator;
+#endif
+
 std::string build_json(const json_build_t &f) {
   // NOLINTBEGIN(clang-analyzer-cplusplus.NewDeleteLeaks) false positive with ArduinoJson
   JsonBuilder builder;
@@ -33,8 +40,7 @@ JsonDocument parse_json(const uint8_t *data, size_t len) {
     return JsonObject();  // return unbound object
   }
 #ifdef USE_PSRAM
-  auto doc_allocator = SpiRamAllocator();
-  JsonDocument json_document(&doc_allocator);
+  JsonDocument json_document(&global_json_allocator);
 #else
   JsonDocument json_document;
 #endif
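The hunk above exists because ArduinoJson 7 documents keep a pointer to their `Allocator`, so a function-local allocator dies at return and leaves the returned document dangling. A minimal sketch of the bug and the fix, assuming ArduinoJson 7's `Allocator` interface (`MyPsramAllocator` is an illustrative stand-in for ESPHome's `SpiRamAllocator`):

```cpp
#include <ArduinoJson.h>
#include <cstdlib>

// Illustrative allocator; a real one would route through heap_caps_malloc() etc.
struct MyPsramAllocator : ArduinoJson::Allocator {
  void *allocate(size_t n) override { return malloc(n); }
  void deallocate(void *p) override { free(p); }
  void *reallocate(void *p, size_t n) override { return realloc(p, n); }
};

// BUG: the returned document still points at `local`, which is destroyed here.
JsonDocument parse_dangling(const char *json) {
  MyPsramAllocator local;
  JsonDocument doc(&local);
  deserializeJson(doc, json);
  return doc;  // dangling allocator pointer
}

// FIX (what the diff does): give the allocator static storage duration so it
// outlives every document handed back to callers.
static MyPsramAllocator global_allocator;
JsonDocument parse_safe(const char *json) {
  JsonDocument doc(&global_allocator);
  deserializeJson(doc, json);
  return doc;
}
```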

View File

@@ -21,11 +21,11 @@ template<uint8_t N> class MCP23XXXBase : public Component, public gpio_expander:
  protected:
   // read a given register
-  virtual bool read_reg(uint8_t reg, uint8_t *value);
+  virtual bool read_reg(uint8_t reg, uint8_t *value) = 0;
   // write a value to a given register
-  virtual bool write_reg(uint8_t reg, uint8_t value);
+  virtual bool write_reg(uint8_t reg, uint8_t value) = 0;
   // update registers with given pin value.
-  virtual void update_reg(uint8_t pin, bool pin_value, uint8_t reg_a);
+  virtual void update_reg(uint8_t pin, bool pin_value, uint8_t reg_a) = 0;

   bool open_drain_ints_;
 };

View File

@@ -11,7 +11,7 @@ from esphome.const import (
     CONF_SERVICES,
     PlatformFramework,
 )
-from esphome.core import CORE, coroutine_with_priority
+from esphome.core import CORE, Lambda, coroutine_with_priority
 from esphome.coroutine import CoroPriority

 CODEOWNERS = ["@esphome/core"]
@@ -58,17 +58,64 @@ CONFIG_SCHEMA = cv.All(
 )

-def mdns_txt_record(key: str, value: str):
-    return cg.StructInitializer(
-        MDNSTXTRecord,
-        ("key", cg.RawExpression(f"MDNS_STR({cg.safe_exp(key)})")),
-        ("value", value),
+def mdns_txt_record(key: str, value: str) -> cg.RawExpression:
+    """Create a mDNS TXT record.
+
+    Public API for external components. Do not remove.
+
+    Args:
+        key: The TXT record key
+        value: The TXT record value (static string only)
+
+    Returns:
+        A RawExpression representing a MDNSTXTRecord struct
+    """
+    return cg.RawExpression(
+        f"{{MDNS_STR({cg.safe_exp(key)}), MDNS_STR({cg.safe_exp(value)})}}"
     )

+async def _mdns_txt_record_templated(
+    mdns_comp: cg.Pvariable, key: str, value: Lambda | str
+) -> cg.RawExpression:
+    """Create a mDNS TXT record with support for templated values.
+
+    Internal helper function.
+
+    Args:
+        mdns_comp: The MDNSComponent instance (from cg.get_variable())
+        key: The TXT record key
+        value: The TXT record value (can be a static string or a lambda template)
+
+    Returns:
+        A RawExpression representing a MDNSTXTRecord struct
+    """
+    if not cg.is_template(value):
+        # It's a static string - use directly in flash, no need to store in vector
+        return mdns_txt_record(key, value)
+
+    # It's a lambda - evaluate and store using helper
+    templated_value = await cg.templatable(value, [], cg.std_string)
+    safe_key = cg.safe_exp(key)
+    dynamic_call = f"{mdns_comp}->add_dynamic_txt_value(({templated_value})())"
+    return cg.RawExpression(f"{{MDNS_STR({safe_key}), MDNS_STR({dynamic_call})}}")

 def mdns_service(
-    service: str, proto: str, port: int, txt_records: list[dict[str, str]]
-):
+    service: str, proto: str, port: int, txt_records: list[cg.RawExpression]
+) -> cg.StructInitializer:
+    """Create a mDNS service.
+
+    Public API for external components. Do not remove.
+
+    Args:
+        service: Service name (e.g., "_http")
+        proto: Protocol (e.g., "_tcp" or "_udp")
+        port: Port number
+        txt_records: List of MDNSTXTRecord expressions
+
+    Returns:
+        A StructInitializer representing a MDNSService struct
+    """
     return cg.StructInitializer(
         MDNSService,
         ("service_type", cg.RawExpression(f"MDNS_STR({cg.safe_exp(service)})")),
@@ -107,23 +154,37 @@ async def to_code(config):
     # Ensure at least 1 service (fallback service)
     cg.add_define("MDNS_SERVICE_COUNT", max(1, service_count))

+    # Calculate compile-time dynamic TXT value count
+    # Dynamic values are those that cannot be stored in flash at compile time
+    dynamic_txt_count = 0
+    if "api" in CORE.config:
+        # Always: get_mac_address()
+        dynamic_txt_count += 1
+    # User-provided templatable TXT values (only lambdas, not static strings)
+    dynamic_txt_count += sum(
+        1
+        for service in config[CONF_SERVICES]
+        for txt_value in service[CONF_TXT].values()
+        if cg.is_template(txt_value)
+    )
+    # Ensure at least 1 to avoid zero-size array
+    cg.add_define("MDNS_DYNAMIC_TXT_COUNT", max(1, dynamic_txt_count))
+
     var = cg.new_Pvariable(config[CONF_ID])
     await cg.register_component(var, config)

     for service in config[CONF_SERVICES]:
-        txt = [
-            cg.StructInitializer(
-                MDNSTXTRecord,
-                ("key", cg.RawExpression(f"MDNS_STR({cg.safe_exp(txt_key)})")),
-                ("value", await cg.templatable(txt_value, [], cg.std_string)),
-            )
+        txt_records = [
+            await _mdns_txt_record_templated(var, txt_key, txt_value)
             for txt_key, txt_value in service[CONF_TXT].items()
         ]
         exp = mdns_service(
             service[CONF_SERVICE],
             service[CONF_PROTOCOL],
             await cg.templatable(service[CONF_PORT], [], cg.uint16),
-            txt,
+            txt_records,
         )
         cg.add(var.add_extra_service(exp))
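The codegen above routes each TXT value down one of two paths: static strings go straight to flash, while lambdas are evaluated at setup and parked in `dynamic_txt_values_`. A hedged configuration sketch of the two paths (service name and values are invented):

```yaml
mdns:
  services:
    - service: "_custom"
      protocol: "_tcp"
      port: 8080
      txt:
        # Static string: emitted into flash via MDNS_STR(...), costs no RAM
        path: "/status"
        # Lambda: counted into MDNS_DYNAMIC_TXT_COUNT and stored via add_dynamic_txt_value()
        serial: !lambda 'return get_mac_address();'
```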

View File

@@ -9,21 +9,9 @@
 #include <pgmspace.h>

 // Macro to define strings in PROGMEM on ESP8266, regular memory on other platforms
 #define MDNS_STATIC_CONST_CHAR(name, value) static const char name[] PROGMEM = value
-
-// Helper to convert PROGMEM string to std::string for TemplatableValue
-// Only define this function if we have services that will use it
-#if defined(USE_API) || defined(USE_PROMETHEUS) || defined(USE_WEBSERVER) || defined(USE_MDNS_EXTRA_SERVICES)
-static std::string mdns_str_value(PGM_P str) {
-  char buf[64];
-  strncpy_P(buf, str, sizeof(buf) - 1);
-  buf[sizeof(buf) - 1] = '\0';
-  return std::string(buf);
-}
-#define MDNS_STR_VALUE(name) mdns_str_value(name)
-#endif
 #else
 // On non-ESP8266 platforms, use regular const char*
 #define MDNS_STATIC_CONST_CHAR(name, value) static constexpr const char name[] = value
-#define MDNS_STR_VALUE(name) std::string(name)
 #endif

 #ifdef USE_API
@@ -43,30 +31,10 @@ static const char *const TAG = "mdns";
 #endif

 // Define all constant strings using the macro
-MDNS_STATIC_CONST_CHAR(SERVICE_ESPHOMELIB, "_esphomelib");
 MDNS_STATIC_CONST_CHAR(SERVICE_TCP, "_tcp");
-MDNS_STATIC_CONST_CHAR(SERVICE_PROMETHEUS, "_prometheus-http");
-MDNS_STATIC_CONST_CHAR(SERVICE_HTTP, "_http");
-MDNS_STATIC_CONST_CHAR(TXT_FRIENDLY_NAME, "friendly_name");
-MDNS_STATIC_CONST_CHAR(TXT_VERSION, "version");
-MDNS_STATIC_CONST_CHAR(TXT_MAC, "mac");
-MDNS_STATIC_CONST_CHAR(TXT_PLATFORM, "platform");
-MDNS_STATIC_CONST_CHAR(TXT_BOARD, "board");
-MDNS_STATIC_CONST_CHAR(TXT_NETWORK, "network");
-MDNS_STATIC_CONST_CHAR(TXT_API_ENCRYPTION, "api_encryption");
-MDNS_STATIC_CONST_CHAR(TXT_API_ENCRYPTION_SUPPORTED, "api_encryption_supported");
-MDNS_STATIC_CONST_CHAR(TXT_PROJECT_NAME, "project_name");
-MDNS_STATIC_CONST_CHAR(TXT_PROJECT_VERSION, "project_version");
-MDNS_STATIC_CONST_CHAR(TXT_PACKAGE_IMPORT_URL, "package_import_url");
-MDNS_STATIC_CONST_CHAR(PLATFORM_ESP8266, "ESP8266");
-MDNS_STATIC_CONST_CHAR(PLATFORM_ESP32, "ESP32");
-MDNS_STATIC_CONST_CHAR(PLATFORM_RP2040, "RP2040");
-MDNS_STATIC_CONST_CHAR(NETWORK_WIFI, "wifi");
-MDNS_STATIC_CONST_CHAR(NETWORK_ETHERNET, "ethernet");
-MDNS_STATIC_CONST_CHAR(NETWORK_THREAD, "thread");
+
+// Wrap build-time defines into flash storage
+MDNS_STATIC_CONST_CHAR(VALUE_VERSION, ESPHOME_VERSION);

 void MDNSComponent::compile_records_() {
   this->hostname_ = App.get_name();
@@ -75,6 +43,15 @@ void MDNSComponent::compile_records_() {
   // in mdns/__init__.py. If you add a new service here, update both locations.
 #ifdef USE_API
+  MDNS_STATIC_CONST_CHAR(SERVICE_ESPHOMELIB, "_esphomelib");
+  MDNS_STATIC_CONST_CHAR(TXT_FRIENDLY_NAME, "friendly_name");
+  MDNS_STATIC_CONST_CHAR(TXT_VERSION, "version");
+  MDNS_STATIC_CONST_CHAR(TXT_MAC, "mac");
+  MDNS_STATIC_CONST_CHAR(TXT_PLATFORM, "platform");
+  MDNS_STATIC_CONST_CHAR(TXT_BOARD, "board");
+  MDNS_STATIC_CONST_CHAR(TXT_NETWORK, "network");
+  MDNS_STATIC_CONST_CHAR(VALUE_BOARD, ESPHOME_BOARD);
+
   if (api::global_api_server != nullptr) {
     auto &service = this->services_.emplace_next();
     service.service_type = MDNS_STR(SERVICE_ESPHOMELIB);
@@ -109,52 +86,66 @@
     txt_records.reserve(txt_count);

     if (!friendly_name_empty) {
-      txt_records.push_back({MDNS_STR(TXT_FRIENDLY_NAME), friendly_name});
+      txt_records.push_back({MDNS_STR(TXT_FRIENDLY_NAME), MDNS_STR(friendly_name.c_str())});
     }
-    txt_records.push_back({MDNS_STR(TXT_VERSION), ESPHOME_VERSION});
+    txt_records.push_back({MDNS_STR(TXT_VERSION), MDNS_STR(VALUE_VERSION)});
-    txt_records.push_back({MDNS_STR(TXT_MAC), get_mac_address()});
+    txt_records.push_back({MDNS_STR(TXT_MAC), MDNS_STR(this->add_dynamic_txt_value(get_mac_address()))});
 #ifdef USE_ESP8266
-    txt_records.push_back({MDNS_STR(TXT_PLATFORM), MDNS_STR_VALUE(PLATFORM_ESP8266)});
+    MDNS_STATIC_CONST_CHAR(PLATFORM_ESP8266, "ESP8266");
+    txt_records.push_back({MDNS_STR(TXT_PLATFORM), MDNS_STR(PLATFORM_ESP8266)});
 #elif defined(USE_ESP32)
-    txt_records.push_back({MDNS_STR(TXT_PLATFORM), MDNS_STR_VALUE(PLATFORM_ESP32)});
+    MDNS_STATIC_CONST_CHAR(PLATFORM_ESP32, "ESP32");
+    txt_records.push_back({MDNS_STR(TXT_PLATFORM), MDNS_STR(PLATFORM_ESP32)});
 #elif defined(USE_RP2040)
-    txt_records.push_back({MDNS_STR(TXT_PLATFORM), MDNS_STR_VALUE(PLATFORM_RP2040)});
+    MDNS_STATIC_CONST_CHAR(PLATFORM_RP2040, "RP2040");
+    txt_records.push_back({MDNS_STR(TXT_PLATFORM), MDNS_STR(PLATFORM_RP2040)});
 #elif defined(USE_LIBRETINY)
-    txt_records.push_back({MDNS_STR(TXT_PLATFORM), lt_cpu_get_model_name()});
+    txt_records.push_back({MDNS_STR(TXT_PLATFORM), MDNS_STR(lt_cpu_get_model_name())});
 #endif
-    txt_records.push_back({MDNS_STR(TXT_BOARD), ESPHOME_BOARD});
+    txt_records.push_back({MDNS_STR(TXT_BOARD), MDNS_STR(VALUE_BOARD)});
 #if defined(USE_WIFI)
-    txt_records.push_back({MDNS_STR(TXT_NETWORK), MDNS_STR_VALUE(NETWORK_WIFI)});
+    MDNS_STATIC_CONST_CHAR(NETWORK_WIFI, "wifi");
+    txt_records.push_back({MDNS_STR(TXT_NETWORK), MDNS_STR(NETWORK_WIFI)});
 #elif defined(USE_ETHERNET)
-    txt_records.push_back({MDNS_STR(TXT_NETWORK), MDNS_STR_VALUE(NETWORK_ETHERNET)});
+    MDNS_STATIC_CONST_CHAR(NETWORK_ETHERNET, "ethernet");
+    txt_records.push_back({MDNS_STR(TXT_NETWORK), MDNS_STR(NETWORK_ETHERNET)});
 #elif defined(USE_OPENTHREAD)
-    txt_records.push_back({MDNS_STR(TXT_NETWORK), MDNS_STR_VALUE(NETWORK_THREAD)});
+    MDNS_STATIC_CONST_CHAR(NETWORK_THREAD, "thread");
+    txt_records.push_back({MDNS_STR(TXT_NETWORK), MDNS_STR(NETWORK_THREAD)});
 #endif

 #ifdef USE_API_NOISE
+    MDNS_STATIC_CONST_CHAR(TXT_API_ENCRYPTION, "api_encryption");
+    MDNS_STATIC_CONST_CHAR(TXT_API_ENCRYPTION_SUPPORTED, "api_encryption_supported");
     MDNS_STATIC_CONST_CHAR(NOISE_ENCRYPTION, "Noise_NNpsk0_25519_ChaChaPoly_SHA256");
-    if (api::global_api_server->get_noise_ctx()->has_psk()) {
-      txt_records.push_back({MDNS_STR(TXT_API_ENCRYPTION), MDNS_STR_VALUE(NOISE_ENCRYPTION)});
-    } else {
-      txt_records.push_back({MDNS_STR(TXT_API_ENCRYPTION_SUPPORTED), MDNS_STR_VALUE(NOISE_ENCRYPTION)});
-    }
+    bool has_psk = api::global_api_server->get_noise_ctx()->has_psk();
+    const char *encryption_key = has_psk ? TXT_API_ENCRYPTION : TXT_API_ENCRYPTION_SUPPORTED;
+    txt_records.push_back({MDNS_STR(encryption_key), MDNS_STR(NOISE_ENCRYPTION)});
 #endif

 #ifdef ESPHOME_PROJECT_NAME
-    txt_records.push_back({MDNS_STR(TXT_PROJECT_NAME), ESPHOME_PROJECT_NAME});
-    txt_records.push_back({MDNS_STR(TXT_PROJECT_VERSION), ESPHOME_PROJECT_VERSION});
+    MDNS_STATIC_CONST_CHAR(TXT_PROJECT_NAME, "project_name");
+    MDNS_STATIC_CONST_CHAR(TXT_PROJECT_VERSION, "project_version");
+    MDNS_STATIC_CONST_CHAR(VALUE_PROJECT_NAME, ESPHOME_PROJECT_NAME);
+    MDNS_STATIC_CONST_CHAR(VALUE_PROJECT_VERSION, ESPHOME_PROJECT_VERSION);
+    txt_records.push_back({MDNS_STR(TXT_PROJECT_NAME), MDNS_STR(VALUE_PROJECT_NAME)});
+    txt_records.push_back({MDNS_STR(TXT_PROJECT_VERSION), MDNS_STR(VALUE_PROJECT_VERSION)});
 #endif  // ESPHOME_PROJECT_NAME

 #ifdef USE_DASHBOARD_IMPORT
-    txt_records.push_back({MDNS_STR(TXT_PACKAGE_IMPORT_URL), dashboard_import::get_package_import_url()});
+    MDNS_STATIC_CONST_CHAR(TXT_PACKAGE_IMPORT_URL, "package_import_url");
+    txt_records.push_back(
+        {MDNS_STR(TXT_PACKAGE_IMPORT_URL), MDNS_STR(dashboard_import::get_package_import_url().c_str())});
 #endif
   }
 #endif  // USE_API

 #ifdef USE_PROMETHEUS
+  MDNS_STATIC_CONST_CHAR(SERVICE_PROMETHEUS, "_prometheus-http");
   auto &prom_service = this->services_.emplace_next();
   prom_service.service_type = MDNS_STR(SERVICE_PROMETHEUS);
   prom_service.proto = MDNS_STR(SERVICE_TCP);
@@ -162,6 +153,8 @@ void MDNSComponent::compile_records_() {
 #endif

 #ifdef USE_WEBSERVER
+  MDNS_STATIC_CONST_CHAR(SERVICE_HTTP, "_http");
   auto &web_service = this->services_.emplace_next();
   web_service.service_type = MDNS_STR(SERVICE_HTTP);
   web_service.proto = MDNS_STR(SERVICE_TCP);
@@ -169,13 +162,16 @@
 #endif

 #if !defined(USE_API) && !defined(USE_PROMETHEUS) && !defined(USE_WEBSERVER) && !defined(USE_MDNS_EXTRA_SERVICES)
+  MDNS_STATIC_CONST_CHAR(SERVICE_HTTP, "_http");
+  MDNS_STATIC_CONST_CHAR(TXT_VERSION, "version");
   // Publish "http" service if not using native API or any other services
   // This is just to have *some* mDNS service so that .local resolution works
   auto &fallback_service = this->services_.emplace_next();
   fallback_service.service_type = MDNS_STR(SERVICE_HTTP);
   fallback_service.proto = MDNS_STR(SERVICE_TCP);
   fallback_service.port = USE_WEBSERVER_PORT;
-  fallback_service.txt_records.push_back({MDNS_STR(TXT_VERSION), ESPHOME_VERSION});
+  fallback_service.txt_records.push_back({MDNS_STR(TXT_VERSION), MDNS_STR(VALUE_VERSION)});
 #endif
 }
@@ -190,8 +186,7 @@ void MDNSComponent::dump_config() {
     ESP_LOGV(TAG, "  - %s, %s, %d", MDNS_STR_ARG(service.service_type), MDNS_STR_ARG(service.proto),
              const_cast<TemplatableValue<uint16_t> &>(service.port).value());
     for (const auto &record : service.txt_records) {
-      ESP_LOGV(TAG, "    TXT: %s = %s", MDNS_STR_ARG(record.key),
-               const_cast<TemplatableValue<std::string> &>(record.value).value().c_str());
+      ESP_LOGV(TAG, "    TXT: %s = %s", MDNS_STR_ARG(record.key), MDNS_STR_ARG(record.value));
     }
   }
 #endif

View File

@@ -27,7 +27,7 @@ struct MDNSString;
 struct MDNSTXTRecord {
   const MDNSString *key;
-  TemplatableValue<std::string> value;
+  const MDNSString *value;
 };

 struct MDNSService {
@@ -59,6 +59,17 @@ class MDNSComponent : public Component {
   void on_shutdown() override;

+  /// Add a dynamic TXT value and return pointer to it for use in MDNSTXTRecord
+  const char *add_dynamic_txt_value(const std::string &value) {
+    this->dynamic_txt_values_.push_back(value);
+    return this->dynamic_txt_values_[this->dynamic_txt_values_.size() - 1].c_str();
+  }
+
+  /// Storage for runtime-generated TXT values (MAC address, user lambdas)
+  /// Pre-sized at compile time via MDNS_DYNAMIC_TXT_COUNT to avoid heap allocations.
+  /// Static/compile-time values (version, board, etc.) are stored directly in flash and don't use this.
+  StaticVector<std::string, MDNS_DYNAMIC_TXT_COUNT> dynamic_txt_values_;
+
  protected:
   StaticVector<MDNSService, MDNS_SERVICE_COUNT> services_{};
   std::string hostname_;

View File

@@ -2,7 +2,6 @@
 #if defined(USE_ESP32) && defined(USE_MDNS)
 #include <mdns.h>
-#include <cstring>
 #include "esphome/core/hal.h"
 #include "esphome/core/log.h"
 #include "mdns_component.h"
@@ -29,21 +28,16 @@ void MDNSComponent::setup() {
     std::vector<mdns_txt_item_t> txt_records;
     for (const auto &record : service.txt_records) {
       mdns_txt_item_t it{};
-      // key is a compile-time string literal in flash, no need to strdup
+      // key and value are either compile-time string literals in flash or pointers to dynamic_txt_values_
+      // Both remain valid for the lifetime of this function, and ESP-IDF makes internal copies
       it.key = MDNS_STR_ARG(record.key);
-      // value is a temporary from TemplatableValue, must strdup to keep it alive
-      it.value = strdup(const_cast<TemplatableValue<std::string> &>(record.value).value().c_str());
+      it.value = MDNS_STR_ARG(record.value);
       txt_records.push_back(it);
     }
     uint16_t port = const_cast<TemplatableValue<uint16_t> &>(service.port).value();
     err = mdns_service_add(nullptr, MDNS_STR_ARG(service.service_type), MDNS_STR_ARG(service.proto), port,
                            txt_records.data(), txt_records.size());
-    // free records
-    for (const auto &it : txt_records) {
-      free((void *) it.value);  // NOLINT(cppcoreguidelines-no-malloc)
-    }
     if (err != ESP_OK) {
       ESP_LOGW(TAG, "Failed to register service %s: %s", MDNS_STR_ARG(service.service_type), esp_err_to_name(err));
     }

View File

@@ -33,7 +33,7 @@ void MDNSComponent::setup() {
     MDNS.addService(FPSTR(service_type), FPSTR(proto), port);
     for (const auto &record : service.txt_records) {
       MDNS.addServiceTxt(FPSTR(service_type), FPSTR(proto), FPSTR(MDNS_STR_ARG(record.key)),
-                         const_cast<TemplatableValue<std::string> &>(record.value).value().c_str());
+                         FPSTR(MDNS_STR_ARG(record.value)));
     }
   }
 }

View File

@@ -32,8 +32,7 @@ void MDNSComponent::setup() {
     uint16_t port_ = const_cast<TemplatableValue<uint16_t> &>(service.port).value();
     MDNS.addService(service_type, proto, port_);
     for (const auto &record : service.txt_records) {
-      MDNS.addServiceTxt(service_type, proto, MDNS_STR_ARG(record.key),
-                         const_cast<TemplatableValue<std::string> &>(record.value).value().c_str());
+      MDNS.addServiceTxt(service_type, proto, MDNS_STR_ARG(record.key), MDNS_STR_ARG(record.value));
     }
   }
 }

View File

@@ -32,8 +32,7 @@ void MDNSComponent::setup() {
     uint16_t port = const_cast<TemplatableValue<uint16_t> &>(service.port).value();
     MDNS.addService(service_type, proto, port);
     for (const auto &record : service.txt_records) {
-      MDNS.addServiceTxt(service_type, proto, MDNS_STR_ARG(record.key),
-                         const_cast<TemplatableValue<std::string> &>(record.value).value().c_str());
+      MDNS.addServiceTxt(service_type, proto, MDNS_STR_ARG(record.key), MDNS_STR_ARG(record.value));
     }
   }
 }

View File

@@ -7,7 +7,7 @@
#include "opentherm.h" #include "opentherm.h"
#include "esphome/core/helpers.h" #include "esphome/core/helpers.h"
#if defined(ESP32) || defined(USE_ESP_IDF) #ifdef USE_ESP32
#include "driver/timer.h" #include "driver/timer.h"
#include "esp_err.h" #include "esp_err.h"
#endif #endif
@@ -31,7 +31,7 @@ OpenTherm *OpenTherm::instance = nullptr;
OpenTherm::OpenTherm(InternalGPIOPin *in_pin, InternalGPIOPin *out_pin, int32_t device_timeout) OpenTherm::OpenTherm(InternalGPIOPin *in_pin, InternalGPIOPin *out_pin, int32_t device_timeout)
: in_pin_(in_pin), : in_pin_(in_pin),
out_pin_(out_pin), out_pin_(out_pin),
#if defined(ESP32) || defined(USE_ESP_IDF) #ifdef USE_ESP32
timer_group_(TIMER_GROUP_0), timer_group_(TIMER_GROUP_0),
timer_idx_(TIMER_0), timer_idx_(TIMER_0),
#endif #endif
@@ -57,7 +57,7 @@ bool OpenTherm::initialize() {
this->out_pin_->setup(); this->out_pin_->setup();
this->out_pin_->digital_write(true); this->out_pin_->digital_write(true);
#if defined(ESP32) || defined(USE_ESP_IDF) #ifdef USE_ESP32
return this->init_esp32_timer_(); return this->init_esp32_timer_();
#else #else
return true; return true;
@@ -238,7 +238,7 @@ void IRAM_ATTR OpenTherm::write_bit_(uint8_t high, uint8_t clock) {
} }
} }
#if defined(ESP32) || defined(USE_ESP_IDF) #ifdef USE_ESP32
bool OpenTherm::init_esp32_timer_() { bool OpenTherm::init_esp32_timer_() {
// Search for a free timer. Maybe unstable, we'll see. // Search for a free timer. Maybe unstable, we'll see.
@@ -365,7 +365,7 @@ void IRAM_ATTR OpenTherm::stop_timer_() {
} }
} }
#endif // END ESP32 #endif // USE_ESP32
#ifdef ESP8266 #ifdef ESP8266
// 5 kHz timer_ // 5 kHz timer_

View File

@@ -12,7 +12,7 @@
#include "esphome/core/helpers.h" #include "esphome/core/helpers.h"
#include "esphome/core/log.h" #include "esphome/core/log.h"
#if defined(ESP32) || defined(USE_ESP_IDF) #ifdef USE_ESP32
#include "driver/timer.h" #include "driver/timer.h"
#endif #endif
@@ -356,7 +356,7 @@ class OpenTherm {
ISRInternalGPIOPin isr_in_pin_; ISRInternalGPIOPin isr_in_pin_;
ISRInternalGPIOPin isr_out_pin_; ISRInternalGPIOPin isr_out_pin_;
#if defined(ESP32) || defined(USE_ESP_IDF) #ifdef USE_ESP32
timer_group_t timer_group_; timer_group_t timer_group_;
timer_idx_t timer_idx_; timer_idx_t timer_idx_;
#endif #endif
@@ -370,7 +370,7 @@ class OpenTherm {
int32_t timeout_counter_; // <0 no timeout int32_t timeout_counter_; // <0 no timeout
int32_t device_timeout_; int32_t device_timeout_;
#if defined(ESP32) || defined(USE_ESP_IDF) #ifdef USE_ESP32
esp_err_t timer_error_ = ESP_OK; esp_err_t timer_error_ = ESP_OK;
TimerErrorType timer_error_type_ = TimerErrorType::NO_TIMER_ERROR; TimerErrorType timer_error_type_ = TimerErrorType::NO_TIMER_ERROR;

View File

@@ -180,10 +180,12 @@ void OpenThreadSrpComponent::setup() {
     entry->mService.mNumTxtEntries = service.txt_records.size();
     for (size_t i = 0; i < service.txt_records.size(); i++) {
       const auto &txt = service.txt_records[i];
-      auto value = const_cast<TemplatableValue<std::string> &>(txt.value).value();
+      // Value is either a compile-time string literal in flash or a pointer to dynamic_txt_values_
+      // OpenThread SRP client expects the data to persist, so we strdup it
+      const char *value_str = MDNS_STR_ARG(txt.value);
       txt_entries[i].mKey = MDNS_STR_ARG(txt.key);
-      txt_entries[i].mValue = reinterpret_cast<const uint8_t *>(strdup(value.c_str()));
-      txt_entries[i].mValueLength = value.size();
+      txt_entries[i].mValue = reinterpret_cast<const uint8_t *>(strdup(value_str));
+      txt_entries[i].mValueLength = strlen(value_str);
     }
     entry->mService.mTxtEntries = txt_entries;
     entry->mService.mNumTxtEntries = service.txt_records.size();

View File

@@ -347,7 +347,7 @@ def final_validate_device_schema(
     def validate_pin(opt, device):
         def validator(value):
-            if opt in device:
+            if opt in device and not CORE.testing_mode:
                 raise cv.Invalid(
                     f"The uart {opt} is used both by {name} and {device[opt]}, "
                     f"but can only be used by one. Please create a new uart bus for {name}."

View File

@@ -9,6 +9,7 @@ from esphome.components.esp32 import (
 import esphome.config_validation as cv
 from esphome.const import CONF_DEVICES, CONF_ID
 from esphome.cpp_types import Component
+from esphome.types import ConfigType

 AUTO_LOAD = ["bytebuffer"]
 CODEOWNERS = ["@clydebarrow"]
@@ -20,6 +21,7 @@ USBClient = usb_host_ns.class_("USBClient", Component)
 CONF_VID = "vid"
 CONF_PID = "pid"
 CONF_ENABLE_HUBS = "enable_hubs"
+CONF_MAX_TRANSFER_REQUESTS = "max_transfer_requests"

 def usb_device_schema(cls=USBClient, vid: int = None, pid: [int] = None) -> cv.Schema:
@@ -44,6 +46,9 @@ CONFIG_SCHEMA = cv.All(
         {
             cv.GenerateID(): cv.declare_id(USBHost),
             cv.Optional(CONF_ENABLE_HUBS, default=False): cv.boolean,
+            cv.Optional(CONF_MAX_TRANSFER_REQUESTS, default=16): cv.int_range(
+                min=1, max=32
+            ),
             cv.Optional(CONF_DEVICES): cv.ensure_list(usb_device_schema()),
         }
     ),
@@ -58,10 +63,14 @@ async def register_usb_client(config):
     return var

-async def to_code(config):
+async def to_code(config: ConfigType) -> None:
     add_idf_sdkconfig_option("CONFIG_USB_HOST_CONTROL_TRANSFER_MAX_SIZE", 1024)
     if config.get(CONF_ENABLE_HUBS):
         add_idf_sdkconfig_option("CONFIG_USB_HOST_HUBS_SUPPORTED", True)
+    max_requests = config[CONF_MAX_TRANSFER_REQUESTS]
+    cg.add_define("USB_HOST_MAX_REQUESTS", max_requests)
     var = cg.new_Pvariable(config[CONF_ID])
     await cg.register_component(var, config)
     for device in config.get(CONF_DEVICES) or ():
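For reference, the new option surfaces in YAML roughly like this; a sketch using the validated range from the schema above (surrounding board/platform config omitted):

```yaml
usb_host:
  # 1-32, default 16; values above 16 switch the slot bitmask to uint32_t
  max_transfer_requests: 24
```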

View File

@@ -2,6 +2,7 @@
// Should not be needed, but it's required to pass CI clang-tidy checks // Should not be needed, but it's required to pass CI clang-tidy checks
#if defined(USE_ESP32_VARIANT_ESP32S2) || defined(USE_ESP32_VARIANT_ESP32S3) || defined(USE_ESP32_VARIANT_ESP32P4) #if defined(USE_ESP32_VARIANT_ESP32S2) || defined(USE_ESP32_VARIANT_ESP32S3) || defined(USE_ESP32_VARIANT_ESP32P4)
#include "esphome/core/defines.h"
#include "esphome/core/component.h" #include "esphome/core/component.h"
#include <vector> #include <vector>
#include "usb/usb_host.h" #include "usb/usb_host.h"
@@ -16,23 +17,25 @@ namespace usb_host {
// THREADING MODEL: // THREADING MODEL:
// This component uses a dedicated USB task for event processing to prevent data loss. // This component uses a dedicated USB task for event processing to prevent data loss.
// - USB Task (high priority): Handles USB events, executes transfer callbacks // - USB Task (high priority): Handles USB events, executes transfer callbacks, releases transfer slots
// - Main Loop Task: Initiates transfers, processes completion events // - Main Loop Task: Initiates transfers, processes device connect/disconnect events
// //
// Thread-safe communication: // Thread-safe communication:
// - Lock-free queues for USB task -> main loop events (SPSC pattern) // - Lock-free queues for USB task -> main loop events (SPSC pattern)
// - Lock-free TransferRequest pool using atomic bitmask (MCSP pattern) // - Lock-free TransferRequest pool using atomic bitmask (MCMP pattern - multi-consumer, multi-producer)
// //
// TransferRequest pool access pattern: // TransferRequest pool access pattern:
// - get_trq_() [allocate]: Called from BOTH USB task and main loop threads // - get_trq_() [allocate]: Called from BOTH USB task and main loop threads
// * USB task: via USB UART input callbacks that restart transfers immediately // * USB task: via USB UART input callbacks that restart transfers immediately
// * Main loop: for output transfers and flow-controlled input restarts // * Main loop: for output transfers and flow-controlled input restarts
// - release_trq() [deallocate]: Called from main loop thread only // - release_trq() [deallocate]: Called from BOTH USB task and main loop threads
// * USB task: immediately after transfer callback completes (critical for preventing slot exhaustion)
// * Main loop: when transfer submission fails
// //
// The multi-threaded allocation is intentional for performance: // The multi-threaded allocation/deallocation is intentional for performance:
// - USB task can immediately restart input transfers without context switching // - USB task can immediately restart input transfers and release slots without context switching
// - Main loop controls backpressure by deciding when to restart after consuming data // - Main loop controls backpressure by deciding when to restart after consuming data
// The atomic bitmask ensures thread-safe allocation without mutex blocking. // The atomic bitmask ensures thread-safe allocation/deallocation without mutex blocking.
static const char *const TAG = "usb_host"; static const char *const TAG = "usb_host";
@@ -52,8 +55,17 @@ static const uint8_t USB_DIR_IN = 1 << 7;
static const uint8_t USB_DIR_OUT = 0; static const uint8_t USB_DIR_OUT = 0;
static const size_t SETUP_PACKET_SIZE = 8; static const size_t SETUP_PACKET_SIZE = 8;
static const size_t MAX_REQUESTS = 16; // maximum number of outstanding requests possible. static const size_t MAX_REQUESTS = USB_HOST_MAX_REQUESTS; // maximum number of outstanding requests possible.
static_assert(MAX_REQUESTS <= 16, "MAX_REQUESTS must be <= 16 to fit in uint16_t bitmask"); static_assert(MAX_REQUESTS >= 1 && MAX_REQUESTS <= 32, "MAX_REQUESTS must be between 1 and 32");
// Select appropriate bitmask type for tracking allocation of TransferRequest slots.
// The bitmask must have at least as many bits as MAX_REQUESTS, so:
// - Use uint16_t for up to 16 requests (MAX_REQUESTS <= 16)
// - Use uint32_t for 17-32 requests (MAX_REQUESTS > 16)
// This is tied to the static_assert above, which enforces MAX_REQUESTS is between 1 and 32.
// If MAX_REQUESTS is increased above 32, this logic and the static_assert must be updated.
using trq_bitmask_t = std::conditional<(MAX_REQUESTS <= 16), uint16_t, uint32_t>::type;
static constexpr size_t USB_EVENT_QUEUE_SIZE = 32; // Size of event queue between USB task and main loop static constexpr size_t USB_EVENT_QUEUE_SIZE = 32; // Size of event queue between USB task and main loop
static constexpr size_t USB_TASK_STACK_SIZE = 4096; // Stack size for USB task (same as ESP-IDF USB examples) static constexpr size_t USB_TASK_STACK_SIZE = 4096; // Stack size for USB task (same as ESP-IDF USB examples)
static constexpr UBaseType_t USB_TASK_PRIORITY = 5; // Higher priority than main loop (tskIDLE_PRIORITY + 5) static constexpr UBaseType_t USB_TASK_PRIORITY = 5; // Higher priority than main loop (tskIDLE_PRIORITY + 5)
@@ -83,8 +95,6 @@ struct TransferRequest {
 enum EventType : uint8_t {
   EVENT_DEVICE_NEW,
   EVENT_DEVICE_GONE,
-  EVENT_TRANSFER_COMPLETE,
-  EVENT_CONTROL_COMPLETE,
 };
 struct UsbEvent {
@@ -96,9 +106,6 @@ struct UsbEvent {
     struct {
       usb_device_handle_t handle;
     } device_gone;
-    struct {
-      TransferRequest *trq;
-    } transfer;
   } data;
   // Required for EventPool - no cleanup needed for POD types
@@ -163,10 +170,9 @@ class USBClient : public Component {
   uint16_t pid_{};
   // Lock-free pool management using atomic bitmask (no dynamic allocation)
   // Bit i = 1: requests_[i] is in use, Bit i = 0: requests_[i] is available
-  // Supports multiple concurrent consumers (both threads can allocate)
-  // Single producer for deallocation (main loop only)
-  // Limited to 16 slots by uint16_t size (enforced by static_assert)
-  std::atomic<uint16_t> trq_in_use_;
+  // Supports multiple concurrent consumers and producers (both threads can allocate/deallocate)
+  // Bitmask type automatically selected: uint16_t for <= 16 slots, uint32_t for 17-32 slots
+  std::atomic<trq_bitmask_t> trq_in_use_;
   TransferRequest requests_[MAX_REQUESTS]{};
 };
 class USBHost : public Component {


@@ -228,12 +228,6 @@ void USBClient::loop() {
       case EVENT_DEVICE_GONE:
         this->on_removed(event->data.device_gone.handle);
         break;
-      case EVENT_TRANSFER_COMPLETE:
-      case EVENT_CONTROL_COMPLETE: {
-        auto *trq = event->data.transfer.trq;
-        this->release_trq(trq);
-        break;
-      }
     }
     // Return event to pool for reuse
     this->event_pool.release(event);
@@ -313,25 +307,6 @@ void USBClient::on_removed(usb_device_handle_t handle) {
   }
 }
-// Helper to queue transfer cleanup to main loop
-static void queue_transfer_cleanup(TransferRequest *trq, EventType type) {
-  auto *client = trq->client;
-  // Allocate event from pool
-  UsbEvent *event = client->event_pool.allocate();
-  if (event == nullptr) {
-    // No events available - increment counter for periodic logging
-    client->event_queue.increment_dropped_count();
-    return;
-  }
-  event->type = type;
-  event->data.transfer.trq = trq;
-  // Push to lock-free queue (always succeeds since pool size == queue size)
-  client->event_queue.push(event);
-}
 // CALLBACK CONTEXT: USB task (called from usb_host_client_handle_events in USB task)
 static void control_callback(const usb_transfer_t *xfer) {
   auto *trq = static_cast<TransferRequest *>(xfer->context);
@@ -346,8 +321,9 @@ static void control_callback(const usb_transfer_t *xfer) {
     trq->callback(trq->status);
   }
-  // Queue cleanup to main loop
-  queue_transfer_cleanup(trq, EVENT_CONTROL_COMPLETE);
+  // Release transfer slot immediately in USB task
+  // The release_trq() uses thread-safe atomic operations
+  trq->client->release_trq(trq);
 }
 // THREAD CONTEXT: Called from both USB task and main loop threads (multi-consumer)
@@ -358,20 +334,20 @@ static void control_callback(const usb_transfer_t *xfer) {
 // This multi-threaded access is intentional for performance - USB task can
 // immediately restart transfers without waiting for main loop scheduling.
 TransferRequest *USBClient::get_trq_() {
-  uint16_t mask = this->trq_in_use_.load(std::memory_order_relaxed);
+  trq_bitmask_t mask = this->trq_in_use_.load(std::memory_order_relaxed);
   // Find first available slot (bit = 0) and try to claim it atomically
   // We use a while loop to allow retrying the same slot after CAS failure
   size_t i = 0;
   while (i != MAX_REQUESTS) {
-    if (mask & (1U << i)) {
+    if (mask & (static_cast<trq_bitmask_t>(1) << i)) {
       // Slot is in use, move to next slot
       i++;
       continue;
     }
     // Slot i appears available, try to claim it atomically
-    uint16_t desired = mask | (1U << i);  // Set bit i to mark as in-use
+    trq_bitmask_t desired = mask | (static_cast<trq_bitmask_t>(1) << i);  // Set bit i to mark as in-use
     if (this->trq_in_use_.compare_exchange_weak(mask, desired, std::memory_order_acquire, std::memory_order_relaxed)) {
       // Successfully claimed slot i - prepare the TransferRequest
@@ -386,7 +362,7 @@ TransferRequest *USBClient::get_trq_() {
     i = 0;
   }
-  ESP_LOGE(TAG, "All %d transfer slots in use", MAX_REQUESTS);
+  ESP_LOGE(TAG, "All %zu transfer slots in use", MAX_REQUESTS);
   return nullptr;
 }
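The claim loop above is a standard lock-free scan-and-CAS over a bitmask. A rough Python rendering of the same algorithm (illustrative only, not part of this commit; Python has no `std::atomic`, so the compare-and-swap is emulated with a lock, and all names are hypothetical):

```python
import threading

class SlotPool:
    """Toy model of the TransferRequest slot bitmask (not the real driver)."""

    def __init__(self, max_requests: int) -> None:
        self.max_requests = max_requests
        self.in_use = 0  # bit i set => slot i is taken
        self._lock = threading.Lock()

    def _compare_exchange(self, expected: int, desired: int) -> tuple[bool, int]:
        # Stands in for std::atomic::compare_exchange_weak.
        with self._lock:
            if self.in_use == expected:
                self.in_use = desired
                return True, desired
            return False, self.in_use  # CAS failed: return the fresh mask

    def claim(self) -> int | None:
        mask = self.in_use  # relaxed load
        i = 0
        while i != self.max_requests:
            if mask & (1 << i):
                i += 1  # slot taken, move to the next one
                continue
            # Slot i looks free: try to claim it atomically.
            ok, mask = self._compare_exchange(mask, mask | (1 << i))
            if ok:
                return i  # successfully claimed slot i
            # CAS failed: mask was refreshed, retry the same slot index.
        return None  # all slots in use
```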
 void USBClient::disconnect() {
@@ -452,8 +428,11 @@ static void transfer_callback(usb_transfer_t *xfer) {
     trq->callback(trq->status);
   }
-  // Queue cleanup to main loop
-  queue_transfer_cleanup(trq, EVENT_TRANSFER_COMPLETE);
+  // Release transfer slot AFTER callback completes to prevent slot exhaustion
+  // This is critical for high-throughput transfers (e.g., USB UART at 115200 baud)
+  // The callback has finished accessing xfer->data_buffer, so it's safe to release
+  // The release_trq() uses thread-safe atomic operations
+  trq->client->release_trq(trq);
 }
 /**
  * Performs a transfer input operation.
@@ -521,12 +500,12 @@ void USBClient::dump_config() {
            "  Product id %04X",
            this->vid_, this->pid_);
 }
-// THREAD CONTEXT: Only called from main loop thread (single producer for deallocation)
-// - Via event processing when handling EVENT_TRANSFER_COMPLETE/EVENT_CONTROL_COMPLETE
-// - Directly when transfer submission fails
+// THREAD CONTEXT: Called from both USB task and main loop threads
+// - USB task: Immediately after transfer callback completes
+// - Main loop: When transfer submission fails
 //
 // THREAD SAFETY: Lock-free using atomic AND to clear bit
-// Single-producer pattern makes this simpler than allocation
+// Thread-safe atomic operation allows multi-threaded deallocation
 void USBClient::release_trq(TransferRequest *trq) {
   if (trq == nullptr)
     return;
@@ -540,8 +519,8 @@ void USBClient::release_trq(TransferRequest *trq) {
   // Atomically clear bit i to mark slot as available
   // fetch_and with inverted bitmask clears the bit atomically
-  uint16_t bit = 1U << index;
-  this->trq_in_use_.fetch_and(static_cast<uint16_t>(~bit), std::memory_order_release);
+  trq_bitmask_t bit = static_cast<trq_bitmask_t>(1) << index;
+  this->trq_in_use_.fetch_and(static_cast<trq_bitmask_t>(~bit), std::memory_order_release);
 }
 }  // namespace usb_host
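Release is the simpler half: clearing bit `index` with an atomic AND against the inverted mask, which is what `fetch_and(~bit)` does above. Continuing the SlotPool sketch from earlier (same caveats; the lock emulates the atomic):

```python
def release(pool: SlotPool, index: int) -> None:
    # Emulates trq_in_use_.fetch_and(~bit, std::memory_order_release).
    bit = 1 << index
    with pool._lock:
        pool.in_use &= ~bit
```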


@@ -576,8 +576,9 @@ __attribute__((noinline)) static void log_scan_result(const WiFiScanResult &res)
   format_mac_addr_upper(bssid.data(), bssid_s);
   if (res.get_matches()) {
-    ESP_LOGI(TAG, "- '%s' %s" LOG_SECRET("(%s) ") "%s", res.get_ssid().c_str(), res.get_is_hidden() ? "(HIDDEN) " : "",
-             bssid_s, LOG_STR_ARG(get_signal_bars(res.get_rssi())));
+    ESP_LOGI(TAG, "- '%s' %s" LOG_SECRET("(%s) ") "%s", res.get_ssid().c_str(),
+             res.get_is_hidden() ? LOG_STR_LITERAL("(HIDDEN) ") : LOG_STR_LITERAL(""), bssid_s,
+             LOG_STR_ARG(get_signal_bars(res.get_rssi())));
     ESP_LOGD(TAG,
              "  Channel: %u\n"
              "  RSSI: %d dB",


@@ -4,7 +4,7 @@ from enum import Enum
 from esphome.enum import StrEnum
-__version__ = "2025.10.0b1"
+__version__ = "2025.10.0b2"
 ALLOWED_NAME_CHARS = "abcdefghijklmnopqrstuvwxyz0123456789-_"
 VALID_SUBSTITUTIONS_CHARACTERS = (


@@ -529,6 +529,8 @@ class EsphomeCore:
         self.dashboard = False
         # True if command is run from vscode api
         self.vscode = False
+        # True if running in testing mode (disables validation checks for grouped testing)
+        self.testing_mode = False
         # The name of the node
         self.name: str | None = None
         # The friendly name of the node


@@ -84,6 +84,7 @@
 #define USE_LVGL_TOUCHSCREEN
 #define USE_MDNS
 #define MDNS_SERVICE_COUNT 3
+#define MDNS_DYNAMIC_TXT_COUNT 3
 #define USE_MEDIA_PLAYER
 #define USE_NEXTION_TFT_UPLOAD
 #define USE_NUMBER
@@ -190,6 +191,7 @@
 #define USE_WEBSERVER_PORT 80  // NOLINT
 #define USE_WEBSERVER_SORTING
 #define USE_WIFI_11KV_SUPPORT
+#define USB_HOST_MAX_REQUESTS 16
 #ifdef USE_ARDUINO
 #define USE_ARDUINO_VERSION_CODE VERSION_CODE(3, 2, 1)


@@ -246,12 +246,15 @@ def entity_duplicate_validator(platform: str) -> Callable[[ConfigType], ConfigTy
             "\n  to distinguish them"
         )
-        raise cv.Invalid(
-            f"Duplicate {platform} entity with name '{entity_name}' found{device_prefix}. "
-            f"{conflict_msg}. "
-            "Each entity on a device must have a unique name within its platform."
-            f"{sanitized_msg}"
-        )
+        # Skip duplicate entity name validation when testing_mode is enabled
+        # This flag is used for grouped component testing
+        if not CORE.testing_mode:
+            raise cv.Invalid(
+                f"Duplicate {platform} entity with name '{entity_name}' found{device_prefix}. "
+                f"{conflict_msg}. "
+                "Each entity on a device must have a unique name within its platform."
+                f"{sanitized_msg}"
+            )
     # Store metadata about this entity
     entity_metadata: EntityMetadata = {


@@ -410,7 +410,7 @@ def run_ota_impl_(
             af, socktype, _, _, sa = r
             _LOGGER.info("Connecting to %s port %s...", sa[0], sa[1])
             sock = socket.socket(af, socktype)
-            sock.settimeout(10.0)
+            sock.settimeout(20.0)
             try:
                 sock.connect(sa)
             except OSError as err:


@@ -118,11 +118,11 @@ class PinRegistry(dict):
             parent_config = fconf.get_config_for_path(parent_path)
             final_val_fun(pin_config, parent_config)
             allow_others = pin_config.get(CONF_ALLOW_OTHER_USES, False)
-            if count != 1 and not allow_others:
+            if count != 1 and not allow_others and not CORE.testing_mode:
                 raise cv.Invalid(
                     f"Pin {pin_config[CONF_NUMBER]} is used in multiple places"
                 )
-            if count == 1 and allow_others:
+            if count == 1 and allow_others and not CORE.testing_mode:
                 raise cv.Invalid(
                     f"Pin {pin_config[CONF_NUMBER]} incorrectly sets {CONF_ALLOW_OTHER_USES}: true"
                 )
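This hunk and the entity-name hunk above apply the same change: an existing validation error is skipped when `CORE.testing_mode` is set, so grouped test configs can share pins and entity names. A minimal standalone sketch of the pattern (hypothetical names; the real flag is `esphome.core.CORE.testing_mode`):

```python
class _Core:
    testing_mode = False

CORE = _Core()

def check_pin_reuse(pin: int, count: int, allow_others: bool) -> None:
    # Raise only outside testing mode, mirroring the gated checks above.
    if count != 1 and not allow_others and not CORE.testing_mode:
        raise ValueError(f"Pin {pin} is used in multiple places")

check_pin_reuse(4, count=2, allow_others=True)   # ok: reuse was declared
CORE.testing_mode = True
check_pin_reuse(4, count=3, allow_others=False)  # ok: bypassed for grouped testing
```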


@@ -5,6 +5,7 @@ import os
 from pathlib import Path
 import re
 import subprocess
+from typing import Any
 from esphome.const import CONF_COMPILE_PROCESS_LIMIT, CONF_ESPHOME, KEY_CORE
 from esphome.core import CORE, EsphomeError
@@ -42,6 +43,35 @@ def patch_structhash():
     cli.clean_build_dir = patched_clean_build_dir
+def patch_file_downloader():
+    """Patch PlatformIO's FileDownloader to retry on PackageException errors."""
+    from platformio.package.download import FileDownloader
+    from platformio.package.exception import PackageException
+    original_init = FileDownloader.__init__
+    def patched_init(self, *args: Any, **kwargs: Any) -> None:
+        max_retries = 3
+        for attempt in range(max_retries):
+            try:
+                return original_init(self, *args, **kwargs)
+            except PackageException as e:
+                if attempt < max_retries - 1:
+                    _LOGGER.warning(
+                        "Package download failed: %s. Retrying... (attempt %d/%d)",
+                        str(e),
+                        attempt + 1,
+                        max_retries,
+                    )
+                else:
+                    # Final attempt - re-raise
+                    raise
+        return None
+    FileDownloader.__init__ = patched_init
 IGNORE_LIB_WARNINGS = f"(?:{'|'.join(['Hash', 'Update'])})"
 FILTER_PLATFORMIO_LINES = [
     r"Verbose mode can be enabled via `-v, --verbose` option.*",
@@ -99,6 +129,7 @@ def run_platformio_cli(*args, **kwargs) -> str | int:
     import platformio.__main__
     patch_structhash()
+    patch_file_downloader()
     return run_external_command(platformio.__main__.main, *cmd, **kwargs)
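The downloader patch retries the wrapped constructor up to three times before re-raising. The same shape can be factored into a reusable decorator; a sketch under that assumption, not part of this commit:

```python
import functools
import logging

_LOGGER = logging.getLogger(__name__)

def retry(times: int = 3, exc: type[Exception] = Exception):
    """Retry a callable on `exc`, re-raising after the final attempt."""
    def wrap(func):
        @functools.wraps(func)
        def inner(*args, **kwargs):
            for attempt in range(times):
                try:
                    return func(*args, **kwargs)
                except exc as e:
                    if attempt == times - 1:
                        raise  # final attempt - propagate
                    _LOGGER.warning("%s failed: %s. Retrying (%d/%d)",
                                    func.__name__, e, attempt + 1, times)
        return inner
    return wrap
```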


@@ -11,8 +11,8 @@ pyserial==3.5
 platformio==6.1.18  # When updating platformio, also update /docker/Dockerfile
 esptool==5.1.0
 click==8.1.7
-esphome-dashboard==20250904.0
-aioesphomeapi==41.13.0
+esphome-dashboard==20251009.0
+aioesphomeapi==41.14.0
 zeroconf==0.148.0
 puremagic==1.30
 ruamel.yaml==0.18.15  # dashboard_import

script/analyze_component_buses.py Executable file

@@ -0,0 +1,523 @@
#!/usr/bin/env python3
"""Analyze component test files to detect which common bus configs they use.
This script scans component test files and extracts which common bus configurations
(i2c, spi, uart, etc.) are included via the packages mechanism. This information
is used to group components that can be tested together.
Components can only be grouped together if they use the EXACT SAME set of common
bus configurations, ensuring that merged configs are compatible.
Example output:
{
"component1": {
"esp32-ard": ["i2c", "uart_19200"],
"esp32-idf": ["i2c", "uart_19200"]
},
"component2": {
"esp32-ard": ["spi"],
"esp32-idf": ["spi"]
}
}
"""
from __future__ import annotations
import argparse
from functools import lru_cache
import json
from pathlib import Path
import re
import sys
from typing import Any
# Add esphome to path
sys.path.insert(0, str(Path(__file__).parent.parent))
from esphome import yaml_util
from esphome.config_helpers import Extend, Remove
# Path to common bus configs
COMMON_BUS_PATH = Path("tests/test_build_components/common")
# Package dependencies - maps packages to the packages they include
# When a component uses a package on the left, it automatically gets
# the packages on the right as well
PACKAGE_DEPENDENCIES = {
"modbus": ["uart"], # modbus packages include uart packages
# Add more package dependencies here as needed
}
# Bus types that can be defined directly in config files
# Components defining these directly cannot be grouped (they create unique bus IDs)
DIRECT_BUS_TYPES = ("i2c", "spi", "uart", "modbus")
# Signature for components with no bus requirements
# These components can be merged with any other group
NO_BUSES_SIGNATURE = "no_buses"
# Base bus components - these ARE the bus implementations and should not
# be flagged as needing migration since they are the platform/base components
BASE_BUS_COMPONENTS = {
"i2c",
"spi",
"uart",
"modbus",
"canbus",
}
# Components that must be tested in isolation (not grouped or batched with others)
# These have known build issues that prevent grouping
# NOTE: This should be kept in sync with both test_build_components and split_components_for_ci.py
ISOLATED_COMPONENTS = {
"animation": "Has display lambda in common.yaml that requires existing display platform - breaks when merged without display",
"esphome": "Defines devices/areas in esphome: section that are referenced in other sections - breaks when merged",
"ethernet": "Defines ethernet: which conflicts with wifi: used by most components",
"ethernet_info": "Related to ethernet component which conflicts with wifi",
"lvgl": "Defines multiple SDL displays on host platform that conflict when merged with other display configs",
"matrix_keypad": "Needs isolation due to keypad",
"mcp4725": "no YAML config to specify i2c bus id",
"mcp47a1": "no YAML config to specify i2c bus id",
"modbus_controller": "Defines multiple modbus buses for testing client/server functionality - conflicts with package modbus bus",
"neopixelbus": "RMT type conflict with ESP32 Arduino/ESP-IDF headers (enum vs struct rmt_channel_t)",
"packages": "cannot merge packages",
}
@lru_cache(maxsize=1)
def get_common_bus_packages() -> frozenset[str]:
"""Get the list of common bus package names.
Reads from tests/test_build_components/common/ directory
and caches the result. All bus types support component grouping
for config validation since --testing-mode bypasses runtime conflicts.
Returns:
Frozenset of common bus package names (i2c, spi, uart, etc.)
"""
if not COMMON_BUS_PATH.exists():
return frozenset()
# List all directories in common/ - these are the bus package names
return frozenset(d.name for d in COMMON_BUS_PATH.iterdir() if d.is_dir())
def uses_local_file_references(component_dir: Path) -> bool:
"""Check if a component uses local file references via $component_dir.
Components that reference local files cannot be grouped because each needs
a unique component_dir path pointing to their specific directory.
Args:
component_dir: Path to the component's test directory
Returns:
True if the component uses $component_dir for local file references
"""
common_yaml = component_dir / "common.yaml"
if not common_yaml.exists():
return False
try:
content = common_yaml.read_text()
except Exception: # pylint: disable=broad-exception-caught
return False
# Pattern to match $component_dir or ${component_dir} references
# These indicate local file usage that prevents grouping
return bool(re.search(r"\$\{?component_dir\}?", content))
def is_platform_component(component_dir: Path) -> bool:
"""Check if a component is a platform component (abstract base class).
Platform components have IS_PLATFORM_COMPONENT = True and cannot be
instantiated without a platform-specific implementation. These components
define abstract methods and cause linker errors if compiled standalone.
Examples: canbus, mcp23x08_base, mcp23x17_base
Args:
component_dir: Path to the component's test directory
Returns:
True if this is a platform component
"""
# Check in the actual component source, not tests
# tests/components/X -> tests/components -> tests -> repo root
repo_root = component_dir.parent.parent.parent
comp_init = (
repo_root / "esphome" / "components" / component_dir.name / "__init__.py"
)
if not comp_init.exists():
return False
try:
content = comp_init.read_text()
return "IS_PLATFORM_COMPONENT = True" in content
except Exception: # pylint: disable=broad-exception-caught
return False
def _contains_extend_or_remove(data: Any) -> bool:
"""Recursively check if data contains Extend or Remove objects.
Args:
data: Parsed YAML data structure
Returns:
True if any Extend or Remove objects are found
"""
if isinstance(data, (Extend, Remove)):
return True
if isinstance(data, dict):
for value in data.values():
if _contains_extend_or_remove(value):
return True
if isinstance(data, list):
for item in data:
if _contains_extend_or_remove(item):
return True
return False
def analyze_yaml_file(yaml_file: Path) -> dict[str, Any]:
"""Load a YAML file once and extract all needed information.
This loads the YAML file a single time and extracts all information needed
for component analysis, avoiding multiple file reads.
Args:
yaml_file: Path to the YAML file to analyze
Returns:
Dictionary with keys:
- buses: set of common bus package names
- has_extend_remove: bool indicating if Extend/Remove objects are present
- has_direct_bus_config: bool indicating if buses are defined directly (not via packages)
- loaded: bool indicating if file was successfully loaded
"""
result = {
"buses": set(),
"has_extend_remove": False,
"has_direct_bus_config": False,
"loaded": False,
}
if not yaml_file.exists():
return result
try:
data = yaml_util.load_yaml(yaml_file)
result["loaded"] = True
except Exception: # pylint: disable=broad-exception-caught
return result
# Check for Extend/Remove objects
result["has_extend_remove"] = _contains_extend_or_remove(data)
# Check if buses are defined directly (not via packages)
# Components that define i2c, spi, uart, or modbus directly in test files
# cannot be grouped because they create unique bus IDs
if isinstance(data, dict):
for bus_type in DIRECT_BUS_TYPES:
if bus_type in data:
result["has_direct_bus_config"] = True
break
# Extract common bus packages
if not isinstance(data, dict) or "packages" not in data:
return result
packages = data["packages"]
if not isinstance(packages, dict):
return result
valid_buses = get_common_bus_packages()
for pkg_name in packages:
if pkg_name not in valid_buses:
continue
result["buses"].add(pkg_name)
# Add any package dependencies (e.g., modbus includes uart)
if pkg_name not in PACKAGE_DEPENDENCIES:
continue
for dep in PACKAGE_DEPENDENCIES[pkg_name]:
if dep not in valid_buses:
continue
result["buses"].add(dep)
return result
def analyze_component(component_dir: Path) -> tuple[dict[str, list[str]], bool, bool]:
"""Analyze a component directory to find which buses each platform uses.
Args:
component_dir: Path to the component's test directory
Returns:
Tuple of:
- Dictionary mapping platform to list of bus configs
Example: {"esp32-ard": ["i2c", "spi"], "esp32-idf": ["i2c"]}
- Boolean indicating if component uses !extend or !remove
- Boolean indicating if component defines buses directly (not via packages)
"""
if not component_dir.is_dir():
return {}, False, False
platform_buses = {}
has_extend_remove = False
has_direct_bus_config = False
# Analyze all YAML files in the component directory
for yaml_file in component_dir.glob("*.yaml"):
analysis = analyze_yaml_file(yaml_file)
# Track if any file uses extend/remove
if analysis["has_extend_remove"]:
has_extend_remove = True
# Track if any file defines buses directly
if analysis["has_direct_bus_config"]:
has_direct_bus_config = True
# For test.*.yaml files, extract platform and buses
if yaml_file.name.startswith("test.") and yaml_file.suffix == ".yaml":
# Extract platform name (e.g., test.esp32-ard.yaml -> esp32-ard)
platform = yaml_file.stem.replace("test.", "")
# Always add platform, even if it has no buses (empty list)
# This allows grouping components that don't use any shared buses
platform_buses[platform] = (
sorted(analysis["buses"]) if analysis["buses"] else []
)
return platform_buses, has_extend_remove, has_direct_bus_config
def analyze_all_components(
tests_dir: Path = None,
) -> tuple[dict[str, dict[str, list[str]]], set[str], set[str]]:
"""Analyze all component test directories.
Args:
tests_dir: Path to tests/components directory (defaults to auto-detect)
Returns:
Tuple of:
- Dictionary mapping component name to platform->buses mapping
- Set of component names that cannot be grouped
- Set of component names that define buses directly (need migration warning)
"""
if tests_dir is None:
tests_dir = Path("tests/components")
if not tests_dir.exists():
print(f"Error: {tests_dir} does not exist", file=sys.stderr)
return {}, set(), set()
components = {}
non_groupable = set()
direct_bus_components = set()
for component_dir in sorted(tests_dir.iterdir()):
if not component_dir.is_dir():
continue
component_name = component_dir.name
platform_buses, has_extend_remove, has_direct_bus_config = analyze_component(
component_dir
)
if platform_buses:
components[component_name] = platform_buses
# Note: Components using $component_dir are now groupable because the merge
# script rewrites these to absolute paths with component-specific substitutions
# Check if component is explicitly isolated
# These have known issues that prevent grouping with other components
if component_name in ISOLATED_COMPONENTS:
non_groupable.add(component_name)
# Check if component is a base bus component
# These ARE the bus platform implementations and define buses directly for testing
# They cannot be grouped with components that use bus packages (causes ID conflicts)
if component_name in BASE_BUS_COMPONENTS:
non_groupable.add(component_name)
# Check if component uses !extend or !remove directives
# These rely on specific config structure and cannot be merged with other components
# The directives work within a component's own package hierarchy but break when
# merging independent components together
if has_extend_remove:
non_groupable.add(component_name)
# Check if component defines buses directly in test files
# These create unique bus IDs and cause conflicts when merged
# Exclude base bus components (i2c, spi, uart, etc.) since they ARE the platform
if has_direct_bus_config and component_name not in BASE_BUS_COMPONENTS:
non_groupable.add(component_name)
direct_bus_components.add(component_name)
return components, non_groupable, direct_bus_components
def create_grouping_signature(
platform_buses: dict[str, list[str]], platform: str
) -> str:
"""Create a signature string for grouping components.
Components with the same signature can be grouped together for testing.
All valid bus types can be grouped since --testing-mode bypasses runtime
conflicts during config validation.
Args:
platform_buses: Mapping of platform to list of buses
platform: The specific platform to create signature for
Returns:
Signature string (e.g., "i2c" or "uart") or empty if no valid buses
"""
buses = platform_buses.get(platform, [])
if not buses:
return ""
# Only include valid bus types in signature
common_buses = get_common_bus_packages()
valid_buses = [b for b in buses if b in common_buses]
if not valid_buses:
return ""
return "+".join(sorted(valid_buses))
def group_components_by_signature(
components: dict[str, dict[str, list[str]]], platform: str
) -> dict[str, list[str]]:
"""Group components by their bus signature for a specific platform.
Args:
components: Component analysis results from analyze_all_components()
platform: Platform to group for (e.g., "esp32-ard")
Returns:
Dictionary mapping signature to list of component names
Example: {"i2c+uart_19200": ["comp1", "comp2"], "spi": ["comp3"]}
"""
signature_groups: dict[str, list[str]] = {}
for component_name, platform_buses in components.items():
if platform not in platform_buses:
continue
signature = create_grouping_signature(platform_buses, platform)
if not signature:
continue
if signature not in signature_groups:
signature_groups[signature] = []
signature_groups[signature].append(component_name)
return signature_groups
def main() -> None:
"""Main entry point."""
parser = argparse.ArgumentParser(
description="Analyze component test files to detect common bus usage"
)
parser.add_argument(
"--components",
"-c",
nargs="+",
help="Specific components to analyze (default: all)",
)
parser.add_argument(
"--platform",
"-p",
help="Show grouping for a specific platform",
)
parser.add_argument(
"--json",
action="store_true",
help="Output as JSON",
)
parser.add_argument(
"--group",
action="store_true",
help="Show component groupings by bus signature",
)
args = parser.parse_args()
# Analyze components
tests_dir = Path("tests/components")
if args.components:
# Analyze only specified components
components = {}
non_groupable = set()
direct_bus_components = set()
for comp in args.components:
comp_dir = tests_dir / comp
platform_buses, has_extend_remove, has_direct_bus_config = (
analyze_component(comp_dir)
)
if platform_buses:
components[comp] = platform_buses
# Note: Components using $component_dir are now groupable
if comp in ISOLATED_COMPONENTS:
non_groupable.add(comp)
if comp in BASE_BUS_COMPONENTS:
non_groupable.add(comp)
if has_direct_bus_config and comp not in BASE_BUS_COMPONENTS:
non_groupable.add(comp)
direct_bus_components.add(comp)
else:
# Analyze all components
components, non_groupable, direct_bus_components = analyze_all_components(
tests_dir
)
# Output results
if args.group and args.platform:
# Show groupings for a specific platform
groups = group_components_by_signature(components, args.platform)
if args.json:
print(json.dumps(groups, indent=2))
else:
print(f"Component groupings for {args.platform}:")
print()
for signature, comp_list in sorted(groups.items()):
print(f" {signature}:")
for comp in sorted(comp_list):
print(f" - {comp}")
print()
elif args.json:
# JSON output
print(json.dumps(components, indent=2))
else:
# Human-readable output
for component, platform_buses in sorted(components.items()):
non_groupable_marker = (
" [NON-GROUPABLE]" if component in non_groupable else ""
)
print(f"{component}{non_groupable_marker}:")
for platform, buses in sorted(platform_buses.items()):
bus_str = ", ".join(buses)
print(f" {platform}: {bus_str}")
print()
print(f"Total components analyzed: {len(components)}")
if non_groupable:
print(f"Non-groupable components (use local files): {len(non_groupable)}")
for comp in sorted(non_groupable):
print(f" - {comp}")
if __name__ == "__main__":
main()


@@ -237,6 +237,16 @@ def main() -> None:
     result = subprocess.run(cmd, capture_output=True, text=True, check=True)
     changed_components = parse_list_components_output(result.stdout)
+    # Filter to only components that have test files
+    # Components without tests shouldn't generate CI test jobs
+    tests_dir = Path(root_path) / "tests" / "components"
+    changed_components_with_tests = [
+        component
+        for component in changed_components
+        if (component_test_dir := tests_dir / component).exists()
+        and any(component_test_dir.glob("test.*.yaml"))
+    ]
     # Build output
     output: dict[str, Any] = {
         "integration_tests": run_integration,
@@ -244,7 +254,8 @@ def main() -> None:
         "clang_format": run_clang_format,
         "python_linters": run_python_linters,
         "changed_components": changed_components,
-        "component_test_count": len(changed_components),
+        "changed_components_with_tests": changed_components_with_tests,
+        "component_test_count": len(changed_components_with_tests),
     }
     # Output as JSON
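The filter above keeps a changed component only if its test directory exists and contains at least one test.*.yaml file. The same check in isolation, as a sketch mirroring the hunk:

```python
from pathlib import Path

def components_with_tests(components: list[str], tests_dir: Path) -> list[str]:
    """Keep only components that have at least one test.*.yaml file."""
    return [
        component
        for component in components
        if (test_dir := tests_dir / component).exists()
        and any(test_dir.glob("test.*.yaml"))
    ]
```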

379
script/merge_component_configs.py Executable file
View File

@@ -0,0 +1,379 @@
#!/usr/bin/env python3
"""Merge multiple component test configurations into a single test file.
This script combines multiple component test files that use the same common bus
configurations into a single merged test file. This allows testing multiple
compatible components together, reducing CI build time.
The merger handles:
- Component-specific substitutions (prefixing to avoid conflicts)
- Multiple instances of component configurations
- Shared common bus packages (included only once)
- Platform-specific configurations
- Uses ESPHome's built-in merge_config for proper YAML merging
"""
from __future__ import annotations
import argparse
from pathlib import Path
import re
import sys
from typing import Any
# Add esphome to path so we can import from it
sys.path.insert(0, str(Path(__file__).parent.parent))
from esphome import yaml_util
from esphome.config_helpers import merge_config
from script.analyze_component_buses import PACKAGE_DEPENDENCIES, get_common_bus_packages
def load_yaml_file(yaml_file: Path) -> dict:
"""Load YAML file using ESPHome's YAML loader.
Args:
yaml_file: Path to the YAML file
Returns:
Parsed YAML as dictionary
"""
if not yaml_file.exists():
raise FileNotFoundError(f"YAML file not found: {yaml_file}")
return yaml_util.load_yaml(yaml_file)
def extract_packages_from_yaml(data: dict) -> dict[str, str]:
"""Extract COMMON BUS package includes from parsed YAML.
Only extracts packages that are from test_build_components/common/,
ignoring component-specific packages.
Args:
data: Parsed YAML dictionary
Returns:
Dictionary mapping package name to include path (as string representation)
Only includes common bus packages (i2c, spi, uart, etc.)
"""
if "packages" not in data:
return {}
packages_value = data["packages"]
if not isinstance(packages_value, dict):
# List format doesn't include common bus packages (those use dict format)
return {}
# Get common bus package names (cached)
common_bus_packages = get_common_bus_packages()
packages = {}
# Dictionary format: packages: {name: value}
for name, value in packages_value.items():
# Only include common bus packages, ignore component-specific ones
if name not in common_bus_packages:
continue
packages[name] = str(value)
# Also track package dependencies (e.g., modbus includes uart)
if name not in PACKAGE_DEPENDENCIES:
continue
for dep in PACKAGE_DEPENDENCIES[name]:
if dep not in common_bus_packages:
continue
# Mark as included via dependency
packages[f"_dep_{dep}"] = f"(included via {name})"
return packages
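A worked example of the extraction (assuming `i2c` is a common bus package; `device_base` is a hypothetical component-specific package):

```python
data = {"packages": {"i2c": "common/i2c/esp32-ard.yaml", "device_base": {"sensor": []}}}
# extract_packages_from_yaml(data) -> {"i2c": "common/i2c/esp32-ard.yaml"}
# The component-specific "device_base" entry is ignored; only shared bus
# packages participate in the compatibility check between merged components.
```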
def prefix_substitutions_in_dict(
data: Any, prefix: str, exclude: set[str] | None = None
) -> Any:
"""Recursively prefix all substitution references in a data structure.
Args:
data: YAML data structure (dict, list, or scalar)
prefix: Prefix to add to substitution names
exclude: Set of substitution names to exclude from prefixing
Returns:
Data structure with prefixed substitution references
"""
if exclude is None:
exclude = set()
def replace_sub(text: str) -> str:
"""Replace substitution references in a string."""
def replace_match(match):
sub_name = match.group(1)
if sub_name in exclude:
return match.group(0)
# Always use braced format in output for consistency
return f"${{{prefix}_{sub_name}}}"
# Match both ${substitution} and $substitution formats
return re.sub(r"\$\{?(\w+)\}?", replace_match, text)
if isinstance(data, dict):
result = {}
for key, value in data.items():
result[key] = prefix_substitutions_in_dict(value, prefix, exclude)
return result
if isinstance(data, list):
return [prefix_substitutions_in_dict(item, prefix, exclude) for item in data]
if isinstance(data, str):
return replace_sub(data)
return data
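A worked example of the prefixing for a component named `sht3xd` (illustrative values): both `$sub` and `${sub}` forms are rewritten to the braced, prefixed form.

```python
data = {"sensor": [{"name": "${test_name} temp", "address": "$addr"}]}
# prefix_substitutions_in_dict(data, "sht3xd") returns:
# {"sensor": [{"name": "${sht3xd_test_name} temp", "address": "${sht3xd_addr}"}]}
```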
def deduplicate_by_id(data: dict) -> dict:
"""Deduplicate list items with the same ID.
Keeps only the first occurrence of each ID. If items with the same ID
are identical, this silently deduplicates. If they differ, the first
one is kept (ESPHome's validation will catch if this causes issues).
Args:
data: Parsed config dictionary
Returns:
Config with deduplicated lists
"""
if not isinstance(data, dict):
return data
result = {}
for key, value in data.items():
if isinstance(value, list):
# Check for items with 'id' field
seen_ids = set()
deduped_list = []
for item in value:
if isinstance(item, dict) and "id" in item:
item_id = item["id"]
if item_id not in seen_ids:
seen_ids.add(item_id)
deduped_list.append(item)
# else: skip duplicate ID (keep first occurrence)
else:
# No ID, just add it
deduped_list.append(item)
result[key] = deduped_list
elif isinstance(value, dict):
# Recursively deduplicate nested dicts
result[key] = deduplicate_by_id(value)
else:
result[key] = value
return result
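A worked example of the deduplication: when two list items share an `id`, the first occurrence wins.

```python
cfg = {"i2c": [{"id": "bus_a", "scan": True}, {"id": "bus_a", "scan": False}]}
# deduplicate_by_id(cfg) -> {"i2c": [{"id": "bus_a", "scan": True}]}
```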
def merge_component_configs(
component_names: list[str],
platform: str,
tests_dir: Path,
output_file: Path,
) -> None:
"""Merge multiple component test configs into a single file.
Args:
component_names: List of component names to merge
platform: Platform to merge for (e.g., "esp32-ard")
tests_dir: Path to tests/components directory
output_file: Path to output merged config file
"""
if not component_names:
raise ValueError("No components specified")
# Track packages to ensure they're identical
all_packages = None
# Start with empty config
merged_config_data = {}
# Process each component
for comp_name in component_names:
comp_dir = tests_dir / comp_name
test_file = comp_dir / f"test.{platform}.yaml"
if not test_file.exists():
raise FileNotFoundError(f"Test file not found: {test_file}")
# Load the component's test file
comp_data = load_yaml_file(test_file)
# Validate packages are compatible
# Components with no packages (no_buses) can merge with any group
comp_packages = extract_packages_from_yaml(comp_data)
if all_packages is None:
# First component - set the baseline
all_packages = comp_packages
elif not comp_packages:
# This component has no packages (no_buses) - it can merge with any group
pass
elif not all_packages:
# Previous components had no packages, but this one does - adopt these packages
all_packages = comp_packages
elif comp_packages != all_packages:
# Both have packages but they differ - this is an error
raise ValueError(
f"Component {comp_name} has different packages than previous components. "
f"Expected: {all_packages}, Got: {comp_packages}. "
f"All components must use the same common bus configs to be merged."
)
# Handle $component_dir by replacing with absolute path
# This allows components that use local file references to be grouped
comp_abs_dir = str(comp_dir.absolute())
# Save top-level substitutions BEFORE expanding packages
# In ESPHome, top-level substitutions override package substitutions
top_level_subs = (
comp_data["substitutions"].copy()
if "substitutions" in comp_data and comp_data["substitutions"] is not None
else {}
)
# Expand packages - but we'll restore substitution priority after
if "packages" in comp_data:
packages_value = comp_data["packages"]
if isinstance(packages_value, dict):
# Dict format - check each package
common_bus_packages = get_common_bus_packages()
for pkg_name, pkg_value in list(packages_value.items()):
if pkg_name in common_bus_packages:
continue
if not isinstance(pkg_value, dict):
continue
# Component-specific package - expand its content into top level
comp_data = merge_config(comp_data, pkg_value)
elif isinstance(packages_value, list):
# List format - expand all package includes
for pkg_value in packages_value:
if not isinstance(pkg_value, dict):
continue
comp_data = merge_config(comp_data, pkg_value)
# Remove all packages (common will be re-added at the end)
del comp_data["packages"]
# Restore top-level substitution priority
# Top-level substitutions override any from packages
if "substitutions" not in comp_data or comp_data["substitutions"] is None:
comp_data["substitutions"] = {}
# Merge: package subs as base, top-level subs override
comp_data["substitutions"].update(top_level_subs)
# Now prefix the final merged substitutions
comp_data["substitutions"] = {
f"{comp_name}_{sub_name}": sub_value
for sub_name, sub_value in comp_data["substitutions"].items()
}
# Add component_dir substitution with absolute path for this component
comp_data["substitutions"][f"{comp_name}_component_dir"] = comp_abs_dir
# Prefix substitution references throughout the config
comp_data = prefix_substitutions_in_dict(comp_data, comp_name)
# Use ESPHome's merge_config to merge this component into the result
# merge_config handles list merging with ID-based deduplication automatically
merged_config_data = merge_config(merged_config_data, comp_data)
# Add packages back (only once, since they're identical)
# IMPORTANT: Only re-add common bus packages (spi, i2c, uart, etc.)
# Do NOT re-add component-specific packages as they contain unprefixed $component_dir refs
if all_packages:
first_comp_data = load_yaml_file(
tests_dir / component_names[0] / f"test.{platform}.yaml"
)
if "packages" in first_comp_data and isinstance(
first_comp_data["packages"], dict
):
# Filter to only include common bus packages
# Only dict format can contain common bus packages
common_bus_packages = get_common_bus_packages()
filtered_packages = {
name: value
for name, value in first_comp_data["packages"].items()
if name in common_bus_packages
}
if filtered_packages:
merged_config_data["packages"] = filtered_packages
# Deduplicate items with same ID (keeps first occurrence)
merged_config_data = deduplicate_by_id(merged_config_data)
# Remove esphome section since it will be provided by the wrapper file
# The wrapper file includes this merged config via packages and provides
# the proper esphome: section with name, platform, etc.
if "esphome" in merged_config_data:
del merged_config_data["esphome"]
# Write merged config
output_file.parent.mkdir(parents=True, exist_ok=True)
yaml_content = yaml_util.dump(merged_config_data)
output_file.write_text(yaml_content)
print(f"Successfully merged {len(component_names)} components into {output_file}")
def main() -> None:
"""Main entry point."""
parser = argparse.ArgumentParser(
description="Merge multiple component test configs into a single file"
)
parser.add_argument(
"--components",
"-c",
required=True,
help="Comma-separated list of component names to merge",
)
parser.add_argument(
"--platform",
"-p",
required=True,
help="Platform to merge for (e.g., esp32-ard)",
)
parser.add_argument(
"--output",
"-o",
required=True,
type=Path,
help="Output file path for merged config",
)
parser.add_argument(
"--tests-dir",
type=Path,
default=Path("tests/components"),
help="Path to tests/components directory",
)
args = parser.parse_args()
component_names = [c.strip() for c in args.components.split(",")]
try:
merge_component_configs(
component_names=component_names,
platform=args.platform,
tests_dir=args.tests_dir,
output_file=args.output,
)
except Exception as e:
print(f"Error merging configs: {e}", file=sys.stderr)
import traceback
traceback.print_exc()
sys.exit(1)
if __name__ == "__main__":
main()

268
script/split_components_for_ci.py Executable file
View File

@@ -0,0 +1,268 @@
#!/usr/bin/env python3
"""Split components into batches with intelligent grouping.
This script analyzes components to identify which ones share common bus configurations
and intelligently groups them into batches to maximize the efficiency of the
component grouping system in CI.
Components with the same bus signature are placed in the same batch whenever possible,
allowing the test_build_components.py script to merge them into single builds.
"""
from __future__ import annotations
import argparse
from collections import defaultdict
import json
from pathlib import Path
import sys
# Add esphome to path
sys.path.insert(0, str(Path(__file__).parent.parent))
from script.analyze_component_buses import (
ISOLATED_COMPONENTS,
NO_BUSES_SIGNATURE,
analyze_all_components,
create_grouping_signature,
)
# Weighting for batch creation
# Isolated components can't be grouped/merged, so they count as 10x
# Groupable components can be merged into single builds, so they count as 1x
ISOLATED_WEIGHT = 10
GROUPABLE_WEIGHT = 1
def has_test_files(component_name: str, tests_dir: Path) -> bool:
"""Check if a component has test files.
Args:
component_name: Name of the component
tests_dir: Path to tests/components directory
Returns:
True if the component has test.*.yaml files
"""
component_dir = tests_dir / component_name
if not component_dir.exists() or not component_dir.is_dir():
return False
# Check for test.*.yaml files
return any(component_dir.glob("test.*.yaml"))
def create_intelligent_batches(
components: list[str],
tests_dir: Path,
batch_size: int = 40,
) -> list[list[str]]:
"""Create batches optimized for component grouping.
Args:
components: List of component names to batch
tests_dir: Path to tests/components directory
batch_size: Target size for each batch
Returns:
List of component batches (lists of component names)
"""
# Filter out components without test files
# Platform components like 'climate' and 'climate_ir' don't have test files
components_with_tests = [
comp for comp in components if has_test_files(comp, tests_dir)
]
# Log filtered components to stderr for debugging
if len(components_with_tests) < len(components):
filtered_out = set(components) - set(components_with_tests)
print(
f"Note: Filtered {len(filtered_out)} components without test files: "
f"{', '.join(sorted(filtered_out))}",
file=sys.stderr,
)
# Analyze all components to get their bus signatures
component_buses, non_groupable, _direct_bus_components = analyze_all_components(
tests_dir
)
# Group components by their bus signature ONLY (ignore platform)
# All platforms will be tested by test_build_components.py for each batch
# Key: signature, Value: list of components
signature_groups: dict[str, list[str]] = defaultdict(list)
for component in components_with_tests:
# Components that can't be grouped get unique signatures
# This includes both manually curated ISOLATED_COMPONENTS and
# automatically detected non_groupable components
# These can share a batch/runner but won't be grouped/merged
if component in ISOLATED_COMPONENTS or component in non_groupable:
signature_groups[f"isolated_{component}"].append(component)
continue
# Get signature from any platform (they should all have the same buses)
# Components not in component_buses were filtered out by has_test_files check
comp_platforms = component_buses[component]
for platform, buses in comp_platforms.items():
if buses:
signature = create_grouping_signature({platform: buses}, platform)
# Group by signature only - platform doesn't matter for batching
signature_groups[signature].append(component)
break # Only use first platform for grouping
else:
# No buses found for any platform - can be grouped together
signature_groups[NO_BUSES_SIGNATURE].append(component)
# Create batches by keeping signature groups together
# Components with the same signature stay in the same batches
batches = []
# Sort signature groups to prioritize groupable components
# 1. Put "isolated_*" signatures last (can't be grouped with others)
# 2. Sort groupable signatures by size (largest first)
# 3. "no_buses" components CAN be grouped together
def sort_key(item):
signature, components = item
is_isolated = signature.startswith("isolated_")
# Put "isolated_*" last (1), groupable first (0)
# Within each category, sort by size (largest first)
return (is_isolated, -len(components))
sorted_groups = sorted(signature_groups.items(), key=sort_key)
# Strategy: Create batches using weighted sizes
# - Isolated components count as 10x (since they can't be grouped/merged)
# - Groupable components count as 1x (can be merged into single builds)
# - This distributes isolated components across more runners
# - Ensures each runner has a good mix of groupable vs isolated components
current_batch = []
current_weight = 0
for signature, group_components in sorted_groups:
is_isolated = signature.startswith("isolated_")
weight_per_component = ISOLATED_WEIGHT if is_isolated else GROUPABLE_WEIGHT
for component in group_components:
# Check if adding this component would exceed the batch size
if current_weight + weight_per_component > batch_size and current_batch:
# Start a new batch
batches.append(current_batch)
current_batch = []
current_weight = 0
# Add component to current batch
current_batch.append(component)
current_weight += weight_per_component
# Don't forget the last batch
if current_batch:
batches.append(current_batch)
return batches
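A quick check of the weighting arithmetic: with the default batch_size of 40, a runner can take 40 groupable components but only 4 isolated ones, or any mix whose weighted sum stays at or under 40.

```python
ISOLATED_WEIGHT, GROUPABLE_WEIGHT = 10, 1
isolated, groupable = 3, 8
weight = isolated * ISOLATED_WEIGHT + groupable * GROUPABLE_WEIGHT
assert weight == 38  # 3 isolated + 8 groupable still fits in one batch of 40
```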
def main() -> int:
"""Main entry point."""
parser = argparse.ArgumentParser(
description="Split components into intelligent batches for CI testing"
)
parser.add_argument(
"--components",
"-c",
required=True,
help="JSON array of component names",
)
parser.add_argument(
"--batch-size",
"-b",
type=int,
default=40,
help="Target batch size (default: 40, weighted)",
)
parser.add_argument(
"--tests-dir",
type=Path,
default=Path("tests/components"),
help="Path to tests/components directory",
)
parser.add_argument(
"--output",
"-o",
choices=["json", "github"],
default="github",
help="Output format (json or github for GitHub Actions)",
)
args = parser.parse_args()
# Parse component list from JSON
try:
components = json.loads(args.components)
except json.JSONDecodeError as e:
print(f"Error parsing components JSON: {e}", file=sys.stderr)
return 1
if not isinstance(components, list):
print("Components must be a JSON array", file=sys.stderr)
return 1
# Create intelligent batches
batches = create_intelligent_batches(
components=components,
tests_dir=args.tests_dir,
batch_size=args.batch_size,
)
# Convert batches to space-separated strings for CI
batch_strings = [" ".join(batch) for batch in batches]
if args.output == "json":
# Output as JSON array
print(json.dumps(batch_strings))
else:
# Output for GitHub Actions (set output)
output_json = json.dumps(batch_strings)
print(f"components={output_json}")
# Print summary to stderr so it shows in CI logs
# Count actual components being batched
actual_components = sum(len(batch.split()) for batch in batch_strings)
# Re-analyze to get isolated component counts for summary
_, non_groupable, _ = analyze_all_components(args.tests_dir)
# Count isolated vs groupable components
all_batched_components = [comp for batch in batches for comp in batch]
isolated_count = sum(
1
for comp in all_batched_components
if comp in ISOLATED_COMPONENTS or comp in non_groupable
)
groupable_count = actual_components - isolated_count
print("\n=== Intelligent Batch Summary ===", file=sys.stderr)
print(f"Total components requested: {len(components)}", file=sys.stderr)
print(f"Components with test files: {actual_components}", file=sys.stderr)
print(f" - Groupable (weight=1): {groupable_count}", file=sys.stderr)
print(f" - Isolated (weight=10): {isolated_count}", file=sys.stderr)
if actual_components < len(components):
print(
f"Components skipped (no test files): {len(components) - actual_components}",
file=sys.stderr,
)
print(f"Number of batches: {len(batches)}", file=sys.stderr)
print(f"Batch size target (weighted): {args.batch_size}", file=sys.stderr)
if len(batches) > 0:
print(
f"Average components per batch: {actual_components / len(batches):.1f}",
file=sys.stderr,
)
print(file=sys.stderr)
return 0
if __name__ == "__main__":
sys.exit(main())


@@ -1,106 +0,0 @@
#!/usr/bin/env bash
set -e
help() {
echo "Usage: $0 [-e <config|compile|clean>] [-c <string>] [-t <string>]" 1>&2
echo 1>&2
echo " - e - Parameter for esphome command. Default compile. Common alternative is config." 1>&2
echo " - c - Component folder name to test. Default *. E.g. '-c logger'." 1>&2
echo " - t - Target name to test. Put '-t list' to display all possibilities. E.g. '-t esp32-s2-idf-51'." 1>&2
exit 1
}
# Parse parameter:
# - `e` - Parameter for `esphome` command. Default `compile`. Common alternative is `config`.
# - `c` - Component folder name to test. Default `*`.
esphome_command="compile"
target_component="*"
while getopts e:c:t: flag
do
case $flag in
e) esphome_command=${OPTARG};;
c) target_component=${OPTARG};;
t) requested_target_platform=${OPTARG};;
\?) help;;
esac
done
cd "$(dirname "$0")/.."
if ! [ -d "./tests/test_build_components/build" ]; then
mkdir ./tests/test_build_components/build
fi
start_esphome() {
if [ -n "$requested_target_platform" ] && [ "$requested_target_platform" != "$target_platform_with_version" ]; then
echo "Skipping $target_platform_with_version"
return
fi
# create dynamic yaml file in `build` folder.
# `./tests/test_build_components/build/[target_component].[test_name].[target_platform_with_version].yaml`
component_test_file="./tests/test_build_components/build/$target_component.$test_name.$target_platform_with_version.yaml"
cp $target_platform_file $component_test_file
if [[ "$OSTYPE" == "darwin"* ]]; then
# macOS sed is...different
sed -i '' "s!\$component_test_file!../../.$f!g" $component_test_file
else
sed -i "s!\$component_test_file!../../.$f!g" $component_test_file
fi
# Start esphome process
echo "> [$target_component] [$test_name] [$target_platform_with_version]"
set -x
# TODO: Validate escape of Command line substitution value
python3 -m esphome -s component_name $target_component -s component_dir ../../components/$target_component -s test_name $test_name -s target_platform $target_platform $esphome_command $component_test_file
{ set +x; } 2>/dev/null
}
# Find all test yaml files.
# - `./tests/components/[target_component]/[test_name].[target_platform].yaml`
# - `./tests/components/[target_component]/[test_name].all.yaml`
for f in ./tests/components/$target_component/*.*.yaml; do
[ -f "$f" ] || continue
IFS='/' read -r -a folder_name <<< "$f"
target_component="${folder_name[3]}"
IFS='.' read -r -a file_name <<< "${folder_name[4]}"
test_name="${file_name[0]}"
target_platform="${file_name[1]}"
file_name_parts=${#file_name[@]}
if [ "$target_platform" = "all" ] || [ $file_name_parts = 2 ]; then
# Test has *not* defined a specific target platform. Need to run tests for all possible target platforms.
for target_platform_file in ./tests/test_build_components/build_components_base.*.yaml; do
IFS='/' read -r -a folder_name <<< "$target_platform_file"
IFS='.' read -r -a file_name <<< "${folder_name[3]}"
target_platform="${file_name[1]}"
start_esphome
done
else
# Test has defined a specific target platform.
# Validate we have a base test yaml for selected platform.
# The target_platform is sourced from the following location.
# 1. `./tests/test_build_components/build_components_base.[target_platform].yaml`
# 2. `./tests/test_build_components/build_components_base.[target_platform]-ard.yaml`
target_platform_file="./tests/test_build_components/build_components_base.$target_platform.yaml"
if ! [ -f "$target_platform_file" ]; then
echo "No base test file [./tests/test_build_components/build_components_base.$target_platform.yaml] for component test [$f] found."
exit 1
fi
for target_platform_file in ./tests/test_build_components/build_components_base.$target_platform*.yaml; do
# trim off "./tests/test_build_components/build_components_base." prefix
target_platform_with_version=${target_platform_file:52}
# ...now remove suffix starting with "." leaving just the test target hardware and software platform (possibly with version)
# For example: "esp32-s3-idf-50"
target_platform_with_version=${target_platform_with_version%.*}
start_esphome
done
fi
done


@@ -0,0 +1 @@
test_build_components.py

script/test_build_components.py Executable file

@@ -0,0 +1,931 @@
#!/usr/bin/env python3
"""Test ESPHome component builds with intelligent grouping.
This script replaces the bash test_build_components script with Python,
adding support for intelligent component grouping based on shared bus
configurations to reduce CI build time.
Features:
- Analyzes components for shared common bus configs
- Groups compatible components together
- Merges configs for grouped components
- Uses --testing-mode for grouped tests
- Maintains backward compatibility with single component testing
"""
from __future__ import annotations
import argparse
from collections import defaultdict
import hashlib
import os
from pathlib import Path
import subprocess
import sys
# Add esphome to path
sys.path.insert(0, str(Path(__file__).parent.parent))
# pylint: disable=wrong-import-position
from script.analyze_component_buses import (
BASE_BUS_COMPONENTS,
ISOLATED_COMPONENTS,
NO_BUSES_SIGNATURE,
analyze_all_components,
create_grouping_signature,
is_platform_component,
uses_local_file_references,
)
from script.merge_component_configs import merge_component_configs
# Platform-specific maximum group sizes
# ESP8266 has limited IRAM and can't handle large component groups
PLATFORM_MAX_GROUP_SIZE = {
"esp8266-ard": 10, # ESP8266 Arduino has limited IRAM
"esp8266-idf": 10, # ESP8266 IDF also has limited IRAM
# BK72xx now uses BK7252 board (1.62MB flash vs 1.03MB) - no limit needed
# Other platforms can handle larger groups
}
def show_disk_space_if_ci(esphome_command: str) -> None:
"""Show disk space usage if running in CI during compile.
Args:
esphome_command: The esphome command being run (config/compile/clean)
"""
if os.environ.get("GITHUB_ACTIONS") and esphome_command == "compile":
print("\n" + "=" * 80)
print("Disk Space After Build:")
print("=" * 80)
subprocess.run(["df", "-h"], check=False)
print("=" * 80 + "\n")
def find_component_tests(
components_dir: Path, component_pattern: str = "*"
) -> dict[str, list[Path]]:
"""Find all component test files.
Args:
components_dir: Path to tests/components directory
component_pattern: Glob pattern for component names
Returns:
Dictionary mapping component name to list of test files
"""
component_tests = defaultdict(list)
for comp_dir in components_dir.glob(component_pattern):
if not comp_dir.is_dir():
continue
for test_file in comp_dir.glob("test.*.yaml"):
component_tests[comp_dir.name].append(test_file)
return dict(component_tests)
def parse_test_filename(test_file: Path) -> tuple[str, str]:
"""Parse test filename to extract test name and platform.
Args:
test_file: Path to test file
Returns:
Tuple of (test_name, platform)
"""
parts = test_file.stem.split(".")
if len(parts) == 2:
return parts[0], parts[1] # test, platform
return parts[0], "all"
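# Illustrative examples (hypothetical file names):
#   parse_test_filename(Path("test.esp32-idf.yaml")) -> ("test", "esp32-idf")
#   parse_test_filename(Path("test.yaml"))           -> ("test", "all")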
def get_platform_base_files(base_dir: Path) -> dict[str, list[Path]]:
"""Get all platform base files.
Args:
base_dir: Path to test_build_components directory
Returns:
Dictionary mapping platform to list of base files (for version variants)
"""
platform_files = defaultdict(list)
for base_file in base_dir.glob("build_components_base.*.yaml"):
# Extract platform from filename
# e.g., build_components_base.esp32-idf.yaml -> esp32-idf
# or build_components_base.esp32-idf-50.yaml -> esp32-idf
filename = base_file.stem
parts = filename.replace("build_components_base.", "").split("-")
# Platform is everything before version number (if present)
# Check if last part is a number (version)
platform = "-".join(parts[:-1]) if parts[-1].isdigit() else "-".join(parts)
platform_files[platform].append(base_file)
return dict(platform_files)
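# Illustrative result, assuming a plain and a versioned base file both exist:
#   {"esp32-idf": [Path("build_components_base.esp32-idf.yaml"),
#                  Path("build_components_base.esp32-idf-50.yaml")]}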
def extract_platform_with_version(base_file: Path) -> str:
"""Extract platform with version from base filename.
Args:
base_file: Path to base file
Returns:
Platform with version (e.g., "esp32-idf-50" or "esp32-idf")
"""
# Remove "build_components_base." prefix and ".yaml" suffix
return base_file.stem.replace("build_components_base.", "")
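# Illustrative examples:
#   build_components_base.esp32-idf-50.yaml -> "esp32-idf-50"
#   build_components_base.esp32-idf.yaml    -> "esp32-idf"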
def run_esphome_test(
component: str,
test_file: Path,
platform: str,
platform_with_version: str,
base_file: Path,
build_dir: Path,
esphome_command: str,
continue_on_fail: bool,
use_testing_mode: bool = False,
) -> tuple[bool, str]:
"""Run esphome test for a single component.
Args:
component: Component name
test_file: Path to component test file
platform: Platform name (e.g., "esp32-idf")
platform_with_version: Platform with version (e.g., "esp32-idf-50")
base_file: Path to platform base file
build_dir: Path to build directory
esphome_command: ESPHome command (config/compile)
continue_on_fail: Whether to continue on failure
use_testing_mode: Whether to use --testing-mode flag
Returns:
Tuple of (success status, command string)
"""
test_name = test_file.stem.split(".")[0]
# Create dynamic test file in build directory
output_file = build_dir / f"{component}.{test_name}.{platform_with_version}.yaml"
# Copy base file and substitute component test file reference
base_content = base_file.read_text()
# Get relative path from build dir to test file
repo_root = Path(__file__).parent.parent
component_test_ref = f"../../{test_file.relative_to(repo_root / 'tests')}"
output_content = base_content.replace("$component_test_file", component_test_ref)
output_file.write_text(output_content)
# Build esphome command
cmd = [
sys.executable,
"-m",
"esphome",
]
# Add --testing-mode if needed (must be before subcommand)
if use_testing_mode:
cmd.append("--testing-mode")
# Add substitutions
cmd.extend(
[
"-s",
"component_name",
component,
"-s",
"component_dir",
f"../../components/{component}",
"-s",
"test_name",
test_name,
"-s",
"target_platform",
platform,
]
)
# Add command and config file
cmd.extend([esphome_command, str(output_file)])
# Build command string for display/logging
cmd_str = " ".join(cmd)
# Run command
print(f"> [{component}] [{test_name}] [{platform_with_version}]")
if use_testing_mode:
print(" (using --testing-mode)")
try:
result = subprocess.run(cmd, check=False)
success = result.returncode == 0
# Show disk space after build in CI during compile
show_disk_space_if_ci(esphome_command)
if not success and not continue_on_fail:
# Print command immediately for failed tests
print(f"\n{'=' * 80}")
print("FAILED - Command to reproduce:")
print(f"{'=' * 80}")
print(cmd_str)
print()
raise subprocess.CalledProcessError(result.returncode, cmd)
return success, cmd_str
except subprocess.CalledProcessError:
# Re-raise if we're not continuing on fail
if not continue_on_fail:
raise
return False, cmd_str
def run_grouped_test(
components: list[str],
platform: str,
platform_with_version: str,
base_file: Path,
build_dir: Path,
tests_dir: Path,
esphome_command: str,
continue_on_fail: bool,
) -> tuple[bool, str]:
"""Run esphome test for a group of components with shared bus configs.
Args:
components: List of component names to test together
platform: Platform name (e.g., "esp32-idf")
platform_with_version: Platform with version (e.g., "esp32-idf-50")
base_file: Path to platform base file
build_dir: Path to build directory
tests_dir: Path to tests/components directory
esphome_command: ESPHome command (config/compile)
continue_on_fail: Whether to continue on failure
Returns:
Tuple of (success status, command string)
"""
# Create merged config
group_name = "_".join(components[:3]) # Use first 3 components for name
if len(components) > 3:
group_name += f"_plus_{len(components) - 3}"
# Create unique device name by hashing sorted component list + platform
# This prevents conflicts when different component groups are tested
sorted_components = sorted(components)
hash_input = "_".join(sorted_components) + "_" + platform
group_hash = hashlib.md5(hash_input.encode()).hexdigest()[:8]
device_name = f"comptest{platform.replace('-', '')}{group_hash}"
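# Illustrative example (hypothetical group; the md5 digest is stable per group):
#   components=["aht10", "bme280"], platform="esp32-idf"
#   -> group_hash = md5("aht10_bme280_esp32-idf")[:8]
#   -> device_name = "comptestesp32idf" + group_hash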
merged_config_file = build_dir / f"merged_{group_name}.{platform_with_version}.yaml"
try:
merge_component_configs(
component_names=components,
platform=platform_with_version,
tests_dir=tests_dir,
output_file=merged_config_file,
)
except Exception as e: # pylint: disable=broad-exception-caught
print(f"Error merging configs for {components}: {e}")
if not continue_on_fail:
raise
# Return empty command string since we failed before building the command
return False, f"# Failed during config merge: {e}"
# Create test file that includes merged config
output_file = build_dir / f"test_{group_name}.{platform_with_version}.yaml"
base_content = base_file.read_text()
merged_ref = merged_config_file.name
output_content = base_content.replace("$component_test_file", merged_ref)
output_file.write_text(output_content)
# Build esphome command with --testing-mode
cmd = [
sys.executable,
"-m",
"esphome",
"--testing-mode", # Required for grouped tests
"-s",
"component_name",
device_name, # Use unique hash-based device name
"-s",
"component_dir",
"../../components",
"-s",
"test_name",
"merged",
"-s",
"target_platform",
platform,
esphome_command,
str(output_file),
]
# Build command string for display/logging
cmd_str = " ".join(cmd)
# Run command
components_str = ", ".join(components)
print(f"> [GROUPED: {components_str}] [{platform_with_version}]")
print(" (using --testing-mode)")
try:
result = subprocess.run(cmd, check=False)
success = result.returncode == 0
# Show disk space after build in CI during compile
show_disk_space_if_ci(esphome_command)
if not success and not continue_on_fail:
# Print command immediately for failed tests
print(f"\n{'=' * 80}")
print("FAILED - Command to reproduce:")
print(f"{'=' * 80}")
print(cmd_str)
print()
raise subprocess.CalledProcessError(result.returncode, cmd)
return success, cmd_str
except subprocess.CalledProcessError:
# Re-raise if we're not continuing on fail
if not continue_on_fail:
raise
return False, cmd_str
def run_grouped_component_tests(
all_tests: dict[str, list[Path]],
platform_filter: str | None,
platform_bases: dict[str, list[Path]],
tests_dir: Path,
build_dir: Path,
esphome_command: str,
continue_on_fail: bool,
) -> tuple[set[tuple[str, str]], list[str], list[str], dict[str, str]]:
"""Run grouped component tests.
Args:
all_tests: Dictionary mapping component names to test files
platform_filter: Optional platform to filter by
platform_bases: Platform base files mapping
tests_dir: Path to tests/components directory
build_dir: Path to build directory
esphome_command: ESPHome command (config/compile)
continue_on_fail: Whether to continue on failure
Returns:
Tuple of (tested_components, passed_tests, failed_tests, failed_commands)
"""
tested_components = set()
passed_tests = []
failed_tests = []
failed_commands = {} # Map test_id to command string
# Group components by platform and bus signature
grouped_components: dict[tuple[str, str], list[str]] = defaultdict(list)
print("\n" + "=" * 80)
print("Analyzing components for intelligent grouping...")
print("=" * 80)
component_buses, non_groupable, direct_bus_components = analyze_all_components(
tests_dir
)
# Track why components can't be grouped (for detailed output)
non_groupable_reasons = {}
# Group by (platform, bus_signature)
for component, platforms in component_buses.items():
if component not in all_tests:
continue
# Skip components that must be tested in isolation
# These are shown separately and should not be in non_groupable_reasons
if component in ISOLATED_COMPONENTS:
continue
# Skip base bus components (these test the bus platforms themselves)
if component in BASE_BUS_COMPONENTS:
continue
# Skip components that use local file references or direct bus configs
if component in non_groupable:
# Track the reason (using pre-calculated results to avoid expensive re-analysis)
if component not in non_groupable_reasons:
if component in direct_bus_components:
non_groupable_reasons[component] = (
"Defines buses directly (not via packages) - NEEDS MIGRATION"
)
elif uses_local_file_references(tests_dir / component):
non_groupable_reasons[component] = (
"Uses local file references ($component_dir)"
)
elif is_platform_component(tests_dir / component):
non_groupable_reasons[component] = (
"Platform component (abstract base class)"
)
else:
non_groupable_reasons[component] = (
"Uses !extend or !remove directives"
)
continue
for platform, buses in platforms.items():
# Skip if platform doesn't match filter
if platform_filter and not platform.startswith(platform_filter):
continue
# Create signature for this component's bus configuration
# Components with no buses get NO_BUSES_SIGNATURE so they can be grouped together
if buses:
signature = create_grouping_signature({platform: buses}, platform)
else:
signature = NO_BUSES_SIGNATURE
# Add to grouped components (including those with no buses)
if signature:
grouped_components[(platform, signature)].append(component)
# Print detailed grouping plan
print("\nGrouping Plan:")
print("-" * 80)
# Show isolated components (must test individually due to known issues)
isolated_in_tests = [c for c in ISOLATED_COMPONENTS if c in all_tests]
if isolated_in_tests:
print(
f"\n{len(isolated_in_tests)} components must be tested in isolation (known build issues):"
)
for comp in sorted(isolated_in_tests):
reason = ISOLATED_COMPONENTS[comp]
print(f" - {comp}: {reason}")
# Show base bus components (test the bus platform implementations)
base_bus_in_tests = [c for c in BASE_BUS_COMPONENTS if c in all_tests]
if base_bus_in_tests:
print(
f"\n{len(base_bus_in_tests)} base bus platform components (tested individually):"
)
for comp in sorted(base_bus_in_tests):
print(f" - {comp}")
# Show excluded components with detailed reasons
if non_groupable_reasons:
excluded_in_tests = [c for c in non_groupable_reasons if c in all_tests]
if excluded_in_tests:
print(
f"\n{len(excluded_in_tests)} components excluded from grouping (each needs individual build):"
)
# Group by reason to show summary
direct_bus = [
c
for c in excluded_in_tests
if "NEEDS MIGRATION" in non_groupable_reasons.get(c, "")
]
if direct_bus:
print(
f"\n ⚠⚠⚠ {len(direct_bus)} DEFINE BUSES DIRECTLY - NEED MIGRATION TO PACKAGES:"
)
for comp in sorted(direct_bus):
print(f" - {comp}")
other_reasons = [
c
for c in excluded_in_tests
if "NEEDS MIGRATION" not in non_groupable_reasons.get(c, "")
]
if other_reasons and len(other_reasons) <= 10:
print("\n Other non-groupable components:")
for comp in sorted(other_reasons):
reason = non_groupable_reasons[comp]
print(f" - {comp}: {reason}")
elif other_reasons:
print(
f"\n Other non-groupable components: {len(other_reasons)} components"
)
# Distribute no_buses components into other groups to maximize efficiency
# Components with no buses can merge with any bus group since they have no conflicting requirements
no_buses_by_platform: dict[str, list[str]] = {}
for (platform, signature), components in list(grouped_components.items()):
if signature == NO_BUSES_SIGNATURE:
no_buses_by_platform[platform] = components
# Remove from grouped_components - we'll distribute them
del grouped_components[(platform, signature)]
# Distribute no_buses components into existing groups for each platform
for platform, no_buses_comps in no_buses_by_platform.items():
# Find all non-empty groups for this platform (excluding no_buses)
platform_groups = [
(sig, comps)
for (plat, sig), comps in grouped_components.items()
if plat == platform and sig != NO_BUSES_SIGNATURE
]
if platform_groups:
# Distribute no_buses components round-robin across existing groups
for i, comp in enumerate(no_buses_comps):
sig, _ = platform_groups[i % len(platform_groups)]
grouped_components[(platform, sig)].append(comp)
else:
# No other groups for this platform - keep no_buses components together
grouped_components[(platform, NO_BUSES_SIGNATURE)] = no_buses_comps
# Split groups that exceed platform-specific maximum sizes
# ESP8266 has limited IRAM and can't handle large component groups
split_groups = {}
for (platform, signature), components in list(grouped_components.items()):
max_size = PLATFORM_MAX_GROUP_SIZE.get(platform)
if max_size and len(components) > max_size:
# Split this group into smaller groups
print(
f"\n Splitting {platform} group (signature: {signature}) "
f"from {len(components)} to max {max_size} components per group"
)
# Remove original group
del grouped_components[(platform, signature)]
# Create split groups
for i in range(0, len(components), max_size):
split_components = components[i : i + max_size]
# Create unique signature for each split group
split_signature = f"{signature}_split{i // max_size + 1}"
split_groups[(platform, split_signature)] = split_components
# Add split groups back
grouped_components.update(split_groups)
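# Illustrative outcome (hypothetical counts): 23 esp8266-ard components sharing
# signature "i2c" become i2c_split1 (10), i2c_split2 (10) and i2c_split3 (3).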
groups_to_test = []
individual_tests = set() # Use set to avoid duplicates
for (platform, signature), components in sorted(grouped_components.items()):
if len(components) > 1:
groups_to_test.append((platform, signature, components))
# Note: Don't add single-component groups to individual_tests here
# They'll be added below when we check for ungrouped components
# Add components that weren't grouped on any platform
for component in all_tests:
if component not in [c for _, _, comps in groups_to_test for c in comps]:
individual_tests.add(component)
if groups_to_test:
print(f"\n{len(groups_to_test)} groups will be tested together:")
for platform, signature, components in groups_to_test:
component_list = ", ".join(sorted(components))
print(f" [{platform}] [{signature}]: {component_list}")
print(
f"{len(components)} components in 1 build (saves {len(components) - 1} builds)"
)
if individual_tests:
print(f"\n{len(individual_tests)} components will be tested individually:")
sorted_individual = sorted(individual_tests)
for comp in sorted_individual[:10]:
print(f" - {comp}")
if len(individual_tests) > 10:
print(f" ... and {len(individual_tests) - 10} more")
# Calculate actual build counts based on test files, not component counts
# Without grouping: every test file would be built separately
total_test_files = sum(len(test_files) for test_files in all_tests.values())
# With grouping:
# - 1 build per group (regardless of how many components)
# - Individual components still need all their platform builds
individual_test_file_count = sum(
len(all_tests[comp]) for comp in individual_tests if comp in all_tests
)
total_grouped_components = sum(len(comps) for _, _, comps in groups_to_test)
total_builds_with_grouping = len(groups_to_test) + individual_test_file_count
builds_saved = total_test_files - total_builds_with_grouping
print(f"\n{'=' * 80}")
print(
f"Summary: {total_builds_with_grouping} builds total (vs {total_test_files} without grouping)"
)
print(
f"{len(groups_to_test)} grouped builds ({total_grouped_components} components)"
)
print(
f"{individual_test_file_count} individual builds ({len(individual_tests)} components)"
)
if total_test_files > 0:
reduction_pct = (builds_saved / total_test_files) * 100
print(f" • Saves {builds_saved} builds ({reduction_pct:.1f}% reduction)")
print("=" * 80 + "\n")
# Execute grouped tests
for (platform, signature), components in grouped_components.items():
# Only group if we have multiple components with same signature
if len(components) <= 1:
continue
# Filter out components not in our test list
components_to_group = [c for c in components if c in all_tests]
if len(components_to_group) <= 1:
continue
# Get platform base files
if platform not in platform_bases:
continue
for base_file in platform_bases[platform]:
platform_with_version = extract_platform_with_version(base_file)
# Skip if platform filter doesn't match
if platform_filter and platform != platform_filter:
continue
if (
platform_filter
and platform_with_version != platform_filter
and not platform_with_version.startswith(f"{platform_filter}-")
):
continue
# Run grouped test
success, cmd_str = run_grouped_test(
components=components_to_group,
platform=platform,
platform_with_version=platform_with_version,
base_file=base_file,
build_dir=build_dir,
tests_dir=tests_dir,
esphome_command=esphome_command,
continue_on_fail=continue_on_fail,
)
# Mark all components as tested
for comp in components_to_group:
tested_components.add((comp, platform_with_version))
# Record result for each component - show all components in grouped tests
test_id = (
f"GROUPED[{','.join(components_to_group)}].{platform_with_version}"
)
if success:
passed_tests.append(test_id)
else:
failed_tests.append(test_id)
failed_commands[test_id] = cmd_str
return tested_components, passed_tests, failed_tests, failed_commands
def run_individual_component_test(
component: str,
test_file: Path,
platform: str,
platform_with_version: str,
base_file: Path,
build_dir: Path,
esphome_command: str,
continue_on_fail: bool,
tested_components: set[tuple[str, str]],
passed_tests: list[str],
failed_tests: list[str],
failed_commands: dict[str, str],
) -> None:
"""Run an individual component test if not already tested in a group.
Args:
component: Component name
test_file: Test file path
platform: Platform name
platform_with_version: Platform with version
base_file: Base file for platform
build_dir: Build directory
esphome_command: ESPHome command
continue_on_fail: Whether to continue on failure
tested_components: Set of already tested components
passed_tests: List to append passed test IDs
failed_tests: List to append failed test IDs
failed_commands: Dict to store failed test commands
"""
# Skip if already tested in a group
if (component, platform_with_version) in tested_components:
return
test_name = test_file.stem.split(".")[0]
success, cmd_str = run_esphome_test(
component=component,
test_file=test_file,
platform=platform,
platform_with_version=platform_with_version,
base_file=base_file,
build_dir=build_dir,
esphome_command=esphome_command,
continue_on_fail=continue_on_fail,
)
test_id = f"{component}.{test_name}.{platform_with_version}"
if success:
passed_tests.append(test_id)
else:
failed_tests.append(test_id)
failed_commands[test_id] = cmd_str
def test_components(
component_patterns: list[str],
platform_filter: str | None,
esphome_command: str,
continue_on_fail: bool,
enable_grouping: bool = True,
) -> int:
"""Test components with optional intelligent grouping.
Args:
component_patterns: List of component name patterns
platform_filter: Optional platform to filter by
esphome_command: ESPHome command (config/compile)
continue_on_fail: Whether to continue on failure
enable_grouping: Whether to enable component grouping
Returns:
Exit code (0 for success, 1 for failure)
"""
# Setup paths
repo_root = Path(__file__).parent.parent
tests_dir = repo_root / "tests" / "components"
build_components_dir = repo_root / "tests" / "test_build_components"
build_dir = build_components_dir / "build"
build_dir.mkdir(parents=True, exist_ok=True)
# Get platform base files
platform_bases = get_platform_base_files(build_components_dir)
# Find all component tests
all_tests = {}
for pattern in component_patterns:
all_tests.update(find_component_tests(tests_dir, pattern))
if not all_tests:
print(f"No components found matching: {component_patterns}")
return 1
print(f"Found {len(all_tests)} components to test")
# Run tests
failed_tests = []
passed_tests = []
tested_components = set() # Track which components were tested in groups
failed_commands = {} # Track commands for failed tests
# First, run grouped tests if grouping is enabled
if enable_grouping:
(
tested_components,
passed_tests,
failed_tests,
failed_commands,
) = run_grouped_component_tests(
all_tests=all_tests,
platform_filter=platform_filter,
platform_bases=platform_bases,
tests_dir=tests_dir,
build_dir=build_dir,
esphome_command=esphome_command,
continue_on_fail=continue_on_fail,
)
# Then run individual tests for components not in groups
for component, test_files in sorted(all_tests.items()):
for test_file in test_files:
test_name, platform = parse_test_filename(test_file)
# Handle "all" platform tests
if platform == "all":
# Run for all platforms
for plat, base_files in platform_bases.items():
if platform_filter and plat != platform_filter:
continue
for base_file in base_files:
platform_with_version = extract_platform_with_version(base_file)
run_individual_component_test(
component=component,
test_file=test_file,
platform=plat,
platform_with_version=platform_with_version,
base_file=base_file,
build_dir=build_dir,
esphome_command=esphome_command,
continue_on_fail=continue_on_fail,
tested_components=tested_components,
passed_tests=passed_tests,
failed_tests=failed_tests,
failed_commands=failed_commands,
)
else:
# Platform-specific test
if platform_filter and platform != platform_filter:
continue
if platform not in platform_bases:
print(f"No base file for platform: {platform}")
continue
for base_file in platform_bases[platform]:
platform_with_version = extract_platform_with_version(base_file)
# Skip if requested platform doesn't match
if (
platform_filter
and platform_with_version != platform_filter
and not platform_with_version.startswith(f"{platform_filter}-")
):
continue
run_individual_component_test(
component=component,
test_file=test_file,
platform=platform,
platform_with_version=platform_with_version,
base_file=base_file,
build_dir=build_dir,
esphome_command=esphome_command,
continue_on_fail=continue_on_fail,
tested_components=tested_components,
passed_tests=passed_tests,
failed_tests=failed_tests,
failed_commands=failed_commands,
)
# Print summary
print("\n" + "=" * 80)
print(f"Test Summary: {len(passed_tests)} passed, {len(failed_tests)} failed")
print("=" * 80)
if failed_tests:
print("\nFailed tests:")
for test in failed_tests:
print(f" - {test}")
# Print failed commands at the end for easy copy-paste from CI logs
print("\n" + "=" * 80)
print("Failed test commands (copy-paste to reproduce locally):")
print("=" * 80)
for test in failed_tests:
if test in failed_commands:
print(f"\n# {test}")
print(failed_commands[test])
print()
return 1
return 0
def main() -> int:
"""Main entry point."""
parser = argparse.ArgumentParser(
description="Test ESPHome component builds with intelligent grouping"
)
parser.add_argument(
"-e",
"--esphome-command",
default="compile",
choices=["config", "compile", "clean"],
help="ESPHome command to run (default: compile)",
)
parser.add_argument(
"-c",
"--components",
default="*",
help="Component pattern(s) to test (default: *). Comma-separated.",
)
parser.add_argument(
"-t",
"--target",
help="Target platform to test (e.g., esp32-idf)",
)
parser.add_argument(
"-f",
"--continue-on-fail",
action="store_true",
help="Continue testing even if a test fails",
)
parser.add_argument(
"--no-grouping",
action="store_true",
help="Disable component grouping (test each component individually)",
)
args = parser.parse_args()
# Parse component patterns
component_patterns = [p.strip() for p in args.components.split(",")]
return test_components(
component_patterns=component_patterns,
platform_filter=args.target,
esphome_command=args.esphome_command,
continue_on_fail=args.continue_on_fail,
enable_grouping=not args.no_grouping,
)
if __name__ == "__main__":
sys.exit(main())
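
A few invocation sketches for the script above (flags taken from its argparse definitions; component names are examples):

./script/test_build_components.py -e config -c adc -t esp32-idf
./script/test_build_components.py -e compile -c a01nyub,a02yyuw -t esp32-idf -f
./script/test_build_components.py -e compile -c adc --no-grouping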

script/test_component_grouping.py (new executable file, 227 lines)

@@ -0,0 +1,227 @@
#!/usr/bin/env python3
"""Test component grouping by finding and testing groups of components.
This script analyzes components, finds groups that can be tested together,
and runs test builds for those groups.
"""
from __future__ import annotations
import argparse
from pathlib import Path
import subprocess
import sys
# Add esphome to path
sys.path.insert(0, str(Path(__file__).parent.parent))
from script.analyze_component_buses import (
analyze_all_components,
group_components_by_signature,
)
def test_component_group(
components: list[str],
platform: str,
esphome_command: str = "compile",
dry_run: bool = False,
) -> bool:
"""Test a group of components together.
Args:
components: List of component names to test together
platform: Platform to test on (e.g., "esp32-idf")
esphome_command: ESPHome command to run (config/compile/clean)
dry_run: If True, only print the command without running it
Returns:
True if test passed, False otherwise
"""
components_str = ",".join(components)
cmd = [
"./script/test_build_components",
"-c",
components_str,
"-t",
platform,
"-e",
esphome_command,
]
print(f"\n{'=' * 80}")
print(f"Testing {len(components)} components on {platform}:")
for comp in components:
print(f" - {comp}")
print(f"{'=' * 80}")
print(f"Command: {' '.join(cmd)}\n")
if dry_run:
print("[DRY RUN] Skipping actual test")
return True
try:
result = subprocess.run(cmd, check=False)
return result.returncode == 0
except Exception as e:
print(f"Error running test: {e}")
return False
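# Illustrative call (hypothetical component names):
#   test_component_group(["adc", "ade7880"], "esp32-idf", "config")
#   -> runs: ./script/test_build_components -c adc,ade7880 -t esp32-idf -e config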
def main() -> None:
"""Main entry point."""
parser = argparse.ArgumentParser(
description="Test component grouping by finding and testing groups"
)
parser.add_argument(
"--platform",
"-p",
default="esp32-idf",
help="Platform to test (default: esp32-idf)",
)
parser.add_argument(
"-e",
"--esphome-command",
default="compile",
choices=["config", "compile", "clean"],
help="ESPHome command to run (default: compile)",
)
parser.add_argument(
"--all",
action="store_true",
help="Test all components (sets --min-size=1, --max-size=10000, --max-groups=10000)",
)
parser.add_argument(
"--min-size",
type=int,
default=3,
help="Minimum group size to test (default: 3)",
)
parser.add_argument(
"--max-size",
type=int,
default=10,
help="Maximum group size to test (default: 10)",
)
parser.add_argument(
"--max-groups",
type=int,
default=5,
help="Maximum number of groups to test (default: 5)",
)
parser.add_argument(
"--signature",
"-s",
help="Only test groups with this bus signature (e.g., 'spi', 'i2c', 'uart')",
)
parser.add_argument(
"--dry-run",
action="store_true",
help="Print commands without running them",
)
args = parser.parse_args()
# If --all is specified, test all components without grouping
if args.all:
# Get all components from tests/components directory
components_dir = Path("tests/components")
all_components = sorted(
[d.name for d in components_dir.iterdir() if d.is_dir()]
)
if not all_components:
print(f"\nNo components found in {components_dir}")
return
print(f"\nTesting all {len(all_components)} components together")
success = test_component_group(
all_components, args.platform, args.esphome_command, args.dry_run
)
# Print summary
print(f"\n{'=' * 80}")
print("TEST SUMMARY")
print(f"{'=' * 80}")
status = "✅ PASS" if success else "❌ FAIL"
print(f"{status} All components: {len(all_components)} components")
if not args.dry_run and not success:
sys.exit(1)
return
print("Analyzing all components...")
components, non_groupable, _ = analyze_all_components(Path("tests/components"))
print(f"Found {len(components)} components, {len(non_groupable)} non-groupable")
# Group components by signature for the platform
groups = group_components_by_signature(components, args.platform)
# Filter and sort groups
filtered_groups = []
for signature, comp_list in groups.items():
# Filter by signature if specified
if args.signature and signature != args.signature:
continue
# Remove non-groupable components
comp_list = [c for c in comp_list if c not in non_groupable]
# Filter by minimum size
if len(comp_list) < args.min_size:
continue
# If group is larger than max_size, we'll take a subset later
filtered_groups.append((signature, comp_list))
# Sort by group size (largest first)
filtered_groups.sort(key=lambda x: len(x[1]), reverse=True)
# Limit number of groups
filtered_groups = filtered_groups[: args.max_groups]
if not filtered_groups:
print("\nNo groups found matching criteria:")
print(f" - Platform: {args.platform}")
print(f" - Size: {args.min_size}-{args.max_size}")
if args.signature:
print(f" - Signature: {args.signature}")
return
print(f"\nFound {len(filtered_groups)} groups to test:")
for signature, comp_list in filtered_groups:
print(f" [{signature}]: {len(comp_list)} components")
# Test each group
results = []
for signature, comp_list in filtered_groups:
# Limit to max_size if group is larger
if len(comp_list) > args.max_size:
comp_list = comp_list[: args.max_size]
success = test_component_group(
comp_list, args.platform, args.esphome_command, args.dry_run
)
results.append((signature, comp_list, success))
if not args.dry_run and not success:
print(f"\n❌ FAILED: {signature} group")
break
# Print summary
print(f"\n{'=' * 80}")
print("TEST SUMMARY")
print(f"{'=' * 80}")
for signature, comp_list, success in results:
status = "✅ PASS" if success else "❌ FAIL"
print(f"{status} [{signature}]: {len(comp_list)} components")
# Exit with error if any tests failed
if not args.dry_run and any(not success for _, _, success in results):
sys.exit(1)
if __name__ == "__main__":
main()
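
Invocation sketches (flags as defined above; values are examples):

./script/test_component_grouping.py -e config --all
./script/test_component_grouping.py -p esp32-idf -s i2c --max-groups 2 --dry-run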

@@ -1,11 +1,4 @@
-uart:
-  - id: uart_a01nyub
-    tx_pin: ${tx_pin}
-    rx_pin: ${rx_pin}
-    baud_rate: 9600
-
 sensor:
   - platform: a01nyub
     id: a01nyub_sensor
     name: a01nyub Distance
-    uart_id: uart_a01nyub
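
The config hunks that follow all apply one migration pattern: a component's test files stop defining their own bus and instead pull a shared bus in via packages. A minimal sketch of the resulting shape (the i2c_bus ID is taken from the ade7880/ade7953 hunks below; UART components simply drop their explicit uart_id):

# tests/components/<component>/test.esp32-idf.yaml (sketch)
packages:
  uart: !include ../../test_build_components/common/uart/esp32-idf.yaml

<<: !include common.yaml

# common.yaml then uses the shared bus implicitly (single UART) or via the
# shared ID, e.g. `i2c_id: i2c_bus` for I2C components.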

@@ -1,5 +0,0 @@
-substitutions:
-  tx_pin: GPIO17
-  rx_pin: GPIO16
-
-<<: !include common.yaml

@@ -1,5 +0,0 @@
-substitutions:
-  tx_pin: GPIO4
-  rx_pin: GPIO5
-
-<<: !include common.yaml

@@ -1,3 +1,6 @@
+packages:
+  uart: !include ../../test_build_components/common/uart/esp32-c3-idf.yaml
+
 substitutions:
   tx_pin: GPIO4
   rx_pin: GPIO5

@@ -1,5 +1,8 @@
 substitutions:
-  tx_pin: GPIO17
-  rx_pin: GPIO16
+  tx_pin: GPIO4
+  rx_pin: GPIO5
+
+packages:
+  uart: !include ../../test_build_components/common/uart/esp32-idf.yaml

 <<: !include common.yaml

@@ -1,5 +1,4 @@
-substitutions:
-  tx_pin: GPIO4
-  rx_pin: GPIO5
+packages:
+  uart: !include ../../test_build_components/common/uart/esp8266-ard.yaml

 <<: !include common.yaml

@@ -1,5 +1,4 @@
-substitutions:
-  tx_pin: GPIO4
-  rx_pin: GPIO5
+packages:
+  uart: !include ../../test_build_components/common/uart/rp2040-ard.yaml

 <<: !include common.yaml

@@ -1,11 +1,4 @@
-uart:
-  - id: uart_a02yyuw
-    tx_pin: ${tx_pin}
-    rx_pin: ${rx_pin}
-    baud_rate: 9600
-
 sensor:
   - platform: a02yyuw
     id: a02yyuw_sensor
     name: a02yyuw Distance
-    uart_id: uart_a02yyuw

@@ -1,5 +0,0 @@
-substitutions:
-  tx_pin: GPIO17
-  rx_pin: GPIO16
-
-<<: !include common.yaml

@@ -1,5 +0,0 @@
-substitutions:
-  tx_pin: GPIO4
-  rx_pin: GPIO5
-
-<<: !include common.yaml

@@ -1,3 +1,6 @@
+packages:
+  uart: !include ../../test_build_components/common/uart/esp32-c3-idf.yaml
+
 substitutions:
   tx_pin: GPIO4
   rx_pin: GPIO5

@@ -1,5 +1,8 @@
 substitutions:
-  tx_pin: GPIO17
-  rx_pin: GPIO16
+  tx_pin: GPIO4
+  rx_pin: GPIO5
+
+packages:
+  uart: !include ../../test_build_components/common/uart/esp32-idf.yaml

 <<: !include common.yaml

@@ -1,5 +1,4 @@
-substitutions:
-  tx_pin: GPIO4
-  rx_pin: GPIO5
+packages:
+  uart: !include ../../test_build_components/common/uart/esp8266-ard.yaml

 <<: !include common.yaml

@@ -1,5 +1,4 @@
-substitutions:
-  tx_pin: GPIO4
-  rx_pin: GPIO5
+packages:
+  uart: !include ../../test_build_components/common/uart/rp2040-ard.yaml

 <<: !include common.yaml

@@ -1,6 +0,0 @@
-substitutions:
-  step_pin: GPIO22
-  dir_pin: GPIO23
-  sleep_pin: GPIO25
-
-<<: !include common.yaml

@@ -1,6 +0,0 @@
-substitutions:
-  step_pin: GPIO2
-  dir_pin: GPIO3
-  sleep_pin: GPIO5
-
-<<: !include common.yaml

@@ -1,6 +1,6 @@
 substitutions:
   step_pin: GPIO22
-  dir_pin: GPIO23
+  dir_pin: GPIO4
   sleep_pin: GPIO25

 <<: !include common.yaml

@@ -1,6 +1,6 @@
 substitutions:
   step_pin: GPIO1
   dir_pin: GPIO2
-  sleep_pin: GPIO5
+  sleep_pin: GPIO0

 <<: !include common.yaml

@@ -1,5 +1,5 @@
 substitutions:
-  gate_pin: GPIO18
-  zero_cross_pin: GPIO19
+  gate_pin: GPIO4
+  zero_cross_pin: GPIO5

 <<: !include common.yaml

@@ -1,5 +0,0 @@
-substitutions:
-  gate_pin: GPIO5
-  zero_cross_pin: GPIO4
-
-<<: !include common.yaml

@@ -1,5 +1,5 @@
 substitutions:
-  gate_pin: GPIO5
-  zero_cross_pin: GPIO4
+  gate_pin: GPIO0
+  zero_cross_pin: GPIO2

 <<: !include common.yaml

@@ -1,11 +0,0 @@
-sensor:
-  - id: my_sensor
-    platform: adc
-    name: ADC Test sensor
-    update_interval: "1:01"
-    attenuation: 2.5db
-    unit_of_measurement: "°C"
-    icon: "mdi:water-percent"
-    accuracy_decimals: 5
-    setup_priority: -100
-    force_update: true

@@ -1,7 +1,11 @@
-packages:
-  base: !include common.yaml
-
 sensor:
-  - id: !extend my_sensor
+  - id: my_sensor
+    platform: adc
     pin: P23
-    attenuation: !remove
+    name: ADC Test sensor
+    update_interval: "1:01"
+    unit_of_measurement: "°C"
+    icon: "mdi:water-percent"
+    accuracy_decimals: 5
+    setup_priority: -100
+    force_update: true

@@ -1,6 +0,0 @@
-packages:
-  base: !include common.yaml
-
-sensor:
-  - id: !extend my_sensor
-    pin: A0

@@ -1,6 +0,0 @@
-packages:
-  base: !include common.yaml
-
-sensor:
-  - id: !extend my_sensor
-    pin: 4

@@ -1,6 +1,12 @@
-packages:
-  base: !include common.yaml
-
 sensor:
-  - id: !extend my_sensor
-    pin: 4
+  - id: my_sensor
+    platform: adc
+    pin: GPIO1
+    name: ADC Test sensor
+    update_interval: "1:01"
+    attenuation: 2.5db
+    unit_of_measurement: "°C"
+    icon: "mdi:water-percent"
+    accuracy_decimals: 5
+    setup_priority: -100
+    force_update: true

@@ -1,6 +1,12 @@
-packages:
-  base: !include common.yaml
-
 sensor:
-  - id: !extend my_sensor
+  - id: my_sensor
+    platform: adc
     pin: A0
+    name: ADC Test sensor
+    update_interval: "1:01"
+    attenuation: 2.5db
+    unit_of_measurement: "°C"
+    icon: "mdi:water-percent"
+    accuracy_decimals: 5
+    setup_priority: -100
+    force_update: true

@@ -1,6 +1,12 @@
-packages:
-  base: !include common.yaml
-
 sensor:
-  - id: !extend my_sensor
-    pin: GPIO50
+  - id: my_sensor
+    platform: adc
+    pin: GPIO16
+    name: ADC Test sensor
+    update_interval: "1:01"
+    attenuation: 2.5db
+    unit_of_measurement: "°C"
+    icon: "mdi:water-percent"
+    accuracy_decimals: 5
+    setup_priority: -100
+    force_update: true

@@ -1,6 +0,0 @@
-packages:
-  base: !include common.yaml
-
-sensor:
-  - id: !extend my_sensor
-    pin: 1

@@ -1,6 +1,12 @@
-packages:
-  base: !include common.yaml
-
 sensor:
-  - id: !extend my_sensor
-    pin: 1
+  - id: my_sensor
+    platform: adc
+    pin: GPIO1
+    name: ADC Test sensor
+    update_interval: "1:01"
+    attenuation: 2.5db
+    unit_of_measurement: "°C"
+    icon: "mdi:water-percent"
+    accuracy_decimals: 5
+    setup_priority: -100
+    force_update: true

@@ -1,6 +0,0 @@
-packages:
-  base: !include common.yaml
-
-sensor:
-  - id: !extend my_sensor
-    pin: 1

@@ -1,6 +1,12 @@
-packages:
-  base: !include common.yaml
-
 sensor:
-  - id: !extend my_sensor
-    pin: 1
+  - id: my_sensor
+    platform: adc
+    pin: GPIO1
+    name: ADC Test sensor
+    update_interval: "1:01"
+    attenuation: 2.5db
+    unit_of_measurement: "°C"
+    icon: "mdi:water-percent"
+    accuracy_decimals: 5
+    setup_priority: -100
+    force_update: true

@@ -1,7 +1,11 @@
-packages:
-  base: !include common.yaml
-
 sensor:
-  - id: !extend my_sensor
+  - id: my_sensor
+    platform: adc
     pin: VCC
-    attenuation: !remove
+    name: ADC Test sensor
+    update_interval: "1:01"
+    unit_of_measurement: "°C"
+    icon: "mdi:water-percent"
+    accuracy_decimals: 5
+    setup_priority: -100
+    force_update: true

@@ -1,7 +1,11 @@
-packages:
-  base: !include common.yaml
-
 sensor:
-  - id: !extend my_sensor
-    pin: PA0
-    attenuation: !remove
+  - id: my_sensor
+    platform: adc
+    pin: A5
+    name: ADC Test sensor
+    update_interval: "1:01"
+    unit_of_measurement: "°C"
+    icon: "mdi:water-percent"
+    accuracy_decimals: 5
+    setup_priority: -100
+    force_update: true

@@ -1,7 +1,11 @@
-packages:
-  base: !include common.yaml
-
 sensor:
-  - id: !extend my_sensor
+  - id: my_sensor
+    platform: adc
     pin: VCC
-    attenuation: !remove
+    name: ADC Test sensor
+    update_interval: "1:01"
+    unit_of_measurement: "°C"
+    icon: "mdi:water-percent"
+    accuracy_decimals: 5
+    setup_priority: -100
+    force_update: true

@@ -1,9 +1,3 @@
-spi:
-  - id: spi_adc128s102
-    clk_pin: ${clk_pin}
-    mosi_pin: ${mosi_pin}
-    miso_pin: ${miso_pin}
-
 adc128s102:
   cs_pin: ${cs_pin}
   id: adc128s102_adc

@@ -1,7 +0,0 @@
-substitutions:
-  clk_pin: GPIO16
-  mosi_pin: GPIO17
-  miso_pin: GPIO15
-  cs_pin: GPIO12
-
-<<: !include common.yaml

@@ -1,7 +0,0 @@
-substitutions:
-  clk_pin: GPIO6
-  mosi_pin: GPIO7
-  miso_pin: GPIO5
-  cs_pin: GPIO2
-
-<<: !include common.yaml

@@ -1,7 +1,7 @@
 substitutions:
-  clk_pin: GPIO6
-  mosi_pin: GPIO7
-  miso_pin: GPIO5
   cs_pin: GPIO2

+packages:
+  spi: !include ../../test_build_components/common/spi/esp32-c3-idf.yaml
+
 <<: !include common.yaml

@@ -1,7 +1,7 @@
 substitutions:
-  clk_pin: GPIO16
-  mosi_pin: GPIO17
-  miso_pin: GPIO15
   cs_pin: GPIO12

+packages:
+  spi: !include ../../test_build_components/common/spi/esp32-idf.yaml
+
 <<: !include common.yaml

@@ -1,7 +1,10 @@
 substitutions:
-  clk_pin: GPIO14
-  mosi_pin: GPIO13
-  miso_pin: GPIO12
+  clk_pin: GPIO0
+  mosi_pin: GPIO2
+  miso_pin: GPIO16
   cs_pin: GPIO15

+packages:
+  spi: !include ../../test_build_components/common/spi/esp8266-ard.yaml
+
 <<: !include common.yaml

@@ -4,4 +4,7 @@ substitutions:
   miso_pin: GPIO4
   cs_pin: GPIO5

+packages:
+  spi: !include ../../test_build_components/common/spi/rp2040-ard.yaml
+
 <<: !include common.yaml

@@ -1,4 +0,0 @@
-substitutions:
-  pin: GPIO2
-
-<<: !include common-ard-esp32_rmt_led_strip.yaml

@@ -1,4 +0,0 @@
-substitutions:
-  pin: GPIO2
-
-<<: !include common-ard-esp32_rmt_led_strip.yaml

@@ -1,4 +0,0 @@
-substitutions:
-  pin: GPIO2
-
-<<: !include common-ard-fastled.yaml

@@ -1,11 +1,6 @@
-i2c:
-  - id: i2c_ade7880
-    scl: ${scl_pin}
-    sda: ${sda_pin}
-
 sensor:
   - platform: ade7880
-    i2c_id: i2c_ade7880
+    i2c_id: i2c_bus
     irq0_pin: ${irq0_pin}
     irq1_pin: ${irq1_pin}
     reset_pin: ${reset_pin}

@@ -1,8 +0,0 @@
-substitutions:
-  scl_pin: GPIO5
-  sda_pin: GPIO4
-  irq0_pin: GPIO13
-  irq1_pin: GPIO15
-  reset_pin: GPIO16
-
-<<: !include common.yaml

@@ -1,8 +0,0 @@
-substitutions:
-  scl_pin: GPIO5
-  sda_pin: GPIO4
-  irq0_pin: GPIO6
-  irq1_pin: GPIO7
-  reset_pin: GPIO10
-
-<<: !include common.yaml

@@ -1,8 +1,9 @@
 substitutions:
-  scl_pin: GPIO5
-  sda_pin: GPIO4
   irq0_pin: GPIO6
   irq1_pin: GPIO7
-  reset_pin: GPIO10
+  reset_pin: GPIO9

+packages:
+  i2c: !include ../../test_build_components/common/i2c/esp32-c3-idf.yaml
+
 <<: !include common.yaml

@@ -1,8 +1,9 @@
 substitutions:
-  scl_pin: GPIO5
-  sda_pin: GPIO4
   irq0_pin: GPIO13
   irq1_pin: GPIO15
-  reset_pin: GPIO16
+  reset_pin: GPIO12

+packages:
+  i2c: !include ../../test_build_components/common/i2c/esp32-idf.yaml
+
 <<: !include common.yaml

@@ -1,8 +1,9 @@
 substitutions:
-  scl_pin: GPIO5
-  sda_pin: GPIO4
   irq0_pin: GPIO13
   irq1_pin: GPIO15
   reset_pin: GPIO16

+packages:
+  i2c: !include ../../test_build_components/common/i2c/esp8266-ard.yaml
+
 <<: !include common.yaml

@@ -1,8 +1,9 @@
 substitutions:
-  scl_pin: GPIO5
-  sda_pin: GPIO4
   irq0_pin: GPIO13
   irq1_pin: GPIO15
   reset_pin: GPIO16

+packages:
+  i2c: !include ../../test_build_components/common/i2c/rp2040-ard.yaml
+
 <<: !include common.yaml

@@ -1,20 +1,13 @@
-i2c:
-  - id: i2c_ade7953
-    scl: ${scl_pin}
-    sda: ${sda_pin}
-
 sensor:
   - platform: ade7953_i2c
+    i2c_id: i2c_bus
     irq_pin: ${irq_pin}
     voltage:
       name: ADE7953 Voltage
-      id: ade7953_voltage
     current_a:
       name: ADE7953 Current A
-      id: ade7953_current_a
     current_b:
       name: ADE7953 Current B
-      id: ade7953_current_b
     power_factor_a:
       name: ADE7953 Power Factor A
     power_factor_b:

Some files were not shown because too many files have changed in this diff.