Merge branch 'dev' into bad_ind_flash_esp8266
(mirror of https://github.com/esphome/esphome.git)
@@ -396,7 +396,10 @@ def check_permissions(port: str):
     )


-def upload_program(config: ConfigType, args: ArgsProtocol, host: str) -> int | str:
+def upload_program(
+    config: ConfigType, args: ArgsProtocol, devices: list[str]
+) -> int | str:
+    host = devices[0]
     try:
         module = importlib.import_module("esphome.components." + CORE.target_platform)
         if getattr(module, "upload_program")(config, args, host):
@@ -433,10 +436,10 @@ def upload_program(config: ConfigType, args: ArgsProtocol, host: str) -> int | str:

     remote_port = int(ota_conf[CONF_PORT])
     password = ota_conf.get(CONF_PASSWORD, "")
+    binary = args.file if getattr(args, "file", None) is not None else CORE.firmware_bin

     # Check if we should use MQTT for address resolution
     # This happens when no device was specified, or the current host is "MQTT"/"OTA"
-    devices: list[str] = args.device or []
     if (
         CONF_MQTT in config  # pylint: disable=too-many-boolean-expressions
         and (not devices or host in ("MQTT", "OTA"))

@@ -447,14 +450,13 @@ def upload_program(config: ConfigType, args: ArgsProtocol, host: str) -> int | str:
     ):
         from esphome import mqtt

-        host = mqtt.get_esphome_device_ip(
+        devices = [
+            mqtt.get_esphome_device_ip(
                 config, args.username, args.password, args.client_id
             )
+        ]

-    if getattr(args, "file", None) is not None:
-        return espota2.run_ota(host, remote_port, password, args.file)
-
-    return espota2.run_ota(host, remote_port, password, CORE.firmware_bin)
+    return espota2.run_ota(devices, remote_port, password, binary)


 def show_logs(config: ConfigType, args: ArgsProtocol, devices: list[str]) -> int | None:
@@ -551,17 +553,11 @@ def command_upload(args: ArgsProtocol, config: ConfigType) -> int | None:
         purpose="uploading",
     )

-    # Try each device until one succeeds
-    exit_code = 1
-    for device in devices:
-        _LOGGER.info("Uploading to %s", device)
-        exit_code = upload_program(config, args, device)
+    exit_code = upload_program(config, args, devices)
     if exit_code == 0:
         _LOGGER.info("Successfully uploaded program.")
         return 0
-        if len(devices) > 1:
-            _LOGGER.warning("Failed to upload to %s", device)
-
+    else:
+        _LOGGER.warning("Failed to upload to %s", devices)
     return exit_code
@@ -64,7 +64,7 @@ void AbsoluteHumidityComponent::loop() {
     ESP_LOGW(TAG, "No valid state from humidity sensor!");
   }
   this->publish_state(NAN);
-  this->status_set_warning("Unable to calculate absolute humidity.");
+  this->status_set_warning(LOG_STR("Unable to calculate absolute humidity."));
   return;
 }
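The change above recurs throughout this commit: warning messages move from plain `const char *` literals to `LogString *` via `LOG_STR(...)`, so that on ESP8266 the text can be kept in flash (PROGMEM) instead of RAM. Roughly, the mechanism looks like the following sketch — a paraphrase of the idea, not a verbatim copy of esphome/core/log.h:

// Sketch of the LOG_STR mechanism. LogString is an opaque marker type that is
// never dereferenced directly; it only records "this pointer may be a flash address".
struct LogString;

#ifdef USE_STORE_LOG_STR_IN_FLASH  // set on ESP8266
// PSTR() places the literal in PROGMEM; readers must go through flash-aware helpers.
#define LOG_STR(s) (reinterpret_cast<const LogString *>(PSTR(s)))
#define LOG_STR_ARG(s) ((PGM_P) (s))
#else
// On platforms with a unified address space the casts are free and harmless.
#define LOG_STR(s) (reinterpret_cast<const LogString *>(s))
#define LOG_STR_ARG(s) (reinterpret_cast<const char *>(s))
#endif
#define LOG_STR_LITERAL(s) (s)  // for call sites that need a plain literal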
@@ -96,7 +96,7 @@ void AHT10Component::read_data_() {
     ESP_LOGD(TAG, "Read attempt %d at %ums", this->read_count_, (unsigned) (millis() - this->start_time_));
   }
   if (this->read(data, 6) != i2c::ERROR_OK) {
-    this->status_set_warning("Read failed, will retry");
+    this->status_set_warning(LOG_STR("Read failed, will retry"));
     this->restart_read_();
     return;
   }

@@ -113,7 +113,7 @@ void AHT10Component::read_data_() {
   } else {
     ESP_LOGD(TAG, "Invalid humidity, retrying");
     if (this->write(AHT10_MEASURE_CMD, sizeof(AHT10_MEASURE_CMD)) != i2c::ERROR_OK) {
-      this->status_set_warning(ESP_LOG_MSG_COMM_FAIL);
+      this->status_set_warning(LOG_STR(ESP_LOG_MSG_COMM_FAIL));
     }
     this->restart_read_();
     return;

@@ -144,7 +144,7 @@ void AHT10Component::update() {
     return;
   this->start_time_ = millis();
   if (this->write(AHT10_MEASURE_CMD, sizeof(AHT10_MEASURE_CMD)) != i2c::ERROR_OK) {
-    this->status_set_warning(ESP_LOG_MSG_COMM_FAIL);
+    this->status_set_warning(LOG_STR(ESP_LOG_MSG_COMM_FAIL));
     return;
   }
   this->restart_read_();
@@ -16,6 +16,7 @@ from esphome.const import (
     DEVICE_CLASS_ENERGY,
     DEVICE_CLASS_POWER,
     DEVICE_CLASS_POWER_FACTOR,
+    DEVICE_CLASS_REACTIVE_POWER,
     DEVICE_CLASS_VOLTAGE,
     ICON_CURRENT_AC,
     ICON_LIGHTBULB,

@@ -78,6 +79,7 @@ CONFIG_SCHEMA = (
             unit_of_measurement=UNIT_VOLT_AMPS_REACTIVE,
             icon=ICON_LIGHTBULB,
             accuracy_decimals=2,
+            device_class=DEVICE_CLASS_REACTIVE_POWER,
             state_class=STATE_CLASS_MEASUREMENT,
         ),
         cv.Optional(CONF_POWER_FACTOR): sensor.sensor_schema(
@@ -17,10 +17,12 @@ from esphome.const import (
     CONF_REACTIVE_POWER,
     CONF_REVERSE_ACTIVE_ENERGY,
     CONF_VOLTAGE,
+    DEVICE_CLASS_APPARENT_POWER,
     DEVICE_CLASS_CURRENT,
     DEVICE_CLASS_ENERGY,
     DEVICE_CLASS_POWER,
     DEVICE_CLASS_POWER_FACTOR,
+    DEVICE_CLASS_REACTIVE_POWER,
     DEVICE_CLASS_TEMPERATURE,
     DEVICE_CLASS_VOLTAGE,
     ENTITY_CATEGORY_DIAGNOSTIC,

@@ -100,13 +102,13 @@ ATM90E32_PHASE_SCHEMA = cv.Schema(
             unit_of_measurement=UNIT_VOLT_AMPS_REACTIVE,
             icon=ICON_LIGHTBULB,
             accuracy_decimals=2,
-            device_class=DEVICE_CLASS_POWER,
+            device_class=DEVICE_CLASS_REACTIVE_POWER,
             state_class=STATE_CLASS_MEASUREMENT,
         ),
         cv.Optional(CONF_APPARENT_POWER): sensor.sensor_schema(
            unit_of_measurement=UNIT_VOLT_AMPS,
            accuracy_decimals=2,
-            device_class=DEVICE_CLASS_POWER,
+            device_class=DEVICE_CLASS_APPARENT_POWER,
            state_class=STATE_CLASS_MEASUREMENT,
        ),
        cv.Optional(CONF_POWER_FACTOR): sensor.sensor_schema(
@@ -12,7 +12,7 @@ constexpr static const uint8_t AXS_READ_TOUCHPAD[11] = {0xb5, 0xab, 0xa5, 0x5a,

 #define ERROR_CHECK(err) \
   if ((err) != i2c::ERROR_OK) { \
-    this->status_set_warning("Failed to communicate"); \
+    this->status_set_warning(LOG_STR("Failed to communicate")); \
     return; \
   }
@@ -493,7 +493,7 @@ void BedJetHub::dump_config() {
                 "  ble_client.app_id: %d\n"
                 "  ble_client.conn_id: %d",
                 this->get_name().c_str(), this->parent()->app_id, this->parent()->get_conn_id());
-  LOG_UPDATE_INTERVAL(this)
+  LOG_UPDATE_INTERVAL(this);
   ESP_LOGCONFIG(TAG, "  Child components (%d):", this->children_.size());
   for (auto *child : this->children_) {
     ESP_LOGCONFIG(TAG, "  - %s", child->describe().c_str());
@@ -149,7 +149,7 @@ void BL0942::setup() {
   this->write_reg_(BL0942_REG_USR_WRPROT, 0);

   if (this->read_reg_(BL0942_REG_MODE) != mode)
-    this->status_set_warning("BL0942 setup failed!");
+    this->status_set_warning(LOG_STR("BL0942 setup failed!"));

   this->flush();
 }
@@ -152,7 +152,7 @@ void CCS811Component::send_env_data_() {
 void CCS811Component::dump_config() {
   ESP_LOGCONFIG(TAG, "CCS811");
   LOG_I2C_DEVICE(this)
-  LOG_UPDATE_INTERVAL(this)
+  LOG_UPDATE_INTERVAL(this);
   LOG_SENSOR("  ", "CO2 Sensor", this->co2_);
   LOG_SENSOR("  ", "TVOC Sensor", this->tvoc_);
   LOG_TEXT_SENSOR("  ", "Firmware Version Sensor", this->version_)
@@ -64,7 +64,7 @@ bool DallasTemperatureSensor::read_scratch_pad_() {
     }
   } else {
     ESP_LOGW(TAG, "'%s' - reading scratch pad failed bus reset", this->get_name().c_str());
-    this->status_set_warning("bus reset failed");
+    this->status_set_warning(LOG_STR("bus reset failed"));
   }
   return success;
 }

@@ -124,7 +124,7 @@ bool DallasTemperatureSensor::check_scratch_pad_() {
            crc8(this->scratch_pad_, 8));
 #endif
   if (!chksum_validity) {
-    this->status_set_warning("scratch pad checksum invalid");
+    this->status_set_warning(LOG_STR("scratch pad checksum invalid"));
     ESP_LOGD(TAG, "Scratch pad: %02X.%02X.%02X.%02X.%02X.%02X.%02X.%02X.%02X (%02X)", this->scratch_pad_[0],
              this->scratch_pad_[1], this->scratch_pad_[2], this->scratch_pad_[3], this->scratch_pad_[4],
              this->scratch_pad_[5], this->scratch_pad_[6], this->scratch_pad_[7], this->scratch_pad_[8],
@@ -2,11 +2,11 @@

 #include "esphome/core/defines.h"
 #ifdef USE_OTA
+#include "esphome/components/ota/ota_backend.h"
+#include "esphome/components/socket/socket.h"
 #include "esphome/core/helpers.h"
 #include "esphome/core/log.h"
 #include "esphome/core/preferences.h"
-#include "esphome/components/ota/ota_backend.h"
-#include "esphome/components/socket/socket.h"

 namespace esphome {
@@ -492,7 +492,7 @@ void EthernetComponent::start_connect_() {
   global_eth_component->ipv6_count_ = 0;
 #endif /* USE_NETWORK_IPV6 */
   this->connect_begin_ = millis();
-  this->status_set_warning("waiting for IP configuration");
+  this->status_set_warning(LOG_STR("waiting for IP configuration"));

   esp_err_t err;
   err = esp_netif_set_hostname(this->eth_netif_, App.get_name().c_str());
@@ -11,22 +11,22 @@ static const uint8_t NUMBER_OF_READ_RETRIES = 5;
 void GDK101Component::update() {
   uint8_t data[2];
   if (!this->read_dose_1m_(data)) {
-    this->status_set_warning("Failed to read dose 1m");
+    this->status_set_warning(LOG_STR("Failed to read dose 1m"));
     return;
   }

   if (!this->read_dose_10m_(data)) {
-    this->status_set_warning("Failed to read dose 10m");
+    this->status_set_warning(LOG_STR("Failed to read dose 10m"));
     return;
   }

   if (!this->read_status_(data)) {
-    this->status_set_warning("Failed to read status");
+    this->status_set_warning(LOG_STR("Failed to read status"));
     return;
   }

   if (!this->read_measurement_duration_(data)) {
-    this->status_set_warning("Failed to read measurement duration");
+    this->status_set_warning(LOG_STR("Failed to read measurement duration"));
     return;
   }
   this->status_clear_warning();
@@ -4,6 +4,7 @@
 #include <cstdint>
 #include <cstring>
 #include <limits>
+#include <type_traits>
 #include "esphome/core/hal.h"

 namespace esphome::gpio_expander {
@@ -11,18 +12,27 @@ namespace esphome::gpio_expander {
 /// @brief A class to cache the read state of a GPIO expander.
 /// This class caches reads between GPIO Pins which are on the same bank.
 /// This means that for reading whole Port (ex. 8 pins) component needs only one
-/// I2C/SPI read per main loop call. It assumes, that one bit in byte identifies one GPIO pin
+/// I2C/SPI read per main loop call. It assumes that one bit in byte identifies one GPIO pin.
 ///
 /// Template parameters:
-/// T - Type which represents internal register. Could be uint8_t or uint16_t. Adjust to
-///     match size of your internal GPIO bank register.
-/// N - Number of pins
-template<typename T, T N> class CachedGpioExpander {
+/// T - Type which represents internal bank register. Could be uint8_t or uint16_t.
+///     Choose based on how your I/O expander reads pins:
+///     * uint8_t: For chips that read banks separately (8 pins at a time)
+///       Examples: MCP23017 (2x8-bit banks), TCA9555 (2x8-bit banks)
+///     * uint16_t: For chips that read all pins at once (up to 16 pins)
+///       Examples: PCF8574/8575 (8/16 pins), PCA9554/9555 (8/16 pins)
+/// N - Total number of pins (maximum 65535)
+/// P - Type for pin number parameters (automatically selected based on N:
+///     uint8_t for N<=256, uint16_t for N>256). Can be explicitly specified
+///     if needed (e.g., for components like SN74HC165 with >256 pins)
+template<typename T, uint16_t N, typename P = typename std::conditional<(N > 256), uint16_t, uint8_t>::type>
+class CachedGpioExpander {
  public:
   /// @brief Read the state of the given pin. This will invalidate the cache for the given pin number.
   /// @param pin Pin number to read
   /// @return Pin state
-  bool digital_read(T pin) {
-    const uint8_t bank = pin / BANK_SIZE;
+  bool digital_read(P pin) {
+    const P bank = pin / BANK_SIZE;
     const T pin_mask = (1 << (pin % BANK_SIZE));
     // Check if specific pin cache is valid
     if (this->read_cache_valid_[bank] & pin_mask) {

@@ -38,21 +48,31 @@ template<typename T, T N> class CachedGpioExpander {
     return this->digital_read_cache(pin);
   }

-  void digital_write(T pin, bool value) { this->digital_write_hw(pin, value); }
+  void digital_write(P pin, bool value) { this->digital_write_hw(pin, value); }

  protected:
-  /// @brief Call component low level function to read GPIO state from device
-  virtual bool digital_read_hw(T pin) = 0;
-  /// @brief Call component read function from internal cache.
-  virtual bool digital_read_cache(T pin) = 0;
-  /// @brief Call component low level function to write GPIO state to device
-  virtual void digital_write_hw(T pin, bool value) = 0;
+  /// @brief Read GPIO bank from hardware into internal state
+  /// @param pin Pin number (used to determine which bank to read)
+  /// @return true if read succeeded, false on communication error
+  /// @note This does NOT return the pin state. It returns whether the read operation succeeded.
+  ///       The actual pin state should be returned by digital_read_cache().
+  virtual bool digital_read_hw(P pin) = 0;
+
+  /// @brief Get cached pin value from internal state
+  /// @param pin Pin number to read
+  /// @return Pin state (true = HIGH, false = LOW)
+  virtual bool digital_read_cache(P pin) = 0;
+
+  /// @brief Write GPIO state to hardware
+  /// @param pin Pin number to write
+  /// @param value Pin state to write (true = HIGH, false = LOW)
+  virtual void digital_write_hw(P pin, bool value) = 0;

   /// @brief Invalidate cache. This function should be called in component loop().
   void reset_pin_cache_() { memset(this->read_cache_valid_, 0x00, CACHE_SIZE_BYTES); }

-  static constexpr uint8_t BITS_PER_BYTE = 8;
-  static constexpr uint8_t BANK_SIZE = sizeof(T) * BITS_PER_BYTE;
+  static constexpr uint16_t BITS_PER_BYTE = 8;
+  static constexpr uint16_t BANK_SIZE = sizeof(T) * BITS_PER_BYTE;
   static constexpr size_t BANKS = N / BANK_SIZE;
   static constexpr size_t CACHE_SIZE_BYTES = BANKS * sizeof(T);
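To see how the three hooks fit together, here is a minimal hypothetical 8-pin I2C expander built on this interface. The class and register names (MyExpander, REG_INPUT, REG_OUTPUT) are illustrative, not an actual ESPHome component:

// Hypothetical 8-pin expander: T = uint8_t (one 8-bit bank), N = 8, P defaults to uint8_t.
class MyExpander : public Component,
                   public i2c::I2CDevice,
                   public gpio_expander::CachedGpioExpander<uint8_t, 8> {
 public:
  // Invalidate the cache once per main loop so each loop sees fresh hardware state.
  void loop() override { this->reset_pin_cache_(); }

 protected:
  bool digital_read_hw(uint8_t pin) override {
    // One bus read refreshes the whole bank; report success/failure only.
    return this->read_register(REG_INPUT /* chip-specific */, &this->input_mask_, 1) == i2c::ERROR_OK;
  }
  bool digital_read_cache(uint8_t pin) override { return this->input_mask_ & (1 << pin); }
  void digital_write_hw(uint8_t pin, bool value) override {
    if (value) {
      this->output_mask_ |= (1 << pin);
    } else {
      this->output_mask_ &= ~(1 << pin);
    }
    this->write_register(REG_OUTPUT /* chip-specific */, &this->output_mask_, 1);
  }

  uint8_t input_mask_{0};
  uint8_t output_mask_{0};
};

With this shape, the first digital_read() of a loop costs one I2C transaction and the remaining pins are served from input_mask_; the PCA6416A migration later in this commit follows the same pattern with two banks (CachedGpioExpander<uint8_t, 16>).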
@@ -57,7 +57,7 @@ void GroveGasMultichannelV2Component::update() {
 void GroveGasMultichannelV2Component::dump_config() {
   ESP_LOGCONFIG(TAG, "Grove Multichannel Gas Sensor V2");
   LOG_I2C_DEVICE(this)
-  LOG_UPDATE_INTERVAL(this)
+  LOG_UPDATE_INTERVAL(this);
   LOG_SENSOR("  ", "Nitrogen Dioxide", this->nitrogen_dioxide_sensor_);
   LOG_SENSOR("  ", "Ethanol", this->ethanol_sensor_);
   LOG_SENSOR("  ", "Carbon Monoxide", this->carbon_monoxide_sensor_);
@@ -20,7 +20,7 @@ static const size_t MAX_BUTTONS = 4;  // max number of buttons scanned

 #define ERROR_CHECK(err) \
   if ((err) != i2c::ERROR_OK) { \
-    this->status_set_warning(ESP_LOG_MSG_COMM_FAIL); \
+    this->status_set_warning(LOG_STR(ESP_LOG_MSG_COMM_FAIL)); \
     return; \
   }
@@ -42,7 +42,7 @@ void HLW8012Component::dump_config() {
                 "  Current resistor: %.1f mΩ\n"
                 "  Voltage Divider: %.1f",
                 this->change_mode_every_, this->current_resistor_ * 1000.0f, this->voltage_divider_);
-  LOG_UPDATE_INTERVAL(this)
+  LOG_UPDATE_INTERVAL(this);
   LOG_SENSOR("  ", "Voltage", this->voltage_sensor_);
   LOG_SENSOR("  ", "Current", this->current_sensor_);
   LOG_SENSOR("  ", "Power", this->power_sensor_);
@@ -15,7 +15,7 @@ static const char *const TAG = "honeywellabp2";
 void HONEYWELLABP2Sensor::read_sensor_data() {
   if (this->read(raw_data_, 7) != i2c::ERROR_OK) {
     ESP_LOGE(TAG, ESP_LOG_MSG_COMM_FAIL);
-    this->status_set_warning("couldn't read sensor data");
+    this->status_set_warning(LOG_STR("couldn't read sensor data"));
     return;
   }
   float press_counts = encode_uint24(raw_data_[1], raw_data_[2], raw_data_[3]);  // calculate digital pressure counts

@@ -31,7 +31,7 @@ void HONEYWELLABP2Sensor::read_sensor_data() {
 void HONEYWELLABP2Sensor::start_measurement() {
   if (this->write(i2c_cmd_, 3) != i2c::ERROR_OK) {
     ESP_LOGE(TAG, ESP_LOG_MSG_COMM_FAIL);
-    this->status_set_warning("couldn't start measurement");
+    this->status_set_warning(LOG_STR("couldn't start measurement"));
     return;
   }
   this->measurement_running_ = true;

@@ -40,7 +40,7 @@ void HONEYWELLABP2Sensor::start_measurement() {
 bool HONEYWELLABP2Sensor::is_measurement_ready() {
   if (this->read(raw_data_, 1) != i2c::ERROR_OK) {
     ESP_LOGE(TAG, ESP_LOG_MSG_COMM_FAIL);
-    this->status_set_warning("couldn't check measurement");
+    this->status_set_warning(LOG_STR("couldn't check measurement"));
     return false;
   }
   if ((raw_data_[0] & (0x1 << STATUS_BIT_BUSY)) > 0) {

@@ -53,7 +53,7 @@ bool HONEYWELLABP2Sensor::is_measurement_ready() {
 void HONEYWELLABP2Sensor::measurement_timeout() {
   ESP_LOGE(TAG, "Timeout!");
   this->measurement_running_ = false;
-  this->status_set_warning("measurement timed out");
+  this->status_set_warning(LOG_STR("measurement timed out"));
 }

 float HONEYWELLABP2Sensor::get_pressure() { return this->last_pressure_; }
@@ -246,14 +246,35 @@ void Logger::add_on_log_callback(std::function<void(uint8_t, const char *, const char *)> &&callback) {
   this->log_callback_.add(std::move(callback));
 }
 float Logger::get_setup_priority() const { return setup_priority::BUS + 500.0f; }

+#ifdef USE_STORE_LOG_STR_IN_FLASH
+// ESP8266: PSTR() cannot be used in array initializers, so we need to declare
+// each string separately as a global constant first
+static const char LOG_LEVEL_NONE[] PROGMEM = "NONE";
+static const char LOG_LEVEL_ERROR[] PROGMEM = "ERROR";
+static const char LOG_LEVEL_WARN[] PROGMEM = "WARN";
+static const char LOG_LEVEL_INFO[] PROGMEM = "INFO";
+static const char LOG_LEVEL_CONFIG[] PROGMEM = "CONFIG";
+static const char LOG_LEVEL_DEBUG[] PROGMEM = "DEBUG";
+static const char LOG_LEVEL_VERBOSE[] PROGMEM = "VERBOSE";
+static const char LOG_LEVEL_VERY_VERBOSE[] PROGMEM = "VERY_VERBOSE";
+
+static const LogString *const LOG_LEVELS[] = {
+    reinterpret_cast<const LogString *>(LOG_LEVEL_NONE),    reinterpret_cast<const LogString *>(LOG_LEVEL_ERROR),
+    reinterpret_cast<const LogString *>(LOG_LEVEL_WARN),    reinterpret_cast<const LogString *>(LOG_LEVEL_INFO),
+    reinterpret_cast<const LogString *>(LOG_LEVEL_CONFIG),  reinterpret_cast<const LogString *>(LOG_LEVEL_DEBUG),
+    reinterpret_cast<const LogString *>(LOG_LEVEL_VERBOSE), reinterpret_cast<const LogString *>(LOG_LEVEL_VERY_VERBOSE),
+};
+#else
 static const char *const LOG_LEVELS[] = {"NONE", "ERROR", "WARN", "INFO", "CONFIG", "DEBUG", "VERBOSE", "VERY_VERBOSE"};
+#endif

 void Logger::dump_config() {
   ESP_LOGCONFIG(TAG,
                 "Logger:\n"
                 "  Max Level: %s\n"
                 "  Initial Level: %s",
-                LOG_LEVELS[ESPHOME_LOG_LEVEL], LOG_LEVELS[this->current_level_]);
+                LOG_STR_ARG(LOG_LEVELS[ESPHOME_LOG_LEVEL]), LOG_STR_ARG(LOG_LEVELS[this->current_level_]));
 #ifndef USE_HOST
   ESP_LOGCONFIG(TAG,
                 "  Log Baud Rate: %" PRIu32 "\n"

@@ -267,14 +288,14 @@ void Logger::dump_config() {
 #endif

   for (auto &it : this->log_levels_) {
-    ESP_LOGCONFIG(TAG, "  Level for '%s': %s", it.first.c_str(), LOG_LEVELS[it.second]);
+    ESP_LOGCONFIG(TAG, "  Level for '%s': %s", it.first.c_str(), LOG_STR_ARG(LOG_LEVELS[it.second]));
   }
 }

 void Logger::set_log_level(uint8_t level) {
   if (level > ESPHOME_LOG_LEVEL) {
     level = ESPHOME_LOG_LEVEL;
-    ESP_LOGW(TAG, "Cannot set log level higher than pre-compiled %s", LOG_LEVELS[ESPHOME_LOG_LEVEL]);
+    ESP_LOGW(TAG, "Cannot set log level higher than pre-compiled %s", LOG_STR_ARG(LOG_LEVELS[ESPHOME_LOG_LEVEL]));
   }
   this->current_level_ = level;
   this->level_callback_.call(level);
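The comment about PSTR() is worth unpacking: PSTR() expands to a GCC statement expression that declares a function-local PROGMEM array, so it is only valid inside a function body. A namespace-scope table therefore needs named PROGMEM objects first — hence the shape of the hunk above. In miniature (hypothetical names):

// Does NOT compile on ESP8266: PSTR() is a statement expression and cannot
// appear in a namespace-scope initializer.
// static const char *const LEVELS[] = {PSTR("NONE"), PSTR("ERROR")};

// Works: name each flash string, then build the table from their addresses.
static const char LEVEL_NONE[] PROGMEM = "NONE";
static const char LEVEL_ERROR[] PROGMEM = "ERROR";
static const char *const LEVELS[] = {LEVEL_NONE, LEVEL_ERROR};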
@@ -6,7 +6,7 @@ namespace m5stack_8angle {
 void M5Stack8AngleSwitchBinarySensor::update() {
   int8_t out = this->parent_->read_switch();
   if (out == -1) {
-    this->status_set_warning("Could not read binary sensor state from M5Stack 8Angle.");
+    this->status_set_warning(LOG_STR("Could not read binary sensor state from M5Stack 8Angle."));
     return;
   }
   this->publish_state(out != 0);

@@ -7,7 +7,7 @@ void M5Stack8AngleKnobSensor::update() {
   if (this->parent_ != nullptr) {
     int32_t raw_pos = this->parent_->read_knob_pos_raw(this->channel_, this->bits_);
     if (raw_pos == -1) {
-      this->status_set_warning("Could not read knob position from M5Stack 8Angle.");
+      this->status_set_warning(LOG_STR("Could not read knob position from M5Stack 8Angle."));
       return;
     }
     if (this->raw_) {
@@ -22,7 +22,7 @@ void MAX17043Component::update() {

   if (this->voltage_sensor_ != nullptr) {
     if (!this->read_byte_16(MAX17043_VCELL, &raw_voltage)) {
-      this->status_set_warning("Unable to read MAX17043_VCELL");
+      this->status_set_warning(LOG_STR("Unable to read MAX17043_VCELL"));
     } else {
       float voltage = (1.25 * (float) (raw_voltage >> 4)) / 1000.0;
       this->voltage_sensor_->publish_state(voltage);

@@ -31,7 +31,7 @@ void MAX17043Component::update() {
   }
   if (this->battery_remaining_sensor_ != nullptr) {
     if (!this->read_byte_16(MAX17043_SOC, &raw_percent)) {
-      this->status_set_warning("Unable to read MAX17043_SOC");
+      this->status_set_warning(LOG_STR("Unable to read MAX17043_SOC"));
     } else {
       float percent = (float) ((raw_percent >> 8) + 0.003906f * (raw_percent & 0x00ff));
       this->battery_remaining_sensor_->publish_state(percent);
@@ -8,7 +8,7 @@ static const char *const TAG = "mcp23x08_base";

 bool MCP23X08Base::digital_read_hw(uint8_t pin) {
   if (!this->read_reg(mcp23x08_base::MCP23X08_GPIO, &this->input_mask_)) {
-    this->status_set_warning(ESP_LOG_MSG_COMM_FAIL);
+    this->status_set_warning(LOG_STR(ESP_LOG_MSG_COMM_FAIL));
     return false;
   }
   return true;
@@ -11,13 +11,13 @@ bool MCP23X17Base::digital_read_hw(uint8_t pin) {
   uint8_t data;
   if (pin < 8) {
     if (!this->read_reg(mcp23x17_base::MCP23X17_GPIOA, &data)) {
-      this->status_set_warning(ESP_LOG_MSG_COMM_FAIL);
+      this->status_set_warning(LOG_STR(ESP_LOG_MSG_COMM_FAIL));
       return false;
     }
     this->input_mask_ = encode_uint16(this->input_mask_ >> 8, data);
   } else {
     if (!this->read_reg(mcp23x17_base::MCP23X17_GPIOB, &data)) {
-      this->status_set_warning(ESP_LOG_MSG_COMM_FAIL);
+      this->status_set_warning(LOG_STR(ESP_LOG_MSG_COMM_FAIL));
       return false;
     }
     this->input_mask_ = encode_uint16(data, this->input_mask_ & 0xFF);
@@ -14,6 +14,7 @@ from esphome.const import (

 CODEOWNERS = ["@Mat931"]
 DEPENDENCIES = ["i2c"]
+AUTO_LOAD = ["gpio_expander"]
 MULTI_CONF = True
 pca6416a_ns = cg.esphome_ns.namespace("pca6416a")
@@ -51,6 +51,11 @@ void PCA6416AComponent::setup() {
             this->status_has_error());
 }

+void PCA6416AComponent::loop() {
+  // Invalidate cache at the start of each loop
+  this->reset_pin_cache_();
+}
+
 void PCA6416AComponent::dump_config() {
   if (this->has_pullup_) {
     ESP_LOGCONFIG(TAG, "PCAL6416A:");

@@ -63,15 +68,25 @@ void PCA6416AComponent::dump_config() {
   }
 }

-bool PCA6416AComponent::digital_read(uint8_t pin) {
-  uint8_t bit = pin % 8;
+bool PCA6416AComponent::digital_read_hw(uint8_t pin) {
   uint8_t reg_addr = pin < 8 ? PCA6416A_INPUT0 : PCA6416A_INPUT1;
   uint8_t value = 0;
-  this->read_register_(reg_addr, &value);
-  return value & (1 << bit);
+  if (!this->read_register_(reg_addr, &value)) {
+    return false;
+  }
+
+  // Update the appropriate part of input_mask_
+  if (pin < 8) {
+    this->input_mask_ = (this->input_mask_ & 0xFF00) | value;
+  } else {
+    this->input_mask_ = (this->input_mask_ & 0x00FF) | (uint16_t(value) << 8);
+  }
+  return true;
 }

-void PCA6416AComponent::digital_write(uint8_t pin, bool value) {
+bool PCA6416AComponent::digital_read_cache(uint8_t pin) { return this->input_mask_ & (1 << pin); }
+
+void PCA6416AComponent::digital_write_hw(uint8_t pin, bool value) {
   uint8_t reg_addr = pin < 8 ? PCA6416A_OUTPUT0 : PCA6416A_OUTPUT1;
   this->update_register_(pin, value, reg_addr);
 }
@@ -3,20 +3,20 @@
 #include "esphome/core/component.h"
 #include "esphome/core/hal.h"
 #include "esphome/components/i2c/i2c.h"
+#include "esphome/components/gpio_expander/cached_gpio.h"

 namespace esphome {
 namespace pca6416a {

-class PCA6416AComponent : public Component, public i2c::I2CDevice {
+class PCA6416AComponent : public Component,
+                          public i2c::I2CDevice,
+                          public gpio_expander::CachedGpioExpander<uint8_t, 16> {
  public:
   PCA6416AComponent() = default;

   /// Check i2c availability and setup masks
   void setup() override;
-  /// Helper function to read the value of a pin.
-  bool digital_read(uint8_t pin);
-  /// Helper function to write the value of a pin.
-  void digital_write(uint8_t pin, bool value);
+  void loop() override;
   /// Helper function to set the pin mode of a pin.
   void pin_mode(uint8_t pin, gpio::Flags flags);

@@ -25,6 +25,11 @@ class PCA6416AComponent : public Component, public i2c::I2CDevice {
   void dump_config() override;

  protected:
+  // Virtual methods from CachedGpioExpander
+  bool digital_read_hw(uint8_t pin) override;
+  bool digital_read_cache(uint8_t pin) override;
+  void digital_write_hw(uint8_t pin, bool value) override;
+
   bool read_register_(uint8_t reg, uint8_t *value);
   bool write_register_(uint8_t reg, uint8_t value);
   void update_register_(uint8_t pin, bool pin_value, uint8_t reg_addr);

@@ -32,6 +37,8 @@ class PCA6416AComponent : public Component, public i2c::I2CDevice {
   /// The mask to write as output state - 1 means HIGH, 0 means LOW
   uint8_t output_0_{0x00};
   uint8_t output_1_{0x00};
+  /// Cache for input values (16-bit combined for both banks)
+  uint16_t input_mask_{0x00};
   /// Storage for last I2C error seen
   esphome::i2c::ErrorCode last_error_;
   /// Only the PCAL6416A has pull-up resistors
@@ -68,7 +68,7 @@ bool PI4IOE5V6408Component::read_gpio_outputs_() {

   uint8_t data;
   if (!this->read_byte(PI4IOE5V6408_REGISTER_OUT_SET, &data)) {
-    this->status_set_warning("Failed to read output register");
+    this->status_set_warning(LOG_STR("Failed to read output register"));
     return false;
   }
   this->output_mask_ = data;

@@ -82,7 +82,7 @@ bool PI4IOE5V6408Component::read_gpio_modes_() {

   uint8_t data;
   if (!this->read_byte(PI4IOE5V6408_REGISTER_IO_DIR, &data)) {
-    this->status_set_warning("Failed to read GPIO modes");
+    this->status_set_warning(LOG_STR("Failed to read GPIO modes"));
     return false;
   }
 #if ESPHOME_LOG_LEVEL >= ESPHOME_LOG_LEVEL_VERBOSE

@@ -99,7 +99,7 @@ bool PI4IOE5V6408Component::digital_read_hw(uint8_t pin) {

   uint8_t data;
   if (!this->read_byte(PI4IOE5V6408_REGISTER_IN_STATE, &data)) {
-    this->status_set_warning("Failed to read GPIO state");
+    this->status_set_warning(LOG_STR("Failed to read GPIO state"));
     return false;
   }
   this->input_mask_ = data;

@@ -117,7 +117,7 @@ void PI4IOE5V6408Component::digital_write_hw(uint8_t pin, bool value) {
     this->output_mask_ &= ~(1 << pin);
   }
   if (!this->write_byte(PI4IOE5V6408_REGISTER_OUT_SET, this->output_mask_)) {
-    this->status_set_warning("Failed to write output register");
+    this->status_set_warning(LOG_STR("Failed to write output register"));
     return;
   }
 #if ESPHOME_LOG_LEVEL >= ESPHOME_LOG_LEVEL_VERBOSE

@@ -131,15 +131,15 @@ bool PI4IOE5V6408Component::write_gpio_modes_() {
     return false;

   if (!this->write_byte(PI4IOE5V6408_REGISTER_IO_DIR, this->mode_mask_)) {
-    this->status_set_warning("Failed to write GPIO modes");
+    this->status_set_warning(LOG_STR("Failed to write GPIO modes"));
     return false;
   }
   if (!this->write_byte(PI4IOE5V6408_REGISTER_PULL_SELECT, this->pull_up_down_mask_)) {
-    this->status_set_warning("Failed to write GPIO pullup/pulldown");
+    this->status_set_warning(LOG_STR("Failed to write GPIO pullup/pulldown"));
     return false;
   }
   if (!this->write_byte(PI4IOE5V6408_REGISTER_PULL_ENABLE, this->pull_enable_mask_)) {
-    this->status_set_warning("Failed to write GPIO pull enable");
+    this->status_set_warning(LOG_STR("Failed to write GPIO pull enable"));
     return false;
   }
 #if ESPHOME_LOG_LEVEL >= ESPHOME_LOG_LEVEL_VERBOSE
@@ -18,7 +18,7 @@ void IRAM_ATTR PulseWidthSensorStore::gpio_intr(PulseWidthSensorStore *arg) {

 void PulseWidthSensor::dump_config() {
   LOG_SENSOR("", "Pulse Width", this);
-  LOG_UPDATE_INTERVAL(this)
+  LOG_UPDATE_INTERVAL(this);
   LOG_PIN("  Pin: ", this->pin_);
 }
 void PulseWidthSensor::update() {
@@ -211,7 +211,7 @@ void SGP4xComponent::measure_raw_() {

   if (!this->write_command(command, data, 2)) {
     ESP_LOGD(TAG, "write error (%d)", this->last_error_);
-    this->status_set_warning("measurement request failed");
+    this->status_set_warning(LOG_STR("measurement request failed"));
     return;
   }

@@ -220,7 +220,7 @@ void SGP4xComponent::measure_raw_() {
   raw_data[1] = 0;
   if (!this->read_data(raw_data, response_words)) {
     ESP_LOGD(TAG, "read error (%d)", this->last_error_);
-    this->status_set_warning("measurement read failed");
+    this->status_set_warning(LOG_STR("measurement read failed"));
     this->voc_index_ = this->nox_index_ = UINT16_MAX;
     return;
   }
@@ -65,7 +65,7 @@ void SHT4XComponent::update() {
   // Send command
   if (!this->write_command(MEASURECOMMANDS[this->precision_])) {
     // Warning will be printed only if warning status is not set yet
-    this->status_set_warning("Failed to send measurement command");
+    this->status_set_warning(LOG_STR("Failed to send measurement command"));
     return;
   }
@@ -56,7 +56,7 @@ void SoundLevelComponent::loop() {
     }
   } else {
     if (!this->status_has_warning()) {
-      this->status_set_warning("Microphone isn't running, can't compute statistics");
+      this->status_set_warning(LOG_STR("Microphone isn't running, can't compute statistics"));

       // Deallocate buffers, if necessary
       this->stop_();
@@ -50,7 +50,7 @@ bool TCA9555Component::read_gpio_outputs_() {
     return false;
   uint8_t data[2];
   if (!this->read_bytes(TCA9555_OUTPUT_PORT_REGISTER_0, data, 2)) {
-    this->status_set_warning("Failed to read output register");
+    this->status_set_warning(LOG_STR("Failed to read output register"));
     return false;
   }
   this->output_mask_ = (uint16_t(data[1]) << 8) | (uint16_t(data[0]) << 0);

@@ -64,7 +64,7 @@ bool TCA9555Component::read_gpio_modes_() {
   uint8_t data[2];
   bool success = this->read_bytes(TCA9555_CONFIGURATION_PORT_0, data, 2);
   if (!success) {
-    this->status_set_warning("Failed to read mode register");
+    this->status_set_warning(LOG_STR("Failed to read mode register"));
     return false;
   }
   this->mode_mask_ = (uint16_t(data[1]) << 8) | (uint16_t(data[0]) << 0);

@@ -79,7 +79,7 @@ bool TCA9555Component::digital_read_hw(uint8_t pin) {
   uint8_t bank_number = pin < 8 ? 0 : 1;
   uint8_t register_to_read = bank_number ? TCA9555_INPUT_PORT_REGISTER_1 : TCA9555_INPUT_PORT_REGISTER_0;
   if (!this->read_bytes(register_to_read, &data, 1)) {
-    this->status_set_warning("Failed to read input register");
+    this->status_set_warning(LOG_STR("Failed to read input register"));
     return false;
   }
   uint8_t second_half = this->input_mask_ >> 8;

@@ -108,7 +108,7 @@ void TCA9555Component::digital_write_hw(uint8_t pin, bool value) {
   data[0] = this->output_mask_;
   data[1] = this->output_mask_ >> 8;
   if (!this->write_bytes(TCA9555_OUTPUT_PORT_REGISTER_0, data, 2)) {
-    this->status_set_warning("Failed to write output register");
+    this->status_set_warning(LOG_STR("Failed to write output register"));
     return;
   }

@@ -123,7 +123,7 @@ bool TCA9555Component::write_gpio_modes_() {
   data[0] = this->mode_mask_;
   data[1] = this->mode_mask_ >> 8;
   if (!this->write_bytes(TCA9555_CONFIGURATION_PORT_0, data, 2)) {
-    this->status_set_warning("Failed to write mode register");
+    this->status_set_warning(LOG_STR("Failed to write mode register"));
     return false;
   }
   this->status_clear_warning();
@@ -32,7 +32,7 @@ void TMP1075Sensor::update() {
   uint16_t regvalue;
   if (!read_byte_16(REG_TEMP, &regvalue)) {
     ESP_LOGW(TAG, "'%s' - unable to read temperature register", this->name_.c_str());
-    this->status_set_warning("can't read");
+    this->status_set_warning(LOG_STR("can't read"));
     return;
   }
   this->status_clear_warning();
@@ -28,12 +28,12 @@ void UDPComponent::setup() {
     int enable = 1;
     auto err = this->broadcast_socket_->setsockopt(SOL_SOCKET, SO_REUSEADDR, &enable, sizeof(int));
     if (err != 0) {
-      this->status_set_warning("Socket unable to set reuseaddr");
+      this->status_set_warning(LOG_STR("Socket unable to set reuseaddr"));
       // we can still continue
     }
     err = this->broadcast_socket_->setsockopt(SOL_SOCKET, SO_BROADCAST, &enable, sizeof(int));
     if (err != 0) {
-      this->status_set_warning("Socket unable to set broadcast");
+      this->status_set_warning(LOG_STR("Socket unable to set broadcast"));
     }
   }
   // create listening socket if we either want to subscribe to providers, or need to listen

@@ -55,7 +55,7 @@ void UDPComponent::setup() {
     int enable = 1;
     err = this->listen_socket_->setsockopt(SOL_SOCKET, SO_REUSEADDR, &enable, sizeof(enable));
     if (err != 0) {
-      this->status_set_warning("Socket unable to set reuseaddr");
+      this->status_set_warning(LOG_STR("Socket unable to set reuseaddr"));
       // we can still continue
     }
     struct sockaddr_in server {};
@@ -104,7 +104,7 @@ void UFireECComponent::write_data_(uint8_t reg, float data) {
 void UFireECComponent::dump_config() {
   ESP_LOGCONFIG(TAG, "uFire-EC");
   LOG_I2C_DEVICE(this)
-  LOG_UPDATE_INTERVAL(this)
+  LOG_UPDATE_INTERVAL(this);
   LOG_SENSOR("  ", "EC Sensor", this->ec_sensor_);
   LOG_SENSOR("  ", "Temperature Sensor", this->temperature_sensor_);
   LOG_SENSOR("  ", "Temperature Sensor external", this->temperature_sensor_external_);
@@ -141,7 +141,7 @@ void UFireISEComponent::write_data_(uint8_t reg, float data) {
 void UFireISEComponent::dump_config() {
   ESP_LOGCONFIG(TAG, "uFire-ISE");
   LOG_I2C_DEVICE(this)
-  LOG_UPDATE_INTERVAL(this)
+  LOG_UPDATE_INTERVAL(this);
   LOG_SENSOR("  ", "PH Sensor", this->ph_sensor_);
   LOG_SENSOR("  ", "Temperature Sensor", this->temperature_sensor_);
   LOG_SENSOR("  ", "Temperature Sensor external", this->temperature_sensor_external_);
@@ -266,7 +266,7 @@ void USBUartTypeCdcAcm::on_connected() {
   for (auto *channel : this->channels_) {
     if (i == cdc_devs.size()) {
       ESP_LOGE(TAG, "No configuration found for channel %d", channel->index_);
-      this->status_set_warning("No configuration found for channel");
+      this->status_set_warning(LOG_STR("No configuration found for channel"));
       break;
     }
     channel->cdc_dev_ = cdc_devs[i++];
@@ -74,12 +74,12 @@ void WakeOnLanButton::setup() {
   int enable = 1;
   auto err = this->broadcast_socket_->setsockopt(SOL_SOCKET, SO_REUSEADDR, &enable, sizeof(int));
   if (err != 0) {
-    this->status_set_warning("Socket unable to set reuseaddr");
+    this->status_set_warning(LOG_STR("Socket unable to set reuseaddr"));
     // we can still continue
   }
   err = this->broadcast_socket_->setsockopt(SOL_SOCKET, SO_BROADCAST, &enable, sizeof(int));
   if (err != 0) {
-    this->status_set_warning("Socket unable to set broadcast");
+    this->status_set_warning(LOG_STR("Socket unable to set broadcast"));
   }
 #endif
 }
@@ -181,7 +181,7 @@ void WaveshareEPaper2P13InV3::dump_config() {
   LOG_PIN("  Reset Pin: ", this->reset_pin_)
   LOG_PIN("  DC Pin: ", this->dc_pin_)
   LOG_PIN("  Busy Pin: ", this->busy_pin_)
-  LOG_UPDATE_INTERVAL(this)
+  LOG_UPDATE_INTERVAL(this);
 }

 void WaveshareEPaper2P13InV3::set_full_update_every(uint32_t full_update_every) {
@@ -148,7 +148,7 @@ void WiFiComponent::loop() {

   switch (this->state_) {
     case WIFI_COMPONENT_STATE_COOLDOWN: {
-      this->status_set_warning("waiting to reconnect");
+      this->status_set_warning(LOG_STR("waiting to reconnect"));
       if (millis() - this->action_started_ > 5000) {
         if (this->fast_connect_ || this->retry_hidden_) {
           if (!this->selected_ap_.get_bssid().has_value())

@@ -161,13 +161,13 @@ void WiFiComponent::loop() {
       break;
     }
     case WIFI_COMPONENT_STATE_STA_SCANNING: {
-      this->status_set_warning("scanning for networks");
+      this->status_set_warning(LOG_STR("scanning for networks"));
       this->check_scanning_finished();
       break;
     }
     case WIFI_COMPONENT_STATE_STA_CONNECTING:
     case WIFI_COMPONENT_STATE_STA_CONNECTING_2: {
-      this->status_set_warning("associating to network");
+      this->status_set_warning(LOG_STR("associating to network"));
       this->check_connecting_finished();
       break;
     }
@@ -16,7 +16,6 @@
 namespace esphome {

 static const char *const TAG = "component";
-static const char *const UNSPECIFIED_MESSAGE = "unspecified";

 // Global vectors for component data that doesn't belong in every instance.
 // Using vector instead of unordered_map for both because:

@@ -143,7 +142,7 @@ void Component::call_dump_config() {
     }
   }
   ESP_LOGE(TAG, "  %s is marked FAILED: %s", this->get_component_source(),
-           error_msg ? error_msg : UNSPECIFIED_MESSAGE);
+           error_msg ? error_msg : LOG_STR_LITERAL("unspecified"));
 }
 }

@@ -280,20 +279,32 @@ bool Component::is_ready() const {
 bool Component::can_proceed() { return true; }
 bool Component::status_has_warning() const { return this->component_state_ & STATUS_LED_WARNING; }
 bool Component::status_has_error() const { return this->component_state_ & STATUS_LED_ERROR; }

 void Component::status_set_warning(const char *message) {
   // Don't spam the log. This risks missing different warning messages though.
   if ((this->component_state_ & STATUS_LED_WARNING) != 0)
     return;
   this->component_state_ |= STATUS_LED_WARNING;
   App.app_state_ |= STATUS_LED_WARNING;
-  ESP_LOGW(TAG, "%s set Warning flag: %s", this->get_component_source(), message ? message : UNSPECIFIED_MESSAGE);
+  ESP_LOGW(TAG, "%s set Warning flag: %s", this->get_component_source(),
+           message ? message : LOG_STR_LITERAL("unspecified"));
+}
+
+void Component::status_set_warning(const LogString *message) {
+  // Don't spam the log. This risks missing different warning messages though.
+  if ((this->component_state_ & STATUS_LED_WARNING) != 0)
+    return;
+  this->component_state_ |= STATUS_LED_WARNING;
+  App.app_state_ |= STATUS_LED_WARNING;
+  ESP_LOGW(TAG, "%s set Warning flag: %s", this->get_component_source(),
+           message ? LOG_STR_ARG(message) : LOG_STR_LITERAL("unspecified"));
 }
+
 void Component::status_set_error(const char *message) {
   if ((this->component_state_ & STATUS_LED_ERROR) != 0)
     return;
   this->component_state_ |= STATUS_LED_ERROR;
   App.app_state_ |= STATUS_LED_ERROR;
-  ESP_LOGE(TAG, "%s set Error flag: %s", this->get_component_source(), message ? message : UNSPECIFIED_MESSAGE);
+  ESP_LOGE(TAG, "%s set Error flag: %s", this->get_component_source(),
+           message ? message : LOG_STR_LITERAL("unspecified"));
   if (message != nullptr) {
     // Lazy allocate the error messages vector if needed
     if (!component_error_messages) {

@@ -331,6 +342,18 @@ void Component::status_momentary_error(const std::string &name, uint32_t length) {
   this->set_timeout(name, length, [this]() { this->status_clear_error(); });
 }
 void Component::dump_config() {}
+
+// Function implementation of LOG_UPDATE_INTERVAL macro to reduce code size
+void log_update_interval(const char *tag, PollingComponent *component) {
+  uint32_t update_interval = component->get_update_interval();
+  if (update_interval == SCHEDULER_DONT_RUN) {
+    ESP_LOGCONFIG(tag, "  Update Interval: never");
+  } else if (update_interval < 100) {
+    ESP_LOGCONFIG(tag, "  Update Interval: %.3fs", update_interval / 1000.0f);
+  } else {
+    ESP_LOGCONFIG(tag, "  Update Interval: %.1fs", update_interval / 1000.0f);
+  }
+}
+
 float Component::get_actual_setup_priority() const {
   // Check if there's an override in the global vector
   if (setup_priority_overrides) {
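At call sites, the two status_set_warning overloads differ only in where the message text lives; a brief usage sketch (the message itself is hypothetical):

// const char * overload - the literal occupies RAM (cheap on ESP32).
this->status_set_warning("sensor not ready");

// LogString overload - on ESP8266 the literal moves to flash via LOG_STR();
// elsewhere LOG_STR() degrades to a plain cast, so behavior is identical.
this->status_set_warning(LOG_STR("sensor not ready"));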
@@ -9,6 +9,9 @@

 namespace esphome {

+// Forward declaration for LogString
+struct LogString;
+
 /** Default setup priorities for components of different types.
  *
  * Components should return one of these setup priorities in get_setup_priority.

@@ -44,14 +47,13 @@ extern const float LATE;

 static const uint32_t SCHEDULER_DONT_RUN = 4294967295UL;

-#define LOG_UPDATE_INTERVAL(this) \
-  if (this->get_update_interval() == SCHEDULER_DONT_RUN) { \
-    ESP_LOGCONFIG(TAG, "  Update Interval: never"); \
-  } else if (this->get_update_interval() < 100) { \
-    ESP_LOGCONFIG(TAG, "  Update Interval: %.3fs", this->get_update_interval() / 1000.0f); \
-  } else { \
-    ESP_LOGCONFIG(TAG, "  Update Interval: %.1fs", this->get_update_interval() / 1000.0f); \
-  }
+// Forward declaration
+class PollingComponent;
+
+// Function declaration for LOG_UPDATE_INTERVAL
+void log_update_interval(const char *tag, PollingComponent *component);
+
+#define LOG_UPDATE_INTERVAL(this) log_update_interval(TAG, this)

 extern const uint8_t COMPONENT_STATE_MASK;
 extern const uint8_t COMPONENT_STATE_CONSTRUCTION;

@@ -203,6 +205,7 @@ class Component {
   bool status_has_error() const;

   void status_set_warning(const char *message = nullptr);
+  void status_set_warning(const LogString *message);

   void status_set_error(const char *message = nullptr);
@@ -14,7 +14,20 @@ namespace esphome {

 static const char *const TAG = "scheduler";

-static const uint32_t MAX_LOGICALLY_DELETED_ITEMS = 10;
+// Memory pool configuration constants
+// Pool size of 5 matches typical usage patterns (2-4 active timers)
+// - Minimal memory overhead (~250 bytes on ESP32)
+// - Sufficient for most configs with a couple sensors/components
+// - Still prevents heap fragmentation and allocation stalls
+// - Complex setups with many timers will just allocate beyond the pool
+// See https://github.com/esphome/backlog/issues/52
+static constexpr size_t MAX_POOL_SIZE = 5;
+
+// Maximum number of logically deleted (cancelled) items before forcing cleanup.
+// Set to 5 to match the pool size - when we have as many cancelled items as our
+// pool can hold, it's time to clean up and recycle them.
+static constexpr uint32_t MAX_LOGICALLY_DELETED_ITEMS = 5;

 // Half the 32-bit range - used to detect rollovers vs normal time progression
 static constexpr uint32_t HALF_MAX_UINT32 = std::numeric_limits<uint32_t>::max() / 2;
 // max delay to start an interval sequence
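The pool these constants govern appears in later hunks: set_timer_common_ pops from it and recycle_item_ pushes back into it. Reduced to its essentials, the pattern is a bounded free list; the names in this sketch are illustrative, not the scheduler's actual members:

#include <memory>
#include <vector>

struct Item { /* callback, deadline, name, ... */ };

class Pool {
 public:
  std::unique_ptr<Item> acquire() {
    if (!free_list_.empty()) {  // reuse a recycled item - no heap allocation
      auto item = std::move(free_list_.back());
      free_list_.pop_back();
      return item;
    }
    return std::make_unique<Item>();  // pool empty - fall back to the heap
  }

  void recycle(std::unique_ptr<Item> item) {
    // Keep at most kMaxPoolSize items; extras are freed when `item` goes out of scope.
    if (free_list_.size() < kMaxPoolSize)
      free_list_.push_back(std::move(item));
  }

 private:
  static constexpr size_t kMaxPoolSize = 5;  // mirrors MAX_POOL_SIZE above
  std::vector<std::unique_ptr<Item>> free_list_;
};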
@@ -79,8 +92,28 @@ void HOT Scheduler::set_timer_common_(Component *component, SchedulerItem::Type
     return;
   }

+  // Get fresh timestamp BEFORE taking lock - millis_64_ may need to acquire lock itself
+  const uint64_t now = this->millis_64_(millis());
+
+  // Take lock early to protect scheduler_item_pool_ access
+  LockGuard guard{this->lock_};
+
   // Create and populate the scheduler item
-  auto item = make_unique<SchedulerItem>();
+  std::unique_ptr<SchedulerItem> item;
+  if (!this->scheduler_item_pool_.empty()) {
+    // Reuse from pool
+    item = std::move(this->scheduler_item_pool_.back());
+    this->scheduler_item_pool_.pop_back();
+#ifdef ESPHOME_DEBUG_SCHEDULER
+    ESP_LOGD(TAG, "Reused item from pool (pool size now: %zu)", this->scheduler_item_pool_.size());
+#endif
+  } else {
+    // Allocate new if pool is empty
+    item = make_unique<SchedulerItem>();
+#ifdef ESPHOME_DEBUG_SCHEDULER
+    ESP_LOGD(TAG, "Allocated new item (pool empty)");
+#endif
+  }
   item->component = component;
   item->set_name(name_cstr, !is_static_string);
   item->type = type;

@@ -99,7 +132,6 @@ void HOT Scheduler::set_timer_common_(Component *component, SchedulerItem::Type
   // Single-core platforms don't need thread-safe defer handling
   if (delay == 0 && type == SchedulerItem::TIMEOUT) {
     // Put in defer queue for guaranteed FIFO execution
-    LockGuard guard{this->lock_};
     if (!skip_cancel) {
       this->cancel_item_locked_(component, name_cstr, type);
     }

@@ -108,9 +140,6 @@ void HOT Scheduler::set_timer_common_(Component *component, SchedulerItem::Type
   }
 #endif /* not ESPHOME_THREAD_SINGLE */

-  // Get fresh timestamp for new timer/interval - ensures accurate scheduling
-  const auto now = this->millis_64_(millis());  // Fresh millis() call
-
   // Type-specific setup
   if (type == SchedulerItem::INTERVAL) {
     item->interval = delay;

@@ -142,8 +171,6 @@ void HOT Scheduler::set_timer_common_(Component *component, SchedulerItem::Type
   }
 #endif /* ESPHOME_DEBUG_SCHEDULER */

-  LockGuard guard{this->lock_};
-
   // For retries, check if there's a cancelled timeout first
   if (is_retry && name_cstr != nullptr && type == SchedulerItem::TIMEOUT &&
       (has_cancelled_timeout_in_container_(this->items_, component, name_cstr, /* match_retry= */ true) ||
@@ -319,6 +346,8 @@ void HOT Scheduler::call(uint32_t now) {
       if (!this->should_skip_item_(item.get())) {
         this->execute_item_(item.get(), now);
       }
+      // Recycle the defer item after execution
+      this->recycle_item_(std::move(item));
     }
 #endif /* not ESPHOME_THREAD_SINGLE */

@@ -326,6 +355,9 @@ void HOT Scheduler::call(uint32_t now) {
   const auto now_64 = this->millis_64_(now);  // 'now' from parameter - fresh from Application::loop()
   this->process_to_add();

+  // Track if any items were added to to_add_ during this call (intervals or from callbacks)
+  bool has_added_items = false;
+
 #ifdef ESPHOME_DEBUG_SCHEDULER
   static uint64_t last_print = 0;

@@ -335,11 +367,11 @@ void HOT Scheduler::call(uint32_t now) {
 #ifdef ESPHOME_THREAD_MULTI_ATOMICS
     const auto last_dbg = this->last_millis_.load(std::memory_order_relaxed);
     const auto major_dbg = this->millis_major_.load(std::memory_order_relaxed);
-    ESP_LOGD(TAG, "Items: count=%zu, now=%" PRIu64 " (%" PRIu16 ", %" PRIu32 ")", this->items_.size(), now_64,
-             major_dbg, last_dbg);
+    ESP_LOGD(TAG, "Items: count=%zu, pool=%zu, now=%" PRIu64 " (%" PRIu16 ", %" PRIu32 ")", this->items_.size(),
+             this->scheduler_item_pool_.size(), now_64, major_dbg, last_dbg);
 #else /* not ESPHOME_THREAD_MULTI_ATOMICS */
-    ESP_LOGD(TAG, "Items: count=%zu, now=%" PRIu64 " (%" PRIu16 ", %" PRIu32 ")", this->items_.size(), now_64,
-             this->millis_major_, this->last_millis_);
+    ESP_LOGD(TAG, "Items: count=%zu, pool=%zu, now=%" PRIu64 " (%" PRIu16 ", %" PRIu32 ")", this->items_.size(),
+             this->scheduler_item_pool_.size(), now_64, this->millis_major_, this->last_millis_);
 #endif /* else ESPHOME_THREAD_MULTI_ATOMICS */
     // Cleanup before debug output
     this->cleanup_();

@@ -352,9 +384,10 @@ void HOT Scheduler::call(uint32_t now) {
       }

       const char *name = item->get_name();
-      ESP_LOGD(TAG, "  %s '%s/%s' interval=%" PRIu32 " next_execution in %" PRIu64 "ms at %" PRIu64,
+      bool is_cancelled = is_item_removed_(item.get());
+      ESP_LOGD(TAG, "  %s '%s/%s' interval=%" PRIu32 " next_execution in %" PRIu64 "ms at %" PRIu64 "%s",
                item->get_type_str(), item->get_source(), name ? name : "(null)", item->interval,
-               item->next_execution_ - now_64, item->next_execution_);
+               item->next_execution_ - now_64, item->next_execution_, is_cancelled ? " [CANCELLED]" : "");

       old_items.push_back(std::move(item));
     }
@@ -369,8 +402,13 @@ void HOT Scheduler::call(uint32_t now) {
   }
 #endif /* ESPHOME_DEBUG_SCHEDULER */

-  // If we have too many items to remove
   if (this->to_remove_ > MAX_LOGICALLY_DELETED_ITEMS) {
+    // Cleanup removed items before processing
+    // First try to clean items from the top of the heap (fast path)
+    this->cleanup_();
+
+    // If we still have too many cancelled items, do a full cleanup
+    // This only happens if cancelled items are stuck in the middle/bottom of the heap
+    if (this->to_remove_ >= MAX_LOGICALLY_DELETED_ITEMS) {
       // We hold the lock for the entire cleanup operation because:
       // 1. We're rebuilding the entire items_ list, so we need exclusive access throughout
       // 2. Other threads must see either the old state or the new state, not intermediate states

@@ -380,10 +418,13 @@ void HOT Scheduler::call(uint32_t now) {

       std::vector<std::unique_ptr<SchedulerItem>> valid_items;

-      // Move all non-removed items to valid_items
+      // Move all non-removed items to valid_items, recycle removed ones
       for (auto &item : this->items_) {
-        if (!item->remove) {
+        if (!is_item_removed_(item.get())) {
           valid_items.push_back(std::move(item));
+        } else {
+          // Recycle removed items
+          this->recycle_item_(std::move(item));
         }
       }

@@ -393,9 +434,6 @@ void HOT Scheduler::call(uint32_t now) {
     std::make_heap(this->items_.begin(), this->items_.end(), SchedulerItem::cmp);
     this->to_remove_ = 0;
   }

-  // Cleanup removed items before processing
-  this->cleanup_();
   while (!this->items_.empty()) {
     // use scoping to indicate visibility of `item` variable
     {

@@ -469,16 +507,25 @@ void HOT Scheduler::call(uint32_t now) {
         // Add new item directly to to_add_
         // since we have the lock held
         this->to_add_.push_back(std::move(item));
+      } else {
+        // Timeout completed - recycle it
+        this->recycle_item_(std::move(item));
       }
+
+      has_added_items |= !this->to_add_.empty();
     }
   }
+
+  if (has_added_items) {
+    this->process_to_add();
+  }
 }
 void HOT Scheduler::process_to_add() {
   LockGuard guard{this->lock_};
   for (auto &it : this->to_add_) {
-    if (it->remove) {
+    if (is_item_removed_(it.get())) {
+      // Recycle cancelled items
+      this->recycle_item_(std::move(it));
       continue;
     }
@@ -518,6 +565,10 @@ size_t HOT Scheduler::cleanup_() {
 }
 void HOT Scheduler::pop_raw_() {
   std::pop_heap(this->items_.begin(), this->items_.end(), SchedulerItem::cmp);
+
+  // Instead of destroying, recycle the item
+  this->recycle_item_(std::move(this->items_.back()));
+
   this->items_.pop_back();
 }

@@ -552,7 +603,7 @@ bool HOT Scheduler::cancel_item_locked_(Component *component, const char *name_cstr,

   // Check all containers for matching items
 #ifndef ESPHOME_THREAD_SINGLE
-  // Only check defer queue for timeouts (intervals never go there)
+  // Mark items in defer queue as cancelled (they'll be skipped when processed)
   if (type == SchedulerItem::TIMEOUT) {
     for (auto &item : this->defer_queue_) {
       if (this->matches_item_(item, component, name_cstr, type, match_retry)) {

@@ -564,6 +615,16 @@ bool HOT Scheduler::cancel_item_locked_(Component *component, const char *name_cstr,
 #endif /* not ESPHOME_THREAD_SINGLE */

   // Cancel items in the main heap
+  // Special case: if the last item in the heap matches, we can remove it immediately
+  // (removing the last element doesn't break heap structure)
+  if (!this->items_.empty()) {
+    auto &last_item = this->items_.back();
+    if (this->matches_item_(last_item, component, name_cstr, type, match_retry)) {
+      this->recycle_item_(std::move(this->items_.back()));
+      this->items_.pop_back();
+      total_cancelled++;
+    }
+    // For other items in heap, we can only mark for removal (can't remove from middle of heap)
     for (auto &item : this->items_) {
       if (this->matches_item_(item, component, name_cstr, type, match_retry)) {
         this->mark_item_removed_(item.get());

@@ -571,6 +632,7 @@ bool HOT Scheduler::cancel_item_locked_(Component *component, const char *name_cstr,
         this->to_remove_++;  // Track removals for heap items
       }
     }
+  }

   // Cancel items in to_add_
   for (auto &item : this->to_add_) {

@@ -747,4 +809,25 @@ bool HOT Scheduler::SchedulerItem::cmp(const std::unique_ptr<SchedulerItem> &a,
   return a->next_execution_ > b->next_execution_;
 }

+void Scheduler::recycle_item_(std::unique_ptr<SchedulerItem> item) {
+  if (!item)
+    return;
+
+  if (this->scheduler_item_pool_.size() < MAX_POOL_SIZE) {
+    // Clear callback to release captured resources
+    item->callback = nullptr;
+    // Clear dynamic name if any
+    item->clear_dynamic_name();
+    this->scheduler_item_pool_.push_back(std::move(item));
+#ifdef ESPHOME_DEBUG_SCHEDULER
+    ESP_LOGD(TAG, "Recycled item to pool (pool size now: %zu)", this->scheduler_item_pool_.size());
+#endif
+  } else {
+#ifdef ESPHOME_DEBUG_SCHEDULER
+    ESP_LOGD(TAG, "Pool full (size: %zu), deleting item", this->scheduler_item_pool_.size());
+#endif
+  }
+  // else: unique_ptr will delete the item when it goes out of scope
+}
+
 }  // namespace esphome
@@ -142,11 +142,7 @@ class Scheduler {
    }

    // Destructor to clean up dynamic names
    ~SchedulerItem() {
      if (name_is_dynamic) {
        delete[] name_.dynamic_name;
      }
    }
    ~SchedulerItem() { clear_dynamic_name(); }

    // Delete copy operations to prevent accidental copies
    SchedulerItem(const SchedulerItem &) = delete;
@@ -159,13 +155,19 @@ class Scheduler {
    // Helper to get the name regardless of storage type
    const char *get_name() const { return name_is_dynamic ? name_.dynamic_name : name_.static_name; }

    // Helper to clear dynamic name if allocated
    void clear_dynamic_name() {
      if (name_is_dynamic && name_.dynamic_name) {
        delete[] name_.dynamic_name;
        name_.dynamic_name = nullptr;
        name_is_dynamic = false;
      }
    }

    // Helper to set name with proper ownership
    void set_name(const char *name, bool make_copy = false) {
      // Clean up old dynamic name if any
      if (name_is_dynamic && name_.dynamic_name) {
        delete[] name_.dynamic_name;
        name_is_dynamic = false;
      }
      clear_dynamic_name();

      if (!name) {
        // nullptr case - no name provided
@@ -214,6 +216,15 @@ class Scheduler {
  // Common implementation for cancel operations
  bool cancel_item_(Component *component, bool is_static_string, const void *name_ptr, SchedulerItem::Type type);

  // Helper to check if two scheduler item names match
  inline bool HOT names_match_(const char *name1, const char *name2) const {
    // Check pointer equality first (common for static strings), then string contents
    // The core ESPHome codebase uses static strings (const char*) for component names,
    // making pointer comparison effective. The std::string overloads exist only for
    // compatibility with external components but are rarely used in practice.
    return (name1 != nullptr && name2 != nullptr) && ((name1 == name2) || (strcmp(name1, name2) == 0));
  }
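
A rough Python analogue of the comparison above, identity check first, then contents (the names_match name is illustrative, not part of the codebase):

def names_match(name1: str | None, name2: str | None) -> bool:
    # Identity check first (mirrors the C++ pointer compare), then
    # fall back to comparing string contents.
    return name1 is not None and name2 is not None and (name1 is name2 or name1 == name2)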

  // Helper function to check if item matches criteria for cancellation
  inline bool HOT matches_item_(const std::unique_ptr<SchedulerItem> &item, Component *component, const char *name_cstr,
                                SchedulerItem::Type type, bool match_retry, bool skip_removed = true) const {
@@ -221,29 +232,20 @@ class Scheduler {
        (match_retry && !item->is_retry)) {
      return false;
    }
    const char *item_name = item->get_name();
    if (item_name == nullptr) {
      return false;
    }
    // Fast path: if pointers are equal
    // This is effective because the core ESPHome codebase uses static strings (const char*)
    // for component names. The std::string overloads exist only for compatibility with
    // external components, but are rarely used in practice.
    if (item_name == name_cstr) {
      return true;
    }
    // Slow path: compare string contents
    return strcmp(name_cstr, item_name) == 0;
    return this->names_match_(item->get_name(), name_cstr);
  }

  // Helper to execute a scheduler item
  void execute_item_(SchedulerItem *item, uint32_t now);

  // Helper to check if item should be skipped
  bool should_skip_item_(const SchedulerItem *item) const {
    return item->remove || (item->component != nullptr && item->component->is_failed());
  bool should_skip_item_(SchedulerItem *item) const {
    return is_item_removed_(item) || (item->component != nullptr && item->component->is_failed());
  }

  // Helper to recycle a SchedulerItem
  void recycle_item_(std::unique_ptr<SchedulerItem> item);

  // Helper to check if item is marked for removal (platform-specific)
  // Returns true if item should be skipped, handles platform-specific synchronization
  // For ESPHOME_THREAD_MULTI_NO_ATOMICS platforms, the caller must hold the scheduler lock before calling this
@@ -280,7 +282,8 @@ class Scheduler {
  bool has_cancelled_timeout_in_container_(const Container &container, Component *component, const char *name_cstr,
                                           bool match_retry) const {
    for (const auto &item : container) {
      if (item->remove && this->matches_item_(item, component, name_cstr, SchedulerItem::TIMEOUT, match_retry,
      if (is_item_removed_(item.get()) &&
          this->matches_item_(item, component, name_cstr, SchedulerItem::TIMEOUT, match_retry,
                              /* skip_removed= */ false)) {
        return true;
      }
@@ -297,6 +300,16 @@ class Scheduler {
#endif /* ESPHOME_THREAD_SINGLE */
  uint32_t to_remove_{0};

  // Memory pool for recycling SchedulerItem objects to reduce heap churn.
  // Design decisions:
  // - std::vector is used instead of a fixed array because many systems only need 1-2 scheduler items
  // - The vector grows dynamically up to MAX_POOL_SIZE (5) only when needed, saving memory on simple setups
  // - Pool size of 5 matches typical usage (2-4 timers) while keeping memory overhead low (~250 bytes on ESP32)
  // - The pool significantly reduces heap fragmentation which is critical because heap allocation/deallocation
  //   can stall the entire system, causing timing issues and dropped events for any components that need
  //   to synchronize between tasks (see https://github.com/esphome/backlog/issues/52)
  std::vector<std::unique_ptr<SchedulerItem>> scheduler_item_pool_;
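
To make the recycle/reuse flow above concrete, a minimal Python sketch of the same bounded-pool idea (the ItemPool name and dict-based items are illustrative, not the actual C++ types):

MAX_POOL_SIZE = 5  # mirrors the C++ constant

class ItemPool:
    def __init__(self) -> None:
        self._pool: list[dict] = []

    def acquire(self) -> dict:
        # Reuse a recycled item when available; otherwise allocate fresh.
        return self._pool.pop() if self._pool else {"callback": None, "name": None}

    def recycle(self, item: dict) -> None:
        if len(self._pool) < MAX_POOL_SIZE:
            item["callback"] = None  # release captured resources
            item["name"] = None
            self._pool.append(item)
        # else: drop the item; the pool never grows beyond MAX_POOL_SIZE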

#ifdef ESPHOME_THREAD_MULTI_ATOMICS
  /*
   * Multi-threaded platforms with atomic support: last_millis_ needs atomic for lock-free updates

@@ -308,8 +308,12 @@ def perform_ota(
        time.sleep(1)


def run_ota_impl_(remote_host, remote_port, password, filename):
def run_ota_impl_(
    remote_host: str | list[str], remote_port: int, password: str, filename: str
) -> int:
    # Handle both single host and list of hosts
    try:
        # Resolve all hosts at once for parallel DNS resolution
        res = resolve_ip_address(remote_host, remote_port)
    except EsphomeError as err:
        _LOGGER.error(
@@ -350,7 +354,9 @@ def run_ota_impl_(remote_host, remote_port, password, filename):
    return 1


def run_ota(remote_host, remote_port, password, filename):
def run_ota(
    remote_host: str | list[str], remote_port: int, password: str, filename: str
) -> int:
    try:
        return run_ota_impl_(remote_host, remote_port, password, filename)
    except OTAError as err:

@@ -1,3 +1,5 @@
from __future__ import annotations

import codecs
from contextlib import suppress
import ipaddress
@@ -11,6 +13,18 @@ from urllib.parse import urlparse

from esphome.const import __version__ as ESPHOME_VERSION

# Type aliases for socket address information
AddrInfo = tuple[
    int,  # family (AF_INET, AF_INET6, etc.)
    int,  # type (SOCK_STREAM, SOCK_DGRAM, etc.)
    int,  # proto (IPPROTO_TCP, etc.)
    str,  # canonname
    tuple[str, int] | tuple[str, int, int, int],  # sockaddr (IPv4 or IPv6)
]
IPv4SockAddr = tuple[str, int]  # (host, port)
IPv6SockAddr = tuple[str, int, int, int]  # (host, port, flowinfo, scope_id)
SockAddr = IPv4SockAddr | IPv6SockAddr
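
For concreteness, an IPv4 entry that satisfies the AddrInfo alias looks like this (values illustrative):

import socket

example_addr_info: AddrInfo = (
    socket.AF_INET,           # family
    socket.SOCK_STREAM,       # type
    socket.IPPROTO_TCP,       # proto
    "",                       # canonname
    ("192.168.1.100", 6053),  # IPv4 sockaddr
)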

_LOGGER = logging.getLogger(__name__)

IS_MACOS = platform.system() == "Darwin"
@@ -147,32 +161,7 @@ def is_ip_address(host):
    return False


def _resolve_with_zeroconf(host):
    from esphome.core import EsphomeError
    from esphome.zeroconf import EsphomeZeroconf

    try:
        zc = EsphomeZeroconf()
    except Exception as err:
        raise EsphomeError(
            "Cannot start mDNS sockets, is this a docker container without "
            "host network mode?"
        ) from err
    try:
        info = zc.resolve_host(f"{host}.")
    except Exception as err:
        raise EsphomeError(f"Error resolving mDNS hostname: {err}") from err
    finally:
        zc.close()
    if info is None:
        raise EsphomeError(
            "Error resolving address with mDNS: Did not respond. "
            "Maybe the device is offline."
        )
    return info


def addr_preference_(res):
def addr_preference_(res: AddrInfo) -> int:
    # Trivial alternative to RFC6724 sorting. Put sane IPv6 first, then
    # Legacy IP, then IPv6 link-local addresses without an actual link.
    sa = res[4]
@@ -184,66 +173,70 @@ def addr_preference_(res):
    return 1


def resolve_ip_address(host, port):
def resolve_ip_address(host: str | list[str], port: int) -> list[AddrInfo]:
    import socket

    from esphome.core import EsphomeError

    # There are five cases here. The host argument could be one of:
    #  • a *list* of IP addresses discovered by MQTT,
    #  • a single IP address specified by the user,
    #  • a .local hostname to be resolved by mDNS,
    #  • a normal hostname to be resolved in DNS, or
    #  • A URL from which we should extract the hostname.
    #
    # In each of the first three cases, we end up with IP addresses in
    # string form which need to be converted to a 5-tuple to be used
    # for the socket connection attempt. The easiest way to construct
    # those is to pass the IP address string to getaddrinfo(). Which,
    # coincidentally, is how we do hostname lookups in the other cases
    # too. So first build a list which contains either IP addresses or
    # a single hostname, then call getaddrinfo() on each element of
    # that list.

    errs = []
    hosts: list[str]
    if isinstance(host, list):
        addr_list = host
    elif is_ip_address(host):
        addr_list = [host]
        hosts = host
    else:
        if not is_ip_address(host):
            url = urlparse(host)
            if url.scheme != "":
                host = url.hostname
        hosts = [host]

        addr_list = []
        if host.endswith(".local"):
    res: list[AddrInfo] = []
    if all(is_ip_address(h) for h in hosts):
        # Fast path: all are IP addresses, use socket.getaddrinfo with AI_NUMERICHOST
        for addr in hosts:
            try:
                _LOGGER.info("Resolving IP address of %s in mDNS", host)
                addr_list = _resolve_with_zeroconf(host)
            except EsphomeError as err:
                errs.append(str(err))
                res += socket.getaddrinfo(
                    addr, port, proto=socket.IPPROTO_TCP, flags=socket.AI_NUMERICHOST
                )
            except OSError:
                _LOGGER.debug("Failed to parse IP address '%s'", addr)
        # Sort by preference
        res.sort(key=addr_preference_)
        return res

        # If not mDNS, or if mDNS failed, use normal DNS
        if not addr_list:
            addr_list = [host]
    from esphome.resolver import AsyncResolver

    # Now we have a list containing either IP addresses or a hostname
    res = []
    for addr in addr_list:
        if not is_ip_address(addr):
            _LOGGER.info("Resolving IP address of %s", host)
        try:
            r = socket.getaddrinfo(addr, port, proto=socket.IPPROTO_TCP)
        except OSError as err:
            errs.append(str(err))
            raise EsphomeError(
                f"Error resolving IP address: {', '.join(errs)}"
            ) from err
    resolver = AsyncResolver(hosts, port)
    addr_infos = resolver.resolve()
    # Convert aioesphomeapi AddrInfo to our format
    for addr_info in addr_infos:
        sockaddr = addr_info.sockaddr
        if addr_info.family == socket.AF_INET6:
            # IPv6
            sockaddr_tuple = (
                sockaddr.address,
                sockaddr.port,
                sockaddr.flowinfo,
                sockaddr.scope_id,
            )
        else:
            # IPv4
            sockaddr_tuple = (sockaddr.address, sockaddr.port)

        res = res + r
        res.append(
            (
                addr_info.family,
                addr_info.type,
                addr_info.proto,
                "",  # canonname
                sockaddr_tuple,
            )
        )

    # Zeroconf tends to give us link-local IPv6 addresses without specifying
    # the link. Put those last in the list to be attempted.
    # Sort by preference
    res.sort(key=addr_preference_)
    return res

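A caller-side sketch of the behavior above (host values illustrative): a pure-IP list takes the AI_NUMERICHOST fast path, while anything containing a hostname goes through the threaded AsyncResolver; either way the results come back sorted by addr_preference_.

from esphome.helpers import resolve_ip_address

for family, type_, proto, _canon, sockaddr in resolve_ip_address(
    ["192.168.1.100", "device.local"], 6053
):
    print(family, sockaddr)  # entries arrive in preference order
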
@@ -262,15 +255,7 @@ def sort_ip_addresses(address_list: list[str]) -> list[str]:

    # First "resolve" all the IP addresses to getaddrinfo() tuples of the form
    # (family, type, proto, canonname, sockaddr)
    res: list[
        tuple[
            int,
            int,
            int,
            str | None,
            tuple[str, int] | tuple[str, int, int, int],
        ]
    ] = []
    res: list[AddrInfo] = []
    for addr in address_list:
        # This should always work as these are supposed to be IP addresses
        try:
esphome/resolver.py (new file, 67 lines)
@@ -0,0 +1,67 @@
"""DNS resolver for ESPHome using aioesphomeapi."""

from __future__ import annotations

import asyncio
import threading

from aioesphomeapi.core import ResolveAPIError, ResolveTimeoutAPIError
import aioesphomeapi.host_resolver as hr

from esphome.core import EsphomeError

RESOLVE_TIMEOUT = 10.0  # seconds


class AsyncResolver(threading.Thread):
    """Resolver using aioesphomeapi that runs in a thread for faster results.

    This resolver uses aioesphomeapi's async_resolve_host to handle DNS resolution,
    including proper .local domain fallback. Running in a thread allows us to get
    the result immediately without waiting for asyncio.run() to complete its
    cleanup cycle, which can take significant time.
    """

    def __init__(self, hosts: list[str], port: int) -> None:
        """Initialize the resolver."""
        super().__init__(daemon=True)
        self.hosts = hosts
        self.port = port
        self.result: list[hr.AddrInfo] | None = None
        self.exception: Exception | None = None
        self.event = threading.Event()

    async def _resolve(self) -> None:
        """Resolve hostnames to IP addresses."""
        try:
            self.result = await hr.async_resolve_host(
                self.hosts, self.port, timeout=RESOLVE_TIMEOUT
            )
        except Exception as e:  # pylint: disable=broad-except
            # We need to catch all exceptions to ensure the event is set
            # Otherwise the thread could hang forever
            self.exception = e
        finally:
            self.event.set()

    def run(self) -> None:
        """Run the DNS resolution."""
        asyncio.run(self._resolve())

    def resolve(self) -> list[hr.AddrInfo]:
        """Start the thread and wait for the result."""
        self.start()

        if not self.event.wait(
            timeout=RESOLVE_TIMEOUT + 1.0
        ):  # Give it 1 second more than the resolver timeout
            raise EsphomeError("Timeout resolving IP address")

        if exc := self.exception:
            if isinstance(exc, ResolveTimeoutAPIError):
                raise EsphomeError(f"Timeout resolving IP address: {exc}") from exc
            if isinstance(exc, ResolveAPIError):
                raise EsphomeError(f"Error resolving IP address: {exc}") from exc
            raise exc

        return self.result
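
A minimal usage sketch for the class above (hostname illustrative); resolve() blocks for at most RESOLVE_TIMEOUT + 1 seconds, and an instance is single-use since it is a Thread:

from esphome.resolver import AsyncResolver

resolver = AsyncResolver(["device.local"], 6053)
for info in resolver.resolve():  # aioesphomeapi AddrInfo objects
    print(info.family, info.sockaddr.address, info.sockaddr.port)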

@@ -12,8 +12,8 @@ platformio==6.1.18 # When updating platformio, also update /docker/Dockerfile
esptool==5.0.2
click==8.1.7
esphome-dashboard==20250904.0
aioesphomeapi==40.0.1
zeroconf==0.147.0
aioesphomeapi==40.0.2
zeroconf==0.147.2
puremagic==1.30
ruamel.yaml==0.18.15 # dashboard_import
esphome-glyphsets==0.2.0

@@ -6,7 +6,7 @@ pre-commit

# Unit tests
pytest==8.4.2
pytest-cov==6.2.1
pytest-cov==6.3.0
pytest-mock==3.15.0
pytest-asyncio==1.1.0
pytest-xdist==3.8.0

@@ -27,11 +27,13 @@ void GPIOExpanderTestComponent::setup() {

bool GPIOExpanderTestComponent::digital_read_hw(uint8_t pin) {
  ESP_LOGD(TAG, "digital_read_hw pin=%d", pin);
  // Return true to indicate successful read operation
  return true;
}

bool GPIOExpanderTestComponent::digital_read_cache(uint8_t pin) {
  ESP_LOGD(TAG, "digital_read_cache pin=%d", pin);
  // Return the pin state (always HIGH for testing)
  return true;
}

@@ -0,0 +1,24 @@
import esphome.codegen as cg
import esphome.config_validation as cv
from esphome.const import CONF_ID

AUTO_LOAD = ["gpio_expander"]

gpio_expander_test_component_uint16_ns = cg.esphome_ns.namespace(
    "gpio_expander_test_component_uint16"
)

GPIOExpanderTestUint16Component = gpio_expander_test_component_uint16_ns.class_(
    "GPIOExpanderTestUint16Component", cg.Component
)

CONFIG_SCHEMA = cv.Schema(
    {
        cv.GenerateID(): cv.declare_id(GPIOExpanderTestUint16Component),
    }
).extend(cv.COMPONENT_SCHEMA)


async def to_code(config):
    var = cg.new_Pvariable(config[CONF_ID])
    await cg.register_component(var, config)
@@ -0,0 +1,43 @@
#include "gpio_expander_test_component_uint16.h"
#include "esphome/core/log.h"

namespace esphome::gpio_expander_test_component_uint16 {

static const char *const TAG = "gpio_expander_test_uint16";

void GPIOExpanderTestUint16Component::setup() {
  ESP_LOGD(TAG, "Testing uint16_t bank (single 16-pin bank)");

  // Test reading all 16 pins - first should trigger hw read, rest use cache
  for (uint8_t pin = 0; pin < 16; pin++) {
    this->digital_read(pin);
  }

  // Reset cache and test specific reads
  ESP_LOGD(TAG, "Resetting cache for uint16_t test");
  this->reset_pin_cache_();

  // First read triggers hw for entire bank
  this->digital_read(5);
  // These should all use cache since they're in the same bank
  this->digital_read(10);
  this->digital_read(15);
  this->digital_read(0);

  ESP_LOGD(TAG, "DONE_UINT16");
}

bool GPIOExpanderTestUint16Component::digital_read_hw(uint8_t pin) {
  ESP_LOGD(TAG, "uint16_digital_read_hw pin=%d", pin);
  // In a real component, this would read from I2C/SPI into internal state.
  // For testing, we just return true to indicate a successful read.
  return true;
}

bool GPIOExpanderTestUint16Component::digital_read_cache(uint8_t pin) {
  ESP_LOGD(TAG, "uint16_digital_read_cache pin=%d", pin);
  // Return the actual pin state from our test pattern
  return (this->test_state_ >> pin) & 1;
}

}  // namespace esphome::gpio_expander_test_component_uint16
@@ -0,0 +1,23 @@
#pragma once

#include "esphome/components/gpio_expander/cached_gpio.h"
#include "esphome/core/component.h"

namespace esphome::gpio_expander_test_component_uint16 {

// Test component using uint16_t bank type (single 16-pin bank)
class GPIOExpanderTestUint16Component : public Component,
                                        public esphome::gpio_expander::CachedGpioExpander<uint16_t, 16> {
 public:
  void setup() override;

 protected:
  bool digital_read_hw(uint8_t pin) override;
  bool digital_read_cache(uint8_t pin) override;
  void digital_write_hw(uint8_t pin, bool value) override {}

 private:
  uint16_t test_state_{0xAAAA};  // Test pattern: alternating bits
};

}  // namespace esphome::gpio_expander_test_component_uint16
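
The test component above exercises bank-level caching: the first digital_read() of any pin fetches the whole 16-pin bank from hardware, and later reads within the bank are served from the cache until it is reset. A rough Python sketch of that idea (class and method names illustrative, not the actual CachedGpioExpander API):

class CachedBankReader:
    def __init__(self) -> None:
        self._cache_valid = False
        self._bank_state = 0

    def _read_bank_hw(self) -> int:
        # Stand-in for one I2C/SPI transfer returning all 16 pins at once.
        return 0xAAAA  # alternating-bit test pattern

    def digital_read(self, pin: int) -> bool:
        if not self._cache_valid:
            self._bank_state = self._read_bank_hw()
            self._cache_valid = True
        return bool((self._bank_state >> pin) & 1)

    def reset_pin_cache(self) -> None:
        self._cache_valid = False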

@@ -12,6 +12,10 @@ external_components:
  - source:
      type: local
      path: EXTERNAL_COMPONENT_PATH
    components: [gpio_expander_test_component]
    components: [gpio_expander_test_component, gpio_expander_test_component_uint16]

# Test with uint8_t (multiple banks)
gpio_expander_test_component:

# Test with uint16_t (single bank)
gpio_expander_test_component_uint16:

tests/integration/fixtures/scheduler_pool.yaml (new file, 282 lines)
@@ -0,0 +1,282 @@
esphome:
  name: scheduler-pool-test
  on_boot:
    priority: -100
    then:
      - logger.log: "Starting scheduler pool tests"
  debug_scheduler: true  # Enable scheduler debug logging

host:
api:
  services:
    - service: run_phase_1
      then:
        - script.execute: test_pool_recycling
    - service: run_phase_2
      then:
        - script.execute: test_sensor_polling
    - service: run_phase_3
      then:
        - script.execute: test_communication_patterns
    - service: run_phase_4
      then:
        - script.execute: test_defer_patterns
    - service: run_phase_5
      then:
        - script.execute: test_pool_reuse_verification
    - service: run_phase_6
      then:
        - script.execute: test_full_pool_reuse
    - service: run_phase_7
      then:
        - script.execute: test_same_defer_optimization
    - service: run_complete
      then:
        - script.execute: complete_test
logger:
  level: VERY_VERBOSE  # Need VERY_VERBOSE to see pool debug messages

globals:
  - id: create_count
    type: int
    initial_value: '0'
  - id: cancel_count
    type: int
    initial_value: '0'
  - id: interval_counter
    type: int
    initial_value: '0'
  - id: pool_test_done
    type: bool
    initial_value: 'false'

script:
  - id: test_pool_recycling
    then:
      - logger.log: "Testing scheduler pool recycling with realistic usage patterns"
      - lambda: |-
          auto *component = id(test_sensor);

          // Simulate realistic component behavior with timeouts that complete naturally
          ESP_LOGI("test", "Phase 1: Simulating normal component lifecycle");

          // Sensor update timeouts (common pattern)
          App.scheduler.set_timeout(component, "sensor_init", 10, []() {
            ESP_LOGD("test", "Sensor initialized");
            id(create_count)++;
          });

          // Retry timeout (gets cancelled if successful)
          App.scheduler.set_timeout(component, "retry_timeout", 50, []() {
            ESP_LOGD("test", "Retry timeout executed");
            id(create_count)++;
          });

          // Simulate successful operation - cancel retry
          App.scheduler.set_timeout(component, "success_sim", 20, []() {
            ESP_LOGD("test", "Operation succeeded, cancelling retry");
            App.scheduler.cancel_timeout(id(test_sensor), "retry_timeout");
            id(cancel_count)++;
          });

          id(create_count) += 3;
          ESP_LOGI("test", "Phase 1 complete");

  - id: test_sensor_polling
    then:
      - lambda: |-
          // Simulate sensor polling pattern
          ESP_LOGI("test", "Phase 2: Simulating sensor polling patterns");
          auto *component = id(test_sensor);

          // Multiple sensors with different update intervals
          // These should only allocate once and reuse the same item for each interval execution
          App.scheduler.set_interval(component, "temp_sensor", 10, []() {
            ESP_LOGD("test", "Temperature sensor update");
            id(interval_counter)++;
            if (id(interval_counter) >= 3) {
              App.scheduler.cancel_interval(id(test_sensor), "temp_sensor");
              ESP_LOGD("test", "Temperature sensor stopped");
            }
          });

          App.scheduler.set_interval(component, "humidity_sensor", 15, []() {
            ESP_LOGD("test", "Humidity sensor update");
            id(interval_counter)++;
            if (id(interval_counter) >= 5) {
              App.scheduler.cancel_interval(id(test_sensor), "humidity_sensor");
              ESP_LOGD("test", "Humidity sensor stopped");
            }
          });

          // Only 2 allocations for the intervals, no matter how many times they execute
          id(create_count) += 2;
          ESP_LOGD("test", "Created 2 intervals - they will reuse same items for each execution");
          ESP_LOGI("test", "Phase 2 complete");

  - id: test_communication_patterns
    then:
      - lambda: |-
          // Simulate communication patterns (WiFi/API reconnects, etc)
          ESP_LOGI("test", "Phase 3: Simulating communication patterns");
          auto *component = id(test_sensor);

          // Connection timeout pattern
          App.scheduler.set_timeout(component, "connect_timeout", 200, []() {
            ESP_LOGD("test", "Connection timeout - would retry");
            id(create_count)++;

            // Schedule retry
            App.scheduler.set_timeout(id(test_sensor), "connect_retry", 100, []() {
              ESP_LOGD("test", "Retrying connection");
              id(create_count)++;
            });
          });

          // Heartbeat pattern
          App.scheduler.set_interval(component, "heartbeat", 50, []() {
            ESP_LOGD("test", "Heartbeat");
            id(interval_counter)++;
            if (id(interval_counter) >= 10) {
              App.scheduler.cancel_interval(id(test_sensor), "heartbeat");
              ESP_LOGD("test", "Heartbeat stopped");
            }
          });

          id(create_count) += 2;
          ESP_LOGI("test", "Phase 3 complete");

  - id: test_defer_patterns
    then:
      - lambda: |-
          // Simulate defer patterns (state changes, async operations)
          ESP_LOGI("test", "Phase 4: Simulating heavy defer patterns like ratgdo");

          auto *component = id(test_sensor);

          // Simulate a burst of defer operations like ratgdo does with state updates
          // These should execute immediately and recycle quickly to the pool
          for (int i = 0; i < 10; i++) {
            std::string defer_name = "defer_" + std::to_string(i);
            App.scheduler.set_timeout(component, defer_name, 0, [i]() {
              ESP_LOGD("test", "Defer %d executed", i);
              // Force a small delay between defer executions to see recycling
              if (i == 5) {
                ESP_LOGI("test", "Half of defers executed, checking pool status");
              }
            });
          }

          id(create_count) += 10;
          ESP_LOGD("test", "Created 10 defer operations (0ms timeouts)");

          // Also create some named defers that might get replaced
          App.scheduler.set_timeout(component, "state_update", 0, []() {
            ESP_LOGD("test", "State update 1");
          });

          // Replace the same named defer (should cancel previous)
          App.scheduler.set_timeout(component, "state_update", 0, []() {
            ESP_LOGD("test", "State update 2 (replaced)");
          });

          id(create_count) += 2;
          id(cancel_count) += 1;  // One cancelled due to replacement

          ESP_LOGI("test", "Phase 4 complete");

  - id: test_pool_reuse_verification
    then:
      - lambda: |-
          ESP_LOGI("test", "Phase 5: Verifying pool reuse after everything settles");

          // Cancel any remaining intervals
          auto *component = id(test_sensor);
          App.scheduler.cancel_interval(component, "temp_sensor");
          App.scheduler.cancel_interval(component, "humidity_sensor");
          App.scheduler.cancel_interval(component, "heartbeat");

          ESP_LOGD("test", "Cancelled any remaining intervals");

          // The pool should have items from completed timeouts in earlier phases.
          // Phase 1 had 3 timeouts that completed and were recycled.
          // Phase 3 had 1 timeout that completed and was recycled.
          // Phase 4 had 3 defers that completed and were recycled.
          // So we should have a decent pool size already from naturally completed items.

          // Now create 8 new timeouts - they should reuse from pool when available
          int reuse_test_count = 8;

          for (int i = 0; i < reuse_test_count; i++) {
            std::string name = "reuse_test_" + std::to_string(i);
            App.scheduler.set_timeout(component, name, 10 + i * 5, [i]() {
              ESP_LOGD("test", "Reuse test %d completed", i);
            });
          }

          ESP_LOGI("test", "Created %d items for reuse verification", reuse_test_count);
          id(create_count) += reuse_test_count;
          ESP_LOGI("test", "Phase 5 complete");

  - id: test_full_pool_reuse
    then:
      - lambda: |-
          ESP_LOGI("test", "Phase 6: Testing pool size limits after Phase 5 items complete");

          // At this point, all Phase 5 timeouts should have completed and been recycled.
          // The pool should be at its maximum size (5).
          // Creating 10 new items tests that:
          // - First 5 items reuse from the pool
          // - Remaining 5 items allocate new (pool empty)
          // - Pool doesn't grow beyond MAX_POOL_SIZE of 5

          auto *component = id(test_sensor);
          int full_reuse_count = 10;

          for (int i = 0; i < full_reuse_count; i++) {
            std::string name = "full_reuse_" + std::to_string(i);
            App.scheduler.set_timeout(component, name, 10 + i * 5, [i]() {
              ESP_LOGD("test", "Full reuse test %d completed", i);
            });
          }

          ESP_LOGI("test", "Created %d items for full pool reuse verification", full_reuse_count);
          id(create_count) += full_reuse_count;
          ESP_LOGI("test", "Phase 6 complete");

  - id: test_same_defer_optimization
    then:
      - lambda: |-
          ESP_LOGI("test", "Phase 7: Testing same-named defer optimization");

          auto *component = id(test_sensor);

          // Create 10 defers with the same name - should optimize to update callback in-place
          // This pattern is common in components like ratgdo that repeatedly defer state updates
          for (int i = 0; i < 10; i++) {
            App.scheduler.set_timeout(component, "repeated_defer", 0, [i]() {
              ESP_LOGD("test", "Repeated defer executed with value: %d", i);
            });
          }

          // Only the first should allocate, the rest should update in-place
          // We expect only 1 allocation for all 10 operations
          id(create_count) += 1;  // Only count 1 since others should be optimized

          ESP_LOGD("test", "Created 10 same-named defers (should only allocate once)");
          ESP_LOGI("test", "Phase 7 complete");

  - id: complete_test
    then:
      - lambda: |-
          ESP_LOGI("test", "Pool recycling test complete - created %d items, cancelled %d, intervals %d",
                   id(create_count), id(cancel_count), id(interval_counter));

sensor:
  - platform: template
    name: Test Sensor
    id: test_sensor
    lambda: return 1.0;
    update_interval: never

# No interval - tests will be triggered from Python via API services
@@ -30,9 +30,15 @@ async def test_gpio_expander_cache(

    logs_done = asyncio.Event()

    # Patterns to match in logs
    digital_read_hw_pattern = re.compile(r"digital_read_hw pin=(\d+)")
    digital_read_cache_pattern = re.compile(r"digital_read_cache pin=(\d+)")
    # Patterns to match in logs - match any variation of digital_read
    read_hw_pattern = re.compile(r"(?:uint16_)?digital_read_hw pin=(\d+)")
    read_cache_pattern = re.compile(r"(?:uint16_)?digital_read_cache pin=(\d+)")

    # Keep specific patterns for building the expected order
    digital_read_hw_pattern = re.compile(r"^digital_read_hw pin=(\d+)")
    digital_read_cache_pattern = re.compile(r"^digital_read_cache pin=(\d+)")
    uint16_read_hw_pattern = re.compile(r"^uint16_digital_read_hw pin=(\d+)")
    uint16_read_cache_pattern = re.compile(r"^uint16_digital_read_cache pin=(\d+)")

    # ensure logs are in the expected order
    log_order = [
@@ -59,6 +65,17 @@ async def test_gpio_expander_cache(
        (digital_read_cache_pattern, 14),
        (digital_read_hw_pattern, 14),
        (digital_read_cache_pattern, 14),
        # uint16_t component tests (single bank of 16 pins)
        (uint16_read_hw_pattern, 0),  # First pin triggers hw read
        [
            (uint16_read_cache_pattern, i) for i in range(0, 16)
        ],  # All 16 pins return via cache
        # After cache reset
        (uint16_read_hw_pattern, 5),  # First read after reset triggers hw
        (uint16_read_cache_pattern, 5),
        (uint16_read_cache_pattern, 10),  # These use cache (same bank)
        (uint16_read_cache_pattern, 15),
        (uint16_read_cache_pattern, 0),
    ]
    # Flatten the log order for easier processing
    log_order: list[tuple[re.Pattern, int]] = [
@@ -77,17 +94,22 @@ async def test_gpio_expander_cache(

        clean_line = re.sub(r"\x1b\[[0-9;]*m", "", line)

        if "digital_read" in clean_line:
            # Extract just the log message part (after the log level)
            msg = clean_line.split(": ", 1)[-1] if ": " in clean_line else clean_line

            # Check if this line contains a read operation we're tracking
            if read_hw_pattern.search(msg) or read_cache_pattern.search(msg):
            if index >= len(log_order):
                print(f"Received unexpected log line: {clean_line}")
                print(f"Received unexpected log line: {msg}")
                logs_done.set()
                return

            pattern, expected_pin = log_order[index]
            match = pattern.search(clean_line)
            match = pattern.search(msg)

            if not match:
                print(f"Log line did not match next expected pattern: {clean_line}")
                print(f"Log line did not match next expected pattern: {msg}")
                print(f"Expected pattern: {pattern.pattern}")
                logs_done.set()
                return

@@ -99,8 +121,9 @@ async def test_gpio_expander_cache(

            index += 1

        elif "DONE" in clean_line:
            # Check if we reached the end of the expected log entries
        elif "DONE_UINT16" in clean_line:
            # uint16 component is done, check if we've seen all expected logs
            if index == len(log_order):
                logs_done.set()

    # Run with log monitoring

tests/integration/test_scheduler_pool.py (new file, 209 lines)
@@ -0,0 +1,209 @@
"""Integration test for scheduler memory pool functionality."""

from __future__ import annotations

import asyncio
import re

import pytest

from .types import APIClientConnectedFactory, RunCompiledFunction


@pytest.mark.asyncio
async def test_scheduler_pool(
    yaml_config: str,
    run_compiled: RunCompiledFunction,
    api_client_connected: APIClientConnectedFactory,
) -> None:
    """Test that the scheduler memory pool is working correctly with realistic usage.

    This test simulates real-world scheduler usage patterns and verifies that:
    1. Items are recycled to the pool when timeouts complete naturally
    2. Items are recycled when intervals/timeouts are cancelled
    3. Items are reused from the pool for new scheduler operations
    4. The pool grows gradually based on actual usage patterns
    5. Pool operations are logged correctly with debug scheduler enabled
    """
    # Track log messages to verify pool behavior
    log_lines: list[str] = []
    pool_reuse_count = 0
    pool_recycle_count = 0
    pool_full_count = 0
    new_alloc_count = 0

    # Patterns to match pool operations
    reuse_pattern = re.compile(r"Reused item from pool \(pool size now: (\d+)\)")
    recycle_pattern = re.compile(r"Recycled item to pool \(pool size now: (\d+)\)")
    pool_full_pattern = re.compile(r"Pool full \(size: (\d+)\), deleting item")
    new_alloc_pattern = re.compile(r"Allocated new item \(pool empty\)")

    # Futures to track when test phases complete
    loop = asyncio.get_running_loop()
    test_complete_future: asyncio.Future[bool] = loop.create_future()
    phase_futures = {
        1: loop.create_future(),
        2: loop.create_future(),
        3: loop.create_future(),
        4: loop.create_future(),
        5: loop.create_future(),
        6: loop.create_future(),
        7: loop.create_future(),
    }

    def check_output(line: str) -> None:
        """Check log output for pool operations and phase completion."""
        nonlocal pool_reuse_count, pool_recycle_count, pool_full_count, new_alloc_count
        log_lines.append(line)

        # Track pool operations
        if reuse_pattern.search(line):
            pool_reuse_count += 1
        elif recycle_pattern.search(line):
            pool_recycle_count += 1
        elif pool_full_pattern.search(line):
            pool_full_count += 1
        elif new_alloc_pattern.search(line):
            new_alloc_count += 1

        # Track phase completion
        for phase_num in range(1, 8):
            if (
                f"Phase {phase_num} complete" in line
                and phase_num in phase_futures
                and not phase_futures[phase_num].done()
            ):
                phase_futures[phase_num].set_result(True)

        # Check for test completion
        if "Pool recycling test complete" in line and not test_complete_future.done():
            test_complete_future.set_result(True)

    # Run the test with log monitoring
    async with (
        run_compiled(yaml_config, line_callback=check_output),
        api_client_connected() as client,
    ):
        # Verify device is running
        device_info = await client.device_info()
        assert device_info is not None
        assert device_info.name == "scheduler-pool-test"

        # Get list of services
        entities, services = await client.list_entities_services()
        service_names = {s.name for s in services}

        # Verify all test services are available
        expected_services = {
            "run_phase_1",
            "run_phase_2",
            "run_phase_3",
            "run_phase_4",
            "run_phase_5",
            "run_phase_6",
            "run_phase_7",
            "run_complete",
        }
        assert expected_services.issubset(service_names), (
            f"Missing services: {expected_services - service_names}"
        )

        # Get service objects
        phase_services = {
            num: next(s for s in services if s.name == f"run_phase_{num}")
            for num in range(1, 8)
        }
        complete_service = next(s for s in services if s.name == "run_complete")

        try:
            # Phase 1: Component lifecycle
            client.execute_service(phase_services[1], {})
            await asyncio.wait_for(phase_futures[1], timeout=1.0)
            await asyncio.sleep(0.05)  # Let timeouts complete

            # Phase 2: Sensor polling
            client.execute_service(phase_services[2], {})
            await asyncio.wait_for(phase_futures[2], timeout=1.0)
            await asyncio.sleep(0.1)  # Let intervals run a bit

            # Phase 3: Communication patterns
            client.execute_service(phase_services[3], {})
            await asyncio.wait_for(phase_futures[3], timeout=1.0)
            await asyncio.sleep(0.1)  # Let heartbeat run

            # Phase 4: Defer patterns
            client.execute_service(phase_services[4], {})
            await asyncio.wait_for(phase_futures[4], timeout=1.0)
            await asyncio.sleep(0.2)  # Let everything settle and recycle

            # Phase 5: Pool reuse verification
            client.execute_service(phase_services[5], {})
            await asyncio.wait_for(phase_futures[5], timeout=1.0)
            await asyncio.sleep(0.1)  # Let Phase 5 timeouts complete and recycle

            # Phase 6: Full pool reuse verification
            client.execute_service(phase_services[6], {})
            await asyncio.wait_for(phase_futures[6], timeout=1.0)
            await asyncio.sleep(0.1)  # Let Phase 6 timeouts complete

            # Phase 7: Same-named defer optimization
            client.execute_service(phase_services[7], {})
            await asyncio.wait_for(phase_futures[7], timeout=1.0)
            await asyncio.sleep(0.05)  # Let the single defer execute

            # Complete test
            client.execute_service(complete_service, {})
            await asyncio.wait_for(test_complete_future, timeout=0.5)

        except TimeoutError as e:
            # Print debug info if test times out
            recent_logs = "\n".join(log_lines[-30:])
            phases_completed = [num for num, fut in phase_futures.items() if fut.done()]
            pytest.fail(
                f"Test timed out waiting for phase/completion. Error: {e}\n"
                f"  Phases completed: {phases_completed}\n"
                f"  Pool stats:\n"
                f"    Reuse count: {pool_reuse_count}\n"
                f"    Recycle count: {pool_recycle_count}\n"
                f"    Pool full count: {pool_full_count}\n"
                f"    New alloc count: {new_alloc_count}\n"
                f"Recent logs:\n{recent_logs}"
            )

    # Verify all test phases ran
    for phase_num in range(1, 8):
        assert phase_futures[phase_num].done(), f"Phase {phase_num} did not complete"

    # Verify pool behavior
    assert pool_recycle_count > 0, "Should have recycled items to pool"

    # Check pool metrics
    if pool_recycle_count > 0:
        max_pool_size = 0
        for line in log_lines:
            if match := recycle_pattern.search(line):
                size = int(match.group(1))
                max_pool_size = max(max_pool_size, size)

        # Pool can grow up to its maximum of 5
        assert max_pool_size <= 5, f"Pool grew beyond maximum ({max_pool_size})"

    # Log summary for debugging
    print("\nScheduler Pool Test Summary (Python Orchestrated):")
    print(f"  Items recycled to pool: {pool_recycle_count}")
    print(f"  Items reused from pool: {pool_reuse_count}")
    print(f"  Pool full events: {pool_full_count}")
    print(f"  New allocations: {new_alloc_count}")
    print("  All phases completed successfully")

    # Verify reuse happened
    if pool_reuse_count == 0 and pool_recycle_count > 3:
        pytest.fail("Pool had items recycled but none were reused")

    # Success - pool is working
    assert pool_recycle_count > 0 or new_alloc_count < 15, (
        "Pool should either recycle items or limit new allocations"
    )
@@ -1,8 +1,14 @@
import logging
import socket
from unittest.mock import patch

from aioesphomeapi.host_resolver import AddrInfo, IPv4Sockaddr, IPv6Sockaddr
from hypothesis import given
from hypothesis.strategies import ip_addresses
import pytest

from esphome import helpers
from esphome.core import EsphomeError


@pytest.mark.parametrize(
@@ -277,3 +283,314 @@ def test_sort_ip_addresses(text: list[str], expected: list[str]) -> None:
    actual = helpers.sort_ip_addresses(text)

    assert actual == expected


# DNS resolution tests
def test_is_ip_address_ipv4() -> None:
    """Test is_ip_address with IPv4 addresses."""
    assert helpers.is_ip_address("192.168.1.1") is True
    assert helpers.is_ip_address("127.0.0.1") is True
    assert helpers.is_ip_address("255.255.255.255") is True
    assert helpers.is_ip_address("0.0.0.0") is True


def test_is_ip_address_ipv6() -> None:
    """Test is_ip_address with IPv6 addresses."""
    assert helpers.is_ip_address("::1") is True
    assert helpers.is_ip_address("2001:db8::1") is True
    assert helpers.is_ip_address("fe80::1") is True
    assert helpers.is_ip_address("::") is True


def test_is_ip_address_invalid() -> None:
    """Test is_ip_address with non-IP strings."""
    assert helpers.is_ip_address("hostname") is False
    assert helpers.is_ip_address("hostname.local") is False
    assert helpers.is_ip_address("256.256.256.256") is False
    assert helpers.is_ip_address("192.168.1") is False
    assert helpers.is_ip_address("") is False


def test_resolve_ip_address_single_ipv4() -> None:
    """Test resolving a single IPv4 address (fast path)."""
    result = helpers.resolve_ip_address("192.168.1.100", 6053)

    assert len(result) == 1
    assert result[0][0] == socket.AF_INET  # family
    assert result[0][1] in (
        0,
        socket.SOCK_STREAM,
    )  # type (0 on Windows with AI_NUMERICHOST)
    assert result[0][2] in (
        0,
        socket.IPPROTO_TCP,
    )  # proto (0 on Windows with AI_NUMERICHOST)
    assert result[0][3] == ""  # canonname
    assert result[0][4] == ("192.168.1.100", 6053)  # sockaddr


def test_resolve_ip_address_single_ipv6() -> None:
    """Test resolving a single IPv6 address (fast path)."""
    result = helpers.resolve_ip_address("::1", 6053)

    assert len(result) == 1
    assert result[0][0] == socket.AF_INET6  # family
    assert result[0][1] in (
        0,
        socket.SOCK_STREAM,
    )  # type (0 on Windows with AI_NUMERICHOST)
    assert result[0][2] in (
        0,
        socket.IPPROTO_TCP,
    )  # proto (0 on Windows with AI_NUMERICHOST)
    assert result[0][3] == ""  # canonname
    # IPv6 sockaddr has 4 elements
    assert len(result[0][4]) == 4
    assert result[0][4][0] == "::1"  # address
    assert result[0][4][1] == 6053  # port


def test_resolve_ip_address_list_of_ips() -> None:
    """Test resolving a list of IP addresses (fast path)."""
    ips = ["192.168.1.100", "10.0.0.1", "::1"]
    result = helpers.resolve_ip_address(ips, 6053)

    # Should return results sorted by preference (IPv6 first, then IPv4)
    assert len(result) >= 2  # At least IPv4 addresses should work

    # Check that results are properly formatted
    for addr_info in result:
        assert addr_info[0] in (socket.AF_INET, socket.AF_INET6)
        assert addr_info[1] in (
            0,
            socket.SOCK_STREAM,
        )  # 0 on Windows with AI_NUMERICHOST
        assert addr_info[2] in (
            0,
            socket.IPPROTO_TCP,
        )  # 0 on Windows with AI_NUMERICHOST
        assert addr_info[3] == ""


def test_resolve_ip_address_with_getaddrinfo_failure(caplog) -> None:
    """Test that getaddrinfo OSError is handled gracefully in fast path."""
    with (
        caplog.at_level(logging.DEBUG),
        patch("socket.getaddrinfo") as mock_getaddrinfo,
    ):
        # First IP succeeds
        mock_getaddrinfo.side_effect = [
            [
                (
                    socket.AF_INET,
                    socket.SOCK_STREAM,
                    socket.IPPROTO_TCP,
                    "",
                    ("192.168.1.100", 6053),
                )
            ],
            OSError("Failed to resolve"),  # Second IP fails
        ]

        # Should continue despite one failure
        result = helpers.resolve_ip_address(["192.168.1.100", "192.168.1.101"], 6053)

        # Should have result from first IP only
        assert len(result) == 1
        assert result[0][4][0] == "192.168.1.100"

        # Verify both IPs were attempted
        assert mock_getaddrinfo.call_count == 2
        mock_getaddrinfo.assert_any_call(
            "192.168.1.100", 6053, proto=socket.IPPROTO_TCP, flags=socket.AI_NUMERICHOST
        )
        mock_getaddrinfo.assert_any_call(
            "192.168.1.101", 6053, proto=socket.IPPROTO_TCP, flags=socket.AI_NUMERICHOST
        )

        # Verify the debug log was called for the failed IP
        assert "Failed to parse IP address '192.168.1.101'" in caplog.text


def test_resolve_ip_address_hostname() -> None:
    """Test resolving a hostname (async resolver path)."""
    mock_addr_info = AddrInfo(
        family=socket.AF_INET,
        type=socket.SOCK_STREAM,
        proto=socket.IPPROTO_TCP,
        sockaddr=IPv4Sockaddr(address="192.168.1.100", port=6053),
    )

    with patch("esphome.resolver.AsyncResolver") as MockResolver:
        mock_resolver = MockResolver.return_value
        mock_resolver.resolve.return_value = [mock_addr_info]

        result = helpers.resolve_ip_address("test.local", 6053)

        assert len(result) == 1
        assert result[0][0] == socket.AF_INET
        assert result[0][4] == ("192.168.1.100", 6053)
        MockResolver.assert_called_once_with(["test.local"], 6053)
        mock_resolver.resolve.assert_called_once()


def test_resolve_ip_address_mixed_list() -> None:
    """Test resolving a mix of IPs and hostnames."""
    mock_addr_info = AddrInfo(
        family=socket.AF_INET,
        type=socket.SOCK_STREAM,
        proto=socket.IPPROTO_TCP,
        sockaddr=IPv4Sockaddr(address="192.168.1.200", port=6053),
    )

    with patch("esphome.resolver.AsyncResolver") as MockResolver:
        mock_resolver = MockResolver.return_value
        mock_resolver.resolve.return_value = [mock_addr_info]

        # Mix of IP and hostname - should use async resolver
        result = helpers.resolve_ip_address(["192.168.1.100", "test.local"], 6053)

        assert len(result) == 1
        assert result[0][4][0] == "192.168.1.200"
        MockResolver.assert_called_once_with(["192.168.1.100", "test.local"], 6053)
        mock_resolver.resolve.assert_called_once()


def test_resolve_ip_address_url() -> None:
    """Test extracting hostname from URL."""
    mock_addr_info = AddrInfo(
        family=socket.AF_INET,
        type=socket.SOCK_STREAM,
        proto=socket.IPPROTO_TCP,
        sockaddr=IPv4Sockaddr(address="192.168.1.100", port=6053),
    )

    with patch("esphome.resolver.AsyncResolver") as MockResolver:
        mock_resolver = MockResolver.return_value
        mock_resolver.resolve.return_value = [mock_addr_info]

        result = helpers.resolve_ip_address("http://test.local", 6053)

        assert len(result) == 1
        MockResolver.assert_called_once_with(["test.local"], 6053)
        mock_resolver.resolve.assert_called_once()


def test_resolve_ip_address_ipv6_conversion() -> None:
    """Test proper IPv6 address info conversion."""
    mock_addr_info = AddrInfo(
        family=socket.AF_INET6,
        type=socket.SOCK_STREAM,
        proto=socket.IPPROTO_TCP,
        sockaddr=IPv6Sockaddr(address="2001:db8::1", port=6053, flowinfo=1, scope_id=2),
    )

    with patch("esphome.resolver.AsyncResolver") as MockResolver:
        mock_resolver = MockResolver.return_value
        mock_resolver.resolve.return_value = [mock_addr_info]

        result = helpers.resolve_ip_address("test.local", 6053)

        assert len(result) == 1
        assert result[0][0] == socket.AF_INET6
        assert result[0][4] == ("2001:db8::1", 6053, 1, 2)


def test_resolve_ip_address_error_handling() -> None:
    """Test error handling from AsyncResolver."""
    with patch("esphome.resolver.AsyncResolver") as MockResolver:
        mock_resolver = MockResolver.return_value
        mock_resolver.resolve.side_effect = EsphomeError("Resolution failed")

        with pytest.raises(EsphomeError, match="Resolution failed"):
            helpers.resolve_ip_address("test.local", 6053)


def test_addr_preference_ipv4() -> None:
    """Test address preference for IPv4."""
    addr_info = (
        socket.AF_INET,
        socket.SOCK_STREAM,
        socket.IPPROTO_TCP,
        "",
        ("192.168.1.1", 6053),
    )
    assert helpers.addr_preference_(addr_info) == 2


def test_addr_preference_ipv6() -> None:
    """Test address preference for regular IPv6."""
    addr_info = (
        socket.AF_INET6,
        socket.SOCK_STREAM,
        socket.IPPROTO_TCP,
        "",
        ("2001:db8::1", 6053, 0, 0),
    )
    assert helpers.addr_preference_(addr_info) == 1


def test_addr_preference_ipv6_link_local_no_scope() -> None:
    """Test address preference for link-local IPv6 without scope."""
    addr_info = (
        socket.AF_INET6,
        socket.SOCK_STREAM,
        socket.IPPROTO_TCP,
        "",
        ("fe80::1", 6053, 0, 0),  # link-local with scope_id=0
    )
    assert helpers.addr_preference_(addr_info) == 3


def test_addr_preference_ipv6_link_local_with_scope() -> None:
    """Test address preference for link-local IPv6 with scope."""
    addr_info = (
        socket.AF_INET6,
        socket.SOCK_STREAM,
        socket.IPPROTO_TCP,
        "",
        ("fe80::1", 6053, 0, 2),  # link-local with scope_id=2
    )
    assert helpers.addr_preference_(addr_info) == 1  # Has scope, so it's usable


def test_resolve_ip_address_sorting() -> None:
    """Test that results are sorted by preference."""
    # Create multiple address infos with different preferences
    mock_addr_infos = [
        AddrInfo(
            family=socket.AF_INET6,
            type=socket.SOCK_STREAM,
            proto=socket.IPPROTO_TCP,
            sockaddr=IPv6Sockaddr(
                address="fe80::1", port=6053, flowinfo=0, scope_id=0
            ),  # Preference 3 (link-local no scope)
        ),
        AddrInfo(
            family=socket.AF_INET,
            type=socket.SOCK_STREAM,
            proto=socket.IPPROTO_TCP,
            sockaddr=IPv4Sockaddr(
                address="192.168.1.100", port=6053
            ),  # Preference 2 (IPv4)
        ),
        AddrInfo(
            family=socket.AF_INET6,
            type=socket.SOCK_STREAM,
            proto=socket.IPPROTO_TCP,
            sockaddr=IPv6Sockaddr(
                address="2001:db8::1", port=6053, flowinfo=0, scope_id=0
            ),  # Preference 1 (IPv6)
        ),
    ]

    with patch("esphome.resolver.AsyncResolver") as MockResolver:
        mock_resolver = MockResolver.return_value
        mock_resolver.resolve.return_value = mock_addr_infos

        result = helpers.resolve_ip_address("test.local", 6053)

        # Should be sorted: IPv6 first, then IPv4, then link-local without scope
        assert result[0][4][0] == "2001:db8::1"  # IPv6 (preference 1)
        assert result[1][4][0] == "192.168.1.100"  # IPv4 (preference 2)
        assert result[2][4][0] == "fe80::1"  # Link-local no scope (preference 3)
tests/unit_tests/test_resolver.py (new file, 169 lines)
@@ -0,0 +1,169 @@
"""Tests for the DNS resolver module."""

from __future__ import annotations

import re
import socket
from unittest.mock import patch

from aioesphomeapi.core import ResolveAPIError, ResolveTimeoutAPIError
from aioesphomeapi.host_resolver import AddrInfo, IPv4Sockaddr, IPv6Sockaddr
import pytest

from esphome.core import EsphomeError
from esphome.resolver import RESOLVE_TIMEOUT, AsyncResolver


@pytest.fixture
def mock_addr_info_ipv4() -> AddrInfo:
    """Create a mock IPv4 AddrInfo."""
    return AddrInfo(
        family=socket.AF_INET,
        type=socket.SOCK_STREAM,
        proto=socket.IPPROTO_TCP,
        sockaddr=IPv4Sockaddr(address="192.168.1.100", port=6053),
    )


@pytest.fixture
def mock_addr_info_ipv6() -> AddrInfo:
    """Create a mock IPv6 AddrInfo."""
    return AddrInfo(
        family=socket.AF_INET6,
        type=socket.SOCK_STREAM,
        proto=socket.IPPROTO_TCP,
        sockaddr=IPv6Sockaddr(address="2001:db8::1", port=6053, flowinfo=0, scope_id=0),
    )


def test_async_resolver_successful_resolution(mock_addr_info_ipv4: AddrInfo) -> None:
    """Test successful DNS resolution."""
    with patch(
        "esphome.resolver.hr.async_resolve_host",
        return_value=[mock_addr_info_ipv4],
    ) as mock_resolve:
        resolver = AsyncResolver(["test.local"], 6053)
        result = resolver.resolve()

        assert result == [mock_addr_info_ipv4]
        mock_resolve.assert_called_once_with(
            ["test.local"], 6053, timeout=RESOLVE_TIMEOUT
        )


def test_async_resolver_multiple_hosts(
    mock_addr_info_ipv4: AddrInfo, mock_addr_info_ipv6: AddrInfo
) -> None:
    """Test resolving multiple hosts."""
    mock_results = [mock_addr_info_ipv4, mock_addr_info_ipv6]

    with patch(
        "esphome.resolver.hr.async_resolve_host",
        return_value=mock_results,
    ) as mock_resolve:
        resolver = AsyncResolver(["test1.local", "test2.local"], 6053)
        result = resolver.resolve()

        assert result == mock_results
        mock_resolve.assert_called_once_with(
            ["test1.local", "test2.local"], 6053, timeout=RESOLVE_TIMEOUT
        )


def test_async_resolver_resolve_api_error() -> None:
    """Test handling of ResolveAPIError."""
    error_msg = "Failed to resolve"
    with patch(
        "esphome.resolver.hr.async_resolve_host",
        side_effect=ResolveAPIError(error_msg),
    ):
        resolver = AsyncResolver(["test.local"], 6053)
        with pytest.raises(
            EsphomeError, match=re.escape(f"Error resolving IP address: {error_msg}")
        ):
            resolver.resolve()


def test_async_resolver_timeout_error() -> None:
    """Test handling of ResolveTimeoutAPIError."""
    error_msg = "Resolution timed out"

    with patch(
        "esphome.resolver.hr.async_resolve_host",
        side_effect=ResolveTimeoutAPIError(error_msg),
    ):
        resolver = AsyncResolver(["test.local"], 6053)
        # Match either "Timeout" or "Error" since ResolveTimeoutAPIError is a
        # subclass of ResolveAPIError and, depending on import order/test
        # execution context, it might be caught as either
        with pytest.raises(
            EsphomeError,
            match=f"(Timeout|Error) resolving IP address: {re.escape(error_msg)}",
        ):
            resolver.resolve()


def test_async_resolver_generic_exception() -> None:
    """Test handling of generic exceptions."""
    error = RuntimeError("Unexpected error")
    with patch(
        "esphome.resolver.hr.async_resolve_host",
        side_effect=error,
    ):
        resolver = AsyncResolver(["test.local"], 6053)
        with pytest.raises(RuntimeError, match="Unexpected error"):
            resolver.resolve()


def test_async_resolver_thread_timeout() -> None:
    """Test timeout when the thread doesn't complete in time."""
    # Mock the start method to prevent actual thread execution
    with (
        patch.object(AsyncResolver, "start"),
        patch("esphome.resolver.hr.async_resolve_host"),
    ):
        resolver = AsyncResolver(["test.local"], 6053)
        # Override event.wait to simulate timeout (return False = timeout occurred)
        with (
            patch.object(resolver.event, "wait", return_value=False),
            pytest.raises(
                EsphomeError, match=re.escape("Timeout resolving IP address")
            ),
        ):
            resolver.resolve()

        # Verify thread start was called
        resolver.start.assert_called_once()


def test_async_resolver_ip_addresses(mock_addr_info_ipv4: AddrInfo) -> None:
    """Test resolving IP addresses."""
    with patch(
        "esphome.resolver.hr.async_resolve_host",
        return_value=[mock_addr_info_ipv4],
    ) as mock_resolve:
        resolver = AsyncResolver(["192.168.1.100"], 6053)
        result = resolver.resolve()

        assert result == [mock_addr_info_ipv4]
        mock_resolve.assert_called_once_with(
            ["192.168.1.100"], 6053, timeout=RESOLVE_TIMEOUT
        )


def test_async_resolver_mixed_addresses(
    mock_addr_info_ipv4: AddrInfo, mock_addr_info_ipv6: AddrInfo
) -> None:
    """Test resolving a mix of hostnames and IP addresses."""
    mock_results = [mock_addr_info_ipv4, mock_addr_info_ipv6]

    with patch(
        "esphome.resolver.hr.async_resolve_host",
        return_value=mock_results,
    ) as mock_resolve:
        resolver = AsyncResolver(["test.local", "192.168.1.100", "::1"], 6053)
        result = resolver.resolve()

        assert result == mock_results
        mock_resolve.assert_called_once_with(
            ["test.local", "192.168.1.100", "::1"], 6053, timeout=RESOLVE_TIMEOUT
        )
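Taken together, these tests constrain the shape of AsyncResolver fairly tightly: it behaves like a threading.Thread (a start() method and an event attribute), resolve() blocks on that event, failures from hr.async_resolve_host are wrapped in EsphomeError, and any other exception propagates unchanged. The sketch below is only a plausible reading of those constraints, not the actual esphome/resolver.py; in particular the RESOLVE_TIMEOUT value and the internal result/exception attributes are assumptions:

# Hedged sketch of an AsyncResolver consistent with the tests above;
# the real implementation in esphome/resolver.py may differ.
from __future__ import annotations

import asyncio
import threading

import aioesphomeapi.host_resolver as hr
from aioesphomeapi.core import ResolveAPIError, ResolveTimeoutAPIError

from esphome.core import EsphomeError

RESOLVE_TIMEOUT = 30.0  # assumed value; the tests only import the constant


class AsyncResolver(threading.Thread):
    def __init__(self, hosts: list[str], port: int) -> None:
        super().__init__(daemon=True)
        self.hosts = hosts
        self.port = port
        self.event = threading.Event()
        self.result: list[hr.AddrInfo] | None = None
        self.exception: BaseException | None = None

    def run(self) -> None:
        # Drive the async resolver from this worker thread, then signal done.
        try:
            self.result = asyncio.run(
                hr.async_resolve_host(self.hosts, self.port, timeout=RESOLVE_TIMEOUT)
            )
        except BaseException as err:  # noqa: BLE001 - re-raised in resolve()
            self.exception = err
        finally:
            self.event.set()

    def resolve(self) -> list[hr.AddrInfo]:
        self.start()
        if not self.event.wait(timeout=RESOLVE_TIMEOUT + 1):
            raise EsphomeError("Timeout resolving IP address")
        # ResolveTimeoutAPIError subclasses ResolveAPIError, so check it first.
        if isinstance(self.exception, ResolveTimeoutAPIError):
            raise EsphomeError(f"Timeout resolving IP address: {self.exception}")
        if isinstance(self.exception, ResolveAPIError):
            raise EsphomeError(f"Error resolving IP address: {self.exception}")
        if self.exception is not None:
            raise self.exception  # e.g. RuntimeError passes through unchanged
        return self.result

Note that patch("esphome.resolver.hr.async_resolve_host", ...) works with asyncio.run here because unittest.mock substitutes an AsyncMock for async functions, so the patched call still returns an awaitable.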