Merge remote-tracking branch 'upstream/dev' into component_source_logstring
@@ -342,7 +342,7 @@ esphome/components/ota/* @esphome/core
 esphome/components/output/* @esphome/core
 esphome/components/packet_transport/* @clydebarrow
 esphome/components/pca6416a/* @Mat931
-esphome/components/pca9554/* @clydebarrow @hwstar
+esphome/components/pca9554/* @bdraco @clydebarrow @hwstar
 esphome/components/pcf85063/* @brogon
 esphome/components/pcf8563/* @KoenBreeman
 esphome/components/pi4ioe5v6408/* @jesserockz

@@ -396,7 +396,10 @@ def check_permissions(port: str):
             )
 
 
-def upload_program(config: ConfigType, args: ArgsProtocol, host: str) -> int | str:
+def upload_program(
+    config: ConfigType, args: ArgsProtocol, devices: list[str]
+) -> int | str:
+    host = devices[0]
     try:
         module = importlib.import_module("esphome.components." + CORE.target_platform)
         if getattr(module, "upload_program")(config, args, host):
@@ -433,10 +436,10 @@ def upload_program(config: ConfigType, args: ArgsProtocol, host: str) -> int | str:
 
     remote_port = int(ota_conf[CONF_PORT])
     password = ota_conf.get(CONF_PASSWORD, "")
+    binary = args.file if getattr(args, "file", None) is not None else CORE.firmware_bin
 
     # Check if we should use MQTT for address resolution
     # This happens when no device was specified, or the current host is "MQTT"/"OTA"
-    devices: list[str] = args.device or []
     if (
         CONF_MQTT in config  # pylint: disable=too-many-boolean-expressions
         and (not devices or host in ("MQTT", "OTA"))
@@ -447,14 +450,13 @@ def upload_program(config: ConfigType, args: ArgsProtocol, host: str) -> int | str:
     ):
         from esphome import mqtt
 
-        host = mqtt.get_esphome_device_ip(
-            config, args.username, args.password, args.client_id
-        )
+        devices = [
+            mqtt.get_esphome_device_ip(
+                config, args.username, args.password, args.client_id
+            )
+        ]
 
-    if getattr(args, "file", None) is not None:
-        return espota2.run_ota(host, remote_port, password, args.file)
-
-    return espota2.run_ota(host, remote_port, password, CORE.firmware_bin)
+    return espota2.run_ota(devices, remote_port, password, binary)
 
 
 def show_logs(config: ConfigType, args: ArgsProtocol, devices: list[str]) -> int | None:
@@ -551,17 +553,11 @@ def command_upload(args: ArgsProtocol, config: ConfigType) -> int | None:
         purpose="uploading",
     )
 
-    # Try each device until one succeeds
-    exit_code = 1
-    for device in devices:
-        _LOGGER.info("Uploading to %s", device)
-        exit_code = upload_program(config, args, device)
-        if exit_code == 0:
-            _LOGGER.info("Successfully uploaded program.")
-            return 0
-        if len(devices) > 1:
-            _LOGGER.warning("Failed to upload to %s", device)
+    exit_code = upload_program(config, args, devices)
+    if exit_code == 0:
+        _LOGGER.info("Successfully uploaded program.")
+    else:
+        _LOGGER.warning("Failed to upload to %s", devices)
 
     return exit_code

@@ -16,6 +16,7 @@ from esphome.const import (
     DEVICE_CLASS_ENERGY,
     DEVICE_CLASS_POWER,
     DEVICE_CLASS_POWER_FACTOR,
+    DEVICE_CLASS_REACTIVE_POWER,
     DEVICE_CLASS_VOLTAGE,
     ICON_CURRENT_AC,
     ICON_LIGHTBULB,
@@ -78,6 +79,7 @@ CONFIG_SCHEMA = (
                 unit_of_measurement=UNIT_VOLT_AMPS_REACTIVE,
                 icon=ICON_LIGHTBULB,
                 accuracy_decimals=2,
+                device_class=DEVICE_CLASS_REACTIVE_POWER,
                 state_class=STATE_CLASS_MEASUREMENT,
             ),
             cv.Optional(CONF_POWER_FACTOR): sensor.sensor_schema(

@@ -17,10 +17,12 @@ from esphome.const import (
     CONF_REACTIVE_POWER,
     CONF_REVERSE_ACTIVE_ENERGY,
     CONF_VOLTAGE,
+    DEVICE_CLASS_APPARENT_POWER,
     DEVICE_CLASS_CURRENT,
     DEVICE_CLASS_ENERGY,
     DEVICE_CLASS_POWER,
     DEVICE_CLASS_POWER_FACTOR,
+    DEVICE_CLASS_REACTIVE_POWER,
     DEVICE_CLASS_TEMPERATURE,
     DEVICE_CLASS_VOLTAGE,
     ENTITY_CATEGORY_DIAGNOSTIC,
@@ -100,13 +102,13 @@ ATM90E32_PHASE_SCHEMA = cv.Schema(
             unit_of_measurement=UNIT_VOLT_AMPS_REACTIVE,
             icon=ICON_LIGHTBULB,
             accuracy_decimals=2,
-            device_class=DEVICE_CLASS_POWER,
+            device_class=DEVICE_CLASS_REACTIVE_POWER,
             state_class=STATE_CLASS_MEASUREMENT,
         ),
         cv.Optional(CONF_APPARENT_POWER): sensor.sensor_schema(
             unit_of_measurement=UNIT_VOLT_AMPS,
             accuracy_decimals=2,
-            device_class=DEVICE_CLASS_POWER,
+            device_class=DEVICE_CLASS_APPARENT_POWER,
             state_class=STATE_CLASS_MEASUREMENT,
         ),
         cv.Optional(CONF_POWER_FACTOR): sensor.sensor_schema(

@@ -493,7 +493,7 @@ void BedJetHub::dump_config() {
                 "  ble_client.app_id: %d\n"
                 "  ble_client.conn_id: %d",
                 this->get_name().c_str(), this->parent()->app_id, this->parent()->get_conn_id());
-  LOG_UPDATE_INTERVAL(this)
+  LOG_UPDATE_INTERVAL(this);
   ESP_LOGCONFIG(TAG, "  Child components (%d):", this->children_.size());
   for (auto *child : this->children_) {
     ESP_LOGCONFIG(TAG, "    - %s", child->describe().c_str());

@@ -152,7 +152,7 @@ void CCS811Component::send_env_data_() {
 void CCS811Component::dump_config() {
   ESP_LOGCONFIG(TAG, "CCS811");
   LOG_I2C_DEVICE(this)
-  LOG_UPDATE_INTERVAL(this)
+  LOG_UPDATE_INTERVAL(this);
   LOG_SENSOR("  ", "CO2 Sensor", this->co2_);
   LOG_SENSOR("  ", "TVOC Sensor", this->tvoc_);
   LOG_TEXT_SENSOR("  ", "Firmware Version Sensor", this->version_)

@@ -2,11 +2,11 @@
 
 #include "esphome/core/defines.h"
 #ifdef USE_OTA
+#include "esphome/components/ota/ota_backend.h"
+#include "esphome/components/socket/socket.h"
 #include "esphome/core/helpers.h"
 #include "esphome/core/log.h"
 #include "esphome/core/preferences.h"
-#include "esphome/components/ota/ota_backend.h"
-#include "esphome/components/socket/socket.h"
 
 namespace esphome {
 

@@ -4,6 +4,7 @@
 #include <cstdint>
 #include <cstring>
 #include <limits>
+#include <type_traits>
 #include "esphome/core/hal.h"
 
 namespace esphome::gpio_expander {
@@ -11,18 +12,27 @@ namespace esphome::gpio_expander {
 /// @brief A class to cache the read state of a GPIO expander.
 ///        This class caches reads between GPIO Pins which are on the same bank.
 ///        This means that for reading whole Port (ex. 8 pins) component needs only one
-///        I2C/SPI read per main loop call. It assumes, that one bit in byte identifies one GPIO pin
+///        I2C/SPI read per main loop call. It assumes that one bit in byte identifies one GPIO pin.
+///
 ///        Template parameters:
-///           T - Type which represents internal register. Could be uint8_t or uint16_t. Adjust to
-///               match size of your internal GPIO bank register.
-///           N - Number of pins
-template<typename T, T N> class CachedGpioExpander {
+///           T - Type which represents internal bank register. Could be uint8_t or uint16_t.
+///               Choose based on how your I/O expander reads pins:
+///               * uint8_t:  For chips that read banks separately (8 pins at a time)
+///                          Examples: MCP23017 (2x8-bit banks), TCA9555 (2x8-bit banks)
+///               * uint16_t: For chips that read all pins at once (up to 16 pins)
+///                          Examples: PCF8574/8575 (8/16 pins), PCA9554/9555 (8/16 pins)
+///           N - Total number of pins (maximum 65535)
+///           P - Type for pin number parameters (automatically selected based on N:
+///               uint8_t for N<=256, uint16_t for N>256). Can be explicitly specified
+///               if needed (e.g., for components like SN74HC165 with >256 pins)
+template<typename T, uint16_t N, typename P = typename std::conditional<(N > 256), uint16_t, uint8_t>::type>
+class CachedGpioExpander {
  public:
   /// @brief Read the state of the given pin. This will invalidate the cache for the given pin number.
   /// @param pin Pin number to read
   /// @return Pin state
-  bool digital_read(T pin) {
-    const uint8_t bank = pin / BANK_SIZE;
+  bool digital_read(P pin) {
+    const P bank = pin / BANK_SIZE;
     const T pin_mask = (1 << (pin % BANK_SIZE));
     // Check if specific pin cache is valid
     if (this->read_cache_valid_[bank] & pin_mask) {
@@ -38,21 +48,31 @@ template<typename T, T N> class CachedGpioExpander {
     return this->digital_read_cache(pin);
   }
 
-  void digital_write(T pin, bool value) { this->digital_write_hw(pin, value); }
+  void digital_write(P pin, bool value) { this->digital_write_hw(pin, value); }
 
  protected:
-  /// @brief Call component low level function to read GPIO state from device
-  virtual bool digital_read_hw(T pin) = 0;
-  /// @brief Call component read function from internal cache.
-  virtual bool digital_read_cache(T pin) = 0;
-  /// @brief Call component low level function to write GPIO state to device
-  virtual void digital_write_hw(T pin, bool value) = 0;
+  /// @brief Read GPIO bank from hardware into internal state
+  /// @param pin Pin number (used to determine which bank to read)
+  /// @return true if read succeeded, false on communication error
+  /// @note This does NOT return the pin state. It returns whether the read operation succeeded.
+  ///       The actual pin state should be returned by digital_read_cache().
+  virtual bool digital_read_hw(P pin) = 0;
+
+  /// @brief Get cached pin value from internal state
+  /// @param pin Pin number to read
+  /// @return Pin state (true = HIGH, false = LOW)
+  virtual bool digital_read_cache(P pin) = 0;
+
+  /// @brief Write GPIO state to hardware
+  /// @param pin Pin number to write
+  /// @param value Pin state to write (true = HIGH, false = LOW)
+  virtual void digital_write_hw(P pin, bool value) = 0;
 
   /// @brief Invalidate cache. This function should be called in component loop().
   void reset_pin_cache_() { memset(this->read_cache_valid_, 0x00, CACHE_SIZE_BYTES); }
 
-  static constexpr uint8_t BITS_PER_BYTE = 8;
-  static constexpr uint8_t BANK_SIZE = sizeof(T) * BITS_PER_BYTE;
+  static constexpr uint16_t BITS_PER_BYTE = 8;
+  static constexpr uint16_t BANK_SIZE = sizeof(T) * BITS_PER_BYTE;
   static constexpr size_t BANKS = N / BANK_SIZE;
   static constexpr size_t CACHE_SIZE_BYTES = BANKS * sizeof(T);
 

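For orientation, here is a minimal sketch of how a driver plugs into the refactored interface above. FakeExpander8 and its read_port()/write_port() helpers are hypothetical stand-ins for a real I2C/SPI transaction; only the three overrides, the template arguments, and reset_pin_cache_() follow the CachedGpioExpander API shown in this hunk, and a real driver would also inherit Component so that loop() runs each main-loop pass.

#include "esphome/components/gpio_expander/cached_gpio.h"

// Hypothetical 8-pin expander: T = uint8_t and N = 8 give a single bank,
// so one bus read per loop() serves every digital_read() on the part.
class FakeExpander8 : public esphome::gpio_expander::CachedGpioExpander<uint8_t, 8> {
 public:
  // In a real component this would be Component::loop(); invalidating here
  // makes the next digital_read() trigger one fresh hardware read per bank.
  void loop() { this->reset_pin_cache_(); }

 protected:
  bool digital_read_hw(uint8_t pin) override {
    // Read the whole bank into the cache; the return value reports bus
    // success/failure, NOT the pin state (see the @note above).
    return this->read_port(&this->input_mask_);
  }
  bool digital_read_cache(uint8_t pin) override {
    // Serve the pin from the value stored by digital_read_hw().
    return (this->input_mask_ & (1 << pin)) != 0;
  }
  void digital_write_hw(uint8_t pin, bool value) override {
    if (value) {
      this->output_mask_ |= (1 << pin);
    } else {
      this->output_mask_ &= ~(1 << pin);
    }
    this->write_port(this->output_mask_);  // push the full bank to hardware
  }

 private:
  // Stand-ins for a real bus transaction; always "succeed" here.
  bool read_port(uint8_t *value) {
    *value = 0;
    return true;
  }
  void write_port(uint8_t value) {}

  uint8_t input_mask_{0};
  uint8_t output_mask_{0};
};
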
@@ -57,7 +57,7 @@ void GroveGasMultichannelV2Component::update() {
 void GroveGasMultichannelV2Component::dump_config() {
   ESP_LOGCONFIG(TAG, "Grove Multichannel Gas Sensor V2");
   LOG_I2C_DEVICE(this)
-  LOG_UPDATE_INTERVAL(this)
+  LOG_UPDATE_INTERVAL(this);
   LOG_SENSOR("  ", "Nitrogen Dioxide", this->nitrogen_dioxide_sensor_);
   LOG_SENSOR("  ", "Ethanol", this->ethanol_sensor_);
   LOG_SENSOR("  ", "Carbon Monoxide", this->carbon_monoxide_sensor_);

@@ -42,7 +42,7 @@ void HLW8012Component::dump_config() {
                 "  Current resistor: %.1f mΩ\n"
                 "  Voltage Divider: %.1f",
                 this->change_mode_every_, this->current_resistor_ * 1000.0f, this->voltage_divider_);
-  LOG_UPDATE_INTERVAL(this)
+  LOG_UPDATE_INTERVAL(this);
   LOG_SENSOR("  ", "Voltage", this->voltage_sensor_);
   LOG_SENSOR("  ", "Current", this->current_sensor_);
   LOG_SENSOR("  ", "Power", this->power_sensor_);

@@ -246,14 +246,35 @@ void Logger::add_on_log_callback(std::function<void(uint8_t, const char *, const
   this->log_callback_.add(std::move(callback));
 }
 float Logger::get_setup_priority() const { return setup_priority::BUS + 500.0f; }
 
+#ifdef USE_STORE_LOG_STR_IN_FLASH
+// ESP8266: PSTR() cannot be used in array initializers, so we need to declare
+// each string separately as a global constant first
+static const char LOG_LEVEL_NONE[] PROGMEM = "NONE";
+static const char LOG_LEVEL_ERROR[] PROGMEM = "ERROR";
+static const char LOG_LEVEL_WARN[] PROGMEM = "WARN";
+static const char LOG_LEVEL_INFO[] PROGMEM = "INFO";
+static const char LOG_LEVEL_CONFIG[] PROGMEM = "CONFIG";
+static const char LOG_LEVEL_DEBUG[] PROGMEM = "DEBUG";
+static const char LOG_LEVEL_VERBOSE[] PROGMEM = "VERBOSE";
+static const char LOG_LEVEL_VERY_VERBOSE[] PROGMEM = "VERY_VERBOSE";
+
+static const LogString *const LOG_LEVELS[] = {
+    reinterpret_cast<const LogString *>(LOG_LEVEL_NONE),    reinterpret_cast<const LogString *>(LOG_LEVEL_ERROR),
+    reinterpret_cast<const LogString *>(LOG_LEVEL_WARN),    reinterpret_cast<const LogString *>(LOG_LEVEL_INFO),
+    reinterpret_cast<const LogString *>(LOG_LEVEL_CONFIG),  reinterpret_cast<const LogString *>(LOG_LEVEL_DEBUG),
+    reinterpret_cast<const LogString *>(LOG_LEVEL_VERBOSE), reinterpret_cast<const LogString *>(LOG_LEVEL_VERY_VERBOSE),
+};
+#else
 static const char *const LOG_LEVELS[] = {"NONE", "ERROR", "WARN", "INFO", "CONFIG", "DEBUG", "VERBOSE", "VERY_VERBOSE"};
+#endif
 
 void Logger::dump_config() {
   ESP_LOGCONFIG(TAG,
                 "Logger:\n"
                 "  Max Level: %s\n"
                 "  Initial Level: %s",
-                LOG_LEVELS[ESPHOME_LOG_LEVEL], LOG_LEVELS[this->current_level_]);
+                LOG_STR_ARG(LOG_LEVELS[ESPHOME_LOG_LEVEL]), LOG_STR_ARG(LOG_LEVELS[this->current_level_]));
 #ifndef USE_HOST
   ESP_LOGCONFIG(TAG,
                 "  Log Baud Rate: %" PRIu32 "\n"
@@ -267,14 +288,14 @@ void Logger::dump_config() {
 #endif
 
   for (auto &it : this->log_levels_) {
-    ESP_LOGCONFIG(TAG, "  Level for '%s': %s", it.first.c_str(), LOG_LEVELS[it.second]);
+    ESP_LOGCONFIG(TAG, "  Level for '%s': %s", it.first.c_str(), LOG_STR_ARG(LOG_LEVELS[it.second]));
   }
 }
 
 void Logger::set_log_level(uint8_t level) {
   if (level > ESPHOME_LOG_LEVEL) {
     level = ESPHOME_LOG_LEVEL;
-    ESP_LOGW(TAG, "Cannot set log level higher than pre-compiled %s", LOG_LEVELS[ESPHOME_LOG_LEVEL]);
+    ESP_LOGW(TAG, "Cannot set log level higher than pre-compiled %s", LOG_STR_ARG(LOG_LEVELS[ESPHOME_LOG_LEVEL]));
   }
   this->current_level_ = level;
   this->level_callback_.call(level);

@@ -11,6 +11,7 @@ from esphome.const import (
     CONF_OUTPUT,
 )
 
+AUTO_LOAD = ["gpio_expander"]
 DEPENDENCIES = ["i2c"]
 MULTI_CONF = True
 

@@ -22,14 +22,29 @@ void MCP23016::setup() {
   this->write_reg_(MCP23016_IODIR0, 0xFF);
   this->write_reg_(MCP23016_IODIR1, 0xFF);
 }
-bool MCP23016::digital_read(uint8_t pin) {
-  uint8_t bit = pin % 8;
+
+void MCP23016::loop() {
+  // Invalidate cache at the start of each loop
+  this->reset_pin_cache_();
+}
+
+bool MCP23016::digital_read_hw(uint8_t pin) {
   uint8_t reg_addr = pin < 8 ? MCP23016_GP0 : MCP23016_GP1;
   uint8_t value = 0;
-  this->read_reg_(reg_addr, &value);
-  return value & (1 << bit);
+  if (!this->read_reg_(reg_addr, &value)) {
+    return false;
+  }
+
+  // Update the appropriate part of input_mask_
+  if (pin < 8) {
+    this->input_mask_ = (this->input_mask_ & 0xFF00) | value;
+  } else {
+    this->input_mask_ = (this->input_mask_ & 0x00FF) | (uint16_t(value) << 8);
+  }
+  return true;
 }
-void MCP23016::digital_write(uint8_t pin, bool value) {
+
+bool MCP23016::digital_read_cache(uint8_t pin) { return this->input_mask_ & (1 << pin); }
+void MCP23016::digital_write_hw(uint8_t pin, bool value) {
   uint8_t reg_addr = pin < 8 ? MCP23016_OLAT0 : MCP23016_OLAT1;
   this->update_reg_(pin, value, reg_addr);
 }

@@ -3,6 +3,7 @@
 #include "esphome/core/component.h"
 #include "esphome/core/hal.h"
 #include "esphome/components/i2c/i2c.h"
+#include "esphome/components/gpio_expander/cached_gpio.h"
 
 namespace esphome {
 namespace mcp23016 {
@@ -24,19 +25,22 @@ enum MCP23016GPIORegisters {
   MCP23016_IOCON1 = 0x0B,
 };
 
-class MCP23016 : public Component, public i2c::I2CDevice {
+class MCP23016 : public Component, public i2c::I2CDevice, public gpio_expander::CachedGpioExpander<uint8_t, 16> {
  public:
   MCP23016() = default;
 
   void setup() override;
-
-  bool digital_read(uint8_t pin);
-  void digital_write(uint8_t pin, bool value);
+  void loop() override;
   void pin_mode(uint8_t pin, gpio::Flags flags);
 
   float get_setup_priority() const override;
 
  protected:
+  // Virtual methods from CachedGpioExpander
+  bool digital_read_hw(uint8_t pin) override;
+  bool digital_read_cache(uint8_t pin) override;
+  void digital_write_hw(uint8_t pin, bool value) override;
+
   // read a given register
   bool read_reg_(uint8_t reg, uint8_t *value);
   // write a value to a given register
@@ -46,6 +50,8 @@ class MCP23016 : public Component, public i2c::I2CDevice {
 
   uint8_t olat_0_{0x00};
   uint8_t olat_1_{0x00};
+  // Cache for input values (16-bit combined for both banks)
+  uint16_t input_mask_{0x00};
 };
 
 class MCP23016GPIOPin : public GPIOPin {

@@ -14,6 +14,7 @@ from esphome.const import (
 
 CODEOWNERS = ["@Mat931"]
 DEPENDENCIES = ["i2c"]
+AUTO_LOAD = ["gpio_expander"]
 MULTI_CONF = True
 pca6416a_ns = cg.esphome_ns.namespace("pca6416a")
 

@@ -51,6 +51,11 @@ void PCA6416AComponent::setup() {
            this->status_has_error());
 }
 
+void PCA6416AComponent::loop() {
+  // Invalidate cache at the start of each loop
+  this->reset_pin_cache_();
+}
+
 void PCA6416AComponent::dump_config() {
   if (this->has_pullup_) {
     ESP_LOGCONFIG(TAG, "PCAL6416A:");
@@ -63,15 +68,25 @@ void PCA6416AComponent::dump_config() {
   }
 }
 
-bool PCA6416AComponent::digital_read(uint8_t pin) {
-  uint8_t bit = pin % 8;
+bool PCA6416AComponent::digital_read_hw(uint8_t pin) {
   uint8_t reg_addr = pin < 8 ? PCA6416A_INPUT0 : PCA6416A_INPUT1;
   uint8_t value = 0;
-  this->read_register_(reg_addr, &value);
-  return value & (1 << bit);
+  if (!this->read_register_(reg_addr, &value)) {
+    return false;
+  }
+
+  // Update the appropriate part of input_mask_
+  if (pin < 8) {
+    this->input_mask_ = (this->input_mask_ & 0xFF00) | value;
+  } else {
+    this->input_mask_ = (this->input_mask_ & 0x00FF) | (uint16_t(value) << 8);
+  }
+  return true;
 }
 
-void PCA6416AComponent::digital_write(uint8_t pin, bool value) {
+bool PCA6416AComponent::digital_read_cache(uint8_t pin) { return this->input_mask_ & (1 << pin); }
+
+void PCA6416AComponent::digital_write_hw(uint8_t pin, bool value) {
   uint8_t reg_addr = pin < 8 ? PCA6416A_OUTPUT0 : PCA6416A_OUTPUT1;
   this->update_register_(pin, value, reg_addr);
 }

@@ -3,20 +3,20 @@
 #include "esphome/core/component.h"
 #include "esphome/core/hal.h"
 #include "esphome/components/i2c/i2c.h"
+#include "esphome/components/gpio_expander/cached_gpio.h"
 
 namespace esphome {
 namespace pca6416a {
 
-class PCA6416AComponent : public Component, public i2c::I2CDevice {
+class PCA6416AComponent : public Component,
+                          public i2c::I2CDevice,
+                          public gpio_expander::CachedGpioExpander<uint8_t, 16> {
  public:
   PCA6416AComponent() = default;
 
   /// Check i2c availability and setup masks
   void setup() override;
-  /// Helper function to read the value of a pin.
-  bool digital_read(uint8_t pin);
-  /// Helper function to write the value of a pin.
-  void digital_write(uint8_t pin, bool value);
+  void loop() override;
   /// Helper function to set the pin mode of a pin.
   void pin_mode(uint8_t pin, gpio::Flags flags);
 
@@ -25,6 +25,11 @@ class PCA6416AComponent : public Component, public i2c::I2CDevice {
   void dump_config() override;
 
  protected:
+  // Virtual methods from CachedGpioExpander
+  bool digital_read_hw(uint8_t pin) override;
+  bool digital_read_cache(uint8_t pin) override;
+  void digital_write_hw(uint8_t pin, bool value) override;
+
   bool read_register_(uint8_t reg, uint8_t *value);
   bool write_register_(uint8_t reg, uint8_t value);
   void update_register_(uint8_t pin, bool pin_value, uint8_t reg_addr);
@@ -32,6 +37,8 @@ class PCA6416AComponent : public Component, public i2c::I2CDevice {
   /// The mask to write as output state - 1 means HIGH, 0 means LOW
   uint8_t output_0_{0x00};
   uint8_t output_1_{0x00};
+  /// Cache for input values (16-bit combined for both banks)
+  uint16_t input_mask_{0x00};
   /// Storage for last I2C error seen
   esphome::i2c::ErrorCode last_error_;
   /// Only the PCAL6416A has pull-up resistors

@@ -11,7 +11,8 @@ from esphome.const import (
     CONF_OUTPUT,
 )
 
-CODEOWNERS = ["@hwstar", "@clydebarrow"]
+CODEOWNERS = ["@hwstar", "@clydebarrow", "@bdraco"]
+AUTO_LOAD = ["gpio_expander"]
 DEPENDENCIES = ["i2c"]
 MULTI_CONF = True
 CONF_PIN_COUNT = "pin_count"

@@ -37,10 +37,9 @@ void PCA9554Component::setup() {
 }
 
 void PCA9554Component::loop() {
-  // The read_inputs_() method will cache the input values from the chip.
-  this->read_inputs_();
-  // Clear all the previously read flags.
-  this->was_previously_read_ = 0x00;
+  // Invalidate the cache at the start of each loop.
+  // The actual read will happen on demand when digital_read() is called
+  this->reset_pin_cache_();
 }
 
 void PCA9554Component::dump_config() {
@@ -54,21 +53,17 @@ void PCA9554Component::dump_config() {
   }
 }
 
-bool PCA9554Component::digital_read(uint8_t pin) {
-  // Note: We want to try and avoid doing any I2C bus read transactions here
-  // to conserve I2C bus bandwidth. So what we do is check to see if we
-  // have seen a read during the time esphome is running this loop. If we have,
-  // we do an I2C bus transaction to get the latest value. If we haven't
-  // we return a cached value which was read at the time loop() was called.
-  if (this->was_previously_read_ & (1 << pin))
-    this->read_inputs_();  // Force a read of a new value
-  // Indicate we saw a read request for this pin in case a
-  // read happens later in the same loop.
-  this->was_previously_read_ |= (1 << pin);
+bool PCA9554Component::digital_read_hw(uint8_t pin) {
+  // Read all pins from hardware into input_mask_
+  return this->read_inputs_();  // Return true if I2C read succeeded, false on error
+}
+
+bool PCA9554Component::digital_read_cache(uint8_t pin) {
+  // Return the cached pin state from input_mask_
   return this->input_mask_ & (1 << pin);
 }
 
-void PCA9554Component::digital_write(uint8_t pin, bool value) {
+void PCA9554Component::digital_write_hw(uint8_t pin, bool value) {
   if (value) {
     this->output_mask_ |= (1 << pin);
   } else {
@@ -127,8 +122,7 @@ bool PCA9554Component::write_register_(uint8_t reg, uint16_t value) {
 
 float PCA9554Component::get_setup_priority() const { return setup_priority::IO; }
 
-// Run our loop() method very early in the loop, so that we cache read values before
-// before other components call our digital_read() method.
+// Run our loop() method early to invalidate cache before any other components access the pins
 float PCA9554Component::get_loop_priority() const { return 9.0f; }  // Just after WIFI
 
 void PCA9554GPIOPin::setup() { pin_mode(flags_); }

@@ -3,22 +3,21 @@
 #include "esphome/core/component.h"
 #include "esphome/core/hal.h"
 #include "esphome/components/i2c/i2c.h"
+#include "esphome/components/gpio_expander/cached_gpio.h"
 
 namespace esphome {
 namespace pca9554 {
 
-class PCA9554Component : public Component, public i2c::I2CDevice {
+class PCA9554Component : public Component,
+                         public i2c::I2CDevice,
+                         public gpio_expander::CachedGpioExpander<uint16_t, 16> {
  public:
   PCA9554Component() = default;
 
   /// Check i2c availability and setup masks
   void setup() override;
-  /// Poll for input changes periodically
+  /// Invalidate cache at start of each loop
   void loop() override;
-  /// Helper function to read the value of a pin.
-  bool digital_read(uint8_t pin);
-  /// Helper function to write the value of a pin.
-  void digital_write(uint8_t pin, bool value);
   /// Helper function to set the pin mode of a pin.
   void pin_mode(uint8_t pin, gpio::Flags flags);
 
@@ -32,9 +31,13 @@ class PCA9554Component : public Component, public i2c::I2CDevice {
 
  protected:
   bool read_inputs_();
 
   bool write_register_(uint8_t reg, uint16_t value);
 
+  // Virtual methods from CachedGpioExpander
+  bool digital_read_hw(uint8_t pin) override;
+  bool digital_read_cache(uint8_t pin) override;
+  void digital_write_hw(uint8_t pin, bool value) override;
+
   /// number of bits the expander has
   size_t pin_count_{8};
   /// width of registers
@@ -45,8 +48,6 @@ class PCA9554Component : public Component, public i2c::I2CDevice {
   uint16_t output_mask_{0x00};
   /// The state of the actual input pin states - 1 means HIGH, 0 means LOW
   uint16_t input_mask_{0x00};
-  /// Flags to check if read previously during this loop
-  uint16_t was_previously_read_ = {0x00};
   /// Storage for last I2C error seen
   esphome::i2c::ErrorCode last_error_;
 };

@@ -11,6 +11,7 @@ from esphome.const import (
     CONF_OUTPUT,
 )
 
+AUTO_LOAD = ["gpio_expander"]
 DEPENDENCIES = ["i2c"]
 MULTI_CONF = True
 

@@ -16,6 +16,10 @@ void PCF8574Component::setup() {
   this->write_gpio_();
   this->read_gpio_();
 }
+void PCF8574Component::loop() {
+  // Invalidate the cache at the start of each loop
+  this->reset_pin_cache_();
+}
 void PCF8574Component::dump_config() {
   ESP_LOGCONFIG(TAG, "PCF8574:");
   LOG_I2C_DEVICE(this)
@@ -24,17 +28,19 @@ void PCF8574Component::dump_config() {
     ESP_LOGE(TAG, ESP_LOG_MSG_COMM_FAIL);
   }
 }
-bool PCF8574Component::digital_read(uint8_t pin) {
-  this->read_gpio_();
-  return this->input_mask_ & (1 << pin);
+bool PCF8574Component::digital_read_hw(uint8_t pin) {
+  // Read all pins from hardware into input_mask_
+  return this->read_gpio_();  // Return true if I2C read succeeded, false on error
 }
-void PCF8574Component::digital_write(uint8_t pin, bool value) {
+
+bool PCF8574Component::digital_read_cache(uint8_t pin) { return this->input_mask_ & (1 << pin); }
+
+void PCF8574Component::digital_write_hw(uint8_t pin, bool value) {
   if (value) {
     this->output_mask_ |= (1 << pin);
   } else {
     this->output_mask_ &= ~(1 << pin);
   }
+
   this->write_gpio_();
 }
 void PCF8574Component::pin_mode(uint8_t pin, gpio::Flags flags) {
@@ -91,6 +97,9 @@ bool PCF8574Component::write_gpio_() {
 }
 float PCF8574Component::get_setup_priority() const { return setup_priority::IO; }
 
+// Run our loop() method early to invalidate cache before any other components access the pins
+float PCF8574Component::get_loop_priority() const { return 9.0f; }  // Just after WIFI
+
 void PCF8574GPIOPin::setup() { pin_mode(flags_); }
 void PCF8574GPIOPin::pin_mode(gpio::Flags flags) { this->parent_->pin_mode(this->pin_, flags); }
 bool PCF8574GPIOPin::digital_read() { return this->parent_->digital_read(this->pin_) != this->inverted_; }

@@ -3,11 +3,16 @@
 #include "esphome/core/component.h"
 #include "esphome/core/hal.h"
 #include "esphome/components/i2c/i2c.h"
+#include "esphome/components/gpio_expander/cached_gpio.h"
 
 namespace esphome {
 namespace pcf8574 {
 
-class PCF8574Component : public Component, public i2c::I2CDevice {
+// PCF8574(8 pins)/PCF8575(16 pins) always read/write all pins in a single I2C transaction
+// so we use uint16_t as bank type to ensure all pins are in one bank and cached together
+class PCF8574Component : public Component,
+                         public i2c::I2CDevice,
+                         public gpio_expander::CachedGpioExpander<uint16_t, 16> {
  public:
   PCF8574Component() = default;
 
@@ -15,20 +20,22 @@ class PCF8574Component : public Component, public i2c::I2CDevice {
 
   /// Check i2c availability and setup masks
   void setup() override;
-  /// Helper function to read the value of a pin.
-  bool digital_read(uint8_t pin);
-  /// Helper function to write the value of a pin.
-  void digital_write(uint8_t pin, bool value);
+  /// Invalidate cache at start of each loop
+  void loop() override;
   /// Helper function to set the pin mode of a pin.
   void pin_mode(uint8_t pin, gpio::Flags flags);
 
   float get_setup_priority() const override;
+  float get_loop_priority() const override;
 
   void dump_config() override;
 
  protected:
-  bool read_gpio_();
+  bool digital_read_hw(uint8_t pin) override;
+  bool digital_read_cache(uint8_t pin) override;
+  void digital_write_hw(uint8_t pin, bool value) override;
+
+  bool read_gpio_();
   bool write_gpio_();
 
   /// Mask for the pin mode - 1 means output, 0 means input

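The bank-type comment above has a concrete consequence in the template's own constants. A small self-contained sketch of that arithmetic (BITS_PER_BYTE, BANK_SIZE, and BANKS are copied from the cached_gpio.h hunk earlier; the BankMath wrapper and the main() stub exist only to make the check compilable):

#include <cstddef>
#include <cstdint>

// Bank arithmetic as defined in CachedGpioExpander.
template<typename T, uint16_t N> struct BankMath {
  static constexpr uint16_t BITS_PER_BYTE = 8;
  static constexpr uint16_t BANK_SIZE = sizeof(T) * BITS_PER_BYTE;
  static constexpr size_t BANKS = N / BANK_SIZE;
};

// PCF8575 style, 16 pins with a 16-bit bank: one I2C read covers all pins.
static_assert(BankMath<uint16_t, 16>::BANKS == 1, "single bank, single read");
// The same 16 pins with 8-bit banks (MCP23016 style) split into two banks,
// so pins 0-7 and 8-15 are read and cached independently.
static_assert(BankMath<uint8_t, 16>::BANKS == 2, "two banks, two reads");

int main() { return 0; }
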
@@ -18,7 +18,7 @@ void IRAM_ATTR PulseWidthSensorStore::gpio_intr(PulseWidthSensorStore *arg) {
 
 void PulseWidthSensor::dump_config() {
   LOG_SENSOR("", "Pulse Width", this);
-  LOG_UPDATE_INTERVAL(this)
+  LOG_UPDATE_INTERVAL(this);
   LOG_PIN("  Pin: ", this->pin_);
 }
 void PulseWidthSensor::update() {

@@ -25,7 +25,7 @@ CONF_SCAN_TIME = "scan_time"
 CONF_DEBOUNCE_TIME = "debounce_time"
 CONF_SX1509_ID = "sx1509_id"
 
-AUTO_LOAD = ["key_provider"]
+AUTO_LOAD = ["key_provider", "gpio_expander"]
 DEPENDENCIES = ["i2c"]
 MULTI_CONF = True
 

@@ -39,6 +39,9 @@ void SX1509Component::dump_config() {
 }
 
 void SX1509Component::loop() {
+  // Reset cache at the start of each loop
+  this->reset_pin_cache_();
+
   if (this->has_keypad_) {
     if (millis() - this->last_loop_timestamp_ < min_loop_period_)
       return;
@@ -73,18 +76,20 @@ void SX1509Component::loop() {
   }
 }
 
-bool SX1509Component::digital_read(uint8_t pin) {
+bool SX1509Component::digital_read_hw(uint8_t pin) {
+  // Always read all pins when any input pin is accessed
+  return this->read_byte_16(REG_DATA_B, &this->input_mask_);
+}
+
+bool SX1509Component::digital_read_cache(uint8_t pin) {
+  // Return cached value for input pins, false for output pins
   if (this->ddr_mask_ & (1 << pin)) {
-    uint16_t temp_reg_data;
-    if (!this->read_byte_16(REG_DATA_B, &temp_reg_data))
-      return false;
-    if (temp_reg_data & (1 << pin))
-      return true;
+    return (this->input_mask_ & (1 << pin)) != 0;
   }
   return false;
 }
 
-void SX1509Component::digital_write(uint8_t pin, bool bit_value) {
+void SX1509Component::digital_write_hw(uint8_t pin, bool bit_value) {
   if ((~this->ddr_mask_) & (1 << pin)) {
     // If the pin is an output, write high/low
     uint16_t temp_reg_data = 0;

@@ -2,6 +2,7 @@
 
 #include "esphome/components/i2c/i2c.h"
 #include "esphome/components/key_provider/key_provider.h"
+#include "esphome/components/gpio_expander/cached_gpio.h"
 #include "esphome/core/component.h"
 #include "esphome/core/hal.h"
 #include "sx1509_gpio_pin.h"
@@ -30,7 +31,10 @@ class SX1509Processor {
 
 class SX1509KeyTrigger : public Trigger<uint8_t> {};
 
-class SX1509Component : public Component, public i2c::I2CDevice, public key_provider::KeyProvider {
+class SX1509Component : public Component,
+                        public i2c::I2CDevice,
+                        public gpio_expander::CachedGpioExpander<uint16_t, 16>,
+                        public key_provider::KeyProvider {
  public:
   SX1509Component() = default;
 
@@ -39,11 +43,9 @@ class SX1509Component : public Component, public i2c::I2CDevice, public key_provider::KeyProvider {
   float get_setup_priority() const override { return setup_priority::HARDWARE; }
   void loop() override;
 
-  bool digital_read(uint8_t pin);
   uint16_t read_key_data();
   void set_pin_value(uint8_t pin, uint8_t i_on) { this->write_byte(REG_I_ON[pin], i_on); };
   void pin_mode(uint8_t pin, gpio::Flags flags);
-  void digital_write(uint8_t pin, bool bit_value);
   uint32_t get_clock() { return this->clk_x_; };
   void set_rows_cols(uint8_t rows, uint8_t cols) {
     this->rows_ = rows;
@@ -61,10 +63,15 @@ class SX1509Component : public Component, public i2c::I2CDevice, public key_provider::KeyProvider {
   void setup_led_driver(uint8_t pin);
 
  protected:
+  // Virtual methods from CachedGpioExpander
+  bool digital_read_hw(uint8_t pin) override;
+  bool digital_read_cache(uint8_t pin) override;
+  void digital_write_hw(uint8_t pin, bool value) override;
+
   uint32_t clk_x_ = 2000000;
   uint8_t frequency_ = 0;
   uint16_t ddr_mask_ = 0x00;
-  uint16_t input_mask_ = 0x00;
+  uint16_t input_mask_ = 0x00;  // Cache for input values (16-bit for all pins)
   uint16_t port_mask_ = 0x00;
   uint16_t output_state_ = 0x00;
   bool has_keypad_ = false;

@@ -104,7 +104,7 @@ void UFireECComponent::write_data_(uint8_t reg, float data) {
 void UFireECComponent::dump_config() {
   ESP_LOGCONFIG(TAG, "uFire-EC");
   LOG_I2C_DEVICE(this)
-  LOG_UPDATE_INTERVAL(this)
+  LOG_UPDATE_INTERVAL(this);
   LOG_SENSOR("  ", "EC Sensor", this->ec_sensor_);
   LOG_SENSOR("  ", "Temperature Sensor", this->temperature_sensor_);
   LOG_SENSOR("  ", "Temperature Sensor external", this->temperature_sensor_external_);

@@ -141,7 +141,7 @@ void UFireISEComponent::write_data_(uint8_t reg, float data) {
 void UFireISEComponent::dump_config() {
   ESP_LOGCONFIG(TAG, "uFire-ISE");
   LOG_I2C_DEVICE(this)
-  LOG_UPDATE_INTERVAL(this)
+  LOG_UPDATE_INTERVAL(this);
   LOG_SENSOR("  ", "PH Sensor", this->ph_sensor_);
   LOG_SENSOR("  ", "Temperature Sensor", this->temperature_sensor_);
   LOG_SENSOR("  ", "Temperature Sensor external", this->temperature_sensor_external_);

@@ -181,7 +181,7 @@ void WaveshareEPaper2P13InV3::dump_config() {
   LOG_PIN("  Reset Pin: ", this->reset_pin_)
   LOG_PIN("  DC Pin: ", this->dc_pin_)
   LOG_PIN("  Busy Pin: ", this->busy_pin_)
-  LOG_UPDATE_INTERVAL(this)
+  LOG_UPDATE_INTERVAL(this);
 }
 
 void WaveshareEPaper2P13InV3::set_full_update_every(uint32_t full_update_every) {

@@ -340,6 +340,18 @@ void Component::status_momentary_error(const std::string &name, uint32_t length)
   this->set_timeout(name, length, [this]() { this->status_clear_error(); });
 }
 void Component::dump_config() {}
+
+// Function implementation of LOG_UPDATE_INTERVAL macro to reduce code size
+void log_update_interval(const char *tag, PollingComponent *component) {
+  uint32_t update_interval = component->get_update_interval();
+  if (update_interval == SCHEDULER_DONT_RUN) {
+    ESP_LOGCONFIG(tag, "  Update Interval: never");
+  } else if (update_interval < 100) {
+    ESP_LOGCONFIG(tag, "  Update Interval: %.3fs", update_interval / 1000.0f);
+  } else {
+    ESP_LOGCONFIG(tag, "  Update Interval: %.1fs", update_interval / 1000.0f);
+  }
+}
 float Component::get_actual_setup_priority() const {
   // Check if there's an override in the global vector
   if (setup_priority_overrides) {

| @@ -48,14 +48,13 @@ extern const float LATE; | |||||||
|  |  | ||||||
| static const uint32_t SCHEDULER_DONT_RUN = 4294967295UL; | static const uint32_t SCHEDULER_DONT_RUN = 4294967295UL; | ||||||
|  |  | ||||||
| #define LOG_UPDATE_INTERVAL(this) \ | // Forward declaration | ||||||
|   if (this->get_update_interval() == SCHEDULER_DONT_RUN) { \ | class PollingComponent; | ||||||
|     ESP_LOGCONFIG(TAG, "  Update Interval: never"); \ |  | ||||||
|   } else if (this->get_update_interval() < 100) { \ | // Function declaration for LOG_UPDATE_INTERVAL | ||||||
|     ESP_LOGCONFIG(TAG, "  Update Interval: %.3fs", this->get_update_interval() / 1000.0f); \ | void log_update_interval(const char *tag, PollingComponent *component); | ||||||
|   } else { \ |  | ||||||
|     ESP_LOGCONFIG(TAG, "  Update Interval: %.1fs", this->get_update_interval() / 1000.0f); \ | #define LOG_UPDATE_INTERVAL(this) log_update_interval(TAG, this) | ||||||
|   } |  | ||||||
|  |  | ||||||
| extern const uint8_t COMPONENT_STATE_MASK; | extern const uint8_t COMPONENT_STATE_MASK; | ||||||
| extern const uint8_t COMPONENT_STATE_CONSTRUCTION; | extern const uint8_t COMPONENT_STATE_CONSTRUCTION; | ||||||
|   | |||||||
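An aside on why this hunk shrinks the binary: the old macro pasted the three-way branch into every `dump_config()` that used it, while the new form emits the logic once and leaves a single call at each site. Below is a minimal standalone sketch of the same shape — it takes the raw interval instead of a `PollingComponent` and uses `printf` in place of `ESP_LOGCONFIG`, so it compiles outside ESPHome; it is an illustration, not the shipped implementation.

```cpp
#include <cstdint>
#include <cstdio>

static const uint32_t SCHEDULER_DONT_RUN = 4294967295UL;

// Emitted once: the formatting logic all call sites now share.
void log_update_interval(const char *tag, uint32_t update_interval) {
  if (update_interval == SCHEDULER_DONT_RUN) {
    printf("[%s]   Update Interval: never\n", tag);
  } else if (update_interval < 100) {
    printf("[%s]   Update Interval: %.3fs\n", tag, update_interval / 1000.0f);
  } else {
    printf("[%s]   Update Interval: %.1fs\n", tag, update_interval / 1000.0f);
  }
}

int main() {
  // Each former macro expansion is now a single call instruction.
  log_update_interval("ufire_ec", 60000);                // Update Interval: 60.0s
  log_update_interval("scheduler", SCHEDULER_DONT_RUN);  // Update Interval: never
}
```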
| @@ -14,7 +14,19 @@ namespace esphome { | |||||||
|  |  | ||||||
| static const char *const TAG = "scheduler"; | static const char *const TAG = "scheduler"; | ||||||
|  |  | ||||||
| static const uint32_t MAX_LOGICALLY_DELETED_ITEMS = 10; | // Memory pool configuration constants | ||||||
|  | // Pool size of 5 matches typical usage patterns (2-4 active timers) | ||||||
|  | // - Minimal memory overhead (~250 bytes on ESP32) | ||||||
|  | // - Sufficient for most configs with a couple sensors/components | ||||||
|  | // - Still prevents heap fragmentation and allocation stalls | ||||||
|  | // - Complex setups with many timers will just allocate beyond the pool | ||||||
|  | // See https://github.com/esphome/backlog/issues/52 | ||||||
|  | static constexpr size_t MAX_POOL_SIZE = 5; | ||||||
|  |  | ||||||
|  | // Maximum number of logically deleted (cancelled) items before forcing cleanup. | ||||||
|  | // Set to 5 to match the pool size - when we have as many cancelled items as our | ||||||
|  | // pool can hold, it's time to clean up and recycle them. | ||||||
|  | static constexpr uint32_t MAX_LOGICALLY_DELETED_ITEMS = 5; | ||||||
| // Half the 32-bit range - used to detect rollovers vs normal time progression | // Half the 32-bit range - used to detect rollovers vs normal time progression | ||||||
| static constexpr uint32_t HALF_MAX_UINT32 = std::numeric_limits<uint32_t>::max() / 2; | static constexpr uint32_t HALF_MAX_UINT32 = std::numeric_limits<uint32_t>::max() / 2; | ||||||
| // max delay to start an interval sequence | // max delay to start an interval sequence | ||||||
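For readers skimming the constants: the pool that the rest of this diff wires up follows a standard bounded free-list pattern — take from the pool when possible, heap-allocate when empty, and retain at most `MAX_POOL_SIZE` items on release. A distilled sketch of that pattern (not the real scheduler; the type and method names here are illustrative):

```cpp
#include <cstddef>
#include <memory>
#include <vector>

// Bounded pool: reuse when possible, heap-allocate when empty, and only
// retain up to MaxPool items on release so memory stays capped.
template<typename T, std::size_t MaxPool> class ItemPool {
 public:
  std::unique_ptr<T> acquire() {
    if (!pool_.empty()) {
      auto item = std::move(pool_.back());
      pool_.pop_back();
      return item;                 // reuse: no heap allocation, no fragmentation
    }
    return std::make_unique<T>();  // pool empty: fall back to the heap
  }
  void release(std::unique_ptr<T> item) {
    if (pool_.size() < MaxPool) {
      pool_.push_back(std::move(item));  // recycle for the next acquire()
    }
    // else: the unique_ptr frees the item as it goes out of scope
  }

 private:
  std::vector<std::unique_ptr<T>> pool_;  // grows lazily, never past MaxPool
};
```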
| @@ -79,8 +91,28 @@ void HOT Scheduler::set_timer_common_(Component *component, SchedulerItem::Type | |||||||
|     return; |     return; | ||||||
|   } |   } | ||||||
|  |  | ||||||
|  |   // Get fresh timestamp BEFORE taking lock - millis_64_ may need to acquire lock itself | ||||||
|  |   const uint64_t now = this->millis_64_(millis()); | ||||||
|  |  | ||||||
|  |   // Take lock early to protect scheduler_item_pool_ access | ||||||
|  |   LockGuard guard{this->lock_}; | ||||||
|  |  | ||||||
|   // Create and populate the scheduler item |   // Create and populate the scheduler item | ||||||
|   auto item = make_unique<SchedulerItem>(); |   std::unique_ptr<SchedulerItem> item; | ||||||
|  |   if (!this->scheduler_item_pool_.empty()) { | ||||||
|  |     // Reuse from pool | ||||||
|  |     item = std::move(this->scheduler_item_pool_.back()); | ||||||
|  |     this->scheduler_item_pool_.pop_back(); | ||||||
|  | #ifdef ESPHOME_DEBUG_SCHEDULER | ||||||
|  |     ESP_LOGD(TAG, "Reused item from pool (pool size now: %zu)", this->scheduler_item_pool_.size()); | ||||||
|  | #endif | ||||||
|  |   } else { | ||||||
|  |     // Allocate new if pool is empty | ||||||
|  |     item = make_unique<SchedulerItem>(); | ||||||
|  | #ifdef ESPHOME_DEBUG_SCHEDULER | ||||||
|  |     ESP_LOGD(TAG, "Allocated new item (pool empty)"); | ||||||
|  | #endif | ||||||
|  |   } | ||||||
|   item->component = component; |   item->component = component; | ||||||
|   item->set_name(name_cstr, !is_static_string); |   item->set_name(name_cstr, !is_static_string); | ||||||
|   item->type = type; |   item->type = type; | ||||||
| @@ -99,7 +131,6 @@ void HOT Scheduler::set_timer_common_(Component *component, SchedulerItem::Type | |||||||
|   // Single-core platforms don't need thread-safe defer handling |   // Single-core platforms don't need thread-safe defer handling | ||||||
|   if (delay == 0 && type == SchedulerItem::TIMEOUT) { |   if (delay == 0 && type == SchedulerItem::TIMEOUT) { | ||||||
|     // Put in defer queue for guaranteed FIFO execution |     // Put in defer queue for guaranteed FIFO execution | ||||||
|     LockGuard guard{this->lock_}; |  | ||||||
|     if (!skip_cancel) { |     if (!skip_cancel) { | ||||||
|       this->cancel_item_locked_(component, name_cstr, type); |       this->cancel_item_locked_(component, name_cstr, type); | ||||||
|     } |     } | ||||||
| @@ -108,21 +139,18 @@ void HOT Scheduler::set_timer_common_(Component *component, SchedulerItem::Type | |||||||
|   } |   } | ||||||
| #endif /* not ESPHOME_THREAD_SINGLE */ | #endif /* not ESPHOME_THREAD_SINGLE */ | ||||||
|  |  | ||||||
|   // Get fresh timestamp for new timer/interval - ensures accurate scheduling |  | ||||||
|   const auto now = this->millis_64_(millis());  // Fresh millis() call |  | ||||||
|  |  | ||||||
|   // Type-specific setup |   // Type-specific setup | ||||||
|   if (type == SchedulerItem::INTERVAL) { |   if (type == SchedulerItem::INTERVAL) { | ||||||
|     item->interval = delay; |     item->interval = delay; | ||||||
|     // first execution happens after a small random offset |     // first execution happens after a small random offset | ||||||
|     // Calculate random offset (0 to min(interval/2, 5s)) |     // Calculate random offset (0 to min(interval/2, 5s)) | ||||||
|     uint32_t offset = (uint32_t) (std::min(delay / 2, MAX_INTERVAL_DELAY) * random_float()); |     uint32_t offset = (uint32_t) (std::min(delay / 2, MAX_INTERVAL_DELAY) * random_float()); | ||||||
|     item->next_execution_ = now + offset; |     item->set_next_execution(now + offset); | ||||||
|     ESP_LOGV(TAG, "Scheduler interval for %s is %" PRIu32 "ms, offset %" PRIu32 "ms", name_cstr ? name_cstr : "", delay, |     ESP_LOGV(TAG, "Scheduler interval for %s is %" PRIu32 "ms, offset %" PRIu32 "ms", name_cstr ? name_cstr : "", delay, | ||||||
|              offset); |              offset); | ||||||
|   } else { |   } else { | ||||||
|     item->interval = 0; |     item->interval = 0; | ||||||
|     item->next_execution_ = now + delay; |     item->set_next_execution(now + delay); | ||||||
|   } |   } | ||||||
|  |  | ||||||
| #ifdef ESPHOME_DEBUG_SCHEDULER | #ifdef ESPHOME_DEBUG_SCHEDULER | ||||||
| @@ -138,12 +166,11 @@ void HOT Scheduler::set_timer_common_(Component *component, SchedulerItem::Type | |||||||
|              name_cstr ? name_cstr : "(null)", type_str, delay); |              name_cstr ? name_cstr : "(null)", type_str, delay); | ||||||
|   } else { |   } else { | ||||||
|     ESP_LOGD(TAG, "set_%s(name='%s/%s', %s=%" PRIu32 ", offset=%" PRIu32 ")", type_str, LOG_STR_ARG(item->get_source()), |     ESP_LOGD(TAG, "set_%s(name='%s/%s', %s=%" PRIu32 ", offset=%" PRIu32 ")", type_str, LOG_STR_ARG(item->get_source()), | ||||||
|              name_cstr ? name_cstr : "(null)", type_str, delay, static_cast<uint32_t>(item->next_execution_ - now)); |              name_cstr ? name_cstr : "(null)", type_str, delay, | ||||||
|  |              static_cast<uint32_t>(item->get_next_execution() - now)); | ||||||
|   } |   } | ||||||
| #endif /* ESPHOME_DEBUG_SCHEDULER */ | #endif /* ESPHOME_DEBUG_SCHEDULER */ | ||||||
|  |  | ||||||
|   LockGuard guard{this->lock_}; |  | ||||||
|  |  | ||||||
|   // For retries, check if there's a cancelled timeout first |   // For retries, check if there's a cancelled timeout first | ||||||
|   if (is_retry && name_cstr != nullptr && type == SchedulerItem::TIMEOUT && |   if (is_retry && name_cstr != nullptr && type == SchedulerItem::TIMEOUT && | ||||||
|       (has_cancelled_timeout_in_container_(this->items_, component, name_cstr, /* match_retry= */ true) || |       (has_cancelled_timeout_in_container_(this->items_, component, name_cstr, /* match_retry= */ true) || | ||||||
| @@ -285,9 +312,10 @@ optional<uint32_t> HOT Scheduler::next_schedule_in(uint32_t now) { | |||||||
|   auto &item = this->items_[0]; |   auto &item = this->items_[0]; | ||||||
|   // Convert the fresh timestamp from caller (usually Application::loop()) to 64-bit |   // Convert the fresh timestamp from caller (usually Application::loop()) to 64-bit | ||||||
|   const auto now_64 = this->millis_64_(now);  // 'now' from parameter - fresh from caller |   const auto now_64 = this->millis_64_(now);  // 'now' from parameter - fresh from caller | ||||||
|   if (item->next_execution_ < now_64) |   const uint64_t next_exec = item->get_next_execution(); | ||||||
|  |   if (next_exec < now_64) | ||||||
|     return 0; |     return 0; | ||||||
|   return item->next_execution_ - now_64; |   return next_exec - now_64; | ||||||
| } | } | ||||||
| void HOT Scheduler::call(uint32_t now) { | void HOT Scheduler::call(uint32_t now) { | ||||||
| #ifndef ESPHOME_THREAD_SINGLE | #ifndef ESPHOME_THREAD_SINGLE | ||||||
| @@ -319,6 +347,8 @@ void HOT Scheduler::call(uint32_t now) { | |||||||
|     if (!this->should_skip_item_(item.get())) { |     if (!this->should_skip_item_(item.get())) { | ||||||
|       this->execute_item_(item.get(), now); |       this->execute_item_(item.get(), now); | ||||||
|     } |     } | ||||||
|  |     // Recycle the defer item after execution | ||||||
|  |     this->recycle_item_(std::move(item)); | ||||||
|   } |   } | ||||||
| #endif /* not ESPHOME_THREAD_SINGLE */ | #endif /* not ESPHOME_THREAD_SINGLE */ | ||||||
|  |  | ||||||
| @@ -326,6 +356,9 @@ void HOT Scheduler::call(uint32_t now) { | |||||||
|   const auto now_64 = this->millis_64_(now);  // 'now' from parameter - fresh from Application::loop() |   const auto now_64 = this->millis_64_(now);  // 'now' from parameter - fresh from Application::loop() | ||||||
|   this->process_to_add(); |   this->process_to_add(); | ||||||
|  |  | ||||||
|  |   // Track if any items were added to to_add_ during this call (intervals or from callbacks) | ||||||
|  |   bool has_added_items = false; | ||||||
|  |  | ||||||
| #ifdef ESPHOME_DEBUG_SCHEDULER | #ifdef ESPHOME_DEBUG_SCHEDULER | ||||||
|   static uint64_t last_print = 0; |   static uint64_t last_print = 0; | ||||||
|  |  | ||||||
| @@ -335,11 +368,11 @@ void HOT Scheduler::call(uint32_t now) { | |||||||
| #ifdef ESPHOME_THREAD_MULTI_ATOMICS | #ifdef ESPHOME_THREAD_MULTI_ATOMICS | ||||||
|     const auto last_dbg = this->last_millis_.load(std::memory_order_relaxed); |     const auto last_dbg = this->last_millis_.load(std::memory_order_relaxed); | ||||||
|     const auto major_dbg = this->millis_major_.load(std::memory_order_relaxed); |     const auto major_dbg = this->millis_major_.load(std::memory_order_relaxed); | ||||||
|     ESP_LOGD(TAG, "Items: count=%zu, now=%" PRIu64 " (%" PRIu16 ", %" PRIu32 ")", this->items_.size(), now_64, |     ESP_LOGD(TAG, "Items: count=%zu, pool=%zu, now=%" PRIu64 " (%" PRIu16 ", %" PRIu32 ")", this->items_.size(), | ||||||
|              major_dbg, last_dbg); |              this->scheduler_item_pool_.size(), now_64, major_dbg, last_dbg); | ||||||
| #else  /* not ESPHOME_THREAD_MULTI_ATOMICS */ | #else  /* not ESPHOME_THREAD_MULTI_ATOMICS */ | ||||||
|     ESP_LOGD(TAG, "Items: count=%zu, now=%" PRIu64 " (%" PRIu16 ", %" PRIu32 ")", this->items_.size(), now_64, |     ESP_LOGD(TAG, "Items: count=%zu, pool=%zu, now=%" PRIu64 " (%" PRIu16 ", %" PRIu32 ")", this->items_.size(), | ||||||
|              this->millis_major_, this->last_millis_); |              this->scheduler_item_pool_.size(), now_64, this->millis_major_, this->last_millis_); | ||||||
| #endif /* else ESPHOME_THREAD_MULTI_ATOMICS */ | #endif /* else ESPHOME_THREAD_MULTI_ATOMICS */ | ||||||
|     // Cleanup before debug output |     // Cleanup before debug output | ||||||
|     this->cleanup_(); |     this->cleanup_(); | ||||||
| @@ -352,9 +385,10 @@ void HOT Scheduler::call(uint32_t now) { | |||||||
|       } |       } | ||||||
|  |  | ||||||
|       const char *name = item->get_name(); |       const char *name = item->get_name(); | ||||||
|       ESP_LOGD(TAG, "  %s '%s/%s' interval=%" PRIu32 " next_execution in %" PRIu64 "ms at %" PRIu64, |       bool is_cancelled = is_item_removed_(item.get()); | ||||||
|  |       ESP_LOGD(TAG, "  %s '%s/%s' interval=%" PRIu32 " next_execution in %" PRIu64 "ms at %" PRIu64 "%s", | ||||||
|                item->get_type_str(), LOG_STR_ARG(item->get_source()), name ? name : "(null)", item->interval, |                item->get_type_str(), LOG_STR_ARG(item->get_source()), name ? name : "(null)", item->interval, | ||||||
|                item->next_execution_ - now_64, item->next_execution_); |                item->get_next_execution() - now_64, item->get_next_execution(), is_cancelled ? " [CANCELLED]" : ""); | ||||||
|  |  | ||||||
|       old_items.push_back(std::move(item)); |       old_items.push_back(std::move(item)); | ||||||
|     } |     } | ||||||
| @@ -369,8 +403,13 @@ void HOT Scheduler::call(uint32_t now) { | |||||||
|   } |   } | ||||||
| #endif /* ESPHOME_DEBUG_SCHEDULER */ | #endif /* ESPHOME_DEBUG_SCHEDULER */ | ||||||
|  |  | ||||||
|   // If we have too many items to remove |   // Cleanup removed items before processing | ||||||
|   if (this->to_remove_ > MAX_LOGICALLY_DELETED_ITEMS) { |   // First try to clean items from the top of the heap (fast path) | ||||||
|  |   this->cleanup_(); | ||||||
|  |  | ||||||
|  |   // If we still have too many cancelled items, do a full cleanup | ||||||
|  |   // This only happens if cancelled items are stuck in the middle/bottom of the heap | ||||||
|  |   if (this->to_remove_ >= MAX_LOGICALLY_DELETED_ITEMS) { | ||||||
|     // We hold the lock for the entire cleanup operation because: |     // We hold the lock for the entire cleanup operation because: | ||||||
|     // 1. We're rebuilding the entire items_ list, so we need exclusive access throughout |     // 1. We're rebuilding the entire items_ list, so we need exclusive access throughout | ||||||
|     // 2. Other threads must see either the old state or the new state, not intermediate states |     // 2. Other threads must see either the old state or the new state, not intermediate states | ||||||
| @@ -380,10 +419,13 @@ void HOT Scheduler::call(uint32_t now) { | |||||||
|  |  | ||||||
|     std::vector<std::unique_ptr<SchedulerItem>> valid_items; |     std::vector<std::unique_ptr<SchedulerItem>> valid_items; | ||||||
|  |  | ||||||
|     // Move all non-removed items to valid_items |     // Move all non-removed items to valid_items, recycle removed ones | ||||||
|     for (auto &item : this->items_) { |     for (auto &item : this->items_) { | ||||||
|       if (!item->remove) { |       if (!is_item_removed_(item.get())) { | ||||||
|         valid_items.push_back(std::move(item)); |         valid_items.push_back(std::move(item)); | ||||||
|  |       } else { | ||||||
|  |         // Recycle removed items | ||||||
|  |         this->recycle_item_(std::move(item)); | ||||||
|       } |       } | ||||||
|     } |     } | ||||||
|  |  | ||||||
| @@ -393,15 +435,12 @@ void HOT Scheduler::call(uint32_t now) { | |||||||
|     std::make_heap(this->items_.begin(), this->items_.end(), SchedulerItem::cmp); |     std::make_heap(this->items_.begin(), this->items_.end(), SchedulerItem::cmp); | ||||||
|     this->to_remove_ = 0; |     this->to_remove_ = 0; | ||||||
|   } |   } | ||||||
|  |  | ||||||
|   // Cleanup removed items before processing |  | ||||||
|   this->cleanup_(); |  | ||||||
|   while (!this->items_.empty()) { |   while (!this->items_.empty()) { | ||||||
|     // use scoping to indicate visibility of `item` variable |     // use scoping to indicate visibility of `item` variable | ||||||
|     { |     { | ||||||
|       // Don't copy by value yet |       // Don't copy by value yet | ||||||
|       auto &item = this->items_[0]; |       auto &item = this->items_[0]; | ||||||
|       if (item->next_execution_ > now_64) { |       if (item->get_next_execution() > now_64) { | ||||||
|         // Not reached timeout yet, done for this call |         // Not reached timeout yet, done for this call | ||||||
|         break; |         break; | ||||||
|       } |       } | ||||||
| @@ -440,7 +479,7 @@ void HOT Scheduler::call(uint32_t now) { | |||||||
|       const char *item_name = item->get_name(); |       const char *item_name = item->get_name(); | ||||||
|       ESP_LOGV(TAG, "Running %s '%s/%s' with interval=%" PRIu32 " next_execution=%" PRIu64 " (now=%" PRIu64 ")", |       ESP_LOGV(TAG, "Running %s '%s/%s' with interval=%" PRIu32 " next_execution=%" PRIu64 " (now=%" PRIu64 ")", | ||||||
|                item->get_type_str(), LOG_STR_ARG(item->get_source()), item_name ? item_name : "(null)", item->interval, |                item->get_type_str(), LOG_STR_ARG(item->get_source()), item_name ? item_name : "(null)", item->interval, | ||||||
|                item->next_execution_, now_64); |                item->get_next_execution(), now_64); | ||||||
| #endif /* ESPHOME_DEBUG_SCHEDULER */ | #endif /* ESPHOME_DEBUG_SCHEDULER */ | ||||||
|  |  | ||||||
|       // Warning: During callback(), a lot of stuff can happen, including: |       // Warning: During callback(), a lot of stuff can happen, including: | ||||||
| @@ -465,20 +504,29 @@ void HOT Scheduler::call(uint32_t now) { | |||||||
|       } |       } | ||||||
|  |  | ||||||
|       if (item->type == SchedulerItem::INTERVAL) { |       if (item->type == SchedulerItem::INTERVAL) { | ||||||
|         item->next_execution_ = now_64 + item->interval; |         item->set_next_execution(now_64 + item->interval); | ||||||
|         // Add new item directly to to_add_ |         // Add new item directly to to_add_ | ||||||
|         // since we have the lock held |         // since we have the lock held | ||||||
|         this->to_add_.push_back(std::move(item)); |         this->to_add_.push_back(std::move(item)); | ||||||
|  |       } else { | ||||||
|  |         // Timeout completed - recycle it | ||||||
|  |         this->recycle_item_(std::move(item)); | ||||||
|       } |       } | ||||||
|  |  | ||||||
|  |       has_added_items |= !this->to_add_.empty(); | ||||||
|     } |     } | ||||||
|   } |   } | ||||||
|  |  | ||||||
|   this->process_to_add(); |   if (has_added_items) { | ||||||
|  |     this->process_to_add(); | ||||||
|  |   } | ||||||
| } | } | ||||||
| void HOT Scheduler::process_to_add() { | void HOT Scheduler::process_to_add() { | ||||||
|   LockGuard guard{this->lock_}; |   LockGuard guard{this->lock_}; | ||||||
|   for (auto &it : this->to_add_) { |   for (auto &it : this->to_add_) { | ||||||
|     if (it->remove) { |     if (is_item_removed_(it.get())) { | ||||||
|  |       // Recycle cancelled items | ||||||
|  |       this->recycle_item_(std::move(it)); | ||||||
|       continue; |       continue; | ||||||
|     } |     } | ||||||
|  |  | ||||||
| @@ -518,6 +566,10 @@ size_t HOT Scheduler::cleanup_() { | |||||||
| } | } | ||||||
| void HOT Scheduler::pop_raw_() { | void HOT Scheduler::pop_raw_() { | ||||||
|   std::pop_heap(this->items_.begin(), this->items_.end(), SchedulerItem::cmp); |   std::pop_heap(this->items_.begin(), this->items_.end(), SchedulerItem::cmp); | ||||||
|  |  | ||||||
|  |   // Instead of destroying, recycle the item | ||||||
|  |   this->recycle_item_(std::move(this->items_.back())); | ||||||
|  |  | ||||||
|   this->items_.pop_back(); |   this->items_.pop_back(); | ||||||
| } | } | ||||||
|  |  | ||||||
| @@ -552,7 +604,7 @@ bool HOT Scheduler::cancel_item_locked_(Component *component, const char *name_c | |||||||
|  |  | ||||||
|   // Check all containers for matching items |   // Check all containers for matching items | ||||||
| #ifndef ESPHOME_THREAD_SINGLE | #ifndef ESPHOME_THREAD_SINGLE | ||||||
|   // Only check defer queue for timeouts (intervals never go there) |   // Mark items in defer queue as cancelled (they'll be skipped when processed) | ||||||
|   if (type == SchedulerItem::TIMEOUT) { |   if (type == SchedulerItem::TIMEOUT) { | ||||||
|     for (auto &item : this->defer_queue_) { |     for (auto &item : this->defer_queue_) { | ||||||
|       if (this->matches_item_(item, component, name_cstr, type, match_retry)) { |       if (this->matches_item_(item, component, name_cstr, type, match_retry)) { | ||||||
| @@ -564,11 +616,22 @@ bool HOT Scheduler::cancel_item_locked_(Component *component, const char *name_c | |||||||
| #endif /* not ESPHOME_THREAD_SINGLE */ | #endif /* not ESPHOME_THREAD_SINGLE */ | ||||||
|  |  | ||||||
|   // Cancel items in the main heap |   // Cancel items in the main heap | ||||||
|   for (auto &item : this->items_) { |   // Special case: if the last item in the heap matches, we can remove it immediately | ||||||
|     if (this->matches_item_(item, component, name_cstr, type, match_retry)) { |   // (removing the last element doesn't break heap structure) | ||||||
|       this->mark_item_removed_(item.get()); |   if (!this->items_.empty()) { | ||||||
|  |     auto &last_item = this->items_.back(); | ||||||
|  |     if (this->matches_item_(last_item, component, name_cstr, type, match_retry)) { | ||||||
|  |       this->recycle_item_(std::move(this->items_.back())); | ||||||
|  |       this->items_.pop_back(); | ||||||
|       total_cancelled++; |       total_cancelled++; | ||||||
|       this->to_remove_++;  // Track removals for heap items |     } | ||||||
|  |     // For other items in heap, we can only mark for removal (can't remove from middle of heap) | ||||||
|  |     for (auto &item : this->items_) { | ||||||
|  |       if (this->matches_item_(item, component, name_cstr, type, match_retry)) { | ||||||
|  |         this->mark_item_removed_(item.get()); | ||||||
|  |         total_cancelled++; | ||||||
|  |         this->to_remove_++;  // Track removals for heap items | ||||||
|  |       } | ||||||
|     } |     } | ||||||
|   } |   } | ||||||
|  |  | ||||||
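The "removing the last element doesn't break heap structure" claim above is worth spelling out: the heap property only constrains parent/child pairs, and the element at `items_.back()` is a leaf with no children, so dropping it leaves every remaining pair intact. Any matching item elsewhere in the heap can only be marked, as the loop does. A quick self-contained check of the leaf property:

```cpp
#include <algorithm>
#include <cassert>
#include <vector>

int main() {
  std::vector<int> heap{9, 5, 8, 1, 4, 7};
  assert(std::is_heap(heap.begin(), heap.end()));  // valid max-heap
  heap.pop_back();                                 // remove a leaf (the back)
  assert(std::is_heap(heap.begin(), heap.end()));  // still a valid heap
}
```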
| @@ -744,7 +807,31 @@ uint64_t Scheduler::millis_64_(uint32_t now) { | |||||||
|  |  | ||||||
| bool HOT Scheduler::SchedulerItem::cmp(const std::unique_ptr<SchedulerItem> &a, | bool HOT Scheduler::SchedulerItem::cmp(const std::unique_ptr<SchedulerItem> &a, | ||||||
|                                        const std::unique_ptr<SchedulerItem> &b) { |                                        const std::unique_ptr<SchedulerItem> &b) { | ||||||
|   return a->next_execution_ > b->next_execution_; |   // High bits are almost always equal (change only on 32-bit rollover ~49 days) | ||||||
|  |   // Optimize for common case: check low bits first when high bits are equal | ||||||
|  |   return (a->next_execution_high_ == b->next_execution_high_) ? (a->next_execution_low_ > b->next_execution_low_) | ||||||
|  |                                                               : (a->next_execution_high_ > b->next_execution_high_); | ||||||
|  | } | ||||||
|  |  | ||||||
|  | void Scheduler::recycle_item_(std::unique_ptr<SchedulerItem> item) { | ||||||
|  |   if (!item) | ||||||
|  |     return; | ||||||
|  |  | ||||||
|  |   if (this->scheduler_item_pool_.size() < MAX_POOL_SIZE) { | ||||||
|  |     // Clear callback to release captured resources | ||||||
|  |     item->callback = nullptr; | ||||||
|  |     // Clear dynamic name if any | ||||||
|  |     item->clear_dynamic_name(); | ||||||
|  |     this->scheduler_item_pool_.push_back(std::move(item)); | ||||||
|  | #ifdef ESPHOME_DEBUG_SCHEDULER | ||||||
|  |     ESP_LOGD(TAG, "Recycled item to pool (pool size now: %zu)", this->scheduler_item_pool_.size()); | ||||||
|  | #endif | ||||||
|  |   } else { | ||||||
|  | #ifdef ESPHOME_DEBUG_SCHEDULER | ||||||
|  |     ESP_LOGD(TAG, "Pool full (size: %zu), deleting item", this->scheduler_item_pool_.size()); | ||||||
|  | #endif | ||||||
|  |   } | ||||||
|  |   // else: unique_ptr will delete the item when it goes out of scope | ||||||
| } | } | ||||||
|  |  | ||||||
| }  // namespace esphome | }  // namespace esphome | ||||||
|   | |||||||
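Sanity check for the split comparison above: comparing `(high, low)` lexicographically must order items exactly as the old single 64-bit `next_execution_` comparison did. A hedged sketch, with the field layout assumed from this diff:

```cpp
#include <cassert>
#include <cstdint>

struct Item {
  uint32_t low;   // next_execution_low_
  uint16_t high;  // next_execution_high_
  uint64_t full() const { return (static_cast<uint64_t>(high) << 32) | low; }
};

// Mirrors SchedulerItem::cmp: check low bits first when the high bits match.
bool later_than(const Item &a, const Item &b) {
  return (a.high == b.high) ? (a.low > b.low) : (a.high > b.high);
}

int main() {
  Item a{0xFFFFFFFFu, 0};  // just before a 32-bit rollover
  Item b{0x00000000u, 1};  // just after it
  assert(later_than(b, a) == (b.full() > a.full()));  // split == 64-bit result
  assert(!later_than(a, b));
}
```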
| @@ -88,19 +88,22 @@ class Scheduler { | |||||||
|   struct SchedulerItem { |   struct SchedulerItem { | ||||||
|     // Ordered by size to minimize padding |     // Ordered by size to minimize padding | ||||||
|     Component *component; |     Component *component; | ||||||
|     uint32_t interval; |  | ||||||
|     // 64-bit time to handle millis() rollover. The scheduler combines the 32-bit millis() |  | ||||||
|     // with a 16-bit rollover counter to create a 64-bit time that won't roll over for |  | ||||||
|     // billions of years. This ensures correct scheduling even when devices run for months. |  | ||||||
|     uint64_t next_execution_; |  | ||||||
|  |  | ||||||
|     // Optimized name storage using tagged union |     // Optimized name storage using tagged union | ||||||
|     union { |     union { | ||||||
|       const char *static_name;  // For string literals (no allocation) |       const char *static_name;  // For string literals (no allocation) | ||||||
|       char *dynamic_name;       // For allocated strings |       char *dynamic_name;       // For allocated strings | ||||||
|     } name_; |     } name_; | ||||||
|  |     uint32_t interval; | ||||||
|  |     // Split time to handle millis() rollover. The scheduler combines the 32-bit millis() | ||||||
|  |     // with a 16-bit rollover counter to create a 48-bit time space (using 32+16 bits). | ||||||
|  |     // This is intentionally limited to 48 bits, not stored as a full 64-bit value. | ||||||
|  |     // With 49.7 days per 32-bit rollover, the 16-bit counter supports | ||||||
|  |     // 49.7 days × 65536 = ~8900 years. This ensures correct scheduling | ||||||
|  |     // even when devices run for months. Split into two fields for better memory | ||||||
|  |     // alignment on 32-bit systems. | ||||||
|  |     uint32_t next_execution_low_;  // Lower 32 bits of execution time (millis value) | ||||||
|     std::function<void()> callback; |     std::function<void()> callback; | ||||||
|  |     uint16_t next_execution_high_;  // Upper 16 bits (millis_major counter) | ||||||
|  |  | ||||||
| #ifdef ESPHOME_THREAD_MULTI_ATOMICS | #ifdef ESPHOME_THREAD_MULTI_ATOMICS | ||||||
|     // Multi-threaded with atomics: use atomic for lock-free access |     // Multi-threaded with atomics: use atomic for lock-free access | ||||||
| @@ -126,7 +129,8 @@ class Scheduler { | |||||||
|     SchedulerItem() |     SchedulerItem() | ||||||
|         : component(nullptr), |         : component(nullptr), | ||||||
|           interval(0), |           interval(0), | ||||||
|           next_execution_(0), |           next_execution_low_(0), | ||||||
|  |           next_execution_high_(0), | ||||||
| #ifdef ESPHOME_THREAD_MULTI_ATOMICS | #ifdef ESPHOME_THREAD_MULTI_ATOMICS | ||||||
|           // remove is initialized in the member declaration as std::atomic<bool>{false} |           // remove is initialized in the member declaration as std::atomic<bool>{false} | ||||||
|           type(TIMEOUT), |           type(TIMEOUT), | ||||||
| @@ -142,11 +146,7 @@ class Scheduler { | |||||||
|     } |     } | ||||||
|  |  | ||||||
|     // Destructor to clean up dynamic names |     // Destructor to clean up dynamic names | ||||||
|     ~SchedulerItem() { |     ~SchedulerItem() { clear_dynamic_name(); } | ||||||
|       if (name_is_dynamic) { |  | ||||||
|         delete[] name_.dynamic_name; |  | ||||||
|       } |  | ||||||
|     } |  | ||||||
|  |  | ||||||
|     // Delete copy operations to prevent accidental copies |     // Delete copy operations to prevent accidental copies | ||||||
|     SchedulerItem(const SchedulerItem &) = delete; |     SchedulerItem(const SchedulerItem &) = delete; | ||||||
| @@ -159,13 +159,19 @@ class Scheduler { | |||||||
|     // Helper to get the name regardless of storage type |     // Helper to get the name regardless of storage type | ||||||
|     const char *get_name() const { return name_is_dynamic ? name_.dynamic_name : name_.static_name; } |     const char *get_name() const { return name_is_dynamic ? name_.dynamic_name : name_.static_name; } | ||||||
|  |  | ||||||
|  |     // Helper to clear dynamic name if allocated | ||||||
|  |     void clear_dynamic_name() { | ||||||
|  |       if (name_is_dynamic && name_.dynamic_name) { | ||||||
|  |         delete[] name_.dynamic_name; | ||||||
|  |         name_.dynamic_name = nullptr; | ||||||
|  |         name_is_dynamic = false; | ||||||
|  |       } | ||||||
|  |     } | ||||||
|  |  | ||||||
|     // Helper to set name with proper ownership |     // Helper to set name with proper ownership | ||||||
|     void set_name(const char *name, bool make_copy = false) { |     void set_name(const char *name, bool make_copy = false) { | ||||||
|       // Clean up old dynamic name if any |       // Clean up old dynamic name if any | ||||||
|       if (name_is_dynamic && name_.dynamic_name) { |       clear_dynamic_name(); | ||||||
|         delete[] name_.dynamic_name; |  | ||||||
|         name_is_dynamic = false; |  | ||||||
|       } |  | ||||||
|  |  | ||||||
|       if (!name) { |       if (!name) { | ||||||
|         // nullptr case - no name provided |         // nullptr case - no name provided | ||||||
| @@ -183,7 +189,21 @@ class Scheduler { | |||||||
|     } |     } | ||||||
|  |  | ||||||
|     static bool cmp(const std::unique_ptr<SchedulerItem> &a, const std::unique_ptr<SchedulerItem> &b); |     static bool cmp(const std::unique_ptr<SchedulerItem> &a, const std::unique_ptr<SchedulerItem> &b); | ||||||
|     const char *get_type_str() const { return (type == TIMEOUT) ? "timeout" : "interval"; } |  | ||||||
|  |     // Note: We use 48 bits total (32 + 16), stored in a 64-bit value for API compatibility. | ||||||
|  |     // The upper 16 bits of the 64-bit value are always zero, which is fine since | ||||||
|  |     // millis_major_ is also 16 bits and they must match. | ||||||
|  |     constexpr uint64_t get_next_execution() const { | ||||||
|  |       return (static_cast<uint64_t>(next_execution_high_) << 32) | next_execution_low_; | ||||||
|  |     } | ||||||
|  |  | ||||||
|  |     constexpr void set_next_execution(uint64_t value) { | ||||||
|  |       next_execution_low_ = static_cast<uint32_t>(value); | ||||||
|  |       // Cast to uint16_t intentionally truncates to lower 16 bits of the upper 32 bits. | ||||||
|  |       // This is correct because millis_major_ that creates these values is also 16 bits. | ||||||
|  |       next_execution_high_ = static_cast<uint16_t>(value >> 32); | ||||||
|  |     } | ||||||
|  |     constexpr const char *get_type_str() const { return (type == TIMEOUT) ? "timeout" : "interval"; } | ||||||
|     const LogString *get_source() const { return component ? component->get_component_log_str() : LOG_STR("unknown"); } |     const LogString *get_source() const { return component ? component->get_component_log_str() : LOG_STR("unknown"); } | ||||||
|   }; |   }; | ||||||
|  |  | ||||||
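A worked round trip through the accessors above, under the layout this diff declares (32-bit low word from `millis()`, 16-bit high word from `millis_major_`); the `SplitTime` struct is a stand-in for the two `SchedulerItem` fields:

```cpp
#include <cassert>
#include <cstdint>

struct SplitTime {  // stand-in for next_execution_low_ / next_execution_high_
  uint32_t low{0};
  uint16_t high{0};
  void set(uint64_t v) {
    low = static_cast<uint32_t>(v);
    high = static_cast<uint16_t>(v >> 32);  // intentional: keep 16 of the upper 32 bits
  }
  uint64_t get() const { return (static_cast<uint64_t>(high) << 32) | low; }
};

int main() {
  // 3 rollovers (~149 days of uptime at 49.7 days each) plus 16 ms into the fourth:
  uint64_t t = (static_cast<uint64_t>(3) << 32) | 0x10;
  SplitTime s;
  s.set(t);
  assert(s.low == 0x10 && s.high == 3);
  assert(s.get() == t);  // lossless for any value below 2^48
}
```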
| @@ -214,6 +234,15 @@ class Scheduler { | |||||||
|   // Common implementation for cancel operations |   // Common implementation for cancel operations | ||||||
|   bool cancel_item_(Component *component, bool is_static_string, const void *name_ptr, SchedulerItem::Type type); |   bool cancel_item_(Component *component, bool is_static_string, const void *name_ptr, SchedulerItem::Type type); | ||||||
|  |  | ||||||
|  |   // Helper to check if two scheduler item names match | ||||||
|  |   inline bool HOT names_match_(const char *name1, const char *name2) const { | ||||||
|  |     // Check pointer equality first (common for static strings), then string contents | ||||||
|  |     // The core ESPHome codebase uses static strings (const char*) for component names, | ||||||
|  |     // making pointer comparison effective. The std::string overloads exist only for | ||||||
|  |     // compatibility with external components but are rarely used in practice. | ||||||
|  |     return (name1 != nullptr && name2 != nullptr) && ((name1 == name2) || (strcmp(name1, name2) == 0)); | ||||||
|  |   } | ||||||
|  |  | ||||||
|   // Helper function to check if item matches criteria for cancellation |   // Helper function to check if item matches criteria for cancellation | ||||||
|   inline bool HOT matches_item_(const std::unique_ptr<SchedulerItem> &item, Component *component, const char *name_cstr, |   inline bool HOT matches_item_(const std::unique_ptr<SchedulerItem> &item, Component *component, const char *name_cstr, | ||||||
|                                 SchedulerItem::Type type, bool match_retry, bool skip_removed = true) const { |                                 SchedulerItem::Type type, bool match_retry, bool skip_removed = true) const { | ||||||
| @@ -221,29 +250,20 @@ class Scheduler { | |||||||
|         (match_retry && !item->is_retry)) { |         (match_retry && !item->is_retry)) { | ||||||
|       return false; |       return false; | ||||||
|     } |     } | ||||||
|     const char *item_name = item->get_name(); |     return this->names_match_(item->get_name(), name_cstr); | ||||||
|     if (item_name == nullptr) { |  | ||||||
|       return false; |  | ||||||
|     } |  | ||||||
|     // Fast path: if pointers are equal |  | ||||||
|     // This is effective because the core ESPHome codebase uses static strings (const char*) |  | ||||||
|     // for component names. The std::string overloads exist only for compatibility with |  | ||||||
|     // external components, but are rarely used in practice. |  | ||||||
|     if (item_name == name_cstr) { |  | ||||||
|       return true; |  | ||||||
|     } |  | ||||||
|     // Slow path: compare string contents |  | ||||||
|     return strcmp(name_cstr, item_name) == 0; |  | ||||||
|   } |   } | ||||||
|  |  | ||||||
|   // Helper to execute a scheduler item |   // Helper to execute a scheduler item | ||||||
|   void execute_item_(SchedulerItem *item, uint32_t now); |   void execute_item_(SchedulerItem *item, uint32_t now); | ||||||
|  |  | ||||||
|   // Helper to check if item should be skipped |   // Helper to check if item should be skipped | ||||||
|   bool should_skip_item_(const SchedulerItem *item) const { |   bool should_skip_item_(SchedulerItem *item) const { | ||||||
|     return item->remove || (item->component != nullptr && item->component->is_failed()); |     return is_item_removed_(item) || (item->component != nullptr && item->component->is_failed()); | ||||||
|   } |   } | ||||||
|  |  | ||||||
|  |   // Helper to recycle a SchedulerItem | ||||||
|  |   void recycle_item_(std::unique_ptr<SchedulerItem> item); | ||||||
|  |  | ||||||
|   // Helper to check if item is marked for removal (platform-specific) |   // Helper to check if item is marked for removal (platform-specific) | ||||||
|   // Returns true if item should be skipped, handles platform-specific synchronization |   // Returns true if item should be skipped, handles platform-specific synchronization | ||||||
|   // For ESPHOME_THREAD_MULTI_NO_ATOMICS platforms, the caller must hold the scheduler lock before calling this |   // For ESPHOME_THREAD_MULTI_NO_ATOMICS platforms, the caller must hold the scheduler lock before calling this | ||||||
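To make the `names_match_` fast path above concrete: scheduler names in core components are string literals, so the same pointer shows up at both the schedule and cancel sites and `strcmp` never runs. A small sketch, with a free function standing in for the private helper:

```cpp
#include <cassert>
#include <cstring>

bool names_match(const char *name1, const char *name2) {
  return (name1 != nullptr && name2 != nullptr) &&
         ((name1 == name2) || (std::strcmp(name1, name2) == 0));
}

int main() {
  static const char *const NAME = "update";  // one literal, one address
  assert(names_match(NAME, NAME));           // fast path: pointer equality
  char dynamic[] = "update";                 // same text, different address
  assert(names_match(NAME, dynamic));        // slow path: strcmp
  assert(!names_match(NAME, nullptr));       // unnamed items never match
}
```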
| @@ -280,8 +300,9 @@ class Scheduler { | |||||||
|   bool has_cancelled_timeout_in_container_(const Container &container, Component *component, const char *name_cstr, |   bool has_cancelled_timeout_in_container_(const Container &container, Component *component, const char *name_cstr, | ||||||
|                                            bool match_retry) const { |                                            bool match_retry) const { | ||||||
|     for (const auto &item : container) { |     for (const auto &item : container) { | ||||||
|       if (item->remove && this->matches_item_(item, component, name_cstr, SchedulerItem::TIMEOUT, match_retry, |       if (is_item_removed_(item.get()) && | ||||||
|                                               /* skip_removed= */ false)) { |           this->matches_item_(item, component, name_cstr, SchedulerItem::TIMEOUT, match_retry, | ||||||
|  |                               /* skip_removed= */ false)) { | ||||||
|         return true; |         return true; | ||||||
|       } |       } | ||||||
|     } |     } | ||||||
| @@ -297,6 +318,16 @@ class Scheduler { | |||||||
| #endif                                                      /* ESPHOME_THREAD_SINGLE */ | #endif                                                      /* ESPHOME_THREAD_SINGLE */ | ||||||
|   uint32_t to_remove_{0}; |   uint32_t to_remove_{0}; | ||||||
|  |  | ||||||
|  |   // Memory pool for recycling SchedulerItem objects to reduce heap churn. | ||||||
|  |   // Design decisions: | ||||||
|  |   // - std::vector is used instead of a fixed array because many systems only need 1-2 scheduler items | ||||||
|  |   // - The vector grows dynamically up to MAX_POOL_SIZE (5) only when needed, saving memory on simple setups | ||||||
|  |   // - Pool size of 5 matches typical usage (2-4 timers) while keeping memory overhead low (~250 bytes on ESP32) | ||||||
|  |   // - The pool significantly reduces heap fragmentation which is critical because heap allocation/deallocation | ||||||
|  |   //   can stall the entire system, causing timing issues and dropped events for any components that need | ||||||
|  |   //   to synchronize between tasks (see https://github.com/esphome/backlog/issues/52) | ||||||
|  |   std::vector<std::unique_ptr<SchedulerItem>> scheduler_item_pool_; | ||||||
|  |  | ||||||
| #ifdef ESPHOME_THREAD_MULTI_ATOMICS | #ifdef ESPHOME_THREAD_MULTI_ATOMICS | ||||||
|   /* |   /* | ||||||
|    * Multi-threaded platforms with atomic support: last_millis_ needs atomic for lock-free updates |    * Multi-threaded platforms with atomic support: last_millis_ needs atomic for lock-free updates | ||||||
|   | |||||||
| @@ -308,8 +308,12 @@ def perform_ota( | |||||||
|     time.sleep(1) |     time.sleep(1) | ||||||
|  |  | ||||||
|  |  | ||||||
| def run_ota_impl_(remote_host, remote_port, password, filename): | def run_ota_impl_( | ||||||
|  |     remote_host: str | list[str], remote_port: int, password: str, filename: str | ||||||
|  | ) -> int: | ||||||
|  |     # Handle both single host and list of hosts | ||||||
|     try: |     try: | ||||||
|  |         # Resolve all hosts at once for parallel DNS resolution | ||||||
|         res = resolve_ip_address(remote_host, remote_port) |         res = resolve_ip_address(remote_host, remote_port) | ||||||
|     except EsphomeError as err: |     except EsphomeError as err: | ||||||
|         _LOGGER.error( |         _LOGGER.error( | ||||||
| @@ -350,7 +354,9 @@ def run_ota_impl_(remote_host, remote_port, password, filename): | |||||||
|     return 1 |     return 1 | ||||||
|  |  | ||||||
|  |  | ||||||
| def run_ota(remote_host, remote_port, password, filename): | def run_ota( | ||||||
|  |     remote_host: str | list[str], remote_port: int, password: str, filename: str | ||||||
|  | ) -> int: | ||||||
|     try: |     try: | ||||||
|         return run_ota_impl_(remote_host, remote_port, password, filename) |         return run_ota_impl_(remote_host, remote_port, password, filename) | ||||||
|     except OTAError as err: |     except OTAError as err: | ||||||
|   | |||||||
| @@ -1,3 +1,5 @@ | |||||||
|  | from __future__ import annotations | ||||||
|  |  | ||||||
| import codecs | import codecs | ||||||
| from contextlib import suppress | from contextlib import suppress | ||||||
| import ipaddress | import ipaddress | ||||||
| @@ -11,6 +13,18 @@ from urllib.parse import urlparse | |||||||
|  |  | ||||||
| from esphome.const import __version__ as ESPHOME_VERSION | from esphome.const import __version__ as ESPHOME_VERSION | ||||||
|  |  | ||||||
|  | # Type aliases for socket address information | ||||||
|  | AddrInfo = tuple[ | ||||||
|  |     int,  # family (AF_INET, AF_INET6, etc.) | ||||||
|  |     int,  # type (SOCK_STREAM, SOCK_DGRAM, etc.) | ||||||
|  |     int,  # proto (IPPROTO_TCP, etc.) | ||||||
|  |     str,  # canonname | ||||||
|  |     tuple[str, int] | tuple[str, int, int, int],  # sockaddr (IPv4 or IPv6) | ||||||
|  | ] | ||||||
|  | IPv4SockAddr = tuple[str, int]  # (host, port) | ||||||
|  | IPv6SockAddr = tuple[str, int, int, int]  # (host, port, flowinfo, scope_id) | ||||||
|  | SockAddr = IPv4SockAddr | IPv6SockAddr | ||||||
|  |  | ||||||
| _LOGGER = logging.getLogger(__name__) | _LOGGER = logging.getLogger(__name__) | ||||||
|  |  | ||||||
| IS_MACOS = platform.system() == "Darwin" | IS_MACOS = platform.system() == "Darwin" | ||||||
| @@ -147,32 +161,7 @@ def is_ip_address(host): | |||||||
|         return False |         return False | ||||||
|  |  | ||||||
|  |  | ||||||
| def _resolve_with_zeroconf(host): | def addr_preference_(res: AddrInfo) -> int: | ||||||
|     from esphome.core import EsphomeError |  | ||||||
|     from esphome.zeroconf import EsphomeZeroconf |  | ||||||
|  |  | ||||||
|     try: |  | ||||||
|         zc = EsphomeZeroconf() |  | ||||||
|     except Exception as err: |  | ||||||
|         raise EsphomeError( |  | ||||||
|             "Cannot start mDNS sockets, is this a docker container without " |  | ||||||
|             "host network mode?" |  | ||||||
|         ) from err |  | ||||||
|     try: |  | ||||||
|         info = zc.resolve_host(f"{host}.") |  | ||||||
|     except Exception as err: |  | ||||||
|         raise EsphomeError(f"Error resolving mDNS hostname: {err}") from err |  | ||||||
|     finally: |  | ||||||
|         zc.close() |  | ||||||
|     if info is None: |  | ||||||
|         raise EsphomeError( |  | ||||||
|             "Error resolving address with mDNS: Did not respond. " |  | ||||||
|             "Maybe the device is offline." |  | ||||||
|         ) |  | ||||||
|     return info |  | ||||||
|  |  | ||||||
|  |  | ||||||
| def addr_preference_(res): |  | ||||||
|     # Trivial alternative to RFC6724 sorting. Put sane IPv6 first, then |     # Trivial alternative to RFC6724 sorting. Put sane IPv6 first, then | ||||||
|     # Legacy IP, then IPv6 link-local addresses without an actual link. |     # Legacy IP, then IPv6 link-local addresses without an actual link. | ||||||
|     sa = res[4] |     sa = res[4] | ||||||
| @@ -184,66 +173,70 @@ def addr_preference_(res): | |||||||
|     return 1 |     return 1 | ||||||
|  |  | ||||||
|  |  | ||||||
| def resolve_ip_address(host, port): | def resolve_ip_address(host: str | list[str], port: int) -> list[AddrInfo]: | ||||||
|     import socket |     import socket | ||||||
|  |  | ||||||
|     from esphome.core import EsphomeError |  | ||||||
|  |  | ||||||
|     # There are five cases here. The host argument could be one of: |     # There are five cases here. The host argument could be one of: | ||||||
|     #  • a *list* of IP addresses discovered by MQTT, |     #  • a *list* of IP addresses discovered by MQTT, | ||||||
|     #  • a single IP address specified by the user, |     #  • a single IP address specified by the user, | ||||||
|     #  • a .local hostname to be resolved by mDNS, |     #  • a .local hostname to be resolved by mDNS, | ||||||
|     #  • a normal hostname to be resolved in DNS, or |     #  • a normal hostname to be resolved in DNS, or | ||||||
|     #  • A URL from which we should extract the hostname. |     #  • A URL from which we should extract the hostname. | ||||||
|     # |  | ||||||
|     # In each of the first three cases, we end up with IP addresses in |  | ||||||
|     # string form which need to be converted to a 5-tuple to be used |  | ||||||
|     # for the socket connection attempt. The easiest way to construct |  | ||||||
|     # those is to pass the IP address string to getaddrinfo(). Which, |  | ||||||
|     # coincidentally, is how we do hostname lookups in the other cases |  | ||||||
|     # too. So first build a list which contains either IP addresses or |  | ||||||
|     # a single hostname, then call getaddrinfo() on each element of |  | ||||||
|     # that list. |  | ||||||
|  |  | ||||||
|     errs = [] |     hosts: list[str] | ||||||
|     if isinstance(host, list): |     if isinstance(host, list): | ||||||
|         addr_list = host |         hosts = host | ||||||
|     elif is_ip_address(host): |  | ||||||
|         addr_list = [host] |  | ||||||
|     else: |     else: | ||||||
|         url = urlparse(host) |         if not is_ip_address(host): | ||||||
|         if url.scheme != "": |             url = urlparse(host) | ||||||
|             host = url.hostname |             if url.scheme != "": | ||||||
|  |                 host = url.hostname | ||||||
|  |         hosts = [host] | ||||||
|  |  | ||||||
|         addr_list = [] |     res: list[AddrInfo] = [] | ||||||
|         if host.endswith(".local"): |     if all(is_ip_address(h) for h in hosts): | ||||||
|  |         # Fast path: all are IP addresses, use socket.getaddrinfo with AI_NUMERICHOST | ||||||
|  |         for addr in hosts: | ||||||
|             try: |             try: | ||||||
|                 _LOGGER.info("Resolving IP address of %s in mDNS", host) |                 res += socket.getaddrinfo( | ||||||
|                 addr_list = _resolve_with_zeroconf(host) |                     addr, port, proto=socket.IPPROTO_TCP, flags=socket.AI_NUMERICHOST | ||||||
|             except EsphomeError as err: |                 ) | ||||||
|                 errs.append(str(err)) |             except OSError: | ||||||
|  |                 _LOGGER.debug("Failed to parse IP address '%s'", addr) | ||||||
|  |         # Sort by preference | ||||||
|  |         res.sort(key=addr_preference_) | ||||||
|  |         return res | ||||||
|  |  | ||||||
|         # If not mDNS, or if mDNS failed, use normal DNS |     from esphome.resolver import AsyncResolver | ||||||
|         if not addr_list: |  | ||||||
|             addr_list = [host] |  | ||||||
|  |  | ||||||
|     # Now we have a list containing either IP addresses or a hostname |     resolver = AsyncResolver(hosts, port) | ||||||
|     res = [] |     addr_infos = resolver.resolve() | ||||||
|     for addr in addr_list: |     # Convert aioesphomeapi AddrInfo to our format | ||||||
|         if not is_ip_address(addr): |     for addr_info in addr_infos: | ||||||
|             _LOGGER.info("Resolving IP address of %s", host) |         sockaddr = addr_info.sockaddr | ||||||
|         try: |         if addr_info.family == socket.AF_INET6: | ||||||
|             r = socket.getaddrinfo(addr, port, proto=socket.IPPROTO_TCP) |             # IPv6 | ||||||
|         except OSError as err: |             sockaddr_tuple = ( | ||||||
|             errs.append(str(err)) |                 sockaddr.address, | ||||||
|             raise EsphomeError( |                 sockaddr.port, | ||||||
|                 f"Error resolving IP address: {', '.join(errs)}" |                 sockaddr.flowinfo, | ||||||
|             ) from err |                 sockaddr.scope_id, | ||||||
|  |             ) | ||||||
|  |         else: | ||||||
|  |             # IPv4 | ||||||
|  |             sockaddr_tuple = (sockaddr.address, sockaddr.port) | ||||||
|  |  | ||||||
|         res = res + r |         res.append( | ||||||
|  |             ( | ||||||
|  |                 addr_info.family, | ||||||
|  |                 addr_info.type, | ||||||
|  |                 addr_info.proto, | ||||||
|  |                 "",  # canonname | ||||||
|  |                 sockaddr_tuple, | ||||||
|  |             ) | ||||||
|  |         ) | ||||||
|  |  | ||||||
|     # Zeroconf tends to give us link-local IPv6 addresses without specifying |     # Sort by preference | ||||||
|     # the link. Put those last in the list to be attempted. |  | ||||||
|     res.sort(key=addr_preference_) |     res.sort(key=addr_preference_) | ||||||
|     return res |     return res | ||||||
|  |  | ||||||
| @@ -262,15 +255,7 @@ def sort_ip_addresses(address_list: list[str]) -> list[str]: | |||||||
|  |  | ||||||
|     # First "resolve" all the IP addresses to getaddrinfo() tuples of the form |     # First "resolve" all the IP addresses to getaddrinfo() tuples of the form | ||||||
|     # (family, type, proto, canonname, sockaddr) |     # (family, type, proto, canonname, sockaddr) | ||||||
|     res: list[ |     res: list[AddrInfo] = [] | ||||||
|         tuple[ |  | ||||||
|             int, |  | ||||||
|             int, |  | ||||||
|             int, |  | ||||||
|             str | None, |  | ||||||
|             tuple[str, int] | tuple[str, int, int, int], |  | ||||||
|         ] |  | ||||||
|     ] = [] |  | ||||||
|     for addr in address_list: |     for addr in address_list: | ||||||
|         # This should always work as these are supposed to be IP addresses |         # This should always work as these are supposed to be IP addresses | ||||||
|         try: |         try: | ||||||
|   | |||||||
esphome/resolver.py (new file, 67 lines)
							| @@ -0,0 +1,67 @@ | |||||||
|  | """DNS resolver for ESPHome using aioesphomeapi.""" | ||||||
|  |  | ||||||
|  | from __future__ import annotations | ||||||
|  |  | ||||||
|  | import asyncio | ||||||
|  | import threading | ||||||
|  |  | ||||||
|  | from aioesphomeapi.core import ResolveAPIError, ResolveTimeoutAPIError | ||||||
|  | import aioesphomeapi.host_resolver as hr | ||||||
|  |  | ||||||
|  | from esphome.core import EsphomeError | ||||||
|  |  | ||||||
|  | RESOLVE_TIMEOUT = 10.0  # seconds | ||||||
|  |  | ||||||
|  |  | ||||||
|  | class AsyncResolver(threading.Thread): | ||||||
|  |     """Resolver using aioesphomeapi that runs in a thread for faster results. | ||||||
|  |  | ||||||
|  |     This resolver uses aioesphomeapi's async_resolve_host to handle DNS resolution, | ||||||
|  |     including proper .local domain fallback. Running in a thread allows us to get | ||||||
|  |     the result immediately without waiting for asyncio.run() to complete its | ||||||
|  |     cleanup cycle, which can take significant time. | ||||||
|  |     """ | ||||||
|  |  | ||||||
|  |     def __init__(self, hosts: list[str], port: int) -> None: | ||||||
|  |         """Initialize the resolver.""" | ||||||
|  |         super().__init__(daemon=True) | ||||||
|  |         self.hosts = hosts | ||||||
|  |         self.port = port | ||||||
|  |         self.result: list[hr.AddrInfo] | None = None | ||||||
|  |         self.exception: Exception | None = None | ||||||
|  |         self.event = threading.Event() | ||||||
|  |  | ||||||
|  |     async def _resolve(self) -> None: | ||||||
|  |         """Resolve hostnames to IP addresses.""" | ||||||
|  |         try: | ||||||
|  |             self.result = await hr.async_resolve_host( | ||||||
|  |                 self.hosts, self.port, timeout=RESOLVE_TIMEOUT | ||||||
|  |             ) | ||||||
|  |         except Exception as e:  # pylint: disable=broad-except | ||||||
|  |             # We need to catch all exceptions to ensure the event is set | ||||||
|  |             # Otherwise the thread could hang forever | ||||||
|  |             self.exception = e | ||||||
|  |         finally: | ||||||
|  |             self.event.set() | ||||||
|  |  | ||||||
|  |     def run(self) -> None: | ||||||
|  |         """Run the DNS resolution.""" | ||||||
|  |         asyncio.run(self._resolve()) | ||||||
|  |  | ||||||
|  |     def resolve(self) -> list[hr.AddrInfo]: | ||||||
|  |         """Start the thread and wait for the result.""" | ||||||
|  |         self.start() | ||||||
|  |  | ||||||
|  |         if not self.event.wait( | ||||||
|  |             timeout=RESOLVE_TIMEOUT + 1.0 | ||||||
|  |         ):  # Give it 1 second more than the resolver timeout | ||||||
|  |             raise EsphomeError("Timeout resolving IP address") | ||||||
|  |  | ||||||
|  |         if exc := self.exception: | ||||||
|  |             if isinstance(exc, ResolveTimeoutAPIError): | ||||||
|  |                 raise EsphomeError(f"Timeout resolving IP address: {exc}") from exc | ||||||
|  |             if isinstance(exc, ResolveAPIError): | ||||||
|  |                 raise EsphomeError(f"Error resolving IP address: {exc}") from exc | ||||||
|  |             raise exc | ||||||
|  |  | ||||||
|  |         return self.result | ||||||
| @@ -12,8 +12,8 @@ platformio==6.1.18  # When updating platformio, also update /docker/Dockerfile | |||||||
| esptool==5.0.2 | esptool==5.0.2 | ||||||
| click==8.1.7 | click==8.1.7 | ||||||
| esphome-dashboard==20250904.0 | esphome-dashboard==20250904.0 | ||||||
| aioesphomeapi==40.0.1 | aioesphomeapi==40.0.2 | ||||||
| zeroconf==0.147.0 | zeroconf==0.147.2 | ||||||
| puremagic==1.30 | puremagic==1.30 | ||||||
| ruamel.yaml==0.18.15 # dashboard_import | ruamel.yaml==0.18.15 # dashboard_import | ||||||
| esphome-glyphsets==0.2.0 | esphome-glyphsets==0.2.0 | ||||||
|   | |||||||
| @@ -6,7 +6,7 @@ pre-commit | |||||||
|  |  | ||||||
| # Unit tests | # Unit tests | ||||||
| pytest==8.4.2 | pytest==8.4.2 | ||||||
| pytest-cov==6.2.1 | pytest-cov==6.3.0 | ||||||
| pytest-mock==3.15.0 | pytest-mock==3.15.0 | ||||||
| pytest-asyncio==1.1.0 | pytest-asyncio==1.1.0 | ||||||
| pytest-xdist==3.8.0 | pytest-xdist==3.8.0 | ||||||
|   | |||||||
| @@ -27,11 +27,13 @@ void GPIOExpanderTestComponent::setup() { | |||||||
|  |  | ||||||
| bool GPIOExpanderTestComponent::digital_read_hw(uint8_t pin) { | bool GPIOExpanderTestComponent::digital_read_hw(uint8_t pin) { | ||||||
|   ESP_LOGD(TAG, "digital_read_hw pin=%d", pin); |   ESP_LOGD(TAG, "digital_read_hw pin=%d", pin); | ||||||
|  |   // Return true to indicate successful read operation | ||||||
|   return true; |   return true; | ||||||
| } | } | ||||||
|  |  | ||||||
| bool GPIOExpanderTestComponent::digital_read_cache(uint8_t pin) { | bool GPIOExpanderTestComponent::digital_read_cache(uint8_t pin) { | ||||||
|   ESP_LOGD(TAG, "digital_read_cache pin=%d", pin); |   ESP_LOGD(TAG, "digital_read_cache pin=%d", pin); | ||||||
|  |   // Return the pin state (always HIGH for testing) | ||||||
|   return true; |   return true; | ||||||
| } | } | ||||||
|  |  | ||||||
|   | |||||||
| @@ -0,0 +1,24 @@ | |||||||
|  | import esphome.codegen as cg | ||||||
|  | import esphome.config_validation as cv | ||||||
|  | from esphome.const import CONF_ID | ||||||
|  |  | ||||||
|  | AUTO_LOAD = ["gpio_expander"] | ||||||
|  |  | ||||||
|  | gpio_expander_test_component_uint16_ns = cg.esphome_ns.namespace( | ||||||
|  |     "gpio_expander_test_component_uint16" | ||||||
|  | ) | ||||||
|  |  | ||||||
|  | GPIOExpanderTestUint16Component = gpio_expander_test_component_uint16_ns.class_( | ||||||
|  |     "GPIOExpanderTestUint16Component", cg.Component | ||||||
|  | ) | ||||||
|  |  | ||||||
|  | CONFIG_SCHEMA = cv.Schema( | ||||||
|  |     { | ||||||
|  |         cv.GenerateID(): cv.declare_id(GPIOExpanderTestUint16Component), | ||||||
|  |     } | ||||||
|  | ).extend(cv.COMPONENT_SCHEMA) | ||||||
|  |  | ||||||
|  |  | ||||||
|  | async def to_code(config): | ||||||
|  |     var = cg.new_Pvariable(config[CONF_ID]) | ||||||
|  |     await cg.register_component(var, config) | ||||||
| @@ -0,0 +1,43 @@ | |||||||
|  | #include "gpio_expander_test_component_uint16.h" | ||||||
|  | #include "esphome/core/log.h" | ||||||
|  |  | ||||||
|  | namespace esphome::gpio_expander_test_component_uint16 { | ||||||
|  |  | ||||||
|  | static const char *const TAG = "gpio_expander_test_uint16"; | ||||||
|  |  | ||||||
|  | void GPIOExpanderTestUint16Component::setup() { | ||||||
|  |   ESP_LOGD(TAG, "Testing uint16_t bank (single 16-pin bank)"); | ||||||
|  |  | ||||||
|  |   // Test reading all 16 pins - first should trigger hw read, rest use cache | ||||||
|  |   for (uint8_t pin = 0; pin < 16; pin++) { | ||||||
|  |     this->digital_read(pin); | ||||||
|  |   } | ||||||
|  |  | ||||||
|  |   // Reset cache and test specific reads | ||||||
|  |   ESP_LOGD(TAG, "Resetting cache for uint16_t test"); | ||||||
|  |   this->reset_pin_cache_(); | ||||||
|  |  | ||||||
|  |   // First read triggers hw for entire bank | ||||||
|  |   this->digital_read(5); | ||||||
|  |   // These should all use cache since they're in the same bank | ||||||
|  |   this->digital_read(10); | ||||||
|  |   this->digital_read(15); | ||||||
|  |   this->digital_read(0); | ||||||
|  |  | ||||||
|  |   ESP_LOGD(TAG, "DONE_UINT16"); | ||||||
|  | } | ||||||
|  |  | ||||||
|  | bool GPIOExpanderTestUint16Component::digital_read_hw(uint8_t pin) { | ||||||
|  |   ESP_LOGD(TAG, "uint16_digital_read_hw pin=%d", pin); | ||||||
|  |   // In a real component, this would read from I2C/SPI into internal state | ||||||
|  |   // For testing, we just return true to indicate successful read | ||||||
|  |   return true; | ||||||
|  | } | ||||||
|  |  | ||||||
|  | bool GPIOExpanderTestUint16Component::digital_read_cache(uint8_t pin) { | ||||||
|  |   ESP_LOGD(TAG, "uint16_digital_read_cache pin=%d", pin); | ||||||
|  |   // Return the actual pin state from our test pattern | ||||||
|  |   return (this->test_state_ >> pin) & 1; | ||||||
|  | } | ||||||
|  |  | ||||||
|  | }  // namespace esphome::gpio_expander_test_component_uint16 | ||||||
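Unlike the uint8_t test component, `digital_read_cache()` here returns a real bit from the `0xAAAA` pattern, so the expected values for the pins read in `setup()` can be checked by hand:

```python
# 0xAAAA is 0b1010101010101010: odd pins read HIGH, even pins read LOW.
test_state = 0xAAAA
reads = {pin: (test_state >> pin) & 1 for pin in (0, 5, 10, 15)}
assert reads == {0: 0, 5: 1, 10: 0, 15: 1}
```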
| @@ -0,0 +1,23 @@ | |||||||
|  | #pragma once | ||||||
|  |  | ||||||
|  | #include "esphome/components/gpio_expander/cached_gpio.h" | ||||||
|  | #include "esphome/core/component.h" | ||||||
|  |  | ||||||
|  | namespace esphome::gpio_expander_test_component_uint16 { | ||||||
|  |  | ||||||
|  | // Test component using uint16_t bank type (single 16-pin bank) | ||||||
|  | class GPIOExpanderTestUint16Component : public Component, | ||||||
|  |                                         public esphome::gpio_expander::CachedGpioExpander<uint16_t, 16> { | ||||||
|  |  public: | ||||||
|  |   void setup() override; | ||||||
|  |  | ||||||
|  |  protected: | ||||||
|  |   bool digital_read_hw(uint8_t pin) override; | ||||||
|  |   bool digital_read_cache(uint8_t pin) override; | ||||||
|  |   void digital_write_hw(uint8_t pin, bool value) override {} | ||||||
|  |  | ||||||
|  |  private: | ||||||
|  |   uint16_t test_state_{0xAAAA};  // Test pattern: alternating bits | ||||||
|  | }; | ||||||
|  |  | ||||||
|  | }  // namespace esphome::gpio_expander_test_component_uint16 | ||||||
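The template arguments `<uint16_t, 16>` put all 16 pins in one bank, while the uint8_t variant splits them across two. A sketch of that arithmetic, assuming the bank index is simply the pin divided by the bank width (which is what the single-bank/multi-bank comments imply):

```python
# Assumed bank arithmetic: bank index = pin // bits-in-bank-type.
def bank_of(pin: int, bank_bits: int) -> int:
    return pin // bank_bits

assert {bank_of(p, 8) for p in range(16)} == {0, 1}  # uint8_t: two banks
assert {bank_of(p, 16) for p in range(16)} == {0}    # uint16_t: one bank
```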
| @@ -12,6 +12,10 @@ external_components: | |||||||
|   - source: |   - source: | ||||||
|       type: local |       type: local | ||||||
|       path: EXTERNAL_COMPONENT_PATH |       path: EXTERNAL_COMPONENT_PATH | ||||||
|     components: [gpio_expander_test_component] |     components: [gpio_expander_test_component, gpio_expander_test_component_uint16] | ||||||
|  |  | ||||||
|  | # Test with uint8_t (multiple banks) | ||||||
| gpio_expander_test_component: | gpio_expander_test_component: | ||||||
|  |  | ||||||
|  | # Test with uint16_t (single bank) | ||||||
|  | gpio_expander_test_component_uint16: | ||||||
|   | |||||||
tests/integration/fixtures/scheduler_pool.yaml (new file, 282 lines)
|  | esphome: | ||||||
|  |   name: scheduler-pool-test | ||||||
|  |   on_boot: | ||||||
|  |     priority: -100 | ||||||
|  |     then: | ||||||
|  |       - logger.log: "Starting scheduler pool tests" | ||||||
|  |   debug_scheduler: true  # Enable scheduler debug logging | ||||||
|  |  | ||||||
|  | host: | ||||||
|  | api: | ||||||
|  |   services: | ||||||
|  |     - service: run_phase_1 | ||||||
|  |       then: | ||||||
|  |         - script.execute: test_pool_recycling | ||||||
|  |     - service: run_phase_2 | ||||||
|  |       then: | ||||||
|  |         - script.execute: test_sensor_polling | ||||||
|  |     - service: run_phase_3 | ||||||
|  |       then: | ||||||
|  |         - script.execute: test_communication_patterns | ||||||
|  |     - service: run_phase_4 | ||||||
|  |       then: | ||||||
|  |         - script.execute: test_defer_patterns | ||||||
|  |     - service: run_phase_5 | ||||||
|  |       then: | ||||||
|  |         - script.execute: test_pool_reuse_verification | ||||||
|  |     - service: run_phase_6 | ||||||
|  |       then: | ||||||
|  |         - script.execute: test_full_pool_reuse | ||||||
|  |     - service: run_phase_7 | ||||||
|  |       then: | ||||||
|  |         - script.execute: test_same_defer_optimization | ||||||
|  |     - service: run_complete | ||||||
|  |       then: | ||||||
|  |         - script.execute: complete_test | ||||||
|  | logger: | ||||||
|  |   level: VERY_VERBOSE  # Need VERY_VERBOSE to see pool debug messages | ||||||
|  |  | ||||||
|  | globals: | ||||||
|  |   - id: create_count | ||||||
|  |     type: int | ||||||
|  |     initial_value: '0' | ||||||
|  |   - id: cancel_count | ||||||
|  |     type: int | ||||||
|  |     initial_value: '0' | ||||||
|  |   - id: interval_counter | ||||||
|  |     type: int | ||||||
|  |     initial_value: '0' | ||||||
|  |   - id: pool_test_done | ||||||
|  |     type: bool | ||||||
|  |     initial_value: 'false' | ||||||
|  |  | ||||||
|  | script: | ||||||
|  |   - id: test_pool_recycling | ||||||
|  |     then: | ||||||
|  |       - logger.log: "Testing scheduler pool recycling with realistic usage patterns" | ||||||
|  |       - lambda: |- | ||||||
|  |           auto *component = id(test_sensor); | ||||||
|  |  | ||||||
|  |           // Simulate realistic component behavior with timeouts that complete naturally | ||||||
|  |           ESP_LOGI("test", "Phase 1: Simulating normal component lifecycle"); | ||||||
|  |  | ||||||
|  |           // Sensor update timeouts (common pattern) | ||||||
|  |           App.scheduler.set_timeout(component, "sensor_init", 10, []() { | ||||||
|  |             ESP_LOGD("test", "Sensor initialized"); | ||||||
|  |             id(create_count)++; | ||||||
|  |           }); | ||||||
|  |  | ||||||
|  |           // Retry timeout (gets cancelled if successful) | ||||||
|  |           App.scheduler.set_timeout(component, "retry_timeout", 50, []() { | ||||||
|  |             ESP_LOGD("test", "Retry timeout executed"); | ||||||
|  |             id(create_count)++; | ||||||
|  |           }); | ||||||
|  |  | ||||||
|  |           // Simulate successful operation - cancel retry | ||||||
|  |           App.scheduler.set_timeout(component, "success_sim", 20, []() { | ||||||
|  |             ESP_LOGD("test", "Operation succeeded, cancelling retry"); | ||||||
|  |             App.scheduler.cancel_timeout(id(test_sensor), "retry_timeout"); | ||||||
|  |             id(cancel_count)++; | ||||||
|  |           }); | ||||||
|  |  | ||||||
|  |           id(create_count) += 3; | ||||||
|  |           ESP_LOGI("test", "Phase 1 complete"); | ||||||
|  |  | ||||||
|  |   - id: test_sensor_polling | ||||||
|  |     then: | ||||||
|  |       - lambda: |- | ||||||
|  |           // Simulate sensor polling pattern | ||||||
|  |           ESP_LOGI("test", "Phase 2: Simulating sensor polling patterns"); | ||||||
|  |           auto *component = id(test_sensor); | ||||||
|  |  | ||||||
|  |           // Multiple sensors with different update intervals | ||||||
|  |           // These should only allocate once and reuse the same item for each interval execution | ||||||
|  |           App.scheduler.set_interval(component, "temp_sensor", 10, []() { | ||||||
|  |             ESP_LOGD("test", "Temperature sensor update"); | ||||||
|  |             id(interval_counter)++; | ||||||
|  |             if (id(interval_counter) >= 3) { | ||||||
|  |               App.scheduler.cancel_interval(id(test_sensor), "temp_sensor"); | ||||||
|  |               ESP_LOGD("test", "Temperature sensor stopped"); | ||||||
|  |             } | ||||||
|  |           }); | ||||||
|  |  | ||||||
|  |           App.scheduler.set_interval(component, "humidity_sensor", 15, []() { | ||||||
|  |             ESP_LOGD("test", "Humidity sensor update"); | ||||||
|  |             id(interval_counter)++; | ||||||
|  |             if (id(interval_counter) >= 5) { | ||||||
|  |               App.scheduler.cancel_interval(id(test_sensor), "humidity_sensor"); | ||||||
|  |               ESP_LOGD("test", "Humidity sensor stopped"); | ||||||
|  |             } | ||||||
|  |           }); | ||||||
|  |  | ||||||
|  |           // Only 2 allocations for the intervals, no matter how many times they execute | ||||||
|  |           id(create_count) += 2; | ||||||
|  |           ESP_LOGD("test", "Created 2 intervals - they will reuse same items for each execution"); | ||||||
|  |           ESP_LOGI("test", "Phase 2 complete"); | ||||||
|  |  | ||||||
|  |   - id: test_communication_patterns | ||||||
|  |     then: | ||||||
|  |       - lambda: |- | ||||||
|  |           // Simulate communication patterns (WiFi/API reconnects, etc) | ||||||
|  |           ESP_LOGI("test", "Phase 3: Simulating communication patterns"); | ||||||
|  |           auto *component = id(test_sensor); | ||||||
|  |  | ||||||
|  |           // Connection timeout pattern | ||||||
|  |           App.scheduler.set_timeout(component, "connect_timeout", 200, []() { | ||||||
|  |             ESP_LOGD("test", "Connection timeout - would retry"); | ||||||
|  |             id(create_count)++; | ||||||
|  |  | ||||||
|  |             // Schedule retry | ||||||
|  |             App.scheduler.set_timeout(id(test_sensor), "connect_retry", 100, []() { | ||||||
|  |               ESP_LOGD("test", "Retrying connection"); | ||||||
|  |               id(create_count)++; | ||||||
|  |             }); | ||||||
|  |           }); | ||||||
|  |  | ||||||
|  |           // Heartbeat pattern | ||||||
|  |           App.scheduler.set_interval(component, "heartbeat", 50, []() { | ||||||
|  |             ESP_LOGD("test", "Heartbeat"); | ||||||
|  |             id(interval_counter)++; | ||||||
|  |             if (id(interval_counter) >= 10) { | ||||||
|  |               App.scheduler.cancel_interval(id(test_sensor), "heartbeat"); | ||||||
|  |               ESP_LOGD("test", "Heartbeat stopped"); | ||||||
|  |             } | ||||||
|  |           }); | ||||||
|  |  | ||||||
|  |           id(create_count) += 2; | ||||||
|  |           ESP_LOGI("test", "Phase 3 complete"); | ||||||
|  |  | ||||||
|  |   - id: test_defer_patterns | ||||||
|  |     then: | ||||||
|  |       - lambda: |- | ||||||
|  |           // Simulate defer patterns (state changes, async operations) | ||||||
|  |           ESP_LOGI("test", "Phase 4: Simulating heavy defer patterns like ratgdo"); | ||||||
|  |  | ||||||
|  |           auto *component = id(test_sensor); | ||||||
|  |  | ||||||
|  |           // Simulate a burst of defer operations like ratgdo does with state updates | ||||||
|  |           // These should execute immediately and recycle quickly to the pool | ||||||
|  |           for (int i = 0; i < 10; i++) { | ||||||
|  |             std::string defer_name = "defer_" + std::to_string(i); | ||||||
|  |             App.scheduler.set_timeout(component, defer_name, 0, [i]() { | ||||||
|  |               ESP_LOGD("test", "Defer %d executed", i); | ||||||
|  |             // Log a midpoint marker so pool recycling can be observed in the logs | ||||||
|  |               if (i == 5) { | ||||||
|  |                 ESP_LOGI("test", "Half of defers executed, checking pool status"); | ||||||
|  |               } | ||||||
|  |             }); | ||||||
|  |           } | ||||||
|  |  | ||||||
|  |           id(create_count) += 10; | ||||||
|  |           ESP_LOGD("test", "Created 10 defer operations (0ms timeouts)"); | ||||||
|  |  | ||||||
|  |           // Also create some named defers that might get replaced | ||||||
|  |           App.scheduler.set_timeout(component, "state_update", 0, []() { | ||||||
|  |             ESP_LOGD("test", "State update 1"); | ||||||
|  |           }); | ||||||
|  |  | ||||||
|  |           // Replace the same named defer (should cancel previous) | ||||||
|  |           App.scheduler.set_timeout(component, "state_update", 0, []() { | ||||||
|  |             ESP_LOGD("test", "State update 2 (replaced)"); | ||||||
|  |           }); | ||||||
|  |  | ||||||
|  |           id(create_count) += 2; | ||||||
|  |           id(cancel_count) += 1; // One cancelled due to replacement | ||||||
|  |  | ||||||
|  |           ESP_LOGI("test", "Phase 4 complete"); | ||||||
|  |  | ||||||
|  |   - id: test_pool_reuse_verification | ||||||
|  |     then: | ||||||
|  |       - lambda: |- | ||||||
|  |           ESP_LOGI("test", "Phase 5: Verifying pool reuse after everything settles"); | ||||||
|  |  | ||||||
|  |           // Cancel any remaining intervals | ||||||
|  |           auto *component = id(test_sensor); | ||||||
|  |           App.scheduler.cancel_interval(component, "temp_sensor"); | ||||||
|  |           App.scheduler.cancel_interval(component, "humidity_sensor"); | ||||||
|  |           App.scheduler.cancel_interval(component, "heartbeat"); | ||||||
|  |  | ||||||
|  |           ESP_LOGD("test", "Cancelled any remaining intervals"); | ||||||
|  |  | ||||||
|  |           // The pool should have items from completed timeouts in earlier phases. | ||||||
|  |           // Phase 1 had 3 timeouts that completed and were recycled. | ||||||
|  |           // Phase 3 had 1 timeout that completed and was recycled. | ||||||
|  |           // Phase 4 had 3 defers that completed and were recycled. | ||||||
|  |           // So we should have a decent pool size already from naturally completed items. | ||||||
|  |  | ||||||
|  |           // Now create 8 new timeouts - they should reuse from pool when available | ||||||
|  |           int reuse_test_count = 8; | ||||||
|  |  | ||||||
|  |           for (int i = 0; i < reuse_test_count; i++) { | ||||||
|  |             std::string name = "reuse_test_" + std::to_string(i); | ||||||
|  |             App.scheduler.set_timeout(component, name, 10 + i * 5, [i]() { | ||||||
|  |               ESP_LOGD("test", "Reuse test %d completed", i); | ||||||
|  |             }); | ||||||
|  |           } | ||||||
|  |  | ||||||
|  |           ESP_LOGI("test", "Created %d items for reuse verification", reuse_test_count); | ||||||
|  |           id(create_count) += reuse_test_count; | ||||||
|  |           ESP_LOGI("test", "Phase 5 complete"); | ||||||
|  |  | ||||||
|  |   - id: test_full_pool_reuse | ||||||
|  |     then: | ||||||
|  |       - lambda: |- | ||||||
|  |           ESP_LOGI("test", "Phase 6: Testing pool size limits after Phase 5 items complete"); | ||||||
|  |  | ||||||
|  |           // At this point, all Phase 5 timeouts should have completed and been recycled. | ||||||
|  |           // The pool should be at its maximum size (5). | ||||||
|  |           // Creating 10 new items tests that: | ||||||
|  |           // - First 5 items reuse from the pool | ||||||
|  |           // - Remaining 5 items allocate new (pool empty) | ||||||
|  |           // - Pool doesn't grow beyond MAX_POOL_SIZE of 5 | ||||||
|  |  | ||||||
|  |           auto *component = id(test_sensor); | ||||||
|  |           int full_reuse_count = 10; | ||||||
|  |  | ||||||
|  |           for (int i = 0; i < full_reuse_count; i++) { | ||||||
|  |             std::string name = "full_reuse_" + std::to_string(i); | ||||||
|  |             App.scheduler.set_timeout(component, name, 10 + i * 5, [i]() { | ||||||
|  |               ESP_LOGD("test", "Full reuse test %d completed", i); | ||||||
|  |             }); | ||||||
|  |           } | ||||||
|  |  | ||||||
|  |           ESP_LOGI("test", "Created %d items for full pool reuse verification", full_reuse_count); | ||||||
|  |           id(create_count) += full_reuse_count; | ||||||
|  |           ESP_LOGI("test", "Phase 6 complete"); | ||||||
|  |  | ||||||
|  |   - id: test_same_defer_optimization | ||||||
|  |     then: | ||||||
|  |       - lambda: |- | ||||||
|  |           ESP_LOGI("test", "Phase 7: Testing same-named defer optimization"); | ||||||
|  |  | ||||||
|  |           auto *component = id(test_sensor); | ||||||
|  |  | ||||||
|  |           // Create 10 defers with the same name - should optimize to update callback in-place | ||||||
|  |           // This pattern is common in components like ratgdo that repeatedly defer state updates | ||||||
|  |           for (int i = 0; i < 10; i++) { | ||||||
|  |             App.scheduler.set_timeout(component, "repeated_defer", 0, [i]() { | ||||||
|  |               ESP_LOGD("test", "Repeated defer executed with value: %d", i); | ||||||
|  |             }); | ||||||
|  |           } | ||||||
|  |  | ||||||
|  |           // Only the first should allocate, the rest should update in-place | ||||||
|  |           // We expect only 1 allocation for all 10 operations | ||||||
|  |           id(create_count) += 1;  // Only count 1 since others should be optimized | ||||||
|  |  | ||||||
|  |           ESP_LOGD("test", "Created 10 same-named defers (should only allocate once)"); | ||||||
|  |           ESP_LOGI("test", "Phase 7 complete"); | ||||||
|  |  | ||||||
|  |   - id: complete_test | ||||||
|  |     then: | ||||||
|  |       - lambda: |- | ||||||
|  |           ESP_LOGI("test", "Pool recycling test complete - created %d items, cancelled %d, intervals %d", | ||||||
|  |                    id(create_count), id(cancel_count), id(interval_counter)); | ||||||
|  |  | ||||||
|  | sensor: | ||||||
|  |   - platform: template | ||||||
|  |     name: Test Sensor | ||||||
|  |     id: test_sensor | ||||||
|  |     lambda: return 1.0; | ||||||
|  |     update_interval: never | ||||||
|  |  | ||||||
|  | # No interval - tests will be triggered from Python via API services | ||||||
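The phases above are built around a bounded free-list: completed or cancelled items are recycled, new operations reuse them, and the pool never grows past its limit. A minimal sketch of such a pool, assuming the MAX_POOL_SIZE of 5 stated in the Phase 6 comments (this is not the real scheduler code):

```python
# Bounded free-list sketch matching the log lines the integration test greps
# for: "Allocated new item", "Reused item from pool", "Recycled item to pool",
# "Pool full ... deleting item". MAX_POOL_SIZE = 5 is an assumption taken
# from the Phase 6 comments above.
MAX_POOL_SIZE = 5

class ItemPool:
    def __init__(self) -> None:
        self.free: list[object] = []
        self.stats = {"allocated": 0, "reused": 0, "recycled": 0, "dropped": 0}

    def acquire(self) -> object:
        if self.free:
            self.stats["reused"] += 1
            return self.free.pop()
        self.stats["allocated"] += 1
        return object()  # stand-in for a fresh scheduler item

    def release(self, item: object) -> None:
        if len(self.free) < MAX_POOL_SIZE:
            self.free.append(item)
            self.stats["recycled"] += 1
        else:
            self.stats["dropped"] += 1  # pool full: item is deleted


pool = ItemPool()
items = [pool.acquire() for _ in range(10)]  # empty pool: 10 fresh allocations
for item in items:
    pool.release(item)  # only 5 fit back in the pool
assert pool.stats == {"allocated": 10, "reused": 0, "recycled": 5, "dropped": 5}
```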
| @@ -30,9 +30,15 @@ async def test_gpio_expander_cache( | |||||||
|  |  | ||||||
|     logs_done = asyncio.Event() |     logs_done = asyncio.Event() | ||||||
|  |  | ||||||
|     # Patterns to match in logs |     # Patterns to match in logs - match any variation of digital_read | ||||||
|     digital_read_hw_pattern = re.compile(r"digital_read_hw pin=(\d+)") |     read_hw_pattern = re.compile(r"(?:uint16_)?digital_read_hw pin=(\d+)") | ||||||
|     digital_read_cache_pattern = re.compile(r"digital_read_cache pin=(\d+)") |     read_cache_pattern = re.compile(r"(?:uint16_)?digital_read_cache pin=(\d+)") | ||||||
|  |  | ||||||
|  |     # Keep specific patterns for building the expected order | ||||||
|  |     digital_read_hw_pattern = re.compile(r"^digital_read_hw pin=(\d+)") | ||||||
|  |     digital_read_cache_pattern = re.compile(r"^digital_read_cache pin=(\d+)") | ||||||
|  |     uint16_read_hw_pattern = re.compile(r"^uint16_digital_read_hw pin=(\d+)") | ||||||
|  |     uint16_read_cache_pattern = re.compile(r"^uint16_digital_read_cache pin=(\d+)") | ||||||
|  |  | ||||||
|     # ensure logs are in the expected order |     # ensure logs are in the expected order | ||||||
|     log_order = [ |     log_order = [ | ||||||
| @@ -59,6 +65,17 @@ async def test_gpio_expander_cache( | |||||||
|         (digital_read_cache_pattern, 14), |         (digital_read_cache_pattern, 14), | ||||||
|         (digital_read_hw_pattern, 14), |         (digital_read_hw_pattern, 14), | ||||||
|         (digital_read_cache_pattern, 14), |         (digital_read_cache_pattern, 14), | ||||||
|  |         # uint16_t component tests (single bank of 16 pins) | ||||||
|  |         (uint16_read_hw_pattern, 0),  # First pin triggers hw read | ||||||
|  |         [ | ||||||
|  |             (uint16_read_cache_pattern, i) for i in range(0, 16) | ||||||
|  |         ],  # All 16 pins return via cache | ||||||
|  |         # After cache reset | ||||||
|  |         (uint16_read_hw_pattern, 5),  # First read after reset triggers hw | ||||||
|  |         (uint16_read_cache_pattern, 5), | ||||||
|  |         (uint16_read_cache_pattern, 10),  # These use cache (same bank) | ||||||
|  |         (uint16_read_cache_pattern, 15), | ||||||
|  |         (uint16_read_cache_pattern, 0), | ||||||
|     ] |     ] | ||||||
|     # Flatten the log order for easier processing |     # Flatten the log order for easier processing | ||||||
|     log_order: list[tuple[re.Pattern, int]] = [ |     log_order: list[tuple[re.Pattern, int]] = [ | ||||||
| @@ -77,17 +94,22 @@ async def test_gpio_expander_cache( | |||||||
|  |  | ||||||
|         clean_line = re.sub(r"\x1b\[[0-9;]*m", "", line) |         clean_line = re.sub(r"\x1b\[[0-9;]*m", "", line) | ||||||
|  |  | ||||||
|         if "digital_read" in clean_line: |         # Extract just the log message part (after the log level) | ||||||
|  |         msg = clean_line.split(": ", 1)[-1] if ": " in clean_line else clean_line | ||||||
|  |  | ||||||
|  |         # Check if this line contains a read operation we're tracking | ||||||
|  |         if read_hw_pattern.search(msg) or read_cache_pattern.search(msg): | ||||||
|             if index >= len(log_order): |             if index >= len(log_order): | ||||||
|                 print(f"Received unexpected log line: {clean_line}") |                 print(f"Received unexpected log line: {msg}") | ||||||
|                 logs_done.set() |                 logs_done.set() | ||||||
|                 return |                 return | ||||||
|  |  | ||||||
|             pattern, expected_pin = log_order[index] |             pattern, expected_pin = log_order[index] | ||||||
|             match = pattern.search(clean_line) |             match = pattern.search(msg) | ||||||
|  |  | ||||||
|             if not match: |             if not match: | ||||||
|                 print(f"Log line did not match next expected pattern: {clean_line}") |                 print(f"Log line did not match next expected pattern: {msg}") | ||||||
|  |                 print(f"Expected pattern: {pattern.pattern}") | ||||||
|                 logs_done.set() |                 logs_done.set() | ||||||
|                 return |                 return | ||||||
|  |  | ||||||
| @@ -99,9 +121,10 @@ async def test_gpio_expander_cache( | |||||||
|  |  | ||||||
|             index += 1 |             index += 1 | ||||||
|  |  | ||||||
|         elif "DONE" in clean_line: |         elif "DONE_UINT16" in clean_line: | ||||||
|             # Check if we reached the end of the expected log entries |             # uint16 component is done, check if we've seen all expected logs | ||||||
|             logs_done.set() |             if index == len(log_order): | ||||||
|  |                 logs_done.set() | ||||||
|  |  | ||||||
|     # Run with log monitoring |     # Run with log monitoring | ||||||
|     async with ( |     async with ( | ||||||
|   | |||||||
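`log_order` now mixes bare `(pattern, pin)` tuples with list comprehensions, which is why the test flattens it before matching. A standalone illustration of that idiom:

```python
# Flattening a list whose entries are either a tuple or a list of tuples,
# as the updated test does before walking the expected log order.
import re

hw = re.compile(r"digital_read_hw pin=(\d+)")
cache = re.compile(r"digital_read_cache pin=(\d+)")

log_order = [
    (hw, 0),
    [(cache, i) for i in range(3)],  # nested entry from a comprehension
]
flat = [
    item
    for entry in log_order
    for item in (entry if isinstance(entry, list) else [entry])
]
assert [pin for _, pin in flat] == [0, 0, 1, 2]
```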
tests/integration/test_scheduler_pool.py (new file, 209 lines)
							| @@ -0,0 +1,209 @@ | |||||||
|  | """Integration test for scheduler memory pool functionality.""" | ||||||
|  |  | ||||||
|  | from __future__ import annotations | ||||||
|  |  | ||||||
|  | import asyncio | ||||||
|  | import re | ||||||
|  |  | ||||||
|  | import pytest | ||||||
|  |  | ||||||
|  | from .types import APIClientConnectedFactory, RunCompiledFunction | ||||||
|  |  | ||||||
|  |  | ||||||
|  | @pytest.mark.asyncio | ||||||
|  | async def test_scheduler_pool( | ||||||
|  |     yaml_config: str, | ||||||
|  |     run_compiled: RunCompiledFunction, | ||||||
|  |     api_client_connected: APIClientConnectedFactory, | ||||||
|  | ) -> None: | ||||||
|  |     """Test that the scheduler memory pool is working correctly with realistic usage. | ||||||
|  |  | ||||||
|  |     This test simulates real-world scheduler usage patterns and verifies that: | ||||||
|  |     1. Items are recycled to the pool when timeouts complete naturally | ||||||
|  |     2. Items are recycled when intervals/timeouts are cancelled | ||||||
|  |     3. Items are reused from the pool for new scheduler operations | ||||||
|  |     4. The pool grows gradually based on actual usage patterns | ||||||
|  |     5. Pool operations are logged correctly with debug scheduler enabled | ||||||
|  |     """ | ||||||
|  |     # Track log messages to verify pool behavior | ||||||
|  |     log_lines: list[str] = [] | ||||||
|  |     pool_reuse_count = 0 | ||||||
|  |     pool_recycle_count = 0 | ||||||
|  |     pool_full_count = 0 | ||||||
|  |     new_alloc_count = 0 | ||||||
|  |  | ||||||
|  |     # Patterns to match pool operations | ||||||
|  |     reuse_pattern = re.compile(r"Reused item from pool \(pool size now: (\d+)\)") | ||||||
|  |     recycle_pattern = re.compile(r"Recycled item to pool \(pool size now: (\d+)\)") | ||||||
|  |     pool_full_pattern = re.compile(r"Pool full \(size: (\d+)\), deleting item") | ||||||
|  |     new_alloc_pattern = re.compile(r"Allocated new item \(pool empty\)") | ||||||
|  |  | ||||||
|  |     # Futures to track when test phases complete | ||||||
|  |     loop = asyncio.get_running_loop() | ||||||
|  |     test_complete_future: asyncio.Future[bool] = loop.create_future() | ||||||
|  |     phase_futures = { | ||||||
|  |         1: loop.create_future(), | ||||||
|  |         2: loop.create_future(), | ||||||
|  |         3: loop.create_future(), | ||||||
|  |         4: loop.create_future(), | ||||||
|  |         5: loop.create_future(), | ||||||
|  |         6: loop.create_future(), | ||||||
|  |         7: loop.create_future(), | ||||||
|  |     } | ||||||
|  |  | ||||||
|  |     def check_output(line: str) -> None: | ||||||
|  |         """Check log output for pool operations and phase completion.""" | ||||||
|  |         nonlocal pool_reuse_count, pool_recycle_count, pool_full_count, new_alloc_count | ||||||
|  |         log_lines.append(line) | ||||||
|  |  | ||||||
|  |         # Track pool operations | ||||||
|  |         if reuse_pattern.search(line): | ||||||
|  |             pool_reuse_count += 1 | ||||||
|  |  | ||||||
|  |         elif recycle_pattern.search(line): | ||||||
|  |             pool_recycle_count += 1 | ||||||
|  |  | ||||||
|  |         elif pool_full_pattern.search(line): | ||||||
|  |             pool_full_count += 1 | ||||||
|  |  | ||||||
|  |         elif new_alloc_pattern.search(line): | ||||||
|  |             new_alloc_count += 1 | ||||||
|  |  | ||||||
|  |         # Track phase completion | ||||||
|  |         for phase_num in range(1, 8): | ||||||
|  |             if ( | ||||||
|  |                 f"Phase {phase_num} complete" in line | ||||||
|  |                 and phase_num in phase_futures | ||||||
|  |                 and not phase_futures[phase_num].done() | ||||||
|  |             ): | ||||||
|  |                 phase_futures[phase_num].set_result(True) | ||||||
|  |  | ||||||
|  |         # Check for test completion | ||||||
|  |         if "Pool recycling test complete" in line and not test_complete_future.done(): | ||||||
|  |             test_complete_future.set_result(True) | ||||||
|  |  | ||||||
|  |     # Run the test with log monitoring | ||||||
|  |     async with ( | ||||||
|  |         run_compiled(yaml_config, line_callback=check_output), | ||||||
|  |         api_client_connected() as client, | ||||||
|  |     ): | ||||||
|  |         # Verify device is running | ||||||
|  |         device_info = await client.device_info() | ||||||
|  |         assert device_info is not None | ||||||
|  |         assert device_info.name == "scheduler-pool-test" | ||||||
|  |  | ||||||
|  |         # Get list of services | ||||||
|  |         entities, services = await client.list_entities_services() | ||||||
|  |         service_names = {s.name for s in services} | ||||||
|  |  | ||||||
|  |         # Verify all test services are available | ||||||
|  |         expected_services = { | ||||||
|  |             "run_phase_1", | ||||||
|  |             "run_phase_2", | ||||||
|  |             "run_phase_3", | ||||||
|  |             "run_phase_4", | ||||||
|  |             "run_phase_5", | ||||||
|  |             "run_phase_6", | ||||||
|  |             "run_phase_7", | ||||||
|  |             "run_complete", | ||||||
|  |         } | ||||||
|  |         assert expected_services.issubset(service_names), ( | ||||||
|  |             f"Missing services: {expected_services - service_names}" | ||||||
|  |         ) | ||||||
|  |  | ||||||
|  |         # Get service objects | ||||||
|  |         phase_services = { | ||||||
|  |             num: next(s for s in services if s.name == f"run_phase_{num}") | ||||||
|  |             for num in range(1, 8) | ||||||
|  |         } | ||||||
|  |         complete_service = next(s for s in services if s.name == "run_complete") | ||||||
|  |  | ||||||
|  |         try: | ||||||
|  |             # Phase 1: Component lifecycle | ||||||
|  |             client.execute_service(phase_services[1], {}) | ||||||
|  |             await asyncio.wait_for(phase_futures[1], timeout=1.0) | ||||||
|  |             await asyncio.sleep(0.05)  # Let timeouts complete | ||||||
|  |  | ||||||
|  |             # Phase 2: Sensor polling | ||||||
|  |             client.execute_service(phase_services[2], {}) | ||||||
|  |             await asyncio.wait_for(phase_futures[2], timeout=1.0) | ||||||
|  |             await asyncio.sleep(0.1)  # Let intervals run a bit | ||||||
|  |  | ||||||
|  |             # Phase 3: Communication patterns | ||||||
|  |             client.execute_service(phase_services[3], {}) | ||||||
|  |             await asyncio.wait_for(phase_futures[3], timeout=1.0) | ||||||
|  |             await asyncio.sleep(0.1)  # Let heartbeat run | ||||||
|  |  | ||||||
|  |             # Phase 4: Defer patterns | ||||||
|  |             client.execute_service(phase_services[4], {}) | ||||||
|  |             await asyncio.wait_for(phase_futures[4], timeout=1.0) | ||||||
|  |             await asyncio.sleep(0.2)  # Let everything settle and recycle | ||||||
|  |  | ||||||
|  |             # Phase 5: Pool reuse verification | ||||||
|  |             client.execute_service(phase_services[5], {}) | ||||||
|  |             await asyncio.wait_for(phase_futures[5], timeout=1.0) | ||||||
|  |             await asyncio.sleep(0.1)  # Let Phase 5 timeouts complete and recycle | ||||||
|  |  | ||||||
|  |             # Phase 6: Full pool reuse verification | ||||||
|  |             client.execute_service(phase_services[6], {}) | ||||||
|  |             await asyncio.wait_for(phase_futures[6], timeout=1.0) | ||||||
|  |             await asyncio.sleep(0.1)  # Let Phase 6 timeouts complete | ||||||
|  |  | ||||||
|  |             # Phase 7: Same-named defer optimization | ||||||
|  |             client.execute_service(phase_services[7], {}) | ||||||
|  |             await asyncio.wait_for(phase_futures[7], timeout=1.0) | ||||||
|  |             await asyncio.sleep(0.05)  # Let the single defer execute | ||||||
|  |  | ||||||
|  |             # Complete test | ||||||
|  |             client.execute_service(complete_service, {}) | ||||||
|  |             await asyncio.wait_for(test_complete_future, timeout=0.5) | ||||||
|  |  | ||||||
|  |         except TimeoutError as e: | ||||||
|  |             # Print debug info if test times out | ||||||
|  |             recent_logs = "\n".join(log_lines[-30:]) | ||||||
|  |             phases_completed = [num for num, fut in phase_futures.items() if fut.done()] | ||||||
|  |             pytest.fail( | ||||||
|  |                 f"Test timed out waiting for phase/completion. Error: {e}\n" | ||||||
|  |                 f"  Phases completed: {phases_completed}\n" | ||||||
|  |                 f"  Pool stats:\n" | ||||||
|  |                 f"    Reuse count: {pool_reuse_count}\n" | ||||||
|  |                 f"    Recycle count: {pool_recycle_count}\n" | ||||||
|  |                 f"    Pool full count: {pool_full_count}\n" | ||||||
|  |                 f"    New alloc count: {new_alloc_count}\n" | ||||||
|  |                 f"Recent logs:\n{recent_logs}" | ||||||
|  |             ) | ||||||
|  |  | ||||||
|  |     # Verify all test phases ran | ||||||
|  |     for phase_num in range(1, 8): | ||||||
|  |         assert phase_futures[phase_num].done(), f"Phase {phase_num} did not complete" | ||||||
|  |  | ||||||
|  |     # Verify pool behavior | ||||||
|  |     assert pool_recycle_count > 0, "Should have recycled items to pool" | ||||||
|  |  | ||||||
|  |     # Check pool metrics | ||||||
|  |     if pool_recycle_count > 0: | ||||||
|  |         max_pool_size = 0 | ||||||
|  |         for line in log_lines: | ||||||
|  |             if match := recycle_pattern.search(line): | ||||||
|  |                 size = int(match.group(1)) | ||||||
|  |                 max_pool_size = max(max_pool_size, size) | ||||||
|  |  | ||||||
|  |         # Pool can grow up to its maximum of 5 | ||||||
|  |         assert max_pool_size <= 5, f"Pool grew beyond maximum ({max_pool_size})" | ||||||
|  |  | ||||||
|  |     # Log summary for debugging | ||||||
|  |     print("\nScheduler Pool Test Summary (Python Orchestrated):") | ||||||
|  |     print(f"  Items recycled to pool: {pool_recycle_count}") | ||||||
|  |     print(f"  Items reused from pool: {pool_reuse_count}") | ||||||
|  |     print(f"  Pool full events: {pool_full_count}") | ||||||
|  |     print(f"  New allocations: {new_alloc_count}") | ||||||
|  |     print("  All phases completed successfully") | ||||||
|  |  | ||||||
|  |     # Verify reuse happened | ||||||
|  |     if pool_reuse_count == 0 and pool_recycle_count > 3: | ||||||
|  |         pytest.fail("Pool had items recycled but none were reused") | ||||||
|  |  | ||||||
|  |     # Success - pool is working | ||||||
|  |     assert pool_recycle_count > 0 or new_alloc_count < 15, ( | ||||||
|  |         "Pool should either recycle items or limit new allocations" | ||||||
|  |     ) | ||||||
| @@ -1,8 +1,14 @@ | |||||||
|  | import logging | ||||||
|  | import socket | ||||||
|  | from unittest.mock import patch | ||||||
|  |  | ||||||
|  | from aioesphomeapi.host_resolver import AddrInfo, IPv4Sockaddr, IPv6Sockaddr | ||||||
| from hypothesis import given | from hypothesis import given | ||||||
| from hypothesis.strategies import ip_addresses | from hypothesis.strategies import ip_addresses | ||||||
| import pytest | import pytest | ||||||
|  |  | ||||||
| from esphome import helpers | from esphome import helpers | ||||||
|  | from esphome.core import EsphomeError | ||||||
|  |  | ||||||
|  |  | ||||||
| @pytest.mark.parametrize( | @pytest.mark.parametrize( | ||||||
| @@ -277,3 +283,314 @@ def test_sort_ip_addresses(text: list[str], expected: list[str]) -> None: | |||||||
|     actual = helpers.sort_ip_addresses(text) |     actual = helpers.sort_ip_addresses(text) | ||||||
|  |  | ||||||
|     assert actual == expected |     assert actual == expected | ||||||
|  |  | ||||||
|  |  | ||||||
|  | # DNS resolution tests | ||||||
|  | def test_is_ip_address_ipv4() -> None: | ||||||
|  |     """Test is_ip_address with IPv4 addresses.""" | ||||||
|  |     assert helpers.is_ip_address("192.168.1.1") is True | ||||||
|  |     assert helpers.is_ip_address("127.0.0.1") is True | ||||||
|  |     assert helpers.is_ip_address("255.255.255.255") is True | ||||||
|  |     assert helpers.is_ip_address("0.0.0.0") is True | ||||||
|  |  | ||||||
|  |  | ||||||
|  | def test_is_ip_address_ipv6() -> None: | ||||||
|  |     """Test is_ip_address with IPv6 addresses.""" | ||||||
|  |     assert helpers.is_ip_address("::1") is True | ||||||
|  |     assert helpers.is_ip_address("2001:db8::1") is True | ||||||
|  |     assert helpers.is_ip_address("fe80::1") is True | ||||||
|  |     assert helpers.is_ip_address("::") is True | ||||||
|  |  | ||||||
|  |  | ||||||
|  | def test_is_ip_address_invalid() -> None: | ||||||
|  |     """Test is_ip_address with non-IP strings.""" | ||||||
|  |     assert helpers.is_ip_address("hostname") is False | ||||||
|  |     assert helpers.is_ip_address("hostname.local") is False | ||||||
|  |     assert helpers.is_ip_address("256.256.256.256") is False | ||||||
|  |     assert helpers.is_ip_address("192.168.1") is False | ||||||
|  |     assert helpers.is_ip_address("") is False | ||||||
|  |  | ||||||
|  |  | ||||||
|  | def test_resolve_ip_address_single_ipv4() -> None: | ||||||
|  |     """Test resolving a single IPv4 address (fast path).""" | ||||||
|  |     result = helpers.resolve_ip_address("192.168.1.100", 6053) | ||||||
|  |  | ||||||
|  |     assert len(result) == 1 | ||||||
|  |     assert result[0][0] == socket.AF_INET  # family | ||||||
|  |     assert result[0][1] in ( | ||||||
|  |         0, | ||||||
|  |         socket.SOCK_STREAM, | ||||||
|  |     )  # type (0 on Windows with AI_NUMERICHOST) | ||||||
|  |     assert result[0][2] in ( | ||||||
|  |         0, | ||||||
|  |         socket.IPPROTO_TCP, | ||||||
|  |     )  # proto (0 on Windows with AI_NUMERICHOST) | ||||||
|  |     assert result[0][3] == ""  # canonname | ||||||
|  |     assert result[0][4] == ("192.168.1.100", 6053)  # sockaddr | ||||||
|  |  | ||||||
|  |  | ||||||
|  | def test_resolve_ip_address_single_ipv6() -> None: | ||||||
|  |     """Test resolving a single IPv6 address (fast path).""" | ||||||
|  |     result = helpers.resolve_ip_address("::1", 6053) | ||||||
|  |  | ||||||
|  |     assert len(result) == 1 | ||||||
|  |     assert result[0][0] == socket.AF_INET6  # family | ||||||
|  |     assert result[0][1] in ( | ||||||
|  |         0, | ||||||
|  |         socket.SOCK_STREAM, | ||||||
|  |     )  # type (0 on Windows with AI_NUMERICHOST) | ||||||
|  |     assert result[0][2] in ( | ||||||
|  |         0, | ||||||
|  |         socket.IPPROTO_TCP, | ||||||
|  |     )  # proto (0 on Windows with AI_NUMERICHOST) | ||||||
|  |     assert result[0][3] == ""  # canonname | ||||||
|  |     # IPv6 sockaddr has 4 elements | ||||||
|  |     assert len(result[0][4]) == 4 | ||||||
|  |     assert result[0][4][0] == "::1"  # address | ||||||
|  |     assert result[0][4][1] == 6053  # port | ||||||
|  |  | ||||||
|  |  | ||||||
|  | def test_resolve_ip_address_list_of_ips() -> None: | ||||||
|  |     """Test resolving a list of IP addresses (fast path).""" | ||||||
|  |     ips = ["192.168.1.100", "10.0.0.1", "::1"] | ||||||
|  |     result = helpers.resolve_ip_address(ips, 6053) | ||||||
|  |  | ||||||
|  |     # Should return results sorted by preference (IPv6 first, then IPv4) | ||||||
|  |     assert len(result) >= 2  # At least IPv4 addresses should work | ||||||
|  |  | ||||||
|  |     # Check that results are properly formatted | ||||||
|  |     for addr_info in result: | ||||||
|  |         assert addr_info[0] in (socket.AF_INET, socket.AF_INET6) | ||||||
|  |         assert addr_info[1] in ( | ||||||
|  |             0, | ||||||
|  |             socket.SOCK_STREAM, | ||||||
|  |         )  # 0 on Windows with AI_NUMERICHOST | ||||||
|  |         assert addr_info[2] in ( | ||||||
|  |             0, | ||||||
|  |             socket.IPPROTO_TCP, | ||||||
|  |         )  # 0 on Windows with AI_NUMERICHOST | ||||||
|  |         assert addr_info[3] == "" | ||||||
|  |  | ||||||
|  |  | ||||||
|  | def test_resolve_ip_address_with_getaddrinfo_failure(caplog) -> None: | ||||||
|  |     """Test that getaddrinfo OSError is handled gracefully in fast path.""" | ||||||
|  |     with ( | ||||||
|  |         caplog.at_level(logging.DEBUG), | ||||||
|  |         patch("socket.getaddrinfo") as mock_getaddrinfo, | ||||||
|  |     ): | ||||||
|  |         # First IP succeeds | ||||||
|  |         mock_getaddrinfo.side_effect = [ | ||||||
|  |             [ | ||||||
|  |                 ( | ||||||
|  |                     socket.AF_INET, | ||||||
|  |                     socket.SOCK_STREAM, | ||||||
|  |                     socket.IPPROTO_TCP, | ||||||
|  |                     "", | ||||||
|  |                     ("192.168.1.100", 6053), | ||||||
|  |                 ) | ||||||
|  |             ], | ||||||
|  |             OSError("Failed to resolve"),  # Second IP fails | ||||||
|  |         ] | ||||||
|  |  | ||||||
|  |         # Should continue despite one failure | ||||||
|  |         result = helpers.resolve_ip_address(["192.168.1.100", "192.168.1.101"], 6053) | ||||||
|  |  | ||||||
|  |         # Should have result from first IP only | ||||||
|  |         assert len(result) == 1 | ||||||
|  |         assert result[0][4][0] == "192.168.1.100" | ||||||
|  |  | ||||||
|  |         # Verify both IPs were attempted | ||||||
|  |         assert mock_getaddrinfo.call_count == 2 | ||||||
|  |         mock_getaddrinfo.assert_any_call( | ||||||
|  |             "192.168.1.100", 6053, proto=socket.IPPROTO_TCP, flags=socket.AI_NUMERICHOST | ||||||
|  |         ) | ||||||
|  |         mock_getaddrinfo.assert_any_call( | ||||||
|  |             "192.168.1.101", 6053, proto=socket.IPPROTO_TCP, flags=socket.AI_NUMERICHOST | ||||||
|  |         ) | ||||||
|  |  | ||||||
|  |         # Verify a debug message was logged for the failed IP | ||||||
|  |         assert "Failed to parse IP address '192.168.1.101'" in caplog.text | ||||||
|  |  | ||||||
|  |  | ||||||
|  | def test_resolve_ip_address_hostname() -> None: | ||||||
|  |     """Test resolving a hostname (async resolver path).""" | ||||||
|  |     mock_addr_info = AddrInfo( | ||||||
|  |         family=socket.AF_INET, | ||||||
|  |         type=socket.SOCK_STREAM, | ||||||
|  |         proto=socket.IPPROTO_TCP, | ||||||
|  |         sockaddr=IPv4Sockaddr(address="192.168.1.100", port=6053), | ||||||
|  |     ) | ||||||
|  |  | ||||||
|  |     with patch("esphome.resolver.AsyncResolver") as MockResolver: | ||||||
|  |         mock_resolver = MockResolver.return_value | ||||||
|  |         mock_resolver.resolve.return_value = [mock_addr_info] | ||||||
|  |  | ||||||
|  |         result = helpers.resolve_ip_address("test.local", 6053) | ||||||
|  |  | ||||||
|  |         assert len(result) == 1 | ||||||
|  |         assert result[0][0] == socket.AF_INET | ||||||
|  |         assert result[0][4] == ("192.168.1.100", 6053) | ||||||
|  |         MockResolver.assert_called_once_with(["test.local"], 6053) | ||||||
|  |         mock_resolver.resolve.assert_called_once() | ||||||
|  |  | ||||||
|  |  | ||||||
|  | def test_resolve_ip_address_mixed_list() -> None: | ||||||
|  |     """Test resolving a mix of IPs and hostnames.""" | ||||||
|  |     mock_addr_info = AddrInfo( | ||||||
|  |         family=socket.AF_INET, | ||||||
|  |         type=socket.SOCK_STREAM, | ||||||
|  |         proto=socket.IPPROTO_TCP, | ||||||
|  |         sockaddr=IPv4Sockaddr(address="192.168.1.200", port=6053), | ||||||
|  |     ) | ||||||
|  |  | ||||||
|  |     with patch("esphome.resolver.AsyncResolver") as MockResolver: | ||||||
|  |         mock_resolver = MockResolver.return_value | ||||||
|  |         mock_resolver.resolve.return_value = [mock_addr_info] | ||||||
|  |  | ||||||
|  |         # Mix of IP and hostname - should use async resolver | ||||||
|  |         result = helpers.resolve_ip_address(["192.168.1.100", "test.local"], 6053) | ||||||
|  |  | ||||||
|  |         assert len(result) == 1 | ||||||
|  |         assert result[0][4][0] == "192.168.1.200" | ||||||
|  |         MockResolver.assert_called_once_with(["192.168.1.100", "test.local"], 6053) | ||||||
|  |         mock_resolver.resolve.assert_called_once() | ||||||
|  |  | ||||||
|  |  | ||||||
|  | def test_resolve_ip_address_url() -> None: | ||||||
|  |     """Test extracting hostname from URL.""" | ||||||
|  |     mock_addr_info = AddrInfo( | ||||||
|  |         family=socket.AF_INET, | ||||||
|  |         type=socket.SOCK_STREAM, | ||||||
|  |         proto=socket.IPPROTO_TCP, | ||||||
|  |         sockaddr=IPv4Sockaddr(address="192.168.1.100", port=6053), | ||||||
|  |     ) | ||||||
|  |  | ||||||
|  |     with patch("esphome.resolver.AsyncResolver") as MockResolver: | ||||||
|  |         mock_resolver = MockResolver.return_value | ||||||
|  |         mock_resolver.resolve.return_value = [mock_addr_info] | ||||||
|  |  | ||||||
|  |         result = helpers.resolve_ip_address("http://test.local", 6053) | ||||||
|  |  | ||||||
|  |         assert len(result) == 1 | ||||||
|  |         MockResolver.assert_called_once_with(["test.local"], 6053) | ||||||
|  |         mock_resolver.resolve.assert_called_once() | ||||||
|  |  | ||||||
|  |  | ||||||
|  | def test_resolve_ip_address_ipv6_conversion() -> None: | ||||||
|  |     """Test proper IPv6 address info conversion.""" | ||||||
|  |     mock_addr_info = AddrInfo( | ||||||
|  |         family=socket.AF_INET6, | ||||||
|  |         type=socket.SOCK_STREAM, | ||||||
|  |         proto=socket.IPPROTO_TCP, | ||||||
|  |         sockaddr=IPv6Sockaddr(address="2001:db8::1", port=6053, flowinfo=1, scope_id=2), | ||||||
|  |     ) | ||||||
|  |  | ||||||
|  |     with patch("esphome.resolver.AsyncResolver") as MockResolver: | ||||||
|  |         mock_resolver = MockResolver.return_value | ||||||
|  |         mock_resolver.resolve.return_value = [mock_addr_info] | ||||||
|  |  | ||||||
|  |         result = helpers.resolve_ip_address("test.local", 6053) | ||||||
|  |  | ||||||
|  |         assert len(result) == 1 | ||||||
|  |         assert result[0][0] == socket.AF_INET6 | ||||||
|  |         assert result[0][4] == ("2001:db8::1", 6053, 1, 2) | ||||||
|  |  | ||||||
|  |  | ||||||
|  | def test_resolve_ip_address_error_handling() -> None: | ||||||
|  |     """Test error handling from AsyncResolver.""" | ||||||
|  |     with patch("esphome.resolver.AsyncResolver") as MockResolver: | ||||||
|  |         mock_resolver = MockResolver.return_value | ||||||
|  |         mock_resolver.resolve.side_effect = EsphomeError("Resolution failed") | ||||||
|  |  | ||||||
|  |         with pytest.raises(EsphomeError, match="Resolution failed"): | ||||||
|  |             helpers.resolve_ip_address("test.local", 6053) | ||||||
|  |  | ||||||
|  |  | ||||||
|  | def test_addr_preference_ipv4() -> None: | ||||||
|  |     """Test address preference for IPv4.""" | ||||||
|  |     addr_info = ( | ||||||
|  |         socket.AF_INET, | ||||||
|  |         socket.SOCK_STREAM, | ||||||
|  |         socket.IPPROTO_TCP, | ||||||
|  |         "", | ||||||
|  |         ("192.168.1.1", 6053), | ||||||
|  |     ) | ||||||
|  |     assert helpers.addr_preference_(addr_info) == 2 | ||||||
|  |  | ||||||
|  |  | ||||||
|  | def test_addr_preference_ipv6() -> None: | ||||||
|  |     """Test address preference for regular IPv6.""" | ||||||
|  |     addr_info = ( | ||||||
|  |         socket.AF_INET6, | ||||||
|  |         socket.SOCK_STREAM, | ||||||
|  |         socket.IPPROTO_TCP, | ||||||
|  |         "", | ||||||
|  |         ("2001:db8::1", 6053, 0, 0), | ||||||
|  |     ) | ||||||
|  |     assert helpers.addr_preference_(addr_info) == 1 | ||||||
|  |  | ||||||
|  |  | ||||||
|  | def test_addr_preference_ipv6_link_local_no_scope() -> None: | ||||||
|  |     """Test address preference for link-local IPv6 without scope.""" | ||||||
|  |     addr_info = ( | ||||||
|  |         socket.AF_INET6, | ||||||
|  |         socket.SOCK_STREAM, | ||||||
|  |         socket.IPPROTO_TCP, | ||||||
|  |         "", | ||||||
|  |         ("fe80::1", 6053, 0, 0),  # link-local with scope_id=0 | ||||||
|  |     ) | ||||||
|  |     assert helpers.addr_preference_(addr_info) == 3 | ||||||
|  |  | ||||||
|  |  | ||||||
|  | def test_addr_preference_ipv6_link_local_with_scope() -> None: | ||||||
|  |     """Test address preference for link-local IPv6 with scope.""" | ||||||
|  |     addr_info = ( | ||||||
|  |         socket.AF_INET6, | ||||||
|  |         socket.SOCK_STREAM, | ||||||
|  |         socket.IPPROTO_TCP, | ||||||
|  |         "", | ||||||
|  |         ("fe80::1", 6053, 0, 2),  # link-local with scope_id=2 | ||||||
|  |     ) | ||||||
|  |     assert helpers.addr_preference_(addr_info) == 1  # Has scope, so it's usable | ||||||
|  |  | ||||||
|  |  | ||||||
|  | def test_resolve_ip_address_sorting() -> None: | ||||||
|  |     """Test that results are sorted by preference.""" | ||||||
|  |     # Create multiple address infos with different preferences | ||||||
|  |     mock_addr_infos = [ | ||||||
|  |         AddrInfo( | ||||||
|  |             family=socket.AF_INET6, | ||||||
|  |             type=socket.SOCK_STREAM, | ||||||
|  |             proto=socket.IPPROTO_TCP, | ||||||
|  |             sockaddr=IPv6Sockaddr( | ||||||
|  |                 address="fe80::1", port=6053, flowinfo=0, scope_id=0 | ||||||
|  |             ),  # Preference 3 (link-local no scope) | ||||||
|  |         ), | ||||||
|  |         AddrInfo( | ||||||
|  |             family=socket.AF_INET, | ||||||
|  |             type=socket.SOCK_STREAM, | ||||||
|  |             proto=socket.IPPROTO_TCP, | ||||||
|  |             sockaddr=IPv4Sockaddr( | ||||||
|  |                 address="192.168.1.100", port=6053 | ||||||
|  |             ),  # Preference 2 (IPv4) | ||||||
|  |         ), | ||||||
|  |         AddrInfo( | ||||||
|  |             family=socket.AF_INET6, | ||||||
|  |             type=socket.SOCK_STREAM, | ||||||
|  |             proto=socket.IPPROTO_TCP, | ||||||
|  |             sockaddr=IPv6Sockaddr( | ||||||
|  |                 address="2001:db8::1", port=6053, flowinfo=0, scope_id=0 | ||||||
|  |             ),  # Preference 1 (IPv6) | ||||||
|  |         ), | ||||||
|  |     ] | ||||||
|  |  | ||||||
|  |     with patch("esphome.resolver.AsyncResolver") as MockResolver: | ||||||
|  |         mock_resolver = MockResolver.return_value | ||||||
|  |         mock_resolver.resolve.return_value = mock_addr_infos | ||||||
|  |  | ||||||
|  |         result = helpers.resolve_ip_address("test.local", 6053) | ||||||
|  |  | ||||||
|  |         # Should be sorted: IPv6 first, then IPv4, then link-local without scope | ||||||
|  |         assert result[0][4][0] == "2001:db8::1"  # IPv6 (preference 1) | ||||||
|  |         assert result[1][4][0] == "192.168.1.100"  # IPv4 (preference 2) | ||||||
|  |         assert result[2][4][0] == "fe80::1"  # Link-local no scope (preference 3) | ||||||
|   | |||||||
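Taken together, the four `addr_preference_` tests pin down a three-level ordering: usable IPv6 first, IPv4 second, and link-local IPv6 without a scope id last. A re-implementation consistent with those assertions (illustrative only; the real function lives in `esphome/helpers.py` and may differ in detail):

```python
# Preference consistent with the assertions above: 1 = usable IPv6,
# 2 = IPv4, 3 = link-local IPv6 lacking a scope id. Illustrative only.
import ipaddress
import socket

def addr_preference(addr_info: tuple) -> int:
    family, _type, _proto, _canonname, sockaddr = addr_info
    if family == socket.AF_INET:
        return 2
    address, _port, _flowinfo, scope_id = sockaddr
    if ipaddress.ip_address(address).is_link_local and scope_id == 0:
        return 3  # unusable without a scope id
    return 1

assert addr_preference((socket.AF_INET, 0, 0, "", ("192.168.1.1", 6053))) == 2
assert addr_preference((socket.AF_INET6, 0, 0, "", ("2001:db8::1", 6053, 0, 0))) == 1
assert addr_preference((socket.AF_INET6, 0, 0, "", ("fe80::1", 6053, 0, 0))) == 3
assert addr_preference((socket.AF_INET6, 0, 0, "", ("fe80::1", 6053, 0, 2))) == 1
```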
tests/unit_tests/test_resolver.py (new file, 169 lines)
							| @@ -0,0 +1,169 @@ | |||||||
|  | """Tests for the DNS resolver module.""" | ||||||
|  |  | ||||||
|  | from __future__ import annotations | ||||||
|  |  | ||||||
|  | import re | ||||||
|  | import socket | ||||||
|  | from unittest.mock import patch | ||||||
|  |  | ||||||
|  | from aioesphomeapi.core import ResolveAPIError, ResolveTimeoutAPIError | ||||||
|  | from aioesphomeapi.host_resolver import AddrInfo, IPv4Sockaddr, IPv6Sockaddr | ||||||
|  | import pytest | ||||||
|  |  | ||||||
|  | from esphome.core import EsphomeError | ||||||
|  | from esphome.resolver import RESOLVE_TIMEOUT, AsyncResolver | ||||||
|  |  | ||||||
|  |  | ||||||
|  | @pytest.fixture | ||||||
|  | def mock_addr_info_ipv4() -> AddrInfo: | ||||||
|  |     """Create a mock IPv4 AddrInfo.""" | ||||||
|  |     return AddrInfo( | ||||||
|  |         family=socket.AF_INET, | ||||||
|  |         type=socket.SOCK_STREAM, | ||||||
|  |         proto=socket.IPPROTO_TCP, | ||||||
|  |         sockaddr=IPv4Sockaddr(address="192.168.1.100", port=6053), | ||||||
|  |     ) | ||||||
|  |  | ||||||
|  |  | ||||||
|  | @pytest.fixture | ||||||
|  | def mock_addr_info_ipv6() -> AddrInfo: | ||||||
|  |     """Create a mock IPv6 AddrInfo.""" | ||||||
|  |     return AddrInfo( | ||||||
|  |         family=socket.AF_INET6, | ||||||
|  |         type=socket.SOCK_STREAM, | ||||||
|  |         proto=socket.IPPROTO_TCP, | ||||||
|  |         sockaddr=IPv6Sockaddr(address="2001:db8::1", port=6053, flowinfo=0, scope_id=0), | ||||||
|  |     ) | ||||||
|  |  | ||||||
|  |  | ||||||
|  | def test_async_resolver_successful_resolution(mock_addr_info_ipv4: AddrInfo) -> None: | ||||||
|  |     """Test successful DNS resolution.""" | ||||||
|  |     with patch( | ||||||
|  |         "esphome.resolver.hr.async_resolve_host", | ||||||
|  |         return_value=[mock_addr_info_ipv4], | ||||||
|  |     ) as mock_resolve: | ||||||
|  |         resolver = AsyncResolver(["test.local"], 6053) | ||||||
|  |         result = resolver.resolve() | ||||||
|  |  | ||||||
|  |         assert result == [mock_addr_info_ipv4] | ||||||
|  |         mock_resolve.assert_called_once_with( | ||||||
|  |             ["test.local"], 6053, timeout=RESOLVE_TIMEOUT | ||||||
|  |         ) | ||||||
|  |  | ||||||
|  |  | ||||||
|  | def test_async_resolver_multiple_hosts( | ||||||
|  |     mock_addr_info_ipv4: AddrInfo, mock_addr_info_ipv6: AddrInfo | ||||||
|  | ) -> None: | ||||||
|  |     """Test resolving multiple hosts.""" | ||||||
|  |     mock_results = [mock_addr_info_ipv4, mock_addr_info_ipv6] | ||||||
|  |  | ||||||
|  |     with patch( | ||||||
|  |         "esphome.resolver.hr.async_resolve_host", | ||||||
|  |         return_value=mock_results, | ||||||
|  |     ) as mock_resolve: | ||||||
|  |         resolver = AsyncResolver(["test1.local", "test2.local"], 6053) | ||||||
|  |         result = resolver.resolve() | ||||||
|  |  | ||||||
|  |         assert result == mock_results | ||||||
|  |         mock_resolve.assert_called_once_with( | ||||||
|  |             ["test1.local", "test2.local"], 6053, timeout=RESOLVE_TIMEOUT | ||||||
|  |         ) | ||||||
|  |  | ||||||
|  |  | ||||||
|  | def test_async_resolver_resolve_api_error() -> None: | ||||||
|  |     """Test handling of ResolveAPIError.""" | ||||||
|  |     error_msg = "Failed to resolve" | ||||||
|  |     with patch( | ||||||
|  |         "esphome.resolver.hr.async_resolve_host", | ||||||
|  |         side_effect=ResolveAPIError(error_msg), | ||||||
|  |     ): | ||||||
|  |         resolver = AsyncResolver(["test.local"], 6053) | ||||||
|  |         with pytest.raises( | ||||||
|  |             EsphomeError, match=re.escape(f"Error resolving IP address: {error_msg}") | ||||||
|  |         ): | ||||||
|  |             resolver.resolve() | ||||||
|  |  | ||||||
|  |  | ||||||
|  | def test_async_resolver_timeout_error() -> None: | ||||||
|  |     """Test handling of ResolveTimeoutAPIError.""" | ||||||
|  |     error_msg = "Resolution timed out" | ||||||
|  |  | ||||||
|  |     with patch( | ||||||
|  |         "esphome.resolver.hr.async_resolve_host", | ||||||
|  |         side_effect=ResolveTimeoutAPIError(error_msg), | ||||||
|  |     ): | ||||||
|  |         resolver = AsyncResolver(["test.local"], 6053) | ||||||
|  |         # Match either "Timeout" or "Error" since ResolveTimeoutAPIError is a subclass of ResolveAPIError | ||||||
|  |         # and depending on import order/test execution context, it might be caught as either | ||||||
|  |         with pytest.raises( | ||||||
|  |             EsphomeError, | ||||||
|  |             match=f"(Timeout|Error) resolving IP address: {re.escape(error_msg)}", | ||||||
|  |         ): | ||||||
|  |             resolver.resolve() | ||||||
|  |  | ||||||
|  |  | ||||||
|  | def test_async_resolver_generic_exception() -> None: | ||||||
|  |     """Test handling of generic exceptions.""" | ||||||
|  |     error = RuntimeError("Unexpected error") | ||||||
|  |     with patch( | ||||||
|  |         "esphome.resolver.hr.async_resolve_host", | ||||||
|  |         side_effect=error, | ||||||
|  |     ): | ||||||
|  |         resolver = AsyncResolver(["test.local"], 6053) | ||||||
|  |         with pytest.raises(RuntimeError, match="Unexpected error"): | ||||||
|  |             resolver.resolve() | ||||||
|  |  | ||||||
|  |  | ||||||
|  | def test_async_resolver_thread_timeout() -> None: | ||||||
|  |     """Test timeout when thread doesn't complete in time.""" | ||||||
|  |     # Mock the start method to prevent actual thread execution | ||||||
|  |     with ( | ||||||
|  |         patch.object(AsyncResolver, "start"), | ||||||
|  |         patch("esphome.resolver.hr.async_resolve_host"), | ||||||
|  |     ): | ||||||
|  |         resolver = AsyncResolver(["test.local"], 6053) | ||||||
|  |         # Override event.wait to simulate timeout (return False = timeout occurred) | ||||||
|  |         with ( | ||||||
|  |             patch.object(resolver.event, "wait", return_value=False), | ||||||
|  |             pytest.raises( | ||||||
|  |                 EsphomeError, match=re.escape("Timeout resolving IP address") | ||||||
|  |             ), | ||||||
|  |         ): | ||||||
|  |             resolver.resolve() | ||||||
|  |  | ||||||
|  |         # Verify thread start was called | ||||||
|  |         resolver.start.assert_called_once() | ||||||
|  |  | ||||||
|  |  | ||||||
|  | def test_async_resolver_ip_addresses(mock_addr_info_ipv4: AddrInfo) -> None: | ||||||
|  |     """Test resolving IP addresses.""" | ||||||
|  |     with patch( | ||||||
|  |         "esphome.resolver.hr.async_resolve_host", | ||||||
|  |         return_value=[mock_addr_info_ipv4], | ||||||
|  |     ) as mock_resolve: | ||||||
|  |         resolver = AsyncResolver(["192.168.1.100"], 6053) | ||||||
|  |         result = resolver.resolve() | ||||||
|  |  | ||||||
|  |         assert result == [mock_addr_info_ipv4] | ||||||
|  |         mock_resolve.assert_called_once_with( | ||||||
|  |             ["192.168.1.100"], 6053, timeout=RESOLVE_TIMEOUT | ||||||
|  |         ) | ||||||
|  |  | ||||||
|  |  | ||||||
|  | def test_async_resolver_mixed_addresses( | ||||||
|  |     mock_addr_info_ipv4: AddrInfo, mock_addr_info_ipv6: AddrInfo | ||||||
|  | ) -> None: | ||||||
|  |     """Test resolving mix of hostnames and IP addresses.""" | ||||||
|  |     mock_results = [mock_addr_info_ipv4, mock_addr_info_ipv6] | ||||||
|  |  | ||||||
|  |     with patch( | ||||||
|  |         "esphome.resolver.hr.async_resolve_host", | ||||||
|  |         return_value=mock_results, | ||||||
|  |     ) as mock_resolve: | ||||||
|  |         resolver = AsyncResolver(["test.local", "192.168.1.100", "::1"], 6053) | ||||||
|  |         result = resolver.resolve() | ||||||
|  |  | ||||||
|  |         assert result == mock_results | ||||||
|  |         mock_resolve.assert_called_once_with( | ||||||
|  |             ["test.local", "192.168.1.100", "::1"], 6053, timeout=RESOLVE_TIMEOUT | ||||||
|  |         ) | ||||||
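The thread-timeout test patches `AsyncResolver.start` and makes `event.wait` return `False`, which implies `resolve()` starts a worker thread and blocks on an event. A minimal sketch of that pattern; everything here except the `resolve()` and `event` names the tests rely on is an assumption, and the real class raises `EsphomeError` rather than `RuntimeError`:

```python
# Thread+event resolver sketch. Field names other than `event` and the
# resolve() entry point are assumptions, not the real esphome.resolver API.
import threading

RESOLVE_TIMEOUT = 30.0  # assumed value; the tests only import the constant

class ResolverSketch(threading.Thread):
    def __init__(self, hosts: list[str], port: int) -> None:
        super().__init__(daemon=True)
        self.hosts, self.port = hosts, port
        self.event = threading.Event()
        self._result: list | None = None
        self._exc: BaseException | None = None

    def run(self) -> None:
        try:
            # Stand-in for the actual lookup (hr.async_resolve_host upstream).
            self._result = [(host, self.port) for host in self.hosts]
        except Exception as exc:
            self._exc = exc
        finally:
            self.event.set()

    def resolve(self) -> list:
        self.start()
        if not self.event.wait(RESOLVE_TIMEOUT):
            raise RuntimeError("Timeout resolving IP address")  # EsphomeError upstream
        if self._exc is not None:
            raise self._exc
        return self._result


print(ResolverSketch(["test.local"], 6053).resolve())
```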