Mirror of https://github.com/esphome/esphome.git, synced 2025-09-07 13:52:20 +01:00
@@ -11,6 +11,7 @@
 #include <utility>
 #include <functional>
 #include <limits>
+#include "api_timing.h"
 #include "esphome/components/network/util.h"
 #include "esphome/core/application.h"
 #include "esphome/core/entity_base.h"
@@ -140,7 +141,9 @@ void APIConnection::loop() {
     return;
   }
 
+  API_TIMING_START(helper);
   APIError err = this->helper_->loop();
+  API_TIMING_END(helper, HELPER_LOOP);
   if (err != APIError::OK) {
     on_fatal_error();
     this->log_socket_operation_failed_(err);
@@ -149,11 +152,16 @@ void APIConnection::loop() {
 
   const uint32_t now = App.get_loop_component_start_time();
   // Check if socket has data ready before attempting to read
-  if (this->helper_->is_socket_ready()) {
+  API_TIMING_START(socket_check);
+  bool socket_ready = this->helper_->is_socket_ready();
+  API_TIMING_END(socket_check, SOCKET_CHECK);
+  if (socket_ready) {
     // Read up to MAX_MESSAGES_PER_LOOP messages per loop to improve throughput
     for (uint8_t message_count = 0; message_count < MAX_MESSAGES_PER_LOOP; message_count++) {
       ReadPacketBuffer buffer;
+      API_TIMING_START(read);
       err = this->helper_->read_packet(&buffer);
+      API_TIMING_END(read, READ_PACKET);
       if (err == APIError::WOULD_BLOCK) {
         // No more data available
         break;
@@ -164,11 +172,13 @@ void APIConnection::loop() {
       } else {
         this->last_traffic_ = now;
         // read a packet
+        API_TIMING_START(process);
         if (buffer.data_len > 0) {
           this->read_message(buffer.data_len, buffer.type, &buffer.container[buffer.data_offset]);
         } else {
           this->read_message(0, buffer.type, nullptr);
         }
+        API_TIMING_END(process, PROCESS_MESSAGE);
         if (this->flags_.remove)
           return;
       }
@@ -177,19 +187,27 @@ void APIConnection::loop() {
 
   // Process deferred batch if scheduled and timer has expired
   if (this->flags_.batch_scheduled && now - this->deferred_batch_.batch_start_time >= this->get_batch_delay_ms_()) {
+    API_TIMING_START(deferred);
     this->process_batch_();
+    API_TIMING_END(deferred, DEFERRED_BATCH);
   }
 
   if (!this->list_entities_iterator_.completed()) {
+    API_TIMING_START(iterator1);
     this->process_iterator_batch_(this->list_entities_iterator_);
+    API_TIMING_END(iterator1, ITERATOR_BATCH);
   } else if (!this->initial_state_iterator_.completed()) {
+    API_TIMING_START(iterator2);
     this->process_iterator_batch_(this->initial_state_iterator_);
+    API_TIMING_END(iterator2, ITERATOR_BATCH);
 
     // If we've completed initial states, process any remaining and clear the flag
     if (this->initial_state_iterator_.completed()) {
       // Process any remaining batched messages immediately
       if (!this->deferred_batch_.empty()) {
+        API_TIMING_START(deferred2);
         this->process_batch_();
+        API_TIMING_END(deferred2, DEFERRED_BATCH);
       }
       // Now that everything is sent, enable immediate sending for future state changes
       this->flags_.should_try_send_immediately = true;
@@ -203,6 +221,7 @@ void APIConnection::loop() {
       ESP_LOGW(TAG, "%s is unresponsive; disconnecting", this->get_client_combined_info().c_str());
     }
   } else if (now - this->last_traffic_ > KEEPALIVE_TIMEOUT_MS && !this->flags_.remove) {
+    API_TIMING_GUARD(KEEPALIVE);
     // Only send ping if we're not disconnecting
     ESP_LOGVV(TAG, "Sending keepalive PING");
     PingRequest req;
@@ -219,6 +238,7 @@ void APIConnection::loop() {
 
 #ifdef USE_CAMERA
   if (this->image_reader_ && this->image_reader_->available() && this->helper_->can_write_without_blocking()) {
+    API_TIMING_GUARD(CAMERA_IMAGE);
     uint32_t to_send = std::min((size_t) MAX_BATCH_PACKET_SIZE, this->image_reader_->available());
     bool done = this->image_reader_->available() == to_send;
 
@@ -273,9 +293,11 @@ uint16_t APIConnection::encode_message_to_buffer(ProtoMessage &msg, uint8_t mess
 #endif
 
   // Calculate size
+  API_TIMING_START(msg_size_calc);
   ProtoSize size_calc;
   msg.calculate_size(size_calc);
   uint32_t calculated_size = size_calc.get_size();
+  API_TIMING_END(msg_size_calc, ENCODE_MESSAGE);
 
   // Cache frame sizes to avoid repeated virtual calls
   const uint8_t header_padding = conn->helper_->frame_header_padding();
@@ -308,7 +330,9 @@ uint16_t APIConnection::encode_message_to_buffer(ProtoMessage &msg, uint8_t mess
 
   // Encode directly into buffer
   size_t size_before_encode = shared_buf.size();
+  API_TIMING_START(msg_encode);
   msg.encode({&shared_buf});
+  API_TIMING_END(msg_encode, ENCODE_MESSAGE);
 
   // Calculate actual encoded size (not including header that was already added)
   size_t actual_payload_size = shared_buf.size() - size_before_encode;
@@ -1560,7 +1584,9 @@ bool APIConnection::send_buffer(ProtoWriteBuffer buffer, uint8_t message_type) {
     return false;
   }
 
+  API_TIMING_START(write);
   APIError err = this->helper_->write_protobuf_packet(message_type, buffer);
+  API_TIMING_END(write, WRITE_PACKET);
   if (err == APIError::WOULD_BLOCK)
     return false;
   if (err != APIError::OK) {
@@ -1636,11 +1662,14 @@ void APIConnection::process_batch_() {
     return;
   }
 
+  API_TIMING_START(buffer_clear);
   // Try to clear buffer first
   if (!this->try_to_clear_buffer(true)) {
     // Can't write now, we'll try again later
+    API_TIMING_END(buffer_clear, BUFFER_MANAGEMENT);
     return;
   }
+  API_TIMING_END(buffer_clear, BUFFER_MANAGEMENT);
 
   // Get shared buffer reference once to avoid multiple calls
   auto &shared_buf = this->parent_->get_shared_buffer_ref();
@@ -1651,8 +1680,10 @@ void APIConnection::process_batch_() {
     const auto &item = this->deferred_batch_[0];
 
     // Let the creator calculate size and encode if it fits
+    API_TIMING_START(single_encode);
     uint16_t payload_size =
         item.creator(item.entity, this, std::numeric_limits<uint16_t>::max(), true, item.message_type);
+    API_TIMING_END(single_encode, BATCH_ENCODE);
 
     if (payload_size > 0 && this->send_buffer(ProtoWriteBuffer{&shared_buf}, item.message_type)) {
 #ifdef HAS_PROTO_MESSAGE_DUMP
@@ -1705,11 +1736,14 @@ void APIConnection::process_batch_() {
   uint32_t current_offset = 0;
 
   // Process items and encode directly to buffer (up to our limit)
+  API_TIMING_START(batch_encode_loop);
   for (size_t i = 0; i < packets_to_process; i++) {
     const auto &item = this->deferred_batch_[i];
     // Try to encode message
     // The creator will calculate overhead to determine if the message fits
+    API_TIMING_START(item_encode);
     uint16_t payload_size = item.creator(item.entity, this, remaining_size, false, item.message_type);
+    API_TIMING_END(item_encode, BATCH_ENCODE);
 
     if (payload_size == 0) {
       // Message won't fit, stop processing
@@ -1736,6 +1770,7 @@ void APIConnection::process_batch_() {
     // Current buffer size + footer space for this message
     current_offset = shared_buf.size() + footer_size;
   }
+  API_TIMING_END(batch_encode_loop, BATCH_ENCODE);
 
   if (items_processed == 0) {
     this->deferred_batch_.clear();
@@ -1748,8 +1783,10 @@ void APIConnection::process_batch_() {
   }
 
   // Send all collected packets
+  API_TIMING_START(batch_write);
   APIError err = this->helper_->write_protobuf_packets(ProtoWriteBuffer{&shared_buf},
                                                        std::span<const PacketInfo>(packet_info, packet_count));
+  API_TIMING_END(batch_write, PROTO_WRITE);
   if (err != APIError::OK && err != APIError::WOULD_BLOCK) {
     on_fatal_error();
     this->log_warning_("Batch write failed", err);
@@ -6,6 +6,7 @@
 #include "api_pb2.h"
 #include "api_pb2_service.h"
 #include "api_server.h"
+#include "api_timing.h"
 #include "esphome/core/application.h"
 #include "esphome/core/component.h"
 #include "esphome/core/entity_base.h"
@@ -335,9 +336,11 @@ class APIConnection final : public APIServerConnection {
   // Helper method to process multiple entities from an iterator in a batch
   template<typename Iterator> void process_iterator_batch_(Iterator &iterator) {
     size_t initial_size = this->deferred_batch_.size();
+    API_TIMING_START(iter_advance);
     while (!iterator.completed() && (this->deferred_batch_.size() - initial_size) < MAX_INITIAL_PER_BATCH) {
       iterator.advance();
     }
+    API_TIMING_END(iter_advance, ITERATOR_ENCODE);
 
     // If the batch is full, process it immediately
     // Note: iterator.advance() already calls schedule_batch_() via schedule_message_()
@@ -681,6 +684,7 @@ class APIConnection final : public APIServerConnection {
   // Helper method to send a message either immediately or via batching
   bool send_message_smart_(EntityBase *entity, MessageCreatorPtr creator, uint8_t message_type,
                            uint8_t estimated_size) {
+    API_TIMING_GUARD(SEND_STATE);
     // Try to send immediately if:
     // 1. It's an UpdateStateResponse (always send immediately to handle cases where
     //    the main loop is blocked, e.g., during OTA updates)
@@ -1,6 +1,7 @@
 // This file was automatically generated with a tool.
 // See script/api_protobuf/api_protobuf.py
 #include "api_pb2_service.h"
+#include "api_timing.h"
 #include "esphome/core/log.h"
 
 namespace esphome::api {
@@ -14,14 +15,18 @@ void APIServerConnectionBase::log_send_message_(const char *name, const std::str
 #endif
 
 void APIServerConnectionBase::read_message(uint32_t msg_size, uint32_t msg_type, uint8_t *msg_data) {
+  API_TIMING_START(msg_decode);
   switch (msg_type) {
     case HelloRequest::MESSAGE_TYPE: {
       HelloRequest msg;
       msg.decode(msg_data, msg_size);
+      API_TIMING_END(msg_decode, PROTO_READ);
 #ifdef HAS_PROTO_MESSAGE_DUMP
       ESP_LOGVV(TAG, "on_hello_request: %s", msg.dump().c_str());
 #endif
+      API_TIMING_START(msg_handle);
       this->on_hello_request(msg);
+      API_TIMING_END(msg_handle, MESSAGE_DISPATCH);
       break;
     }
     case ConnectRequest::MESSAGE_TYPE: {
@@ -2,6 +2,7 @@
 #ifdef USE_API
 #include <cerrno>
 #include "api_connection.h"
+#include "api_timing.h"
 #include "esphome/components/network/util.h"
 #include "esphome/core/application.h"
 #include "esphome/core/defines.h"
@@ -135,6 +136,7 @@ void APIServer::schedule_reboot_timeout_() {
 void APIServer::loop() {
   // Accept new clients only if the socket exists and has incoming connections
   if (this->socket_ && this->socket_->ready()) {
+    API_TIMING_GUARD(ACCEPT_CLIENT);
     while (true) {
       struct sockaddr_storage source_addr;
       socklen_t addr_len = sizeof(source_addr);
@@ -161,6 +163,7 @@ void APIServer::loop() {
 
   // Process clients and remove disconnected ones in a single pass
   // Check network connectivity once for all clients
+  API_TIMING_START(network_check);
   if (!network::is_connected()) {
     // Network is down - disconnect all clients
     for (auto &client : this->clients_) {
@@ -169,6 +172,7 @@ void APIServer::loop() {
     }
     // Continue to process and clean up the clients below
   }
+  API_TIMING_END(network_check, NETWORK_CHECK);
 
   size_t client_index = 0;
   while (client_index < this->clients_.size()) {
@@ -176,12 +180,15 @@ void APIServer::loop() {
 
     if (!client->flags_.remove) {
       // Common case: process active client
+      API_TIMING_START(client_loop);
       client->loop();
+      API_TIMING_END(client_loop, CLIENT_LOOP);
       client_index++;
       continue;
     }
 
     // Rare case: handle disconnection
+    API_TIMING_GUARD(CLIENT_REMOVAL);
 #ifdef USE_API_CLIENT_DISCONNECTED_TRIGGER
     this->client_disconnected_trigger_->trigger(client->client_info_.name, client->client_info_.peername);
 #endif
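All of the hunks above rely on three macros from the new api_timing.h, which this commit adds but this excerpt does not show: API_TIMING_START/API_TIMING_END bracket a named span inside one function, while API_TIMING_GUARD times the remainder of the enclosing scope (keepalive, camera frames, client accept and removal, send_message_smart_). Below is a minimal sketch of how such a header could be written; the category names come from the call sites in the diff, but the enum layout, the timing_now_us() and record_api_timing() helpers, and the guard class are assumptions, not the actual implementation. The per-category accumulator they write into is sketched after the runtime_stats hunks further down.

// Hypothetical sketch of the api_timing.h macros -- illustrative only, not the code added by this commit.
#include <cstdint>

namespace esphome::api {

// Category names are taken from the call sites in this diff; ordering and underlying type are assumptions.
enum class APITimingCategory : uint8_t {
  HELPER_LOOP, SOCKET_CHECK, READ_PACKET, PROCESS_MESSAGE, DEFERRED_BATCH, ITERATOR_BATCH,
  KEEPALIVE, CAMERA_IMAGE, ENCODE_MESSAGE, WRITE_PACKET, BUFFER_MANAGEMENT, BATCH_ENCODE,
  PROTO_WRITE, ITERATOR_ENCODE, SEND_STATE, PROTO_READ, MESSAGE_DISPATCH, ACCEPT_CLIENT,
  NETWORK_CHECK, CLIENT_LOOP, CLIENT_REMOVAL, MAX_CATEGORIES,
};

// Assumed hooks: a microsecond clock and a per-category accumulator (see the stats sketch below).
uint32_t timing_now_us();
void record_api_timing(APITimingCategory category, uint32_t duration_us);

// START/END bracket an arbitrary span inside one function; the tag only disambiguates the local variable.
#define API_TIMING_START(tag) uint32_t api_timing_start_##tag = ::esphome::api::timing_now_us()
#define API_TIMING_END(tag, category) \
  ::esphome::api::record_api_timing(::esphome::api::APITimingCategory::category, \
                                    ::esphome::api::timing_now_us() - api_timing_start_##tag)

// GUARD times the remainder of the enclosing scope via RAII, so early returns are still counted.
class APITimingGuard {
 public:
  explicit APITimingGuard(APITimingCategory category) : category_(category), start_us_(timing_now_us()) {}
  ~APITimingGuard() { record_api_timing(category_, timing_now_us() - start_us_); }

 private:
  APITimingCategory category_;
  uint32_t start_us_;
};

#define API_TIMING_GUARD(category) \
  ::esphome::api::APITimingGuard api_timing_guard_{::esphome::api::APITimingCategory::category}

}  // namespace esphome::api

The RAII guard form keeps a measurement correct even when the scope exits early, which is presumably why the whole-scope sites in the diff use it instead of an explicit END.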
@@ -4,6 +4,10 @@
 
 #include "esphome/core/component.h"
 #include <algorithm>
+#include <cinttypes>
+#ifdef USE_API
+#include "esphome/components/api/api_timing.h"
+#endif
 
 namespace esphome {
 
@@ -62,6 +66,29 @@ void RuntimeStatsCollector::log_stats_() {
              stats->get_period_time_ms());
   }
 
+#ifdef USE_API
+  // Log detailed API timing breakdown if available
+  bool has_api_timing = false;
+  for (int i = 0; i < static_cast<int>(api::APITimingCategory::MAX_CATEGORIES); i++) {
+    if (api::api_timing_stats[i].count > 0) {
+      has_api_timing = true;
+      break;
+    }
+  }
+
+  if (has_api_timing) {
+    ESP_LOGI(TAG, "API Component Breakdown (microseconds):");
+    for (int i = 0; i < static_cast<int>(api::APITimingCategory::MAX_CATEGORIES); i++) {
+      const api::APITimingStats &stats = api::api_timing_stats[i];
+      if (stats.count > 0) {
+        const char *category_name = api::api_timing_category_to_str(static_cast<api::APITimingCategory>(i));
+        ESP_LOGI(TAG, " %s: count=%" PRIu32 ", avg=%.1fus, max=%" PRIu32 "us, total=%" PRIu64 "us", category_name,
+                 stats.count, stats.get_avg_us(), stats.max_us, stats.total_us);
+      }
+    }
+  }
+#endif
+
   // Log total stats since boot
   ESP_LOGI(TAG, "Total stats (since boot):");
 
@@ -81,6 +108,16 @@ void RuntimeStatsCollector::log_stats_() {
   }
 }
 
+void RuntimeStatsCollector::reset_stats_() {
+  for (auto &it : this->component_stats_) {
+    it.second.reset_period_stats();
+  }
+#ifdef USE_API
+  // Reset API timing stats for next period
+  api::reset_api_timing_stats();
+#endif
+}
+
 void RuntimeStatsCollector::process_pending_stats(uint32_t current_time) {
   if (this->next_log_time_ == 0)
     return;
@@ -103,11 +103,7 @@ class RuntimeStatsCollector {
  protected:
   void log_stats_();
 
-  void reset_stats_() {
-    for (auto &it : this->component_stats_) {
-      it.second.reset_period_stats();
-    }
-  }
+  void reset_stats_();
 
   // Use const char* keys for efficiency
   // Custom comparator for const char* keys in map