Mirror of https://github.com/esphome/esphome.git (synced 2025-09-04 12:22:20 +01:00)

[api] Optimize message buffer allocation and eliminate redundant methods
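
The change turns on one size relation: an encoded message occupies its protobuf payload plus fixed header padding in front and a frame footer behind, and it is only packed if that total still fits in the space remaining for the batch. A minimal standalone sketch of that arithmetic follows; the constants and function name are invented stand-ins for the values the connection's frame helper provides.

#include <cstddef>
#include <cstdint>
#include <cstdio>

// Illustrative frame sizes; in the diff they come from
// helper_->frame_header_padding() and helper_->frame_footer_size().
constexpr std::size_t kHeaderPadding = 6;
constexpr std::size_t kFooterSize = 2;

// Returns the total buffer space a payload needs, or 0 if it does not fit
// in the space remaining for the current batch.
uint16_t total_size_if_it_fits(std::size_t calculated_size, std::size_t remaining_size) {
  std::size_t total_size = calculated_size + kHeaderPadding + kFooterSize;
  return total_size > remaining_size ? 0 : static_cast<uint16_t>(total_size);
}

int main() {
  std::printf("%u\n", static_cast<unsigned>(total_size_if_it_fits(100, 1400)));  // 108 = 100 + 6 + 2
  std::printf("%u\n", static_cast<unsigned>(total_size_if_it_fits(100, 50)));    // 0: does not fit
  return 0;
}
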
@@ -281,34 +281,42 @@ uint16_t APIConnection::encode_message_to_buffer(ProtoMessage &msg, uint8_t mess
   const uint8_t header_padding = conn->helper_->frame_header_padding();
   const uint8_t footer_size = conn->helper_->frame_footer_size();

-  // Calculate total size with padding for buffer allocation
-  size_t total_calculated_size = calculated_size + header_padding + footer_size;
+  // Calculate total size with padding
+  size_t total_size = calculated_size + header_padding + footer_size;

   // Check if it fits
-  if (total_calculated_size > remaining_size) {
+  if (total_size > remaining_size) {
     return 0; // Doesn't fit
   }

-  // Allocate buffer space - pass payload size, allocation functions add header/footer space
-  ProtoWriteBuffer buffer = is_single ? conn->allocate_single_message_buffer(calculated_size)
-                                      : conn->allocate_batch_message_buffer(calculated_size);
-
-  // Get buffer size after allocation (which includes header padding)
+  // Get buffer and prepare it inline
   std::vector<uint8_t> &shared_buf = conn->parent_->get_shared_buffer_ref();
-  size_t size_before_encode = shared_buf.size();
+
+  if (is_single || conn->flags_.batch_first_message) {
+    // Single message or first batch message
+    shared_buf.clear();
+    shared_buf.reserve(total_size);
+    shared_buf.resize(header_padding);
+    if (conn->flags_.batch_first_message) {
+      conn->flags_.batch_first_message = false;
+    }
+  } else {
+    // Batch message second or later
+    // Add padding for previous message footer + this message header
+    size_t current_size = shared_buf.size();
+    shared_buf.reserve(current_size + total_size);
+    shared_buf.resize(current_size + footer_size + header_padding);
+  }

   // Encode directly into buffer
+  ProtoWriteBuffer buffer{&shared_buf};
+  size_t size_before_encode = shared_buf.size();
   msg.encode(buffer);

   // Calculate actual encoded size (not including header that was already added)
   size_t actual_payload_size = shared_buf.size() - size_before_encode;
-
-  // Return actual total size (header + actual payload + footer)
-  size_t actual_total_size = header_padding + actual_payload_size + footer_size;
-
-  // Verify that calculate_size() returned the correct value
-  assert(calculated_size == actual_payload_size);
-  return static_cast<uint16_t>(actual_total_size);
+
+  return static_cast<uint16_t>(total_size);
 }

 #ifdef USE_BINARY_SENSOR
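
The block added above replaces the old allocate-then-encode path with inline preparation of the shared vector: the first message of a frame clears the buffer and leaves header room, while each later batch message leaves room for the previous message's footer plus its own header. Below is a self-contained sketch of that preparation pattern, not the ESPHome API; prepare_for_message and the padding constants are invented for illustration.

#include <cstddef>
#include <cstdint>
#include <vector>

// Make room in `buf` for one message of `payload_size` bytes.
// kHeaderPadding and kFooterSize stand in for the frame helper's values.
void prepare_for_message(std::vector<uint8_t> &buf, std::size_t payload_size, bool first_message) {
  constexpr std::size_t kHeaderPadding = 6;
  constexpr std::size_t kFooterSize = 2;
  const std::size_t total_size = payload_size + kHeaderPadding + kFooterSize;

  if (first_message) {
    // Fresh frame: drop old contents, reserve the whole message, leave header space.
    buf.clear();
    buf.reserve(total_size);
    buf.resize(kHeaderPadding);
  } else {
    // Follow-up batch message: keep what is there, add footer + header space.
    const std::size_t current_size = buf.size();
    buf.reserve(current_size + total_size);
    buf.resize(current_size + kFooterSize + kHeaderPadding);
  }
  // The payload is then appended starting at buf.size(); comparing buf.size()
  // before and after encoding gives the actual payload length, as the diff
  // does with size_before_encode.
}

Calling reserve() for the full message before the resize() means the encoder's subsequent appends do not reallocate mid-message.
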
@@ -1620,14 +1628,6 @@ bool APIConnection::schedule_batch_() {
   return true;
 }

-ProtoWriteBuffer APIConnection::allocate_single_message_buffer(uint16_t size) { return this->create_buffer(size); }
-
-ProtoWriteBuffer APIConnection::allocate_batch_message_buffer(uint16_t size) {
-  ProtoWriteBuffer result = this->prepare_message_buffer(size, this->flags_.batch_first_message);
-  this->flags_.batch_first_message = false;
-  return result;
-}
-
 void APIConnection::process_batch_() {
   // Ensure PacketInfo remains trivially destructible for our placement new approach
   static_assert(std::is_trivially_destructible<PacketInfo>::value,
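
Both methods removed here were thin wrappers: the single-message one forwarded to create_buffer(), and the batch one called prepare_message_buffer() and then cleared flags_.batch_first_message. After this commit the flag is cleared inside encode_message_to_buffer itself (first hunk). A compact, hypothetical sketch of that flag lifecycle, not the real APIConnection:

#include <cstddef>
#include <cstdint>
#include <vector>

struct BatchStateSketch {
  std::vector<uint8_t> shared_buf;
  bool batch_first_message = true;  // set when a new batch is scheduled

  // Called once per message while a batch is drained.
  void prepare_next(std::size_t header_padding, std::size_t footer_size) {
    if (batch_first_message) {
      shared_buf.clear();                 // first message starts the frame
      shared_buf.resize(header_padding);  // leave room for its header
      batch_first_message = false;        // cleared at the point of use now
    } else {
      // later messages: previous message's footer + own header
      shared_buf.resize(shared_buf.size() + footer_size + header_padding);
    }
  }
};
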
@@ -1735,7 +1735,7 @@ void APIConnection::process_batch_() {
     }
     remaining_size -= payload_size;
     // Calculate where the next message's header padding will start
-    // Current buffer size + footer space (that prepare_message_buffer will add for this message)
+    // Current buffer size + footer space for this message
     current_offset = shared_buf.size() + footer_size;
   }

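The reworded comment describes plain offset bookkeeping: the next message's header padding begins after the bytes already in the shared buffer plus the footer still to be written for the current message. A worked example with invented sizes:

#include <cstdio>

int main() {
  const unsigned header_padding = 6, footer_size = 2;       // illustrative frame sizes
  unsigned shared_buf_size = header_padding + 40;            // 6-byte header + 40-byte payload encoded
  unsigned current_offset = shared_buf_size + footer_size;   // next header padding starts here
  std::printf("next message starts at offset %u\n", current_offset);  // prints 48
  return 0;
}
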
@@ -265,42 +265,11 @@ class APIConnection : public APIServerConnection {
     return {&shared_buf};
   }

-  // Prepare buffer for next message in batch
-  ProtoWriteBuffer prepare_message_buffer(uint16_t message_size, bool is_first_message) {
-    // Get reference to shared buffer (it maintains state between batch messages)
-    std::vector<uint8_t> &shared_buf = this->parent_->get_shared_buffer_ref();
-
-    if (is_first_message) {
-      shared_buf.clear();
-    }
-
-    size_t current_size = shared_buf.size();
-
-    // Calculate padding to add:
-    // - First message: just header padding
-    // - Subsequent messages: footer for previous message + header padding for this message
-    size_t padding_to_add = is_first_message
-                                ? this->helper_->frame_header_padding()
-                                : this->helper_->frame_header_padding() + this->helper_->frame_footer_size();
-
-    // Reserve space for padding + message
-    shared_buf.reserve(current_size + padding_to_add + message_size);
-
-    // Resize to add the padding bytes
-    shared_buf.resize(current_size + padding_to_add);
-
-    return {&shared_buf};
-  }
-
   bool try_to_clear_buffer(bool log_out_of_space);
   bool send_buffer(ProtoWriteBuffer buffer, uint8_t message_type) override;

   std::string get_client_combined_info() const { return this->client_info_.get_combined_info(); }

-  // Buffer allocator methods for batch processing
-  ProtoWriteBuffer allocate_single_message_buffer(uint16_t size);
-  ProtoWriteBuffer allocate_batch_message_buffer(uint16_t size);
-
 protected:
   // Helper function to handle authentication completion
   void complete_authentication_();
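
prepare_message_buffer, dropped from the class declaration here, encoded a single rule: the first message of a batch is preceded only by header padding, and every later message by the previous message's footer plus its own header padding. The same rule now lives inline in encode_message_to_buffer. A compile-time restatement with illustrative constants (not the ESPHome helper API):

#include <cstddef>

constexpr std::size_t kHeaderPadding = 6;  // stand-in for frame_header_padding()
constexpr std::size_t kFooterSize = 2;     // stand-in for frame_footer_size()

constexpr std::size_t padding_to_add(bool is_first_message) {
  return is_first_message ? kHeaderPadding : kHeaderPadding + kFooterSize;
}

static_assert(padding_to_add(true) == 6, "first message: header padding only");
static_assert(padding_to_add(false) == 8, "later messages: previous footer + header padding");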