mirror of
https://github.com/esphome/esphome.git
synced 2025-10-04 19:03:47 +01:00
ble pool
This commit is contained in:
@@ -2,7 +2,6 @@
|
||||
|
||||
#include "ble.h"
|
||||
#include "ble_event_pool.h"
|
||||
#include "queue_index.h"
|
||||
|
||||
#include "esphome/core/application.h"
|
||||
#include "esphome/core/log.h"
|
||||
@@ -302,16 +301,8 @@ void ESP32BLE::loop() {
|
||||
break;
|
||||
}
|
||||
|
||||
size_t event_idx = this->ble_events_.pop();
|
||||
while (event_idx != LockFreeIndexQueue<MAX_BLE_QUEUE_SIZE>::INVALID_INDEX) {
|
||||
BLEEvent *ble_event = this->ble_event_pool_.get(event_idx);
|
||||
if (ble_event == nullptr) {
|
||||
// This should not happen - log error and continue
|
||||
ESP_LOGE(TAG, "Invalid event index: %u", static_cast<unsigned>(event_idx));
|
||||
event_idx = this->ble_events_.pop();
|
||||
continue;
|
||||
}
|
||||
|
||||
BLEEvent *ble_event = this->ble_events_.pop();
|
||||
while (ble_event != nullptr) {
|
||||
switch (ble_event->type_) {
|
||||
case BLEEvent::GATTS: {
|
||||
esp_gatts_cb_event_t event = ble_event->event_.gatts.gatts_event;
|
||||
@@ -359,8 +350,8 @@ void ESP32BLE::loop() {
|
||||
break;
|
||||
}
|
||||
// Return the event to the pool
|
||||
this->ble_event_pool_.deallocate(event_idx);
|
||||
event_idx = this->ble_events_.pop();
|
||||
this->ble_event_pool_.deallocate(ble_event);
|
||||
ble_event = this->ble_events_.pop();
|
||||
}
|
||||
if (this->advertising_ != nullptr) {
|
||||
this->advertising_->loop();
|
||||
@@ -376,10 +367,10 @@ void ESP32BLE::loop() {
|
||||
static uint32_t last_pool_log = 0;
|
||||
uint32_t now = millis();
|
||||
if (now - last_pool_log > 10000) {
|
||||
size_t created = this->ble_event_pool_.get_total_created();
|
||||
uint8_t created = this->ble_event_pool_.get_total_created();
|
||||
if (created > 0) {
|
||||
ESP_LOGD(TAG, "BLE event pool: %zu events created (peak usage), %zu currently allocated", created,
|
||||
this->ble_event_pool_.get_allocated_count());
|
||||
ESP_LOGD(TAG, "BLE event pool: %u events created (peak usage), %zu free", created,
|
||||
this->ble_event_pool_.get_free_count());
|
||||
}
|
||||
last_pool_log = now;
|
||||
}
|
||||
@@ -407,19 +398,9 @@ template<typename... Args> void enqueue_ble_event(Args... args) {
|
||||
}
|
||||
|
||||
// Allocate an event from the pool
|
||||
size_t event_idx = global_ble->ble_event_pool_.allocate();
|
||||
if (event_idx == BLEEventPool<MAX_BLE_QUEUE_SIZE>::INVALID_INDEX) {
|
||||
// Pool is full, drop the event
|
||||
global_ble->ble_events_.increment_dropped_count();
|
||||
return;
|
||||
}
|
||||
|
||||
// Get the event object
|
||||
BLEEvent *event = global_ble->ble_event_pool_.get(event_idx);
|
||||
BLEEvent *event = global_ble->ble_event_pool_.allocate();
|
||||
if (event == nullptr) {
|
||||
// This should not happen
|
||||
ESP_LOGE(TAG, "Failed to get event from pool at index %u", static_cast<unsigned>(event_idx));
|
||||
global_ble->ble_event_pool_.deallocate(event_idx);
|
||||
// Pool is full, drop the event
|
||||
global_ble->ble_events_.increment_dropped_count();
|
||||
return;
|
||||
}
|
||||
@@ -427,12 +408,12 @@ template<typename... Args> void enqueue_ble_event(Args... args) {
|
||||
// Load new event data (replaces previous event)
|
||||
load_ble_event(event, args...);
|
||||
|
||||
// Push the event index to the queue
|
||||
if (!global_ble->ble_events_.push(event_idx)) {
|
||||
// Push the event to the queue
|
||||
if (!global_ble->ble_events_.push(event)) {
|
||||
// This should not happen in SPSC queue with single producer
|
||||
ESP_LOGE(TAG, "BLE queue push failed unexpectedly");
|
||||
// Return to pool
|
||||
global_ble->ble_event_pool_.deallocate(event_idx);
|
||||
global_ble->ble_event_pool_.deallocate(event);
|
||||
}
|
||||
}
|
||||
|
||||
|
@@ -14,7 +14,6 @@
|
||||
#include "ble_event.h"
|
||||
#include "ble_event_pool.h"
|
||||
#include "queue.h"
|
||||
#include "queue_index.h"
|
||||
|
||||
#ifdef USE_ESP32
|
||||
|
||||
@@ -149,7 +148,7 @@ class ESP32BLE : public Component {
|
||||
std::vector<BLEStatusEventHandler *> ble_status_event_handlers_;
|
||||
BLEComponentState state_{BLE_COMPONENT_STATE_OFF};
|
||||
|
||||
LockFreeIndexQueue<MAX_BLE_QUEUE_SIZE> ble_events_;
|
||||
LockFreeQueue<BLEEvent, MAX_BLE_QUEUE_SIZE> ble_events_;
|
||||
BLEEventPool<MAX_BLE_QUEUE_SIZE> ble_event_pool_;
|
||||
BLEAdvertising *advertising_{};
|
||||
esp_ble_io_cap_t io_cap_{ESP_IO_CAP_NONE};
|
||||
|
@@ -5,6 +5,7 @@
|
||||
#include <atomic>
|
||||
#include <cstddef>
|
||||
#include "ble_event.h"
|
||||
#include "queue.h"
|
||||
#include "esphome/core/helpers.h"
|
||||
|
||||
namespace esphome {
|
||||
@@ -14,88 +15,32 @@ namespace esp32_ble {
|
||||
// Events are allocated on first use and reused thereafter, growing to peak usage
|
||||
template<uint8_t SIZE> class BLEEventPool {
|
||||
public:
|
||||
BLEEventPool() {
|
||||
// Initialize all slots as unallocated
|
||||
for (uint8_t i = 0; i < SIZE; i++) {
|
||||
this->events_[i] = nullptr;
|
||||
}
|
||||
|
||||
// Initialize the free list - all indices are initially free
|
||||
for (uint8_t i = 0; i < SIZE - 1; i++) {
|
||||
this->next_free_[i] = i + 1;
|
||||
}
|
||||
this->next_free_[SIZE - 1] = INVALID_INDEX;
|
||||
|
||||
this->free_head_.store(0, std::memory_order_relaxed);
|
||||
this->allocated_count_.store(0, std::memory_order_relaxed);
|
||||
this->total_created_.store(0, std::memory_order_relaxed);
|
||||
}
|
||||
BLEEventPool() : total_created_(0) {}
|
||||
|
||||
~BLEEventPool() {
|
||||
// Delete any events that were created
|
||||
for (uint8_t i = 0; i < SIZE; i++) {
|
||||
if (this->events_[i] != nullptr) {
|
||||
delete this->events_[i];
|
||||
}
|
||||
// Clean up any remaining events in the free list
|
||||
BLEEvent *event;
|
||||
while ((event = this->free_list_.pop()) != nullptr) {
|
||||
delete event;
|
||||
}
|
||||
}
|
||||
|
||||
// Allocate an event slot and return its index
|
||||
// Returns INVALID_INDEX if pool is full
|
||||
size_t allocate() {
|
||||
while (true) {
|
||||
uint8_t head = this->free_head_.load(std::memory_order_acquire);
|
||||
// Allocate an event from the pool
|
||||
// Returns nullptr if pool is full
|
||||
BLEEvent *allocate() {
|
||||
// Try to get from free list first
|
||||
BLEEvent *event = this->free_list_.pop();
|
||||
|
||||
if (head == INVALID_INDEX) {
|
||||
// Pool is full
|
||||
return INVALID_INDEX;
|
||||
if (event == nullptr) {
|
||||
// Need to create a new event
|
||||
if (this->total_created_ >= SIZE) {
|
||||
// Pool is at capacity
|
||||
return nullptr;
|
||||
}
|
||||
|
||||
uint8_t next = this->next_free_[head];
|
||||
|
||||
// Try to update the free list head
|
||||
if (this->free_head_.compare_exchange_weak(head, next, std::memory_order_release, std::memory_order_acquire)) {
|
||||
this->allocated_count_.fetch_add(1, std::memory_order_relaxed);
|
||||
return head;
|
||||
}
|
||||
// CAS failed, retry
|
||||
}
|
||||
}
|
||||
|
||||
// Deallocate an event slot by index
|
||||
void deallocate(size_t index) {
|
||||
if (index >= SIZE) {
|
||||
return; // Invalid index
|
||||
}
|
||||
|
||||
// No destructor call - events are reused
|
||||
// The event's reset methods handle cleanup when switching types
|
||||
|
||||
while (true) {
|
||||
uint8_t head = this->free_head_.load(std::memory_order_acquire);
|
||||
this->next_free_[index] = head;
|
||||
|
||||
// Try to add this index back to the free list
|
||||
if (this->free_head_.compare_exchange_weak(head, static_cast<uint8_t>(index), std::memory_order_release,
|
||||
std::memory_order_acquire)) {
|
||||
this->allocated_count_.fetch_sub(1, std::memory_order_relaxed);
|
||||
return;
|
||||
}
|
||||
// CAS failed, retry
|
||||
}
|
||||
}
|
||||
|
||||
// Get event by index, creating it if needed
|
||||
BLEEvent *get(size_t index) {
|
||||
if (index >= SIZE) {
|
||||
return nullptr;
|
||||
}
|
||||
|
||||
// Create event on first access (warm-up)
|
||||
if (this->events_[index] == nullptr) {
|
||||
// Use internal RAM for better performance
|
||||
RAMAllocator<BLEEvent> allocator(RAMAllocator<BLEEvent>::ALLOC_INTERNAL);
|
||||
BLEEvent *event = allocator.allocate(1);
|
||||
event = allocator.allocate(1);
|
||||
|
||||
if (event == nullptr) {
|
||||
// Fall back to regular allocation
|
||||
@@ -105,30 +50,39 @@ template<uint8_t SIZE> class BLEEventPool {
|
||||
new (event) BLEEvent();
|
||||
}
|
||||
|
||||
this->events_[index] = event;
|
||||
this->total_created_.fetch_add(1, std::memory_order_relaxed);
|
||||
this->total_created_++;
|
||||
}
|
||||
|
||||
return this->events_[index];
|
||||
return event;
|
||||
}
|
||||
|
||||
// Get number of allocated events
|
||||
size_t get_allocated_count() const { return this->allocated_count_.load(std::memory_order_relaxed); }
|
||||
// Return an event to the pool
|
||||
void deallocate(BLEEvent *event) {
|
||||
if (event == nullptr) {
|
||||
return;
|
||||
}
|
||||
|
||||
// Events are reused - the load methods handle cleanup
|
||||
// Just return to free list
|
||||
if (!this->free_list_.push(event)) {
|
||||
// This should not happen if pool size matches queue size
|
||||
// But if it does, delete the event to prevent leak
|
||||
delete event;
|
||||
}
|
||||
}
|
||||
|
||||
// Get total number of events created (high water mark)
|
||||
size_t get_total_created() const { return this->total_created_.load(std::memory_order_relaxed); }
|
||||
uint8_t get_total_created() const { return this->total_created_; }
|
||||
|
||||
static constexpr uint8_t INVALID_INDEX = 0xFF; // 255, which is > MAX_BLE_QUEUE_SIZE (64)
|
||||
// Get number of events in the free list
|
||||
size_t get_free_count() const { return this->free_list_.size(); }
|
||||
|
||||
private:
|
||||
BLEEvent *events_[SIZE]; // Array of pointers, allocated on demand
|
||||
uint8_t next_free_[SIZE]; // Next free index for each slot
|
||||
std::atomic<uint8_t> free_head_; // Head of the free list
|
||||
std::atomic<uint8_t> allocated_count_; // Number of currently allocated events
|
||||
std::atomic<uint8_t> total_created_; // Total events created (high water mark)
|
||||
LockFreeQueue<BLEEvent, SIZE> free_list_; // Free events ready for reuse
|
||||
uint8_t total_created_; // Total events created (high water mark)
|
||||
};
|
||||
|
||||
} // namespace esp32_ble
|
||||
} // namespace esphome
|
||||
|
||||
#endif
|
||||
#endif
|
@@ -1,81 +0,0 @@
|
||||
#pragma once
|
||||
|
||||
#ifdef USE_ESP32
|
||||
|
||||
#include <atomic>
|
||||
#include <cstddef>
|
||||
|
||||
namespace esphome {
|
||||
namespace esp32_ble {
|
||||
|
||||
// Lock-free SPSC queue that stores indices instead of pointers
|
||||
// This allows us to use a pre-allocated pool of objects
|
||||
// Lock-free single-producer / single-consumer (SPSC) ring buffer of slot
// indices: stores uint8_t indices into a pre-allocated pool instead of
// pointers, so the producer (BLE stack callback) and consumer (main loop)
// can exchange events without locks or heap allocation.
//
// Capacity note: one slot is deliberately left unused to distinguish
// "full" from "empty", so at most SIZE - 1 indices can be queued at once.
// Thread-safety holds only for exactly one pushing thread and one popping
// thread (per the SPSC design noted above).
template<uint8_t SIZE> class LockFreeIndexQueue {
 public:
  // Sentinel returned by pop() when empty and rejected by push(); must be
  // >= SIZE so it can never collide with a valid pool index.
  static constexpr uint8_t INVALID_INDEX = 0xFF;  // 255, which is > MAX_BLE_QUEUE_SIZE (64)

  LockFreeIndexQueue() : head_(0), tail_(0), dropped_count_(0) {
    // Initialize all slots to invalid
    for (uint8_t i = 0; i < SIZE; i++) {
      buffer_[i] = INVALID_INDEX;
    }
  }

  // Producer side: enqueue a pool index.
  // Returns false if the index is invalid/out-of-range, or if the queue is
  // full (in which case the drop is also counted).
  bool push(size_t index) {
    // Reject the sentinel and out-of-range values so pop() can never hand
    // back an index the pool cannot resolve.
    if (index == INVALID_INDEX || index >= SIZE)
      return false;

    // relaxed is sufficient here: only the producer ever writes tail_.
    uint8_t current_tail = tail_.load(std::memory_order_relaxed);
    uint8_t next_tail = (current_tail + 1) % SIZE;

    // acquire pairs with the consumer's release store of head_ in pop().
    if (next_tail == head_.load(std::memory_order_acquire)) {
      // Buffer full
      dropped_count_.fetch_add(1, std::memory_order_relaxed);
      return false;
    }

    buffer_[current_tail] = static_cast<uint8_t>(index);
    // release publishes the buffer_ write before the consumer can observe
    // the advanced tail.
    tail_.store(next_tail, std::memory_order_release);
    return true;
  }

  // Consumer side: dequeue the oldest index, or INVALID_INDEX if empty.
  size_t pop() {
    // relaxed is sufficient here: only the consumer ever writes head_.
    uint8_t current_head = head_.load(std::memory_order_relaxed);

    // acquire pairs with the producer's release store of tail_ in push().
    if (current_head == tail_.load(std::memory_order_acquire)) {
      return INVALID_INDEX;  // Empty
    }

    uint8_t index = buffer_[current_head];
    // release lets the producer safely reuse this slot once it observes the
    // advanced head.
    head_.store((current_head + 1) % SIZE, std::memory_order_release);
    return index;
  }

  // Approximate number of queued indices; can be momentarily stale when
  // called concurrently with push()/pop().
  size_t size() const {
    uint8_t tail = tail_.load(std::memory_order_acquire);
    uint8_t head = head_.load(std::memory_order_acquire);
    return (tail - head + SIZE) % SIZE;
  }

  // Returns the drops accumulated since the last call and atomically resets
  // the counter to zero.
  uint32_t get_and_reset_dropped_count() { return dropped_count_.exchange(0, std::memory_order_relaxed); }

  // Lets callers record a drop that happens before push() is ever reached
  // (e.g. when the backing pool is exhausted).
  void increment_dropped_count() { dropped_count_.fetch_add(1, std::memory_order_relaxed); }

  bool empty() const { return head_.load(std::memory_order_acquire) == tail_.load(std::memory_order_acquire); }

  // True when one more push() would fail (all SIZE - 1 usable slots taken).
  bool full() const {
    uint8_t next_tail = (tail_.load(std::memory_order_relaxed) + 1) % SIZE;
    return next_tail == head_.load(std::memory_order_acquire);
  }

 protected:
  uint8_t buffer_[SIZE];                 // ring storage for the queued indices
  std::atomic<uint8_t> head_;            // next slot to pop; written only by the consumer
  std::atomic<uint8_t> tail_;            // next slot to fill; written only by the producer
  std::atomic<uint32_t> dropped_count_;  // Keep this as uint32_t for larger counts
};
|
||||
|
||||
} // namespace esp32_ble
|
||||
} // namespace esphome
|
||||
|
||||
#endif
|
Reference in New Issue
Block a user